repo_name (string, lengths 6–130)
hexsha (list)
file_path (list)
code (list)
apis (list)
possible_versions (list)
rg314/autoballs
[ "21fab5c810f18c0d50c23051928d3bb86fbc6941" ]
[ "autoballs/network/dataloader.py" ]
[ "from autoballs.utils import get_img_from_seg\r\n\r\nimport cv2\r\nfrom PIL import Image\r\nimport numpy as np\r\nimport albumentations as albu\r\n\r\nfrom torch.utils.data import DataLoader\r\nfrom torch.utils.data import Dataset as BaseDataset\r\n\r\nclass Dataset(BaseDataset):\r\n \"\"\"Read images, apply augmentation and preprocessing transformations. \r\n Adopted for CamVid example\r\n \r\n Args:\r\n images_dir (str): path to images folder\r\n masks_dir (str): path to segmentation masks folder\r\n class_values (list): values of classes to extract from segmentation mask\r\n augmentation (albumentations.Compose): data transfromation pipeline \r\n (e.g. flip, scale, etc.)\r\n preprocessing (albumentations.Compose): data preprocessing \r\n (e.g. noralization, shape manipulation, etc.)\r\n \r\n \"\"\"\r\n \r\n CLASSES = ['background', 'cell', 'balls']\r\n \r\n def __init__(\r\n self, \r\n image_dir,\r\n masks_dir, \r\n classes=None, \r\n augmentation=None, \r\n preprocessing=None,\r\n in_channels=3,\r\n size=256,\r\n test=True\r\n ):\r\n self.ids = len(masks_dir)\r\n self.masks_fps = masks_dir\r\n self.images_fps = image_dir\r\n self.in_channels = in_channels\r\n self.test = test\r\n self.size = size\r\n \r\n # convert str names to class values on masks\r\n self.class_values = [self.CLASSES.index(cls.lower()) for cls in classes]\r\n \r\n self.augmentation = augmentation\r\n self.preprocessing = preprocessing\r\n \r\n def __getitem__(self, i):\r\n \r\n\r\n # read data\r\n image = cv2.imread(self.images_fps[i])\r\n image = np.asarray(image)[:,:,:3]\r\n\r\n if not self.test:\r\n mask = cv2.imread(self.masks_fps[i], 0)\r\n mask = 1.0 * (mask > 0)\r\n mask = mask.reshape(mask.shape[0], mask.shape[1],1 )\r\n \r\n else:\r\n mask = np.zeros((self.size, self.size, 1))\r\n\r\n # cv2.imwrite('test.png', mask)\r\n # image = image.reshape(image.shape + (1,))\r\n \r\n\r\n # extract certain classes from mask (e.g. 
cars)\r\n # masks = [(mask == v) for v in self.class_values]\r\n # mask = np.stack(masks, axis=-1).astype('float')\r\n\r\n\r\n # apply augmentations\r\n if self.augmentation:\r\n sample = self.augmentation(image=image, mask=mask)\r\n image, mask = sample['image'], sample['mask']\r\n \r\n # apply preprocessing\r\n if self.preprocessing:\r\n sample = self.preprocessing(image=image, mask=mask) \r\n image, mask = sample['image'], sample['mask']\r\n\r\n if self.in_channels != 3:\r\n image = image[:1,:,:]\r\n \r\n return image, mask\r\n \r\n def __len__(self):\r\n return self.ids\r\n\r\n\r\ndef get_training_augmentation(size):\r\n SIZE = size\r\n train_transform = [\r\n albu.Resize(SIZE,SIZE),\r\n\r\n albu.HorizontalFlip(p=0.5),\r\n albu.RandomRotate90(p=0.5),\r\n albu.Rotate(p=0.5),\r\n\r\n\r\n\r\n # albu.ShiftScaleRotate(scale_limit=0.5, rotate_limit=0, shift_limit=0.1, p=1, border_mode=0),\r\n\r\n # albu.PadIfNeeded(min_height=SIZE, min_width=SIZE, always_apply=True, border_mode=0),\r\n\r\n # albu.IAAAdditiveGaussianNoise(p=0.2),\r\n # albu.IAAPerspective(p=0.5),\r\n\r\n\r\n ]\r\n return albu.Compose(train_transform)\r\n\r\n\r\ndef get_validation_augmentation(size=256):\r\n \"\"\"Add paddings to make image shape divisible by 32\"\"\"\r\n SIZE = size\r\n test_transform = [\r\n albu.Resize(SIZE,SIZE),\r\n ]\r\n return albu.Compose(test_transform)\r\n\r\ndef get_test_augmentation(size=256):\r\n \"\"\"Add paddings to make image shape divisible by 32\"\"\"\r\n SIZE = size\r\n test_transform = [\r\n albu.Resize(SIZE,SIZE),\r\n ]\r\n return albu.Compose(test_transform)\r\n\r\n\r\ndef to_tensor(x, **kwargs):\r\n return x.transpose(2, 0, 1).astype('float32')\r\n\r\n\r\ndef get_preprocessing(preprocessing_fn):\r\n \"\"\"Construct preprocessing transform\r\n \r\n Args:\r\n preprocessing_fn (callbale): data normalization function \r\n (can be specific for each pretrained neural network)\r\n Return:\r\n transform: albumentations.Compose\r\n \r\n \"\"\"\r\n \r\n _transform = [\r\n albu.Lambda(image=preprocessing_fn),\r\n albu.Lambda(image=to_tensor, mask=to_tensor),\r\n ]\r\n return albu.Compose(_transform)" ]
[ [ "numpy.asarray", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
beneisner/pytorch_geometric
[ "53d44a96bd2de2753b1ab1d7153c026c92606a81", "befb6c616c1069381c0bdff4baf6e023fea587d6", "53d44a96bd2de2753b1ab1d7153c026c92606a81" ]
[ "torch_geometric/datasets/ged_dataset.py", "examples/graph_sage_unsup.py", "torch_geometric/datasets/flickr.py" ]
[ "from typing import Optional, Callable, List\n\nimport os\nimport os.path as osp\nimport glob\nimport pickle\n\nimport torch\nimport torch.nn.functional as F\nimport networkx as nx\nfrom torch_geometric.data import (InMemoryDataset, Data, download_url,\n extract_zip, extract_tar)\nfrom torch_geometric.utils import to_undirected\n\n\nclass GEDDataset(InMemoryDataset):\n r\"\"\"The GED datasets from the `\"Graph Edit Distance Computation via Graph\n Neural Networks\" <https://arxiv.org/abs/1808.05689>`_ paper.\n GEDs can be accessed via the global attributes :obj:`ged` and\n :obj:`norm_ged` for all train/train graph pairs and all train/test graph\n pairs:\n\n .. code-block:: python\n\n dataset = GEDDataset(root, name=\"LINUX\")\n data1, data2 = dataset[0], dataset[1]\n ged = dataset.ged[data1.i, data2.i] # GED between `data1` and `data2`.\n\n Note that GEDs are not available if both graphs are from the test set.\n For evaluation, it is recommended to pair up each graph from the test set\n with each graph in the training set.\n\n .. note::\n\n :obj:`ALKANE` is missing GEDs for train/test graph pairs since they are\n not provided in the `official datasets\n <https://github.com/yunshengb/SimGNN>`_.\n\n Args:\n root (string): Root directory where the dataset should be saved.\n name (string): The name of the dataset (one of :obj:`\"AIDS700nef\"`,\n :obj:`\"LINUX\"`, :obj:`\"ALKANE\"`, :obj:`\"IMDBMulti\"`).\n train (bool, optional): If :obj:`True`, loads the training dataset,\n otherwise the test dataset. (default: :obj:`True`)\n transform (callable, optional): A function/transform that takes in an\n :obj:`torch_geometric.data.Data` object and returns a transformed\n version. The data object will be transformed before every access.\n (default: :obj:`None`)\n pre_transform (callable, optional): A function/transform that takes in\n an :obj:`torch_geometric.data.Data` object and returns a\n transformed version. The data object will be transformed before\n being saved to disk. (default: :obj:`None`)\n pre_filter (callable, optional): A function that takes in an\n :obj:`torch_geometric.data.Data` object and returns a boolean\n value, indicating whether the data object should be included in the\n final dataset. 
(default: :obj:`None`)\n \"\"\"\n\n url = 'https://drive.google.com/uc?export=download&id={}'\n\n datasets = {\n 'AIDS700nef': {\n 'id': '10czBPJDEzEDI2tq7Z7mkBjLhj55F-a2z',\n 'extract': extract_zip,\n 'pickle': '1OpV4bCHjBkdpqI6H5Mg0-BqlA2ee2eBW',\n },\n 'LINUX': {\n 'id': '1nw0RRVgyLpit4V4XFQyDy0pI6wUEXSOI',\n 'extract': extract_tar,\n 'pickle': '14FDm3NSnrBvB7eNpLeGy5Bz6FjuCSF5v',\n },\n 'ALKANE': {\n 'id': '1-LmxaWW3KulLh00YqscVEflbqr0g4cXt',\n 'extract': extract_tar,\n 'pickle': '15BpvMuHx77-yUGYgM27_sQett02HQNYu',\n },\n 'IMDBMulti': {\n 'id': '12QxZ7EhYA7pJiF4cO-HuE8szhSOWcfST',\n 'extract': extract_zip,\n 'pickle': '1wy9VbZvZodkixxVIOuRllC-Lp-0zdoYZ',\n },\n }\n\n # List of atoms contained in the AIDS700nef dataset:\n types = [\n 'O', 'S', 'C', 'N', 'Cl', 'Br', 'B', 'Si', 'Hg', 'I', 'Bi', 'P', 'F',\n 'Cu', 'Ho', 'Pd', 'Ru', 'Pt', 'Sn', 'Li', 'Ga', 'Tb', 'As', 'Co', 'Pb',\n 'Sb', 'Se', 'Ni', 'Te'\n ]\n\n def __init__(self, root: str, name: str, train: bool = True,\n transform: Optional[Callable] = None,\n pre_transform: Optional[Callable] = None,\n pre_filter: Optional[Callable] = None):\n self.name = name\n assert self.name in self.datasets.keys()\n super().__init__(root, transform, pre_transform, pre_filter)\n path = self.processed_paths[0] if train else self.processed_paths[1]\n self.data, self.slices = torch.load(path)\n path = osp.join(self.processed_dir, f'{self.name}_ged.pt')\n self.ged = torch.load(path)\n path = osp.join(self.processed_dir, f'{self.name}_norm_ged.pt')\n self.norm_ged = torch.load(path)\n\n @property\n def raw_file_names(self) -> List[str]:\n return [osp.join(self.name, s) for s in ['train', 'test']]\n\n @property\n def processed_file_names(self) -> List[str]:\n return [f'{self.name}_{s}.pt' for s in ['training', 'test']]\n\n def download(self):\n name = self.datasets[self.name]['id']\n path = download_url(self.url.format(name), self.raw_dir)\n self.datasets[self.name]['extract'](path, self.raw_dir)\n os.unlink(path)\n\n name = self.datasets[self.name]['pickle']\n path = download_url(self.url.format(name), self.raw_dir)\n os.rename(path, osp.join(self.raw_dir, self.name, 'ged.pickle'))\n\n def process(self):\n ids, Ns = [], []\n for r_path, p_path in zip(self.raw_paths, self.processed_paths):\n names = glob.glob(osp.join(r_path, '*.gexf'))\n # Get the graph IDs given by the file name:\n ids.append(sorted([int(i.split(os.sep)[-1][:-5]) for i in names]))\n\n data_list = []\n # Convert graphs in .gexf format to a NetworkX Graph:\n for i, idx in enumerate(ids[-1]):\n i = i if len(ids) == 1 else i + len(ids[0])\n G = nx.read_gexf(osp.join(r_path, f'{idx}.gexf'))\n mapping = {name: j for j, name in enumerate(G.nodes())}\n G = nx.relabel_nodes(G, mapping)\n Ns.append(G.number_of_nodes())\n\n edge_index = torch.tensor(list(G.edges)).t().contiguous()\n if edge_index.numel() == 0:\n edge_index = torch.empty((2, 0), dtype=torch.long)\n edge_index = to_undirected(edge_index, num_nodes=Ns[-1])\n\n data = Data(edge_index=edge_index, i=i)\n data.num_nodes = Ns[-1]\n\n # Create a one-hot encoded feature matrix denoting the atom\n # type for the AIDS700nef dataset:\n if self.name == 'AIDS700nef':\n x = torch.zeros(data.num_nodes, dtype=torch.long)\n for node, info in G.nodes(data=True):\n x[int(node)] = self.types.index(info['type'])\n data.x = F.one_hot(x, num_classes=len(self.types)).to(\n torch.float)\n\n if self.pre_filter is not None and not self.pre_filter(data):\n continue\n\n if self.pre_transform is not None:\n data = self.pre_transform(data)\n\n 
data_list.append(data)\n\n torch.save(self.collate(data_list), p_path)\n\n assoc = {idx: i for i, idx in enumerate(ids[0])}\n assoc.update({idx: i + len(ids[0]) for i, idx in enumerate(ids[1])})\n\n path = osp.join(self.raw_dir, self.name, 'ged.pickle')\n mat = torch.full((len(assoc), len(assoc)), float('inf'))\n with open(path, 'rb') as f:\n obj = pickle.load(f)\n xs, ys, gs = [], [], []\n for (x, y), g in obj.items():\n xs += [assoc[x]]\n ys += [assoc[y]]\n gs += [g]\n x, y = torch.tensor(xs), torch.tensor(ys)\n g = torch.tensor(gs, dtype=torch.float)\n mat[x, y], mat[y, x] = g, g\n\n path = osp.join(self.processed_dir, f'{self.name}_ged.pt')\n torch.save(mat, path)\n\n # Calculate the normalized GEDs:\n N = torch.tensor(Ns, dtype=torch.float)\n norm_mat = mat / (0.5 * (N.view(-1, 1) + N.view(1, -1)))\n\n path = osp.join(self.processed_dir, f'{self.name}_norm_ged.pt')\n torch.save(norm_mat, path)\n\n def __repr__(self) -> str:\n return f'{self.name}({len(self)})'\n", "import os.path as osp\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch_cluster import random_walk\nfrom sklearn.linear_model import LogisticRegression\n\nimport torch_geometric.transforms as T\nfrom torch_geometric.nn import SAGEConv\nfrom torch_geometric.datasets import Planetoid\nfrom torch_geometric.data import NeighborSampler as RawNeighborSampler\n\nEPS = 1e-15\n\ndataset = 'Cora'\npath = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', dataset)\ndataset = Planetoid(path, dataset, transform=T.NormalizeFeatures())\ndata = dataset[0]\n\n\nclass NeighborSampler(RawNeighborSampler):\n def sample(self, batch):\n batch = torch.tensor(batch)\n row, col, _ = self.adj_t.coo()\n\n # For each node in `batch`, we sample a direct neighbor (as positive\n # example) and a random node (as negative example):\n pos_batch = random_walk(row, col, batch, walk_length=1,\n coalesced=False)[:, 1]\n\n neg_batch = torch.randint(0, self.adj_t.size(1), (batch.numel(), ),\n dtype=torch.long)\n\n batch = torch.cat([batch, pos_batch, neg_batch], dim=0)\n return super(NeighborSampler, self).sample(batch)\n\n\ntrain_loader = NeighborSampler(data.edge_index, sizes=[10, 10], batch_size=256,\n shuffle=True, num_nodes=data.num_nodes)\n\n\nclass SAGE(nn.Module):\n def __init__(self, in_channels, hidden_channels, num_layers):\n super(SAGE, self).__init__()\n self.num_layers = num_layers\n self.convs = nn.ModuleList()\n for i in range(num_layers):\n in_channels = in_channels if i == 0 else hidden_channels\n self.convs.append(SAGEConv(in_channels, hidden_channels))\n\n def forward(self, x, adjs):\n for i, (edge_index, _, size) in enumerate(adjs):\n x_target = x[:size[1]] # Target nodes are always placed first.\n x = self.convs[i]((x, x_target), edge_index)\n if i != self.num_layers - 1:\n x = x.relu()\n x = F.dropout(x, p=0.5, training=self.training)\n return x\n\n def full_forward(self, x, edge_index):\n for i, conv in enumerate(self.convs):\n x = conv(x, edge_index)\n if i != self.num_layers - 1:\n x = x.relu()\n x = F.dropout(x, p=0.5, training=self.training)\n return x\n\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nmodel = SAGE(data.num_node_features, hidden_channels=64, num_layers=2)\nmodel = model.to(device)\noptimizer = torch.optim.Adam(model.parameters(), lr=0.01)\nx, edge_index = data.x.to(device), data.edge_index.to(device)\n\n\ndef train():\n model.train()\n\n total_loss = 0\n for batch_size, n_id, adjs in train_loader:\n # `adjs` holds a list of `(edge_index, e_id, size)` 
tuples.\n adjs = [adj.to(device) for adj in adjs]\n optimizer.zero_grad()\n\n out = model(x[n_id], adjs)\n out, pos_out, neg_out = out.split(out.size(0) // 3, dim=0)\n\n pos_loss = F.logsigmoid((out * pos_out).sum(-1)).mean()\n neg_loss = F.logsigmoid(-(out * neg_out).sum(-1)).mean()\n loss = -pos_loss - neg_loss\n loss.backward()\n optimizer.step()\n\n total_loss += float(loss) * out.size(0)\n\n return total_loss / data.num_nodes\n\n\[email protected]_grad()\ndef test():\n model.eval()\n out = model.full_forward(x, edge_index).cpu()\n\n clf = LogisticRegression()\n clf.fit(out[data.train_mask], data.y[data.train_mask])\n\n val_acc = clf.score(out[data.val_mask], data.y[data.val_mask])\n test_acc = clf.score(out[data.test_mask], data.y[data.test_mask])\n\n return val_acc, test_acc\n\n\nfor epoch in range(1, 51):\n loss = train()\n val_acc, test_acc = test()\n print(f'Epoch: {epoch:03d}, Loss: {loss:.4f}, '\n f'Val: {val_acc:.4f}, Test: {test_acc:.4f}')\n", "from typing import Optional, Callable, List\n\nimport json\nimport os.path as osp\n\nimport torch\nimport numpy as np\nimport scipy.sparse as sp\nfrom google_drive_downloader import GoogleDriveDownloader as gdd\nfrom torch_geometric.data import InMemoryDataset, Data\n\n\nclass Flickr(InMemoryDataset):\n r\"\"\"The Flickr dataset from the `\"GraphSAINT: Graph Sampling Based\n Inductive Learning Method\" <https://arxiv.org/abs/1907.04931>`_ paper,\n containing descriptions and common properties of images.\n\n Args:\n root (string): Root directory where the dataset should be saved.\n transform (callable, optional): A function/transform that takes in an\n :obj:`torch_geometric.data.Data` object and returns a transformed\n version. The data object will be transformed before every access.\n (default: :obj:`None`)\n pre_transform (callable, optional): A function/transform that takes in\n an :obj:`torch_geometric.data.Data` object and returns a\n transformed version. The data object will be transformed before\n being saved to disk. 
(default: :obj:`None`)\n \"\"\"\n\n adj_full_id = '1crmsTbd1-2sEXsGwa2IKnIB7Zd3TmUsy'\n feats_id = '1join-XdvX3anJU_MLVtick7MgeAQiWIZ'\n class_map_id = '1uxIkbtg5drHTsKt-PAsZZ4_yJmgFmle9'\n role_id = '1htXCtuktuCW8TR8KiKfrFDAxUgekQoV7'\n\n def __init__(self, root: str, transform: Optional[Callable] = None,\n pre_transform: Optional[Callable] = None):\n super().__init__(root, transform, pre_transform)\n self.data, self.slices = torch.load(self.processed_paths[0])\n\n @property\n def raw_file_names(self) -> List[str]:\n return ['adj_full.npz', 'feats.npy', 'class_map.json', 'role.json']\n\n @property\n def processed_file_names(self) -> str:\n return 'data.pt'\n\n def download(self):\n path = osp.join(self.raw_dir, 'adj_full.npz')\n gdd.download_file_from_google_drive(self.adj_full_id, path)\n\n path = osp.join(self.raw_dir, 'feats.npy')\n gdd.download_file_from_google_drive(self.feats_id, path)\n\n path = osp.join(self.raw_dir, 'class_map.json')\n gdd.download_file_from_google_drive(self.class_map_id, path)\n\n path = osp.join(self.raw_dir, 'role.json')\n gdd.download_file_from_google_drive(self.role_id, path)\n\n def process(self):\n f = np.load(osp.join(self.raw_dir, 'adj_full.npz'))\n adj = sp.csr_matrix((f['data'], f['indices'], f['indptr']), f['shape'])\n adj = adj.tocoo()\n row = torch.from_numpy(adj.row).to(torch.long)\n col = torch.from_numpy(adj.col).to(torch.long)\n edge_index = torch.stack([row, col], dim=0)\n\n x = np.load(osp.join(self.raw_dir, 'feats.npy'))\n x = torch.from_numpy(x).to(torch.float)\n\n ys = [-1] * x.size(0)\n with open(osp.join(self.raw_dir, 'class_map.json')) as f:\n class_map = json.load(f)\n for key, item in class_map.items():\n ys[int(key)] = item\n y = torch.tensor(ys)\n\n with open(osp.join(self.raw_dir, 'role.json')) as f:\n role = json.load(f)\n\n train_mask = torch.zeros(x.size(0), dtype=torch.bool)\n train_mask[torch.tensor(role['tr'])] = True\n\n val_mask = torch.zeros(x.size(0), dtype=torch.bool)\n val_mask[torch.tensor(role['va'])] = True\n\n test_mask = torch.zeros(x.size(0), dtype=torch.bool)\n test_mask[torch.tensor(role['te'])] = True\n\n data = Data(x=x, edge_index=edge_index, y=y, train_mask=train_mask,\n val_mask=val_mask, test_mask=test_mask)\n\n data = data if self.pre_transform is None else self.pre_transform(data)\n\n torch.save(self.collate([data]), self.processed_paths[0])\n" ]
[ [ "torch.empty", "torch.zeros", "torch.load", "torch.tensor", "torch.save" ], [ "sklearn.linear_model.LogisticRegression", "torch.cat", "torch.nn.functional.dropout", "torch.nn.ModuleList", "torch.tensor", "torch.no_grad", "torch.cuda.is_available" ], [ "torch.load", "torch.from_numpy", "scipy.sparse.csr_matrix", "torch.tensor", "torch.stack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
mwshinn/paranoidscientist
[ "8dcb745f1f6164c74788c5c4eb003db99c42bbe7" ]
[ "paranoid/types/numeric.py" ]
[ "# Copyright 2018 Max Shinn <[email protected]>\n# \n# This file is part of Paranoid Scientist, and is available under the\n# MIT license. Please see LICENSE.txt in the root directory for more\n# information.\n\n__all__ = ['Numeric', 'ExtendedReal', 'Number', 'Integer', 'Natural0', 'Natural1', 'Range', 'RangeClosedOpen', 'RangeOpenClosed', 'RangeOpen', 'Positive0', 'Positive', 'NDArray']\nimport math\nfrom .base import Type, TypeFactory\n\ntry:\n import numpy as np\n NUMERIC_TYPES = (int, float, np.integer, np.floating)\n USE_NUMPY = True\nexcept ImportError:\n print(\"Warning: numpy not found. Numpy support disabled.\")\n NUMERIC_TYPES = (int, float)\n USE_NUMPY = False\n\nclass Numeric(Type):\n \"\"\"Any integer or float, including inf, -inf, and nan.\"\"\"\n def test(self, v):\n assert isinstance(v, NUMERIC_TYPES), \"Invalid numeric\"\n def test_numpy(self, v):\n assert isinstance(v.dtype.type(), np.floating) or \\\n isinstance(v.dtype.type(), np.integer), \"Invalid datatype\"\n def generate(self):\n # Check infinity, nan, 0, +/- numbers, a float, a small/big number\n yield math.inf # Check infs\n yield -math.inf\n yield math.nan # nan\n yield 0\n yield 1\n yield -1\n yield 3.141 # A float\n yield 1e-10 # A small number\n yield 1e10 # A big number\n if USE_NUMPY:\n yield np.inf\n yield -np.inf\n yield np.nan\n yield np.int0(0)\n yield np.uint16(1)\n yield np.int0(-1)\n yield np.float16(3.141)\n yield np.float64(.01)\n\nclass ExtendedReal(Type):\n \"\"\"Any integer or float, excluding nan.\"\"\"\n def test(self, v):\n assert isinstance(v, NUMERIC_TYPES), \"Invalid numeric\"\n assert not math.isnan(v), \"Number cannot be nan\"\n def test_numpy(self, v):\n assert isinstance(v.dtype.type(), np.floating) or \\\n isinstance(v.dtype.type(), np.integer), \"Invalid datatype\"\n assert not np.any(np.isnan(v)), \"Number cannot be nan\"\n def generate(self):\n # Check infinity, nan, 0, +/- numbers, a float, a small/big number\n yield math.inf # Check infs\n yield -math.inf\n yield 0\n yield 1\n yield -1\n yield 3.141 # A float\n yield 1e-10 # A small number\n yield 1e10 # A big number\n if USE_NUMPY:\n yield np.inf\n yield -np.inf\n yield np.int0(0)\n yield np.uint16(1)\n yield np.int0(-1)\n yield np.float16(3.141)\n yield np.float64(.01)\n\nclass Number(Type):\n \"\"\"Any integer or float, excluding inf, -inf, and nan.\"\"\"\n def test(self, v):\n assert isinstance(v, NUMERIC_TYPES), \"Invalid number\"\n assert math.isfinite(v), \"Number must not be nan or inf\"\n def test_numpy(self, v):\n assert isinstance(v.dtype.type(), np.floating) or \\\n isinstance(v.dtype.type(), np.integer), \"Invalid datatype\"\n assert np.all(np.isfinite(v)), \"Number cannot be nan or inf\"\n def generate(self):\n yield 0\n yield 1\n yield -1\n yield 3.141 # A float\n yield 1e-10 # A small number\n yield 1e10 # A large number\n if USE_NUMPY:\n yield np.int0(0)\n yield np.uint16(1)\n yield np.int0(-1)\n yield np.float16(3.141)\n yield np.float64(.01)\n yield np.float64(10)\n\nclass Integer(Type):\n \"\"\"Any integer.\"\"\"\n def test(self, v):\n assert isinstance(v, NUMERIC_TYPES), \"Invalid number\"\n assert not math.isinf(v), \"Number must be finite\"\n assert not math.isnan(v), \"Number cannot be nan\"\n assert v // 1 == v, \"Invalid integer\"\n def test_numpy(self, v):\n assert isinstance(v.dtype.type(), np.floating) or \\\n isinstance(v.dtype.type(), np.integer), \"Invalid datatype\"\n assert np.all(np.isfinite(v)), \"Number cannot be nan or inf\"\n assert np.all(v // 1 == v), \"Invalid integer\"\n def 
generate(self):\n yield -100\n yield -1\n yield 0\n yield 1\n yield 100\n if USE_NUMPY:\n yield np.int16(-10)\n yield np.int8(-1)\n yield np.int64(0)\n yield np.uint0(1)\n \nclass Natural0(Integer):\n \"\"\"Any natural number including 0.\"\"\"\n def test(self, v):\n super().test(v)\n assert v >= 0, \"Must be greater than or equal to 0\"\n def test_numpy(self, v):\n super().test_numpy(v)\n assert np.all(v >= 0), \"Must be greater than or equal to 0\"\n def generate(self):\n yield 0\n yield 1\n yield 10\n yield 100\n if USE_NUMPY:\n yield np.int16(10)\n yield np.int8(4)\n yield np.int64(0)\n yield np.uint0(1)\n\nclass Natural1(Integer):\n \"\"\"Any natural number excluding 0.\"\"\"\n def test(self, v):\n super().test(v)\n assert v > 0, \"Must be greater than 0\"\n def test_numpy(self, v):\n super().test_numpy(v)\n assert np.all(v > 0), \"Must be greater than 0\"\n def generate(self):\n yield 1\n yield 2\n yield 10\n yield 100\n if USE_NUMPY:\n yield np.int16(10)\n yield np.int8(4)\n yield np.int64(5)\n yield np.uint0(1)\n\nclass Range(Number):\n \"\"\"Any integer or float from `low` to `high`, inclusive.\n\n Note that this does NOT include correction for floating point\n roundoff errors. This is because, if there are floating point\n roundoff errors, some code may fail.\n \"\"\"\n def __init__(self, low, high):\n super().__init__(low, high)\n assert low in ExtendedReal() and high in ExtendedReal(), \"Invalid bounds\"\n assert low < high, \\\n \"Low %s must be strictly greater than high %s\" % (low, high)\n assert not (math.isinf(low) and math.isinf(high)), \\\n \"Both bounds can't be inf\"\n self.low = low if low is not None else -math.inf\n self.high = high if low is not None else math.inf\n def test(self, v):\n super().test(v)\n assert self.low <= v <= self.high, \"Value %f must be greater\" \\\n \"than %f and less than %f\" % (v, self.low, self.high)\n def test_numpy(self, v):\n super().test_numpy(v)\n assert np.all(self.low <= v), \"Values %s must be greater\" \\\n \"than %f\" % (repr(v), self.low)\n assert np.all(v <= self.high), \"Values %s must be less\" \\\n \"than %f\" % (repr(v), self.high)\n def generate(self):\n EPSILON = 1e-5\n if not math.isinf(self.low):\n yield self.low\n yield self.low + EPSILON\n if not math.isinf(self.high):\n yield self.high\n yield self.high - EPSILON\n if not (math.isinf(self.low) or math.isinf(self.high)):\n l = self.low\n h = self.high\n yield l + (h-l)*.25\n yield l + (h-l)*.5\n yield l + (h-l)*.75\n\nclass RangeClosedOpen(Range):\n \"\"\"A half open interval from `low` (closed) to `high` (open).\"\"\"\n def test(self, v):\n super().test(v)\n assert v != self.high, \"Value must be strictly less than %f\" % self.high\n def test_numpy(self, v):\n super().test_numpy(v)\n assert np.all(v != self.high), \"Values must be strictly less than %f\" % self.high\n def generate(self):\n for v in super().generate():\n if v != self.high:\n yield v\n\nclass RangeOpenClosed(Range):\n \"\"\"A half open interval from `low` (open) to `high` (closed).\"\"\"\n def test(self, v):\n super().test(v)\n assert v != self.low, \"Value must be strictly greater than %f\" % self.low\n def test_numpy(self, v):\n super().test_numpy(v)\n assert np.all(v != self.low), \"Values must be strictly greater than %f\" % self.low\n def generate(self):\n for v in super().generate():\n if v != self.low:\n yield v\n\nclass RangeOpen(Range):\n \"\"\"Any number in the open interval from `low` to `high`.\"\"\"\n def test(self, v):\n super().test(v)\n assert v != self.low, \"Value must be strictly 
greater than %f\" % self.low\n assert v != self.high, \"Value must be strictly less than %f\" % self.high\n def test_numpy(self, v):\n super().test_numpy(v)\n assert np.all(v != self.low), \"Value must be strictly greater than %f\" % self.low\n assert np.all(v != self.high), \"Value must be strictly less than %f\" % self.high\n def generate(self):\n for v in Range.generate(self):\n if not v in [self.low, self.high]:\n yield v\n\nclass Positive0(Number):\n \"\"\"A positive number, including zero.\"\"\"\n def test(self, v):\n super().test(v)\n assert v >= 0, \"Value must be non-negative\"\n def test_numpy(self, v):\n super().test_numpy(v)\n assert np.all(v >= 0), \"Values must be non-negative\"\n def generate(self):\n yield 4.3445 # A float\n yield 1\n yield 10\n yield 0\n\nclass Positive(Number):\n \"\"\"A positive number, excluding zero.\"\"\"\n def test(self, v):\n super().test(v)\n assert v > 0, \"Value must be positive\"\n def test_numpy(self, v):\n super().test_numpy(v)\n assert np.all(v > 0), \"Values must be positive\"\n def generate(self):\n yield 4.3445 # A float\n yield 1\n yield 10\n\nclass NDArray(Type):\n \"\"\"A numpy ndarray of dimension `d` and type `t`.\"\"\"\n def __init__(self, d=None, t=None):\n super().__init__(d=d, t=t)\n assert USE_NUMPY, \"Numpy support not enabled\"\n if d is not None:\n assert (d in Integer()) and d>0, \"Invalid dimension\"\n # TODO support non-numeric types\n \n # Create a function for testing whether values are an instance\n # of the specified type. If no type is specified, the\n # function should always return true. If a type is specified,\n # check if it has the special \"test_numpy\" method. If so, use\n # that on the full array. (This can substantially improve\n # performance.) If not, test each value individually.\n if t is not None:\n assert isinstance(TypeFactory(t), Type)\n self.type = TypeFactory(t)\n if hasattr(self.type, \"test_numpy\"):\n self.testfunc = self.type.test_numpy\n else:\n def testfunc(x):\n for xv in x.flatten():\n assert xv in self.type, \\\n \"Array value %s is not of type %s\" % (xv, repr(self.type))\n self.testfunc = testfunc\n else:\n self.type = None\n self.testfunc = lambda x : True\n self.d = d\n def test(self, v):\n super().test(v)\n assert isinstance(v, np.ndarray), \"V is not an NDArray, it is a \" + str(type(v))\n if self.d is not None:\n assert len(v.shape) == self.d\n self.testfunc(v)\n def generate(self):\n # TODO fix, and more of these\n if self.type:\n vals = [e for e in self.type.generate()]\n else:\n vals = [3, 4, 5, 6, 7, 8, 9, 10]\n if self.d:\n dimspecs = [tuple([5]*self.d)]\n else:\n dimspecs = [(20,), (5,5), (3,3,3), (200,)]\n # Check basic values\n if not self.type or 0 in self.type:\n yield np.zeros(dimspecs[0], dtype=np.float64)\n if not self.type or 1 in self.type:\n yield np.ones(dimspecs[0], dtype=np.int32)\n if not self.type or -1 in self.type:\n yield -np.ones(dimspecs[0])\n if not self.type or np.nan in self.type:\n yield np.ones(dimspecs[0])*np.nan\n if not self.type or np.inf in self.type:\n yield np.ones(dimspecs[0])*np.inf\n if not self.type or -np.inf in self.type:\n yield np.ones(dimspecs[0])*-np.inf\n # Check all dimensions\n for d in dimspecs:\n yield np.tile(vals[0], d)\n # Check for arrays with not a single value\n lenneeded = int(np.prod(dimspecs[0]))\n copies = int(np.ceil(lenneeded/len(vals)))\n yield np.reshape((vals*copies)[0:lenneeded], dimspecs[0])\n\n" ]
[ [ "numpy.int0", "numpy.isfinite", "numpy.reshape", "numpy.isnan", "numpy.float16", "numpy.int8", "numpy.tile", "numpy.int16", "numpy.all", "numpy.int64", "numpy.ones", "numpy.uint16", "numpy.float64", "numpy.prod", "numpy.uint0", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
TaehoLi/ssd_pytorch
[ "30554d2211f24770b562a14de12650515613b52a" ]
[ "train.py" ]
[ "from data import *\nfrom utils.augmentations import SSDAugmentation\nfrom layers.modules import MultiBoxLoss\nfrom ssd import build_ssd\nimport os\nimport sys\nimport time\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\nimport torch.nn.init as init\nimport torch.utils.data as data\nimport numpy as np\nimport argparse\nimport visdom\n\nviz = visdom.Visdom()\n\ndef str2bool(v):\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\n\nparser = argparse.ArgumentParser(\n description='Single Shot MultiBox Detector Training With Pytorch')\ntrain_set = parser.add_mutually_exclusive_group()\nparser.add_argument('--dataset', default='VOC', choices=['VOC', 'COCO'],\n type=str, help='VOC or COCO')\nparser.add_argument('--dataset_root', default=VOC_ROOT,\n help='Dataset root directory path')\nparser.add_argument('--basenet', default='vgg16_reducedfc.pth',\n help='Pretrained base model')\nparser.add_argument('--batch_size', default=32, type=int,\n help='Batch size for training')\nparser.add_argument('--resume', default=None, type=str,\n help='Checkpoint state_dict file to resume training from')\nparser.add_argument('--start_iter', default=0, type=int,\n help='Resume training at this iter')\nparser.add_argument('--num_workers', default=4, type=int,\n help='Number of workers used in dataloading')\nparser.add_argument('--cuda', default=True, type=str2bool,\n help='Use CUDA to train model')\nparser.add_argument('--lr', '--learning-rate', default=1e-3, type=float,\n help='initial learning rate')\nparser.add_argument('--momentum', default=0.9, type=float,\n help='Momentum value for optim')\nparser.add_argument('--weight_decay', default=5e-4, type=float,\n help='Weight decay for SGD')\nparser.add_argument('--gamma', default=0.1, type=float,\n help='Gamma update for SGD')\nparser.add_argument('--visdom', default=False, type=str2bool,\n help='Use visdom for loss visualization')\nparser.add_argument('--save_folder', default='weights/',\n help='Directory for saving checkpoint models')\nargs = parser.parse_args()\n\n\nif torch.cuda.is_available():\n if args.cuda:\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n if not args.cuda:\n print(\"WARNING: It looks like you have a CUDA device, but aren't \" +\n \"using CUDA.\\nRun with --cuda for optimal training speed.\")\n torch.set_default_tensor_type('torch.FloatTensor')\nelse:\n torch.set_default_tensor_type('torch.FloatTensor')\n\nif not os.path.exists(args.save_folder):\n os.mkdir(args.save_folder)\n\n\ndef train():\n if args.dataset == 'COCO':\n if args.dataset_root == VOC_ROOT:\n if not os.path.exists(COCO_ROOT):\n parser.error('Must specify dataset_root if specifying dataset')\n print(\"WARNING: Using default COCO dataset_root because \" +\n \"--dataset_root was not specified.\")\n args.dataset_root = COCO_ROOT\n cfg = coco\n dataset = COCODetection(root=args.dataset_root,\n transform=SSDAugmentation(cfg['min_dim'],\n MEANS))\n elif args.dataset == 'VOC':\n if args.dataset_root == COCO_ROOT:\n parser.error('Must specify dataset if specifying dataset_root')\n cfg = voc\n dataset = VOCDetection(root=args.dataset_root,\n transform=SSDAugmentation(cfg['min_dim'],\n MEANS))\n\n #if args.visdom:\n #import visdom\n #viz = visdom.Visdom()\n\n ssd_net = build_ssd('train', cfg['min_dim'], cfg['num_classes'])\n net = ssd_net\n\n if args.cuda:\n net = torch.nn.DataParallel(ssd_net)\n cudnn.benchmark = True\n\n if args.resume:\n print('Resuming training, loading 
{}...'.format(args.resume))\n ssd_net.load_weights(args.resume)\n else:\n vgg_weights = torch.load(args.save_folder + args.basenet)\n print('Loading base network...')\n ssd_net.vgg.load_state_dict(vgg_weights)\n\n if args.cuda:\n net = net.cuda()\n\n if not args.resume:\n print('Initializing weights...')\n # initialize newly added layers' weights with xavier method\n ssd_net.extras.apply(weights_init)\n ssd_net.loc.apply(weights_init)\n ssd_net.conf.apply(weights_init)\n\n optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum,\n weight_decay=args.weight_decay)\n criterion = MultiBoxLoss(cfg['num_classes'], 0.5, True, 0, True, 3, 0.5,\n False, args.cuda)\n\n net.train()\n # loss counters\n loc_loss = 0\n conf_loss = 0\n epoch = 0\n print('Loading the dataset...')\n\n epoch_size = len(dataset) // args.batch_size\n print('Training SSD on:', dataset.name)\n print('Using the specified args:')\n print(args)\n\n step_index = 0\n\n if args.visdom:\n vis_title = 'SSD.PyTorch on ' + dataset.name\n vis_legend = ['Loc Loss', 'Conf Loss', 'Total Loss']\n iter_plot = create_vis_plot('Iteration', 'Loss', vis_title, vis_legend)\n epoch_plot = create_vis_plot('Epoch', 'Loss', vis_title, vis_legend)\n\n data_loader = data.DataLoader(dataset, args.batch_size,\n num_workers=args.num_workers,\n shuffle=True, collate_fn=detection_collate,\n pin_memory=True)\n # create batch iterator\n batch_iterator = iter(data_loader)\n for iteration in range(args.start_iter, cfg['max_iter']):\n if args.visdom and iteration != 0 and (iteration % epoch_size == 0):\n update_vis_plot(epoch, loc_loss, conf_loss, epoch_plot, None,\n 'append', epoch_size)\n # reset epoch loss counters\n loc_loss = 0\n conf_loss = 0\n epoch += 1\n\n if iteration in cfg['lr_steps']:\n step_index += 1\n adjust_learning_rate(optimizer, args.gamma, step_index)\n\n # load train data\n images, targets = next(batch_iterator)\n\n if args.cuda:\n images = Variable(images.cuda())\n targets = [Variable(ann.cuda(), volatile=True) for ann in targets]\n else:\n images = Variable(images)\n targets = [Variable(ann, volatile=True) for ann in targets]\n # forward\n t0 = time.time()\n out = net(images)\n # backprop\n optimizer.zero_grad()\n loss_l, loss_c = criterion(out, targets)\n loss = loss_l + loss_c\n loss.backward()\n optimizer.step()\n t1 = time.time()\n loc_loss += loss_l.data.item()\n conf_loss += loss_c.data.item()\n\n if iteration % 10 == 0:\n print('timer: %.4f sec.' 
% (t1 - t0))\n print('iter ' + repr(iteration) + ' || Loss: %.4f ||' % (loss.data.item()), end=' ')\n\n if args.visdom:\n update_vis_plot(iteration, loss_l.data.item(), loss_c.data.item(),\n iter_plot, epoch_plot, 'append')\n\n if iteration != 0 and iteration % 5000 == 0:\n print('Saving state, iter:', iteration)\n torch.save(ssd_net.state_dict(), 'weights/ssd300_COCO_' +\n repr(iteration) + '.pth')\n torch.save(ssd_net.state_dict(),\n args.save_folder + '' + args.dataset + '.pth')\n\n\ndef adjust_learning_rate(optimizer, gamma, step):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 at every\n specified step\n # Adapted from PyTorch Imagenet example:\n # https://github.com/pytorch/examples/blob/master/imagenet/main.py\n \"\"\"\n lr = args.lr * (gamma ** (step))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\ndef xavier(param):\n init.xavier_uniform(param)\n\n\ndef weights_init(m):\n if isinstance(m, nn.Conv2d):\n xavier(m.weight.data)\n m.bias.data.zero_()\n\n\ndef create_vis_plot(_xlabel, _ylabel, _title, _legend):\n return viz.line(\n X=torch.zeros((1,)).cpu(),\n Y=torch.zeros((1, 3)).cpu(),\n opts=dict(\n xlabel=_xlabel,\n ylabel=_ylabel,\n title=_title,\n legend=_legend\n )\n )\n\n\ndef update_vis_plot(iteration, loc, conf, window1, window2, update_type,\n epoch_size=1):\n viz.line(\n X=torch.ones((1, 3)).cpu() * iteration,\n Y=torch.Tensor([loc, conf, loc + conf]).unsqueeze(0).cpu() / epoch_size,\n win=window1,\n update=update_type\n )\n # initialize epoch plot on first iteration\n if iteration == 0:\n viz.line(\n X=torch.zeros((1, 3)).cpu(),\n Y=torch.Tensor([loc, conf, loc + conf]).unsqueeze(0).cpu(),\n win=window2,\n update=True\n )\n\n\nif __name__ == '__main__':\n train()\n" ]
[ [ "torch.set_default_tensor_type", "torch.ones", "torch.Tensor", "torch.load", "torch.zeros", "torch.utils.data.DataLoader", "torch.cuda.is_available", "torch.nn.DataParallel", "torch.nn.init.xavier_uniform", "torch.autograd.Variable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jrpespinas/numerical-analysis
[ "00fea39c4879893dd60e4a2b7b4dbcb5114234ea" ]
[ "systems_of_linear_equations/gaussian_elimination.py" ]
[ "\"\"\"Gaussian Elimination\"\"\"\n\nimport numpy as np\n\n\ndef gaussian_elimination(matrix: np.ndarray):\n return matrix\n\n\ndef main():\n matrix = np.array([[4, 8, -4, 4],\n [3, 8, 5, -11],\n [-2, 1, 12, -17]])\n\n values = gaussian_elimination(matrix)\n print(values)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sanketsaurav/flytekit
[ "f901aee721847c6264d44079d4fa31a75b8876e1" ]
[ "tests/flytekit/unit/common_tests/types/impl/test_schema.py" ]
[ "from __future__ import absolute_import\n\nimport collections as _collections\nimport os as _os\nimport pytest as _pytest\nimport pandas as _pd\nimport uuid as _uuid\nimport datetime as _datetime\nfrom flytekit.common.types.impl import schema as _schema_impl\nfrom flytekit.common.types import primitives as _primitives, blobs as _blobs\nfrom flytekit.common import utils as _utils\nfrom flytekit.models import types as _type_models, literals as _literal_models\nfrom flytekit.sdk import test_utils as _test_utils\nimport six.moves as _six_moves\n\n\ndef test_schema_type():\n _schema_impl.SchemaType()\n _schema_impl.SchemaType([])\n _schema_impl.SchemaType([\n ('a', _primitives.Integer),\n ('b', _primitives.String),\n ('c', _primitives.Float),\n ('d', _primitives.Boolean),\n ('e', _primitives.Datetime)\n ])\n\n with _pytest.raises(ValueError):\n _schema_impl.SchemaType({'a': _primitives.Integer})\n\n with _pytest.raises(TypeError):\n _schema_impl.SchemaType([('a', _blobs.Blob)])\n\n with _pytest.raises(ValueError):\n _schema_impl.SchemaType([('a', _primitives.Integer, 1)])\n\n _schema_impl.SchemaType([('1', _primitives.Integer)])\n with _pytest.raises(TypeError):\n _schema_impl.SchemaType([(1, _primitives.Integer)])\n\n with _pytest.raises(TypeError):\n _schema_impl.SchemaType([('1', [_primitives.Integer])])\n\n\nvalue_type_tuples = [\n ('abra', _primitives.Integer, [1, 2, 3, 4, 5]),\n ('CADABRA', _primitives.Float, [1.0, 2.0, 3.0, 4.0, 5.0]),\n ('HoCuS', _primitives.String, [\"A\", \"B\", \"C\", \"D\", \"E\"]),\n ('Pocus', _primitives.Boolean, [True, False, True, False]),\n (\n 'locusts',\n _primitives.Datetime,\n [\n _datetime.datetime(day=1, month=1, year=2017, hour=1, minute=1, second=1, microsecond=1) -\n _datetime.timedelta(days=i)\n for i in _six_moves.range(5)\n ]\n )\n]\n\n\n@_pytest.mark.parametrize(\"value_type_pair\", value_type_tuples)\ndef test_simple_read_and_write_with_different_types(value_type_pair):\n column_name, flyte_type, values = value_type_pair\n values = [tuple([value]) for value in values]\n schema_type = _schema_impl.SchemaType(columns=[(column_name, flyte_type)])\n\n with _test_utils.LocalTestFileSystem() as sandbox:\n with _utils.AutoDeletingTempDir(\"test\") as t:\n a = _schema_impl.Schema.create_at_known_location(t.name, mode='wb', schema_type=schema_type)\n assert a.local_path is None\n with a as writer:\n for _ in _six_moves.range(5):\n writer.write(_pd.DataFrame.from_records(values, columns=[column_name]))\n assert a.local_path.startswith(sandbox.name)\n assert a.local_path is None\n\n b = _schema_impl.Schema.create_at_known_location(t.name, mode='rb', schema_type=schema_type)\n assert b.local_path is None\n with b as reader:\n for df in reader.iter_chunks():\n for check, actual in _six_moves.zip(values, df[column_name].tolist()):\n assert check[0] == actual\n assert reader.read() is None\n reader.seek(0)\n df = reader.read(concat=True)\n for iter_count, actual in enumerate(df[column_name].tolist()):\n assert values[iter_count % len(values)][0] == actual\n assert b.local_path.startswith(sandbox.name)\n assert b.local_path is None\n\n\ndef test_datetime_coercion_explicitly():\n \"\"\"\n Sanity check that we're using a version of pyarrow that allows us to\n truncate timestamps\n \"\"\"\n dt = _datetime.datetime(day=1, month=1, year=2017, hour=1, minute=1, second=1, microsecond=1)\n values = [(dt,)]\n df = _pd.DataFrame.from_records(values, columns=['testname'])\n assert df['testname'][0] == dt\n\n with _utils.AutoDeletingTempDir('test') as tmpdir:\n tmpfile = 
tmpdir.get_named_tempfile('repro.parquet')\n df.to_parquet(tmpfile, coerce_timestamps='ms', allow_truncated_timestamps=True)\n df2 = _pd.read_parquet(tmpfile)\n\n dt2 = _datetime.datetime(day=1, month=1, year=2017, hour=1, minute=1, second=1)\n assert df2['testname'][0] == dt2\n\n\ndef test_datetime_coercion():\n values = [\n tuple([_datetime.datetime(day=1, month=1, year=2017, hour=1, minute=1, second=1, microsecond=1) -\n _datetime.timedelta(days=x)])\n for x in _six_moves.range(5)\n ]\n schema_type = _schema_impl.SchemaType(columns=[('testname', _primitives.Datetime)])\n\n with _test_utils.LocalTestFileSystem():\n with _utils.AutoDeletingTempDir(\"test\") as t:\n a = _schema_impl.Schema.create_at_known_location(t.name, mode='wb', schema_type=schema_type)\n with a as writer:\n for _ in _six_moves.range(5):\n # us to ms coercion segfaults unless we explicitly allow truncation.\n writer.write(\n _pd.DataFrame.from_records(values, columns=['testname']),\n coerce_timestamps='ms',\n allow_truncated_timestamps=True)\n\n # TODO: Uncomment when segfault bug is resolved\n # with _pytest.raises(Exception):\n # writer.write(\n # _pd.DataFrame.from_records(values, columns=['testname']),\n # coerce_timestamps='ms')\n\n b = _schema_impl.Schema.create_at_known_location(t.name, mode='wb', schema_type=schema_type)\n with b as writer:\n for _ in _six_moves.range(5):\n writer.write(_pd.DataFrame.from_records(values, columns=['testname']))\n\n\n@_pytest.mark.parametrize(\"value_type_pair\", value_type_tuples)\ndef test_fetch(value_type_pair):\n column_name, flyte_type, values = value_type_pair\n values = [tuple([value]) for value in values]\n schema_type = _schema_impl.SchemaType(columns=[(column_name, flyte_type)])\n\n with _utils.AutoDeletingTempDir(\"test\") as tmpdir:\n for i in _six_moves.range(3):\n _pd.DataFrame.from_records(values, columns=[column_name]).to_parquet(\n tmpdir.get_named_tempfile(str(i).zfill(6)), coerce_timestamps='us')\n\n with _utils.AutoDeletingTempDir(\"test2\") as local_dir:\n schema_obj = _schema_impl.Schema.fetch(\n tmpdir.name,\n local_path=local_dir.get_named_tempfile('schema_test'),\n schema_type=schema_type\n )\n with schema_obj as reader:\n for df in reader.iter_chunks():\n for check, actual in _six_moves.zip(values, df[column_name].tolist()):\n assert check[0] == actual\n assert reader.read() is None\n reader.seek(0)\n df = reader.read(concat=True)\n for iter_count, actual in enumerate(df[column_name].tolist()):\n assert values[iter_count % len(values)][0] == actual\n\n\n@_pytest.mark.parametrize(\"value_type_pair\", value_type_tuples)\ndef test_download(value_type_pair):\n column_name, flyte_type, values = value_type_pair\n values = [tuple([value]) for value in values]\n schema_type = _schema_impl.SchemaType(columns=[(column_name, flyte_type)])\n\n with _utils.AutoDeletingTempDir(\"test\") as tmpdir:\n for i in _six_moves.range(3):\n _pd.DataFrame.from_records(values, columns=[column_name]).to_parquet(\n tmpdir.get_named_tempfile(str(i).zfill(6)), coerce_timestamps='us')\n\n with _utils.AutoDeletingTempDir(\"test2\") as local_dir:\n schema_obj = _schema_impl.Schema(tmpdir.name, schema_type=schema_type)\n schema_obj.download(local_dir.get_named_tempfile(_uuid.uuid4().hex))\n with schema_obj as reader:\n for df in reader.iter_chunks():\n for check, actual in _six_moves.zip(values, df[column_name].tolist()):\n assert check[0] == actual\n assert reader.read() is None\n reader.seek(0)\n df = reader.read(concat=True)\n for iter_count, actual in 
enumerate(df[column_name].tolist()):\n assert values[iter_count % len(values)][0] == actual\n\n with _pytest.raises(Exception):\n schema_obj = _schema_impl.Schema(tmpdir.name, schema_type=schema_type)\n schema_obj.download()\n\n with _test_utils.LocalTestFileSystem():\n schema_obj = _schema_impl.Schema(tmpdir.name, schema_type=schema_type)\n schema_obj.download()\n with schema_obj as reader:\n for df in reader.iter_chunks():\n for check, actual in _six_moves.zip(values, df[column_name].tolist()):\n assert check[0] == actual\n assert reader.read() is None\n reader.seek(0)\n df = reader.read(concat=True)\n for iter_count, actual in enumerate(df[column_name].tolist()):\n assert values[iter_count % len(values)][0] == actual\n\n\ndef test_hive_queries(monkeypatch):\n def return_deterministic_uuid():\n class FakeUUID4(object):\n def __init__(self):\n self.hex = 'test_uuid'\n\n class Uuid(object):\n def uuid4(self):\n return FakeUUID4()\n\n return Uuid()\n\n monkeypatch.setattr(_schema_impl, '_uuid', return_deterministic_uuid())\n\n all_types = _schema_impl.SchemaType([\n ('a', _primitives.Integer),\n ('b', _primitives.String),\n ('c', _primitives.Float),\n ('d', _primitives.Boolean),\n ('e', _primitives.Datetime)\n ])\n\n with _test_utils.LocalTestFileSystem():\n df, query = _schema_impl.Schema.create_from_hive_query(\n \"SELECT a, b, c, d, e FROM some_place WHERE i = 0\",\n stage_query=\"CREATE TEMPORARY TABLE some_place AS SELECT * FROM some_place_original\",\n known_location=\"s3://my_fixed_path/\",\n schema_type=all_types\n )\n\n full_query = \"\"\"\n CREATE TEMPORARY TABLE some_place AS SELECT * FROM some_place_original;\n CREATE TEMPORARY TABLE test_uuid_tmp AS SELECT a, b, c, d, e FROM some_place WHERE i = 0;\n CREATE EXTERNAL TABLE test_uuid LIKE test_uuid_tmp STORED AS PARQUET;\n ALTER TABLE test_uuid SET LOCATION 's3://my_fixed_path/';\n INSERT OVERWRITE TABLE test_uuid\n SELECT\n a as a,\n b as b,\n CAST(c as double) c,\n d as d,\n e as e\n FROM test_uuid_tmp;\n DROP TABLE test_uuid;\n \"\"\"\n full_query = \" \".join(full_query.split())\n query = \" \".join(query.split())\n assert query == full_query\n\n # Test adding partition\n full_query = \"\"\"\n ALTER TABLE some_table ADD IF NOT EXISTS PARTITION (\n region = 'SEA',\n ds = '2017-01-01'\n ) LOCATION 's3://my_fixed_path/';\n ALTER TABLE some_table PARTITION (\n region = 'SEA',\n ds = '2017-01-01'\n ) SET LOCATION 's3://my_fixed_path/';\n \"\"\"\n query = df.get_write_partition_to_hive_table_query(\n 'some_table',\n partitions=_collections.OrderedDict([('region', 'SEA'), ('ds', '2017-01-01')]))\n full_query = \" \".join(full_query.split())\n query = \" \".join(query.split())\n assert query == full_query\n\n\ndef test_partial_column_read():\n with _test_utils.LocalTestFileSystem():\n a = _schema_impl.Schema.create_at_any_location(\n schema_type=_schema_impl.SchemaType([('a', _primitives.Integer), ('b', _primitives.Integer)])\n )\n with a as writer:\n writer.write(_pd.DataFrame.from_dict({'a': [1, 2, 3, 4], 'b': [5, 6, 7, 8]}))\n\n b = _schema_impl.Schema.fetch(\n a.uri,\n schema_type=_schema_impl.SchemaType([('a', _primitives.Integer), ('b', _primitives.Integer)])\n )\n with b as reader:\n df = reader.read(columns=['b'])\n assert df.columns.values == ['b']\n assert df['b'].tolist() == [5, 6, 7, 8]\n\n\ndef test_casting():\n pass\n\n\ndef test_from_python_std():\n pass\n\n\ndef test_promote_from_model_schema_type():\n m = _type_models.SchemaType(\n [\n _type_models.SchemaType.SchemaColumn(\n \"a\",\n 
_type_models.SchemaType.SchemaColumn.SchemaColumnType.BOOLEAN\n ),\n _type_models.SchemaType.SchemaColumn(\n \"b\",\n _type_models.SchemaType.SchemaColumn.SchemaColumnType.DATETIME\n ),\n _type_models.SchemaType.SchemaColumn(\n \"c\",\n _type_models.SchemaType.SchemaColumn.SchemaColumnType.DURATION\n ),\n _type_models.SchemaType.SchemaColumn(\n \"d\",\n _type_models.SchemaType.SchemaColumn.SchemaColumnType.FLOAT\n ),\n _type_models.SchemaType.SchemaColumn(\n \"e\",\n _type_models.SchemaType.SchemaColumn.SchemaColumnType.INTEGER\n ),\n _type_models.SchemaType.SchemaColumn(\n \"f\",\n _type_models.SchemaType.SchemaColumn.SchemaColumnType.STRING\n ),\n ]\n )\n s = _schema_impl.SchemaType.promote_from_model(m)\n assert s.columns == m.columns\n assert s.sdk_columns['a'].to_flyte_literal_type() == _primitives.Boolean.to_flyte_literal_type()\n assert s.sdk_columns['b'].to_flyte_literal_type() == _primitives.Datetime.to_flyte_literal_type()\n assert s.sdk_columns['c'].to_flyte_literal_type() == _primitives.Timedelta.to_flyte_literal_type()\n assert s.sdk_columns['d'].to_flyte_literal_type() == _primitives.Float.to_flyte_literal_type()\n assert s.sdk_columns['e'].to_flyte_literal_type() == _primitives.Integer.to_flyte_literal_type()\n assert s.sdk_columns['f'].to_flyte_literal_type() == _primitives.String.to_flyte_literal_type()\n assert s == m\n\n\ndef test_promote_from_model_schema():\n m = _literal_models.Schema(\n \"s3://some/place/\",\n _type_models.SchemaType(\n [\n _type_models.SchemaType.SchemaColumn(\n \"a\",\n _type_models.SchemaType.SchemaColumn.SchemaColumnType.BOOLEAN\n ),\n _type_models.SchemaType.SchemaColumn(\n \"b\",\n _type_models.SchemaType.SchemaColumn.SchemaColumnType.DATETIME\n ),\n _type_models.SchemaType.SchemaColumn(\n \"c\",\n _type_models.SchemaType.SchemaColumn.SchemaColumnType.DURATION\n ),\n _type_models.SchemaType.SchemaColumn(\n \"d\",\n _type_models.SchemaType.SchemaColumn.SchemaColumnType.FLOAT\n ),\n _type_models.SchemaType.SchemaColumn(\n \"e\",\n _type_models.SchemaType.SchemaColumn.SchemaColumnType.INTEGER\n ),\n _type_models.SchemaType.SchemaColumn(\n \"f\",\n _type_models.SchemaType.SchemaColumn.SchemaColumnType.STRING\n ),\n ]\n )\n )\n\n s = _schema_impl.Schema.promote_from_model(m)\n assert s.uri == \"s3://some/place/\"\n assert s.type.sdk_columns['a'].to_flyte_literal_type() == _primitives.Boolean.to_flyte_literal_type()\n assert s.type.sdk_columns['b'].to_flyte_literal_type() == _primitives.Datetime.to_flyte_literal_type()\n assert s.type.sdk_columns['c'].to_flyte_literal_type() == _primitives.Timedelta.to_flyte_literal_type()\n assert s.type.sdk_columns['d'].to_flyte_literal_type() == _primitives.Float.to_flyte_literal_type()\n assert s.type.sdk_columns['e'].to_flyte_literal_type() == _primitives.Integer.to_flyte_literal_type()\n assert s.type.sdk_columns['f'].to_flyte_literal_type() == _primitives.String.to_flyte_literal_type()\n assert s == m\n\n\ndef test_create_at_known_location():\n with _test_utils.LocalTestFileSystem():\n with _utils.AutoDeletingTempDir('test') as wd:\n b = _schema_impl.Schema.create_at_known_location(wd.name, schema_type=_schema_impl.SchemaType())\n assert b.local_path is None\n assert b.remote_location == wd.name + \"/\"\n assert b.mode == 'wb'\n\n with b as w:\n w.write(_pd.DataFrame.from_dict({'a': [1, 2, 3, 4], 'b': [5, 6, 7, 8]}))\n\n df = _pd.read_parquet(_os.path.join(wd.name, \"000000\"))\n assert list(df['a']) == [1, 2, 3, 4]\n assert list(df['b']) == [5, 6, 7, 8]\n\n\ndef test_generic_schema_read():\n with 
_test_utils.LocalTestFileSystem():\n a = _schema_impl.Schema.create_at_any_location(\n schema_type=_schema_impl.SchemaType([('a', _primitives.Integer), ('b', _primitives.Integer)])\n )\n with a as writer:\n writer.write(_pd.DataFrame.from_dict({'a': [1, 2, 3, 4], 'b': [5, 6, 7, 8]}))\n\n b = _schema_impl.Schema.fetch(\n a.remote_prefix,\n schema_type=_schema_impl.SchemaType([]))\n with b as reader:\n df = reader.read()\n assert df.columns.values.tolist() == ['a', 'b']\n assert df['a'].tolist() == [1, 2, 3, 4]\n assert df['b'].tolist() == [5, 6, 7, 8]\n\n\ndef test_extra_schema_read():\n with _test_utils.LocalTestFileSystem():\n a = _schema_impl.Schema.create_at_any_location(\n schema_type=_schema_impl.SchemaType([('a', _primitives.Integer), ('b', _primitives.Integer)])\n )\n with a as writer:\n writer.write(_pd.DataFrame.from_dict({'a': [1, 2, 3, 4], 'b': [5, 6, 7, 8]}))\n\n b = _schema_impl.Schema.fetch(\n a.remote_prefix,\n schema_type=_schema_impl.SchemaType([('a', _primitives.Integer)]))\n with b as reader:\n df = reader.read(concat=True, truncate_extra_columns=False)\n assert df.columns.values.tolist() == ['a', 'b']\n assert df['a'].tolist() == [1, 2, 3, 4]\n assert df['b'].tolist() == [5, 6, 7, 8]\n\n with b as reader:\n df = reader.read(concat=True)\n assert df.columns.values.tolist() == ['a']\n assert df['a'].tolist() == [1, 2, 3, 4]\n\n\ndef test_normal_schema_read_with_fastparquet():\n with _test_utils.LocalTestFileSystem():\n a = _schema_impl.Schema.create_at_any_location(\n schema_type=_schema_impl.SchemaType([('a', _primitives.Integer), ('b', _primitives.Boolean)])\n )\n with a as writer:\n writer.write(_pd.DataFrame.from_dict({'a': [1, 2, 3, 4], 'b': [False, True, True, False]}))\n\n import os as _os\n original_engine = _os.getenv('PARQUET_ENGINE')\n _os.environ['PARQUET_ENGINE'] = 'fastparquet'\n\n b = _schema_impl.Schema.fetch(\n a.remote_prefix,\n schema_type=_schema_impl.SchemaType([]))\n\n with b as reader:\n df = reader.read()\n assert df['a'].tolist() == [1, 2, 3, 4]\n assert _pd.api.types.is_bool_dtype(df.dtypes['b'])\n assert df['b'].tolist() == [False, True, True, False]\n\n if original_engine is None:\n del _os.environ['PARQUET_ENGINE']\n else:\n _os.environ['PARQUET_ENGINE'] = original_engine\n\n\ndef test_type_promoted_schema_read_with_fastparquet():\n with _test_utils.LocalTestFileSystem():\n a = _schema_impl.Schema.create_at_any_location(\n schema_type=_schema_impl.SchemaType([('a', _primitives.Integer), ('b', _primitives.Boolean)])\n )\n with a as writer:\n writer.write(_pd.DataFrame.from_dict({'a': [1, 2, 3, 4], 'b': [None, True, None, False]}))\n\n import os as _os\n original_engine = _os.getenv('PARQUET_ENGINE')\n _os.environ['PARQUET_ENGINE'] = 'fastparquet'\n\n b = _schema_impl.Schema.fetch(\n a.remote_prefix,\n schema_type=_schema_impl.SchemaType([]))\n\n with b as reader:\n df = reader.read()\n assert df['a'].tolist() == [1, 2, 3, 4]\n assert _pd.api.types.is_object_dtype(df.dtypes['b'])\n assert df['b'].tolist() == [None, True, None, False]\n\n if original_engine is None:\n del _os.environ['PARQUET_ENGINE']\n else:\n _os.environ['PARQUET_ENGINE'] = original_engine\n\n\ndef test_schema_read_consistency_between_two_engines():\n with _test_utils.LocalTestFileSystem():\n a = _schema_impl.Schema.create_at_any_location(\n schema_type=_schema_impl.SchemaType([('a', _primitives.Integer), ('b', _primitives.Boolean)])\n )\n with a as writer:\n writer.write(_pd.DataFrame.from_dict({'a': [1, 2, 3, 4], 'b': [None, True, None, False]}))\n\n import os as _os\n 
original_engine = _os.getenv('PARQUET_ENGINE')\n _os.environ['PARQUET_ENGINE'] = 'fastparquet'\n\n b = _schema_impl.Schema.fetch(\n a.remote_prefix,\n schema_type=_schema_impl.SchemaType([]))\n\n with b as b_reader:\n b_df = b_reader.read()\n _os.environ['PARQUET_ENGINE'] = 'pyarrow'\n\n c = _schema_impl.Schema.fetch(\n a.remote_prefix,\n schema_type=_schema_impl.SchemaType([]))\n with c as c_reader:\n c_df = c_reader.read()\n assert b_df.equals(c_df)\n\n if original_engine is None:\n del _os.environ['PARQUET_ENGINE']\n else:\n _os.environ['PARQUET_ENGINE'] = original_engine\n" ]
[ [ "pandas.api.types.is_object_dtype", "pandas.read_parquet", "pandas.DataFrame.from_dict", "pandas.DataFrame.from_records", "pandas.api.types.is_bool_dtype" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.1", "1.5", "1.2", "0.24", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
vietawake/mmSegmentation
[ "1f643d6d81708ebf5726c48f66d02c70fe99fe00", "1f643d6d81708ebf5726c48f66d02c70fe99fe00", "1f643d6d81708ebf5726c48f66d02c70fe99fe00", "1f643d6d81708ebf5726c48f66d02c70fe99fe00", "1f643d6d81708ebf5726c48f66d02c70fe99fe00", "1f643d6d81708ebf5726c48f66d02c70fe99fe00" ]
[ "mmseg/models/decode_heads/decode_head.py", "mmseg/datasets/pipelines/loading.py", "mmseg/models/utils/inverted_residual.py", "mmseg/models/decode_heads/dnl_head.py", "tests/test_models/test_backbones/test_vit.py", "mmseg/core/evaluation/metrics.py" ]
[ "from abc import ABCMeta, abstractmethod\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nfrom mmcv.cnn import normal_init\r\nfrom mmcv.runner import auto_fp16, force_fp32\r\n\r\nfrom mmseg.core import build_pixel_sampler\r\nfrom mmseg.ops import resize\r\nfrom ..builder import build_loss\r\nfrom ..losses import accuracy\r\n\r\n\r\nclass BaseDecodeHead(nn.Module, metaclass=ABCMeta):\r\n \"\"\"Base class for BaseDecodeHead.\r\n\r\n Args:\r\n in_channels (int|Sequence[int]): Input channels.\r\n channels (int): Channels after modules, before conv_seg.\r\n num_classes (int): Number of classes.\r\n dropout_ratio (float): Ratio of dropout layer. Default: 0.1.\r\n conv_cfg (dict|None): Config of conv layers. Default: None.\r\n norm_cfg (dict|None): Config of norm layers. Default: None.\r\n act_cfg (dict): Config of activation layers.\r\n Default: dict(type='ReLU')\r\n in_index (int|Sequence[int]): Input feature index. Default: -1\r\n input_transform (str|None): Transformation type of input features.\r\n Options: 'resize_concat', 'multiple_select', None.\r\n 'resize_concat': Multiple feature maps will be resize to the\r\n same size as first one and than concat together.\r\n Usually used in FCN head of HRNet.\r\n 'multiple_select': Multiple feature maps will be bundle into\r\n a list and passed into decode head.\r\n None: Only one select feature map is allowed.\r\n Default: None.\r\n loss_decode (dict): Config of decode loss.\r\n Default: dict(type='CrossEntropyLoss').\r\n ignore_index (int | None): The label index to be ignored. When using\r\n masked BCE loss, ignore_index should be set to None. Default: 255\r\n sampler (dict|None): The config of segmentation map sampler.\r\n Default: None.\r\n align_corners (bool): align_corners argument of F.interpolate.\r\n Default: False.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n in_channels,\r\n channels,\r\n *,\r\n num_classes,\r\n dropout_ratio=0.1,\r\n conv_cfg=None,\r\n norm_cfg=None,\r\n act_cfg=dict(type='ReLU'),\r\n in_index=-1,\r\n input_transform=None,\r\n loss_decode=dict(\r\n type='CrossEntropyLoss',\r\n use_sigmoid=False,\r\n loss_weight=1.0),\r\n ignore_index=255,\r\n sampler=None,\r\n align_corners=False):\r\n super(BaseDecodeHead, self).__init__()\r\n self._init_inputs(in_channels, in_index, input_transform)\r\n self.channels = channels\r\n self.num_classes = num_classes\r\n self.dropout_ratio = dropout_ratio\r\n self.conv_cfg = conv_cfg\r\n self.norm_cfg = norm_cfg\r\n self.act_cfg = act_cfg\r\n self.in_index = in_index\r\n self.loss_decode = build_loss(loss_decode)\r\n self.ignore_index = ignore_index\r\n self.align_corners = align_corners\r\n if sampler is not None:\r\n self.sampler = build_pixel_sampler(sampler, context=self)\r\n else:\r\n self.sampler = None\r\n\r\n self.conv_seg = nn.Conv2d(channels, num_classes, kernel_size=1)\r\n if dropout_ratio > 0:\r\n self.dropout = nn.Dropout2d(dropout_ratio)\r\n else:\r\n self.dropout = None\r\n self.fp16_enabled = False\r\n\r\n def extra_repr(self):\r\n \"\"\"Extra repr.\"\"\"\r\n s = f'input_transform={self.input_transform}, ' \\\r\n f'ignore_index={self.ignore_index}, ' \\\r\n f'align_corners={self.align_corners}'\r\n return s\r\n\r\n def _init_inputs(self, in_channels, in_index, input_transform):\r\n \"\"\"Check and initialize input transforms.\r\n\r\n The in_channels, in_index and input_transform must match.\r\n Specifically, when input_transform is None, only single feature map\r\n will be selected. 
So in_channels and in_index must be of type int.\r\n        When input_transform\r\n\r\n        Args:\r\n            in_channels (int|Sequence[int]): Input channels.\r\n            in_index (int|Sequence[int]): Input feature index.\r\n            input_transform (str|None): Transformation type of input features.\r\n                Options: 'resize_concat', 'multiple_select', None.\r\n                'resize_concat': Multiple feature maps will be resize to the\r\n                    same size as first one and than concat together.\r\n                    Usually used in FCN head of HRNet.\r\n                'multiple_select': Multiple feature maps will be bundle into\r\n                    a list and passed into decode head.\r\n                None: Only one select feature map is allowed.\r\n        \"\"\"\r\n\r\n        if input_transform is not None:\r\n            assert input_transform in ['resize_concat', 'multiple_select']\r\n        self.input_transform = input_transform\r\n        self.in_index = in_index\r\n        if input_transform is not None:\r\n            assert isinstance(in_channels, (list, tuple))\r\n            assert isinstance(in_index, (list, tuple))\r\n            assert len(in_channels) == len(in_index)\r\n            if input_transform == 'resize_concat':\r\n                self.in_channels = sum(in_channels)\r\n            else:\r\n                self.in_channels = in_channels\r\n        else:\r\n            assert isinstance(in_channels, int)\r\n            assert isinstance(in_index, int)\r\n            self.in_channels = in_channels\r\n\r\n    def init_weights(self):\r\n        \"\"\"Initialize weights of classification layer.\"\"\"\r\n        normal_init(self.conv_seg, mean=0, std=0.01)\r\n\r\n    def _transform_inputs(self, inputs):\r\n        \"\"\"Transform inputs for decoder.\r\n\r\n        Args:\r\n            inputs (list[Tensor]): List of multi-level img features.\r\n\r\n        Returns:\r\n            Tensor: The transformed inputs\r\n        \"\"\"\r\n\r\n        if self.input_transform == 'resize_concat':\r\n            inputs = [inputs[i] for i in self.in_index]\r\n            upsampled_inputs = [\r\n                resize(\r\n                    input=x,\r\n                    size=inputs[0].shape[2:],\r\n                    mode='bilinear',\r\n                    align_corners=self.align_corners) for x in inputs\r\n            ]\r\n            inputs = torch.cat(upsampled_inputs, dim=1)\r\n        elif self.input_transform == 'multiple_select':\r\n            inputs = [inputs[i] for i in self.in_index]\r\n        else:\r\n            inputs = inputs[self.in_index]\r\n\r\n        return inputs\r\n\r\n    @auto_fp16()\r\n    @abstractmethod\r\n    def forward(self, inputs):\r\n        \"\"\"Placeholder of forward function.\"\"\"\r\n        pass\r\n\r\n    def forward_train(self, inputs, img_metas, gt_semantic_seg, train_cfg):\r\n        \"\"\"Forward function for training.\r\n        Args:\r\n            inputs (list[Tensor]): List of multi-level img features.\r\n            img_metas (list[dict]): List of image info dict where each dict\r\n                has: 'img_shape', 'scale_factor', 'flip', and may also contain\r\n                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\r\n                For details on the values of these keys see\r\n                `mmseg/datasets/pipelines/formatting.py:Collect`.\r\n            gt_semantic_seg (Tensor): Semantic segmentation masks\r\n                used if the architecture supports semantic segmentation task.\r\n            train_cfg (dict): The training config.\r\n\r\n        Returns:\r\n            dict[str, Tensor]: a dictionary of loss components\r\n        \"\"\"\r\n        seg_logits = self.forward(inputs)\r\n        losses = self.losses(seg_logits, gt_semantic_seg)\r\n        return losses\r\n\r\n    def forward_test(self, inputs, img_metas, test_cfg):\r\n        \"\"\"Forward function for testing.\r\n\r\n        Args:\r\n            inputs (list[Tensor]): List of multi-level img features.\r\n            img_metas (list[dict]): List of image info dict where each dict\r\n                has: 'img_shape', 'scale_factor', 'flip', and may also contain\r\n                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\r\n                For details on the values of these keys see\r\n                `mmseg/datasets/pipelines/formatting.py:Collect`.\r\n            
test_cfg (dict): The testing config.\r\n\r\n        Returns:\r\n            Tensor: Output segmentation map.\r\n        \"\"\"\r\n        return self.forward(inputs)\r\n\r\n    def cls_seg(self, feat):\r\n        \"\"\"Classify each pixel.\"\"\"\r\n        if self.dropout is not None:\r\n            feat = self.dropout(feat)\r\n        output = self.conv_seg(feat)\r\n        return output\r\n\r\n    @force_fp32(apply_to=('seg_logit', ))\r\n    def losses(self, seg_logit, seg_label):\r\n        \"\"\"Compute segmentation loss.\"\"\"\r\n        loss = dict()\r\n        seg_logit = resize(\r\n            input=seg_logit,\r\n            size=seg_label.shape[2:],\r\n            mode='bilinear',\r\n            align_corners=self.align_corners)\r\n        if self.sampler is not None:\r\n            seg_weight = self.sampler.sample(seg_logit, seg_label)\r\n        else:\r\n            seg_weight = None\r\n        seg_label = seg_label.squeeze(1)\r\n        loss['loss_seg'] = self.loss_decode(\r\n            seg_logit,\r\n            seg_label,\r\n            weight=seg_weight,\r\n            ignore_index=self.ignore_index)\r\n        loss['acc_seg'] = accuracy(seg_logit, seg_label)\r\n        return loss\r\n", "import os.path as osp\r\n\r\nimport mmcv\r\nimport numpy as np\r\n\r\nfrom ..builder import PIPELINES\r\n\r\n\r\[email protected]_module()\r\nclass LoadImageFromFile(object):\r\n    \"\"\"Load an image from file.\r\n\r\n    Required keys are \"img_prefix\" and \"img_info\" (a dict that must contain the\r\n    key \"filename\"). Added or updated keys are \"filename\", \"img\", \"img_shape\",\r\n    \"ori_shape\" (same as `img_shape`), \"pad_shape\" (same as `img_shape`),\r\n    \"scale_factor\" (1.0) and \"img_norm_cfg\" (means=0 and stds=1).\r\n\r\n    Args:\r\n        to_float32 (bool): Whether to convert the loaded image to a float32\r\n            numpy array. If set to False, the loaded image is an uint8 array.\r\n            Defaults to False.\r\n        color_type (str): The flag argument for :func:`mmcv.imfrombytes`.\r\n            Defaults to 'color'.\r\n        file_client_args (dict): Arguments to instantiate a FileClient.\r\n            See :class:`mmcv.fileio.FileClient` for details.\r\n            Defaults to ``dict(backend='disk')``.\r\n        imdecode_backend (str): Backend for :func:`mmcv.imdecode`. 
Default:\r\n            'cv2'\r\n    \"\"\"\r\n\r\n    def __init__(self,\r\n                 to_float32=False,\r\n                 color_type='color',\r\n                 file_client_args=dict(backend='disk'),\r\n                 imdecode_backend='cv2'):\r\n        self.to_float32 = to_float32\r\n        self.color_type = color_type\r\n        self.file_client_args = file_client_args.copy()\r\n        self.file_client = None\r\n        self.imdecode_backend = imdecode_backend\r\n\r\n    def __call__(self, results):\r\n        \"\"\"Call functions to load image and get image meta information.\r\n\r\n        Args:\r\n            results (dict): Result dict from :obj:`mmseg.CustomDataset`.\r\n\r\n        Returns:\r\n            dict: The dict contains loaded image and meta information.\r\n        \"\"\"\r\n\r\n        if self.file_client is None:\r\n            self.file_client = mmcv.FileClient(**self.file_client_args)\r\n\r\n        if results.get('img_prefix') is not None:\r\n            filename = osp.join(results['img_prefix'],\r\n                                results['img_info']['filename'])\r\n        else:\r\n            filename = results['img_info']['filename']\r\n        img_bytes = self.file_client.get(filename)\r\n        img = mmcv.imfrombytes(\r\n            img_bytes, flag=self.color_type, backend=self.imdecode_backend)\r\n        if self.to_float32:\r\n            img = img.astype(np.float32)\r\n\r\n        results['filename'] = filename\r\n        results['ori_filename'] = results['img_info']['filename']\r\n        results['img'] = img\r\n        results['img_shape'] = img.shape\r\n        results['ori_shape'] = img.shape\r\n        # Set initial values for default meta_keys\r\n        results['pad_shape'] = img.shape\r\n        results['scale_factor'] = 1.0\r\n        num_channels = 1 if len(img.shape) < 3 else img.shape[2]\r\n        results['img_norm_cfg'] = dict(\r\n            mean=np.zeros(num_channels, dtype=np.float32),\r\n            std=np.ones(num_channels, dtype=np.float32),\r\n            to_rgb=False)\r\n        return results\r\n\r\n    def __repr__(self):\r\n        repr_str = self.__class__.__name__\r\n        repr_str += f'(to_float32={self.to_float32},'\r\n        repr_str += f\"color_type='{self.color_type}',\"\r\n        repr_str += f\"imdecode_backend='{self.imdecode_backend}')\"\r\n        return repr_str\r\n\r\n\r\[email protected]_module()\r\nclass LoadAnnotations(object):\r\n    \"\"\"Load annotations for semantic segmentation.\r\n\r\n    Args:\r\n        reduce_zero_label (bool): Whether reduce all label value by 1.\r\n            Usually used for datasets where 0 is background label.\r\n            Default: False.\r\n        file_client_args (dict): Arguments to instantiate a FileClient.\r\n            See :class:`mmcv.fileio.FileClient` for details.\r\n            Defaults to ``dict(backend='disk')``.\r\n        imdecode_backend (str): Backend for :func:`mmcv.imdecode`. 
Default:\r\n            'pillow'\r\n    \"\"\"\r\n\r\n    def __init__(self,\r\n                 reduce_zero_label=False,\r\n                 file_client_args=dict(backend='disk'),\r\n                 imdecode_backend='pillow'):\r\n        self.reduce_zero_label = reduce_zero_label\r\n        self.file_client_args = file_client_args.copy()\r\n        self.file_client = None\r\n        self.imdecode_backend = imdecode_backend\r\n\r\n    def __call__(self, results):\r\n        \"\"\"Call function to load multiple types annotations.\r\n\r\n        Args:\r\n            results (dict): Result dict from :obj:`mmseg.CustomDataset`.\r\n\r\n        Returns:\r\n            dict: The dict contains loaded semantic segmentation annotations.\r\n        \"\"\"\r\n\r\n        if self.file_client is None:\r\n            self.file_client = mmcv.FileClient(**self.file_client_args)\r\n\r\n        if results.get('seg_prefix', None) is not None:\r\n            filename = osp.join(results['seg_prefix'],\r\n                                results['ann_info']['seg_map'])\r\n        else:\r\n            filename = results['ann_info']['seg_map']\r\n        img_bytes = self.file_client.get(filename)\r\n        gt_semantic_seg = mmcv.imfrombytes(\r\n            img_bytes, flag='unchanged',\r\n            backend=self.imdecode_backend).squeeze().astype(np.uint8)\r\n        # modify if custom classes\r\n        if results.get('label_map', None) is not None:\r\n            for old_id, new_id in results['label_map'].items():\r\n                gt_semantic_seg[gt_semantic_seg == old_id] = new_id\r\n        # reduce zero_label\r\n        if self.reduce_zero_label:\r\n            # avoid using underflow conversion\r\n            gt_semantic_seg[gt_semantic_seg == 0] = 255\r\n            gt_semantic_seg = gt_semantic_seg - 1\r\n            gt_semantic_seg[gt_semantic_seg == 254] = 255\r\n        results['gt_semantic_seg'] = gt_semantic_seg\r\n        results['seg_fields'].append('gt_semantic_seg')\r\n        return results\r\n\r\n    def __repr__(self):\r\n        repr_str = self.__class__.__name__\r\n        repr_str += f'(reduce_zero_label={self.reduce_zero_label},'\r\n        repr_str += f\"imdecode_backend='{self.imdecode_backend}')\"\r\n        return repr_str\r\n", "from mmcv.cnn import ConvModule\r\nfrom torch import nn\r\nfrom torch.utils import checkpoint as cp\r\n\r\nfrom .se_layer import SELayer\r\n\r\n\r\nclass InvertedResidual(nn.Module):\r\n    \"\"\"InvertedResidual block for MobileNetV2.\r\n\r\n    Args:\r\n        in_channels (int): The input channels of the InvertedResidual block.\r\n        out_channels (int): The output channels of the InvertedResidual block.\r\n        stride (int): Stride of the middle (first) 3x3 convolution.\r\n        expand_ratio (int): Adjusts number of channels of the hidden layer\r\n            in InvertedResidual by this amount.\r\n        dilation (int): Dilation rate of depthwise conv. Default: 1\r\n        conv_cfg (dict): Config dict for convolution layer.\r\n            Default: None, which means using conv2d.\r\n        norm_cfg (dict): Config dict for normalization layer.\r\n            Default: dict(type='BN').\r\n        act_cfg (dict): Config dict for activation layer.\r\n            Default: dict(type='ReLU6').\r\n        with_cp (bool): Use checkpoint or not. Using checkpoint will save some\r\n            memory while slowing down the training speed. Default: False.\r\n\r\n    Returns:\r\n        Tensor: The output tensor.\r\n    \"\"\"\r\n\r\n    def __init__(self,\r\n                 in_channels,\r\n                 out_channels,\r\n                 stride,\r\n                 expand_ratio,\r\n                 dilation=1,\r\n                 conv_cfg=None,\r\n                 norm_cfg=dict(type='BN'),\r\n                 act_cfg=dict(type='ReLU6'),\r\n                 with_cp=False):\r\n        super(InvertedResidual, self).__init__()\r\n        self.stride = stride\r\n        assert stride in [1, 2], f'stride must in [1, 2]. 
' \\\r\n            f'But received {stride}.'\r\n        self.with_cp = with_cp\r\n        self.use_res_connect = self.stride == 1 and in_channels == out_channels\r\n        hidden_dim = int(round(in_channels * expand_ratio))\r\n\r\n        layers = []\r\n        if expand_ratio != 1:\r\n            layers.append(\r\n                ConvModule(\r\n                    in_channels=in_channels,\r\n                    out_channels=hidden_dim,\r\n                    kernel_size=1,\r\n                    conv_cfg=conv_cfg,\r\n                    norm_cfg=norm_cfg,\r\n                    act_cfg=act_cfg))\r\n        layers.extend([\r\n            ConvModule(\r\n                in_channels=hidden_dim,\r\n                out_channels=hidden_dim,\r\n                kernel_size=3,\r\n                stride=stride,\r\n                padding=dilation,\r\n                dilation=dilation,\r\n                groups=hidden_dim,\r\n                conv_cfg=conv_cfg,\r\n                norm_cfg=norm_cfg,\r\n                act_cfg=act_cfg),\r\n            ConvModule(\r\n                in_channels=hidden_dim,\r\n                out_channels=out_channels,\r\n                kernel_size=1,\r\n                conv_cfg=conv_cfg,\r\n                norm_cfg=norm_cfg,\r\n                act_cfg=None)\r\n        ])\r\n        self.conv = nn.Sequential(*layers)\r\n\r\n    def forward(self, x):\r\n\r\n        def _inner_forward(x):\r\n            if self.use_res_connect:\r\n                return x + self.conv(x)\r\n            else:\r\n                return self.conv(x)\r\n\r\n        if self.with_cp and x.requires_grad:\r\n            out = cp.checkpoint(_inner_forward, x)\r\n        else:\r\n            out = _inner_forward(x)\r\n\r\n        return out\r\n\r\n\r\nclass InvertedResidualV3(nn.Module):\r\n    \"\"\"Inverted Residual Block for MobileNetV3.\r\n\r\n    Args:\r\n        in_channels (int): The input channels of this Module.\r\n        out_channels (int): The output channels of this Module.\r\n        mid_channels (int): The input channels of the depthwise convolution.\r\n        kernel_size (int): The kernel size of the depthwise convolution.\r\n            Default: 3.\r\n        stride (int): The stride of the depthwise convolution. Default: 1.\r\n        se_cfg (dict): Config dict for se layer. Default: None, which means no\r\n            se layer.\r\n        with_expand_conv (bool): Use expand conv or not. If set False,\r\n            mid_channels must be the same with in_channels. Default: True.\r\n        conv_cfg (dict): Config dict for convolution layer. Default: None,\r\n            which means using conv2d.\r\n        norm_cfg (dict): Config dict for normalization layer.\r\n            Default: dict(type='BN').\r\n        act_cfg (dict): Config dict for activation layer.\r\n            Default: dict(type='ReLU').\r\n        with_cp (bool): Use checkpoint or not. Using checkpoint will save some\r\n            memory while slowing down the training speed. 
Default: False.\r\n\r\n    Returns:\r\n        Tensor: The output tensor.\r\n    \"\"\"\r\n\r\n    def __init__(self,\r\n                 in_channels,\r\n                 out_channels,\r\n                 mid_channels,\r\n                 kernel_size=3,\r\n                 stride=1,\r\n                 se_cfg=None,\r\n                 with_expand_conv=True,\r\n                 conv_cfg=None,\r\n                 norm_cfg=dict(type='BN'),\r\n                 act_cfg=dict(type='ReLU'),\r\n                 with_cp=False):\r\n        super(InvertedResidualV3, self).__init__()\r\n        self.with_res_shortcut = (stride == 1 and in_channels == out_channels)\r\n        assert stride in [1, 2]\r\n        self.with_cp = with_cp\r\n        self.with_se = se_cfg is not None\r\n        self.with_expand_conv = with_expand_conv\r\n\r\n        if self.with_se:\r\n            assert isinstance(se_cfg, dict)\r\n        if not self.with_expand_conv:\r\n            assert mid_channels == in_channels\r\n\r\n        if self.with_expand_conv:\r\n            self.expand_conv = ConvModule(\r\n                in_channels=in_channels,\r\n                out_channels=mid_channels,\r\n                kernel_size=1,\r\n                stride=1,\r\n                padding=0,\r\n                conv_cfg=conv_cfg,\r\n                norm_cfg=norm_cfg,\r\n                act_cfg=act_cfg)\r\n        self.depthwise_conv = ConvModule(\r\n            in_channels=mid_channels,\r\n            out_channels=mid_channels,\r\n            kernel_size=kernel_size,\r\n            stride=stride,\r\n            padding=kernel_size // 2,\r\n            groups=mid_channels,\r\n            conv_cfg=dict(\r\n                type='Conv2dAdaptivePadding') if stride == 2 else conv_cfg,\r\n            norm_cfg=norm_cfg,\r\n            act_cfg=act_cfg)\r\n\r\n        if self.with_se:\r\n            self.se = SELayer(**se_cfg)\r\n\r\n        self.linear_conv = ConvModule(\r\n            in_channels=mid_channels,\r\n            out_channels=out_channels,\r\n            kernel_size=1,\r\n            stride=1,\r\n            padding=0,\r\n            conv_cfg=conv_cfg,\r\n            norm_cfg=norm_cfg,\r\n            act_cfg=None)\r\n\r\n    def forward(self, x):\r\n\r\n        def _inner_forward(x):\r\n            out = x\r\n\r\n            if self.with_expand_conv:\r\n                out = self.expand_conv(out)\r\n\r\n            out = self.depthwise_conv(out)\r\n\r\n            if self.with_se:\r\n                out = self.se(out)\r\n\r\n            out = self.linear_conv(out)\r\n\r\n            if self.with_res_shortcut:\r\n                return x + out\r\n            else:\r\n                return out\r\n\r\n        if self.with_cp and x.requires_grad:\r\n            out = cp.checkpoint(_inner_forward, x)\r\n        else:\r\n            out = _inner_forward(x)\r\n\r\n        return out\r\n", "import torch\r\nfrom mmcv.cnn import NonLocal2d\r\nfrom torch import nn\r\n\r\nfrom ..builder import HEADS\r\nfrom .fcn_head import FCNHead\r\n\r\n\r\nclass DisentangledNonLocal2d(NonLocal2d):\r\n    \"\"\"Disentangled Non-Local Blocks.\r\n\r\n    Args:\r\n        temperature (float): Temperature to adjust attention. 
Default: 0.05\r\n    \"\"\"\r\n\r\n    def __init__(self, *arg, temperature, **kwargs):\r\n        super().__init__(*arg, **kwargs)\r\n        self.temperature = temperature\r\n        self.conv_mask = nn.Conv2d(self.in_channels, 1, kernel_size=1)\r\n\r\n    def embedded_gaussian(self, theta_x, phi_x):\r\n        \"\"\"Embedded gaussian with temperature.\"\"\"\r\n\r\n        # NonLocal2d pairwise_weight: [N, HxW, HxW]\r\n        pairwise_weight = torch.matmul(theta_x, phi_x)\r\n        if self.use_scale:\r\n            # theta_x.shape[-1] is `self.inter_channels`\r\n            pairwise_weight /= theta_x.shape[-1]**0.5\r\n        pairwise_weight /= self.temperature\r\n        pairwise_weight = pairwise_weight.softmax(dim=-1)\r\n        return pairwise_weight\r\n\r\n    def forward(self, x):\r\n        # x: [N, C, H, W]\r\n        n = x.size(0)\r\n\r\n        # g_x: [N, HxW, C]\r\n        g_x = self.g(x).view(n, self.inter_channels, -1)\r\n        g_x = g_x.permute(0, 2, 1)\r\n\r\n        # theta_x: [N, HxW, C], phi_x: [N, C, HxW]\r\n        if self.mode == 'gaussian':\r\n            theta_x = x.view(n, self.in_channels, -1)\r\n            theta_x = theta_x.permute(0, 2, 1)\r\n            if self.sub_sample:\r\n                phi_x = self.phi(x).view(n, self.in_channels, -1)\r\n            else:\r\n                phi_x = x.view(n, self.in_channels, -1)\r\n        elif self.mode == 'concatenation':\r\n            theta_x = self.theta(x).view(n, self.inter_channels, -1, 1)\r\n            phi_x = self.phi(x).view(n, self.inter_channels, 1, -1)\r\n        else:\r\n            theta_x = self.theta(x).view(n, self.inter_channels, -1)\r\n            theta_x = theta_x.permute(0, 2, 1)\r\n            phi_x = self.phi(x).view(n, self.inter_channels, -1)\r\n\r\n        # subtract mean\r\n        theta_x -= theta_x.mean(dim=-2, keepdim=True)\r\n        phi_x -= phi_x.mean(dim=-1, keepdim=True)\r\n\r\n        pairwise_func = getattr(self, self.mode)\r\n        # pairwise_weight: [N, HxW, HxW]\r\n        pairwise_weight = pairwise_func(theta_x, phi_x)\r\n\r\n        # y: [N, HxW, C]\r\n        y = torch.matmul(pairwise_weight, g_x)\r\n        # y: [N, C, H, W]\r\n        y = y.permute(0, 2, 1).contiguous().reshape(n, self.inter_channels,\r\n                                                    *x.size()[2:])\r\n\r\n        # unary_mask: [N, 1, HxW]\r\n        unary_mask = self.conv_mask(x)\r\n        unary_mask = unary_mask.view(n, 1, -1)\r\n        unary_mask = unary_mask.softmax(dim=-1)\r\n        # unary_x: [N, 1, C]\r\n        unary_x = torch.matmul(unary_mask, g_x)\r\n        # unary_x: [N, C, 1, 1]\r\n        unary_x = unary_x.permute(0, 2, 1).contiguous().reshape(\r\n            n, self.inter_channels, 1, 1)\r\n\r\n        output = x + self.conv_out(y + unary_x)\r\n\r\n        return output\r\n\r\n\r\[email protected]_module()\r\nclass DNLHead(FCNHead):\r\n    \"\"\"Disentangled Non-Local Neural Networks.\r\n\r\n    This head is the implementation of `DNLNet\r\n    <https://arxiv.org/abs/2006.06668>`_.\r\n\r\n    Args:\r\n        reduction (int): Reduction factor of projection transform. Default: 2.\r\n        use_scale (bool): Whether to scale pairwise_weight by\r\n            sqrt(1/inter_channels). Default: False.\r\n        mode (str): The nonlocal mode. Options are 'embedded_gaussian',\r\n            'dot_product'. Default: 'embedded_gaussian.'.\r\n        temperature (float): Temperature to adjust attention. 
Default: 0.05\r\n    \"\"\"\r\n\r\n    def __init__(self,\r\n                 reduction=2,\r\n                 use_scale=True,\r\n                 mode='embedded_gaussian',\r\n                 temperature=0.05,\r\n                 **kwargs):\r\n        super(DNLHead, self).__init__(num_convs=2, **kwargs)\r\n        self.reduction = reduction\r\n        self.use_scale = use_scale\r\n        self.mode = mode\r\n        self.temperature = temperature\r\n        self.dnl_block = DisentangledNonLocal2d(\r\n            in_channels=self.channels,\r\n            reduction=self.reduction,\r\n            use_scale=self.use_scale,\r\n            conv_cfg=self.conv_cfg,\r\n            norm_cfg=self.norm_cfg,\r\n            mode=self.mode,\r\n            temperature=self.temperature)\r\n\r\n    def forward(self, inputs):\r\n        \"\"\"Forward function.\"\"\"\r\n        x = self._transform_inputs(inputs)\r\n        output = self.convs[0](x)\r\n        output = self.dnl_block(output)\r\n        output = self.convs[1](output)\r\n        if self.concat_input:\r\n            output = self.conv_cat(torch.cat([x, output], dim=1))\r\n        output = self.cls_seg(output)\r\n        return output\r\n", "import pytest\r\nimport torch\r\n\r\nfrom mmseg.models.backbones.vit import VisionTransformer\r\nfrom .utils import check_norm_state\r\n\r\n\r\ndef test_vit_backbone():\r\n    with pytest.raises(TypeError):\r\n        # pretrained must be a string path\r\n        model = VisionTransformer()\r\n        model.init_weights(pretrained=0)\r\n\r\n    with pytest.raises(TypeError):\r\n        # img_size must be int or tuple\r\n        model = VisionTransformer(img_size=512.0)\r\n\r\n    with pytest.raises(TypeError):\r\n        # out_indices must be int ,list or tuple\r\n        model = VisionTransformer(out_indices=1.)\r\n\r\n    with pytest.raises(TypeError):\r\n        # test upsample_pos_embed function\r\n        x = torch.randn(1, 196)\r\n        VisionTransformer.resize_pos_embed(x, 512, 512, 224, 224, 'bilinear')\r\n\r\n    with pytest.raises(RuntimeError):\r\n        # forward inputs must be [N, C, H, W]\r\n        x = torch.randn(3, 30, 30)\r\n        model = VisionTransformer()\r\n        model(x)\r\n\r\n    with pytest.raises(AssertionError):\r\n        # out_shape must be 'NLC' or 'NCHW;'\r\n        VisionTransformer(out_shape='NCL')\r\n\r\n    # Test img_size isinstance int\r\n    imgs = torch.randn(1, 3, 224, 224)\r\n    model = VisionTransformer(img_size=224)\r\n    model.init_weights()\r\n    model(imgs)\r\n\r\n    # Test norm_eval = True\r\n    model = VisionTransformer(norm_eval=True)\r\n    model.train()\r\n\r\n    # Test ViT backbone with input size of 224 and patch size of 16\r\n    model = VisionTransformer()\r\n    model.init_weights()\r\n    model.train()\r\n\r\n    assert check_norm_state(model.modules(), True)\r\n\r\n    # Test large size input image\r\n    imgs = torch.randn(1, 3, 256, 256)\r\n    feat = model(imgs)\r\n    assert feat[-1].shape == (1, 768, 16, 16)\r\n\r\n    # Test small size input image\r\n    imgs = torch.randn(1, 3, 32, 32)\r\n    feat = model(imgs)\r\n    assert feat[-1].shape == (1, 768, 2, 2)\r\n\r\n    imgs = torch.randn(1, 3, 224, 224)\r\n    feat = model(imgs)\r\n    assert feat[-1].shape == (1, 768, 14, 14)\r\n\r\n    # Test with_cp=True\r\n    model = VisionTransformer(with_cp=True)\r\n    imgs = torch.randn(1, 3, 224, 224)\r\n    feat = model(imgs)\r\n    assert feat[-1].shape == (1, 768, 14, 14)\r\n\r\n    # Test with_cls_token=False\r\n    model = VisionTransformer(with_cls_token=False)\r\n    imgs = torch.randn(1, 3, 224, 224)\r\n    feat = model(imgs)\r\n    assert feat[-1].shape == (1, 768, 14, 14)\r\n\r\n    # Test final reshape arg\r\n    imgs = torch.randn(1, 3, 224, 224)\r\n    model = VisionTransformer(out_shape='NLC')\r\n    feat = model(imgs)\r\n    assert feat[-1].shape == (1, 196, 768)\r\n", "from collections import OrderedDict\r\n\r\nimport mmcv\r\nimport numpy as np\r\nimport torch\r\n\r\n\r\ndef f_score(precision, recall, beta=1):\r\n    \"\"\"calcuate the 
f-score value.\r\n\r\n    Args:\r\n        precision (float | torch.Tensor): The precision value.\r\n        recall (float | torch.Tensor): The recall value.\r\n        beta (int): Determines the weight of recall in the combined score.\r\n            Default: False.\r\n\r\n    Returns:\r\n        [torch.tensor]: The f-score value.\r\n    \"\"\"\r\n    score = (1 + beta**2) * (precision * recall) / (\r\n        (beta**2 * precision) + recall)\r\n    return score\r\n\r\n\r\ndef intersect_and_union(pred_label,\r\n                        label,\r\n                        num_classes,\r\n                        ignore_index,\r\n                        label_map=dict(),\r\n                        reduce_zero_label=False):\r\n    \"\"\"Calculate intersection and Union.\r\n\r\n    Args:\r\n        pred_label (ndarray | str): Prediction segmentation map\r\n            or predict result filename.\r\n        label (ndarray | str): Ground truth segmentation map\r\n            or label filename.\r\n        num_classes (int): Number of categories.\r\n        ignore_index (int): Index that will be ignored in evaluation.\r\n        label_map (dict): Mapping old labels to new labels. The parameter will\r\n            work only when label is str. Default: dict().\r\n        reduce_zero_label (bool): Wether ignore zero label. The parameter will\r\n            work only when label is str. Default: False.\r\n\r\n    Returns:\r\n        torch.Tensor: The intersection of prediction and ground truth\r\n            histogram on all classes.\r\n        torch.Tensor: The union of prediction and ground truth histogram on\r\n            all classes.\r\n        torch.Tensor: The prediction histogram on all classes.\r\n        torch.Tensor: The ground truth histogram on all classes.\r\n    \"\"\"\r\n\r\n    if isinstance(pred_label, str):\r\n        pred_label = torch.from_numpy(np.load(pred_label))\r\n    else:\r\n        pred_label = torch.from_numpy((pred_label))\r\n\r\n    if isinstance(label, str):\r\n        label = torch.from_numpy(\r\n            mmcv.imread(label, flag='unchanged', backend='pillow'))\r\n    else:\r\n        label = torch.from_numpy(label)\r\n\r\n    if label_map is not None:\r\n        for old_id, new_id in label_map.items():\r\n            label[label == old_id] = new_id\r\n    if reduce_zero_label:\r\n        label[label == 0] = 255\r\n        label = label - 1\r\n        label[label == 254] = 255\r\n\r\n    mask = (label != ignore_index)\r\n    pred_label = pred_label[mask]\r\n    label = label[mask]\r\n\r\n    intersect = pred_label[pred_label == label]\r\n    area_intersect = torch.histc(\r\n        intersect.float(), bins=(num_classes), min=0, max=num_classes - 1)\r\n    area_pred_label = torch.histc(\r\n        pred_label.float(), bins=(num_classes), min=0, max=num_classes - 1)\r\n    area_label = torch.histc(\r\n        label.float(), bins=(num_classes), min=0, max=num_classes - 1)\r\n    area_union = area_pred_label + area_label - area_intersect\r\n    return area_intersect, area_union, area_pred_label, area_label\r\n\r\n\r\ndef total_intersect_and_union(results,\r\n                              gt_seg_maps,\r\n                              num_classes,\r\n                              ignore_index,\r\n                              label_map=dict(),\r\n                              reduce_zero_label=False):\r\n    \"\"\"Calculate Total Intersection and Union.\r\n\r\n    Args:\r\n        results (list[ndarray] | list[str]): List of prediction segmentation\r\n            maps or list of prediction result filenames.\r\n        gt_seg_maps (list[ndarray] | list[str]): list of ground truth\r\n            segmentation maps or list of label filenames.\r\n        num_classes (int): Number of categories.\r\n        ignore_index (int): Index that will be ignored in evaluation.\r\n        label_map (dict): Mapping old labels to new labels. Default: dict().\r\n        reduce_zero_label (bool): Wether ignore zero label. 
Default: False.\r\n\r\n    Returns:\r\n        ndarray: The intersection of prediction and ground truth histogram\r\n            on all classes.\r\n        ndarray: The union of prediction and ground truth histogram on all\r\n            classes.\r\n        ndarray: The prediction histogram on all classes.\r\n        ndarray: The ground truth histogram on all classes.\r\n    \"\"\"\r\n    num_imgs = len(results)\r\n    assert len(gt_seg_maps) == num_imgs\r\n    total_area_intersect = torch.zeros((num_classes, ), dtype=torch.float64)\r\n    total_area_union = torch.zeros((num_classes, ), dtype=torch.float64)\r\n    total_area_pred_label = torch.zeros((num_classes, ), dtype=torch.float64)\r\n    total_area_label = torch.zeros((num_classes, ), dtype=torch.float64)\r\n    for i in range(num_imgs):\r\n        area_intersect, area_union, area_pred_label, area_label = \\\r\n            intersect_and_union(\r\n                results[i], gt_seg_maps[i], num_classes, ignore_index,\r\n                label_map, reduce_zero_label)\r\n        total_area_intersect += area_intersect\r\n        total_area_union += area_union\r\n        total_area_pred_label += area_pred_label\r\n        total_area_label += area_label\r\n    return total_area_intersect, total_area_union, total_area_pred_label, \\\r\n        total_area_label\r\n\r\n\r\ndef mean_iou(results,\r\n             gt_seg_maps,\r\n             num_classes,\r\n             ignore_index,\r\n             nan_to_num=None,\r\n             label_map=dict(),\r\n             reduce_zero_label=False):\r\n    \"\"\"Calculate Mean Intersection and Union (mIoU)\r\n\r\n    Args:\r\n        results (list[ndarray] | list[str]): List of prediction segmentation\r\n            maps or list of prediction result filenames.\r\n        gt_seg_maps (list[ndarray] | list[str]): list of ground truth\r\n            segmentation maps or list of label filenames.\r\n        num_classes (int): Number of categories.\r\n        ignore_index (int): Index that will be ignored in evaluation.\r\n        nan_to_num (int, optional): If specified, NaN values will be replaced\r\n            by the numbers defined by the user. Default: None.\r\n        label_map (dict): Mapping old labels to new labels. Default: dict().\r\n        reduce_zero_label (bool): Wether ignore zero label. Default: False.\r\n\r\n    Returns:\r\n        dict[str, float | ndarray]:\r\n            <aAcc> float: Overall accuracy on all images.\r\n            <Acc> ndarray: Per category accuracy, shape (num_classes, ).\r\n            <IoU> ndarray: Per category IoU, shape (num_classes, ).\r\n    \"\"\"\r\n    iou_result = eval_metrics(\r\n        results=results,\r\n        gt_seg_maps=gt_seg_maps,\r\n        num_classes=num_classes,\r\n        ignore_index=ignore_index,\r\n        metrics=['mIoU'],\r\n        nan_to_num=nan_to_num,\r\n        label_map=label_map,\r\n        reduce_zero_label=reduce_zero_label)\r\n    return iou_result\r\n\r\n\r\ndef mean_dice(results,\r\n              gt_seg_maps,\r\n              num_classes,\r\n              ignore_index,\r\n              nan_to_num=None,\r\n              label_map=dict(),\r\n              reduce_zero_label=False):\r\n    \"\"\"Calculate Mean Dice (mDice)\r\n\r\n    Args:\r\n        results (list[ndarray] | list[str]): List of prediction segmentation\r\n            maps or list of prediction result filenames.\r\n        gt_seg_maps (list[ndarray] | list[str]): list of ground truth\r\n            segmentation maps or list of label filenames.\r\n        num_classes (int): Number of categories.\r\n        ignore_index (int): Index that will be ignored in evaluation.\r\n        nan_to_num (int, optional): If specified, NaN values will be replaced\r\n            by the numbers defined by the user. Default: None.\r\n        label_map (dict): Mapping old labels to new labels. Default: dict().\r\n        reduce_zero_label (bool): Wether ignore zero label. 
Default: False.\r\n\r\n    Returns:\r\n        dict[str, float | ndarray]: Default metrics.\r\n            <aAcc> float: Overall accuracy on all images.\r\n            <Acc> ndarray: Per category accuracy, shape (num_classes, ).\r\n            <Dice> ndarray: Per category dice, shape (num_classes, ).\r\n    \"\"\"\r\n\r\n    dice_result = eval_metrics(\r\n        results=results,\r\n        gt_seg_maps=gt_seg_maps,\r\n        num_classes=num_classes,\r\n        ignore_index=ignore_index,\r\n        metrics=['mDice'],\r\n        nan_to_num=nan_to_num,\r\n        label_map=label_map,\r\n        reduce_zero_label=reduce_zero_label)\r\n    return dice_result\r\n\r\n\r\ndef mean_fscore(results,\r\n                gt_seg_maps,\r\n                num_classes,\r\n                ignore_index,\r\n                nan_to_num=None,\r\n                label_map=dict(),\r\n                reduce_zero_label=False,\r\n                beta=1):\r\n    \"\"\"Calculate Mean Intersection and Union (mIoU)\r\n\r\n    Args:\r\n        results (list[ndarray] | list[str]): List of prediction segmentation\r\n            maps or list of prediction result filenames.\r\n        gt_seg_maps (list[ndarray] | list[str]): list of ground truth\r\n            segmentation maps or list of label filenames.\r\n        num_classes (int): Number of categories.\r\n        ignore_index (int): Index that will be ignored in evaluation.\r\n        nan_to_num (int, optional): If specified, NaN values will be replaced\r\n            by the numbers defined by the user. Default: None.\r\n        label_map (dict): Mapping old labels to new labels. Default: dict().\r\n        reduce_zero_label (bool): Wether ignore zero label. Default: False.\r\n        beta (int): Determines the weight of recall in the combined score.\r\n            Default: False.\r\n\r\n\r\n    Returns:\r\n        dict[str, float | ndarray]: Default metrics.\r\n            <aAcc> float: Overall accuracy on all images.\r\n            <Fscore> ndarray: Per category recall, shape (num_classes, ).\r\n            <Precision> ndarray: Per category precision, shape (num_classes, ).\r\n            <Recall> ndarray: Per category f-score, shape (num_classes, ).\r\n    \"\"\"\r\n    fscore_result = eval_metrics(\r\n        results=results,\r\n        gt_seg_maps=gt_seg_maps,\r\n        num_classes=num_classes,\r\n        ignore_index=ignore_index,\r\n        metrics=['mFscore'],\r\n        nan_to_num=nan_to_num,\r\n        label_map=label_map,\r\n        reduce_zero_label=reduce_zero_label,\r\n        beta=beta)\r\n    return fscore_result\r\n\r\n\r\ndef eval_metrics(results,\r\n                 gt_seg_maps,\r\n                 num_classes,\r\n                 ignore_index,\r\n                 metrics=['mIoU'],\r\n                 nan_to_num=None,\r\n                 label_map=dict(),\r\n                 reduce_zero_label=False,\r\n                 beta=1):\r\n    \"\"\"Calculate evaluation metrics\r\n    Args:\r\n        results (list[ndarray] | list[str]): List of prediction segmentation\r\n            maps or list of prediction result filenames.\r\n        gt_seg_maps (list[ndarray] | list[str]): list of ground truth\r\n            segmentation maps or list of label filenames.\r\n        num_classes (int): Number of categories.\r\n        ignore_index (int): Index that will be ignored in evaluation.\r\n        metrics (list[str] | str): Metrics to be evaluated, 'mIoU' and 'mDice'.\r\n        nan_to_num (int, optional): If specified, NaN values will be replaced\r\n            by the numbers defined by the user. Default: None.\r\n        label_map (dict): Mapping old labels to new labels. Default: dict().\r\n        reduce_zero_label (bool): Wether ignore zero label. 
Default: False.\r\n Returns:\r\n float: Overall accuracy on all images.\r\n ndarray: Per category accuracy, shape (num_classes, ).\r\n ndarray: Per category evaluation metrics, shape (num_classes, ).\r\n \"\"\"\r\n if isinstance(metrics, str):\r\n metrics = [metrics]\r\n allowed_metrics = ['mIoU', 'mDice', 'mFscore']\r\n if not set(metrics).issubset(set(allowed_metrics)):\r\n raise KeyError('metrics {} is not supported'.format(metrics))\r\n\r\n total_area_intersect, total_area_union, total_area_pred_label, \\\r\n total_area_label = total_intersect_and_union(\r\n results, gt_seg_maps, num_classes, ignore_index, label_map,\r\n reduce_zero_label)\r\n all_acc = total_area_intersect.sum() / total_area_label.sum()\r\n ret_metrics = OrderedDict({'aAcc': all_acc})\r\n for metric in metrics:\r\n if metric == 'mIoU':\r\n iou = total_area_intersect / total_area_union\r\n acc = total_area_intersect / total_area_label\r\n ret_metrics['IoU'] = iou\r\n ret_metrics['Acc'] = acc\r\n elif metric == 'mDice':\r\n dice = 2 * total_area_intersect / (\r\n total_area_pred_label + total_area_label)\r\n acc = total_area_intersect / total_area_label\r\n ret_metrics['Dice'] = dice\r\n ret_metrics['Acc'] = acc\r\n elif metric == 'mFscore':\r\n precision = total_area_intersect / total_area_pred_label\r\n recall = total_area_intersect / total_area_label\r\n f_value = torch.tensor(\r\n [f_score(x[0], x[1], beta) for x in zip(precision, recall)])\r\n ret_metrics['Fscore'] = f_value\r\n ret_metrics['Precision'] = precision\r\n ret_metrics['Recall'] = recall\r\n\r\n ret_metrics = {\r\n metric: value.numpy()\r\n for metric, value in ret_metrics.items()\r\n }\r\n if nan_to_num is not None:\r\n ret_metrics = OrderedDict({\r\n metric: np.nan_to_num(metric_value, nan=nan_to_num)\r\n for metric, metric_value in ret_metrics.items()\r\n })\r\n return ret_metrics\r\n" ]
[ [ "torch.nn.Conv2d", "torch.nn.Dropout2d", "torch.cat" ], [ "numpy.zeros", "numpy.ones" ], [ "torch.nn.Sequential", "torch.utils.checkpoint.checkpoint" ], [ "torch.matmul", "torch.nn.Conv2d", "torch.cat" ], [ "torch.randn" ], [ "numpy.load", "torch.from_numpy", "numpy.nan_to_num", "torch.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AMLab-Amsterdam/DataAugmentationInterventions
[ "78ce67174db487e9697b9a842e69818305bb41ef" ]
[ "synthetic_data/plot.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\n\nfeat_x2_0_inter = np.array([0.34256017,\t0.31460512,\t0.27144957,\t0.24856855,\t0.22719437])\nfeat_x2_0_inter_ste = np.array([0.004312400818,\t0.003773893416,\t0.002982698083,\t0.00233306855,\t0.002138502002])\nfeat_x2_1_inter = np.array([0.35977444,\t0.3302486,\t0.2858478,\t0.26235148,\t0.23809971])\nfeat_x2_1_inter_ste = np.array([0.004517972469,\t0.004006842971,\t0.003241877258,\t0.002627826631,\t0.002335606366])\nfeat_x2_2_inter = np.array([0.387529,\t0.35519564,\t0.30843478,\t0.28350833,\t0.2540046])\nfeat_x2_2_inter_ste = np.array([0.004769945741,\t0.00426604718,\t0.00358707428,\t0.003047236502,\t0.002536125779])\nfeat_x2_3_inter = np.array([0.4165158,\t0.38317177,\t0.33215567,\t0.3049404,\t0.27241272])\nfeat_x2_3_inter_ste = np.array([0.005206080675,\t0.004714588821,\t0.003986877203,\t0.003410176337,\t0.002820271552])\nfeat_x2_4_inter = np.array([0.44910964,\t0.41258878,\t0.35587674,\t0.3253371,\t0.29092044])\nfeat_x2_4_inter_ste = np.array([0.005780712962,\t0.005148547292,\t0.004204738736,\t0.003574062288,\t0.003055044413])\nERM = np.array([0.37845063, 0.37845063, 0.37845063, 0.37845063, 0.37845063])\nERM_ste = np.array([0.004980756044, 0.004980756044, 0.004980756044, 0.004980756044, 0.004980756044])\n\nERM_x2_only = np.array([0.22802237, 0.22802237, 0.22802237, 0.22802237, 0.22802237])\nERM_ste_x2_only = np.array([0.0021790754795074463, 0.0021790754795074463, 0.0021790754795074463, 0.0021790754795074463, 0.0021790754795074463])\n\nx = [1, 2, 3, 4, 5]\n\nfig, ax = plt.subplots()\nplt.plot(x, ERM, label='ERM')\n# plt.fill_between(x, ERM - ERM_ste, ERM + ERM_ste, alpha=0.1)\nmarkers, caps, bars = ax.errorbar(x, feat_x2_0_inter, yerr=feat_x2_0_inter_ste, label='augmentation on 0 dims of $h_y$')\n# plt.fill_between(x, feat_x2_0_inter - feat_x2_0_inter_ste, feat_x2_0_inter + feat_x2_0_inter_ste, alpha=0.1)\nmarkers, caps, bars = ax.errorbar(x, feat_x2_1_inter, yerr=feat_x2_1_inter_ste, label='augmentation on 1 dim of $h_y$')\n# plt.fill_between(x, feat_x2_1_inter - feat_x2_1_inter_ste, feat_x2_1_inter + feat_x2_1_inter_ste, alpha=0.1)\nmarkers, caps, bars = ax.errorbar(x, feat_x2_2_inter, yerr=feat_x2_2_inter_ste, label='augmentation on 2 dims of $h_y$')\n# plt.fill_between(x, feat_x2_2_inter - feat_x2_2_inter_ste, feat_x2_2_inter + feat_x2_2_inter_ste, alpha=0.1)\nmarkers, caps, bars = ax.errorbar(x, feat_x2_3_inter, yerr=feat_x2_3_inter_ste, label='augmentation on 3 dims of $h_y$')\n# plt.fill_between(x, feat_x2_3_inter - feat_x2_3_inter_ste, feat_x2_3_inter + feat_x2_3_inter_ste, alpha=0.1)\nmarkers, caps, bars = ax.errorbar(x, feat_x2_4_inter, yerr=feat_x2_4_inter_ste, label='augmentation on 4 dims of $h_y$')\nplt.plot(x, ERM_x2_only, label='ERM using only $h_y$')\n# plt.fill_between(x, feat_x2_4_inter - feat_x2_4_inter_ste, feat_x2_4_inter + feat_x2_4_inter_ste, alpha=0.1)\nplt.xticks(x) # Set locations and labels\nplt.legend()\nplt.ylabel('$MSE$')\nplt.xlabel('num of dims of $h_d$ w/ augmentation')\nplt.savefig('toy_data_comparison.png', bbox_inches='tight', dpi=300)" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "numpy.array", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Githubowy-Juliusz/SRCNN
[ "6306f7fd87809c189f7aadb48c050d4f520a269f" ]
[ "images_utils.py" ]
[ "import numpy as np\nimport cv2 as cv\nfrom psnr import psnr_numpy\n\n\ndef create_low_res_images(images: np.ndarray) -> np.ndarray:\n\timages_low_res = tuple(create_low_res_image(image) for image in images)\n\treturn np.array(images_low_res)\n\ndef create_low_res_image(image: np.ndarray) -> np.ndarray:\n\treturn resize_up(resize_down(image))\n\ndef calculate_psnr_with_bicubic(images: np.ndarray) -> float:\n\tlow_res_images = create_low_res_images(images) / 255.0\n\timages = images / 255.0\n\treturn psnr_numpy(low_res_images, images)\n\ndef add_salt_and_pepper(image: np.ndarray) -> np.ndarray:\n\tpepper = np.random.random(image.shape[:2]) > 0.01\n\tsalt = (np.random.random(image.shape[:2]) > 0.99) * 254 + 1\n\tpepper = np.stack((pepper, pepper, pepper), axis=2)\n\tsalt = np.stack((salt, salt, salt), axis=2)\n\timg = image * pepper\n\timg = img * salt\n\timg[img > 255] = 255\n\treturn img.astype(np.uint8)\n\ndef add_noise(image: np.ndarray) -> np.ndarray:\n\tnoise = np.random.random(image.shape) * 50 - 25\n\timg = image + noise\n\timg[img > 255] = 255\n\timg[img < 0] = 0\n\treturn img.astype(np.uint8)\n\ndef resize_down(image) -> np.ndarray:\n\treturn cv.resize(image, (image.shape[1] // 2, image.shape[0] // 2), interpolation=cv.INTER_CUBIC)\n\ndef resize_up(image) -> np.ndarray:\n\treturn cv.resize(image, (image.shape[1] * 2, image.shape[0] * 2), interpolation=cv.INTER_CUBIC)" ]
[ [ "numpy.array", "numpy.random.random", "numpy.stack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jbrockmendel/statsmodels
[ "61155fff1883ffb49d252ae22b2638f73b24ab21" ]
[ "statsmodels/tsa/arima_model.py" ]
[ "# Note: The information criteria add 1 to the number of parameters\n# whenever the model has an AR or MA term since, in principle,\n# the variance could be treated as a free parameter and restricted\n# This code does not allow this, but it adds consistency with other\n# packages such as gretl and X12-ARIMA\n\nfrom __future__ import absolute_import\nfrom statsmodels.compat.python import string_types, range, long\n# for 2to3 with extensions\n\nfrom datetime import datetime\n\nimport numpy as np\nfrom scipy import optimize\nfrom scipy.stats import t, norm\nfrom scipy.signal import lfilter\nfrom numpy import dot, log, zeros, pi\nfrom numpy.linalg import inv\n\nfrom statsmodels.tools.decorators import cache_readonly\nimport statsmodels.tsa.base.tsa_model as tsbase\nimport statsmodels.base.wrapper as wrap\nfrom statsmodels.regression.linear_model import yule_walker, OLS\nfrom statsmodels.tsa.tsatools import (lagmat, add_trend,\n _ar_transparams, _ar_invtransparams,\n _ma_transparams, _ma_invtransparams,\n unintegrate, unintegrate_levels)\nfrom statsmodels.tsa.vector_ar import util\nfrom statsmodels.tsa.ar_model import AR\nfrom statsmodels.tsa.arima_process import arma2ma\nfrom statsmodels.tools.numdiff import approx_hess_cs, approx_fprime_cs\nfrom statsmodels.tsa.kalmanf import KalmanFilter\n\n_armax_notes = r\"\"\"\n Notes\n -----\n If exogenous variables are given, then the model that is fit is\n\n .. math::\n\n \\phi(L)(y_t - X_t\\beta) = \\theta(L)\\epsilon_t\n\n where :math:`\\phi` and :math:`\\theta` are polynomials in the lag\n operator, :math:`L`. This is the regression model with ARMA errors,\n or ARMAX model. This specification is used, whether or not the model\n is fit using conditional sum of square or maximum-likelihood, using\n the `method` argument in\n :meth:`statsmodels.tsa.arima_model.%(Model)s.fit`. Therefore, for\n now, `css` and `mle` refer to estimation methods only. This may\n change for the case of the `css` model in future versions.\n\"\"\"\n\n_arma_params = \\\n\"\"\"endog : array-like\n The endogenous variable.\n order : iterable\n The (p,q) order of the model for the number of AR parameters,\n differences, and MA parameters to use.\n exog : array-like, optional\n An optional array of exogenous variables. This should *not* include a\n constant or trend. You can specify this in the `fit` method.\"\"\"\n\n_arma_model = \"Autoregressive Moving Average ARMA(p,q) Model\"\n\n_arima_model = \"Autoregressive Integrated Moving Average ARIMA(p,d,q) Model\"\n\n_arima_params = \\\n\"\"\"endog : array-like\n The endogenous variable.\n order : iterable\n The (p,d,q) order of the model for the number of AR parameters,\n differences, and MA parameters to use.\n exog : array-like, optional\n An optional array of exogenous variables. This should *not* include a\n constant or trend. You can specify this in the `fit` method.\"\"\"\n\n_predict_notes = \"\"\"\n Notes\n -----\n Use the results predict method instead.\n\"\"\"\n\n_results_notes = \"\"\"\n Notes\n -----\n It is recommended to use dates with the time-series models, as the\n below will probably make clear. However, if ARIMA is used without\n dates and/or `start` and `end` are given as indices, then these\n indices are in terms of the *original*, undifferenced series. Ie.,\n given some undifferenced observations::\n\n 1970Q1, 1\n 1970Q2, 1.5\n 1970Q3, 1.25\n 1970Q4, 2.25\n 1971Q1, 1.2\n 1971Q2, 4.1\n\n 1970Q1 is observation 0 in the original series. 
However, if we fit an\n        ARIMA(p,1,q) model then we lose this first observation through\n        differencing. Therefore, the first observation we can forecast (if\n        using exact MLE) is index 1. In the differenced series this is index\n        0, but we refer to it as 1 from the original series.\n\"\"\"\n\n_predict = \"\"\"\n        %(Model)s model in-sample and out-of-sample prediction\n\n        Parameters\n        ----------\n        %(params)s\n        start : int, str, or datetime\n            Zero-indexed observation number at which to start forecasting, ie.,\n            the first forecast is start. Can also be a date string to\n            parse or a datetime type.\n        end : int, str, or datetime\n            Zero-indexed observation number at which to end forecasting, ie.,\n            the first forecast is start. Can also be a date string to\n            parse or a datetime type. However, if the dates index does not\n            have a fixed frequency, end must be an integer index if you\n            want out of sample prediction.\n        exog : array-like, optional\n            If the model is an ARMAX and out-of-sample forecasting is\n            requested, exog must be given. Note that you'll need to pass\n            `k_ar` additional lags for any exogenous variables. E.g., if you\n            fit an ARMAX(2, q) model and want to predict 5 steps, you need 7\n            observations to do this.\n        dynamic : bool, optional\n            The `dynamic` keyword affects in-sample prediction. If dynamic\n            is False, then the in-sample lagged values are used for\n            prediction. If `dynamic` is True, then in-sample forecasts are\n            used in place of lagged dependent variables. The first forecasted\n            value is `start`.\n        %(extra_params)s\n\n        Returns\n        -------\n        %(returns)s\n        %(extra_section)s\n\"\"\"\n\n_predict_returns = \"\"\"predict : array\n        The predicted values.\n\n\"\"\"\n\n_arma_predict = _predict % {\"Model\" : \"ARMA\",\n                            \"params\" : \"\"\"params : array-like\n            The fitted parameters of the model.\"\"\",\n                            \"extra_params\" : \"\",\n                            \"returns\" : _predict_returns,\n                            \"extra_section\" : _predict_notes}\n\n_arma_results_predict = _predict % {\"Model\" : \"ARMA\", \"params\" : \"\",\n                                    \"extra_params\" : \"\",\n                                    \"returns\" : _predict_returns,\n                                    \"extra_section\" : _results_notes}\n_arima_extras = \"\"\"typ : str {'linear', 'levels'}\n\n            - 'linear' : Linear prediction in terms of the differenced\n              endogenous variables.\n            - 'levels' : Predict the levels of the original endogenous\n              variables.\\n\"\"\"\n\n_arima_predict = _predict % {\"Model\" : \"ARIMA\",\n                             \"params\" : \"\"\"params : array-like\n            The fitted parameters of the model.\"\"\",\n                             \"extra_params\" : _arima_extras,\n                             \"returns\" : _predict_returns,\n                             \"extra_section\" : _predict_notes}\n\n_arima_results_predict = _predict % {\"Model\" : \"ARIMA\",\n                                     \"params\" : \"\",\n                                     \"extra_params\" :_arima_extras,\n                                     \"returns\" : _predict_returns,\n                                     \"extra_section\" : _results_notes}\n\n_arima_plot_predict_example = \"\"\"        Examples\n        --------\n        >>> import statsmodels.api as sm\n        >>> import matplotlib.pyplot as plt\n        >>> import pandas as pd\n        >>>\n        >>> dta = sm.datasets.sunspots.load_pandas().data[['SUNACTIVITY']]\n        >>> dta.index = pd.DatetimeIndex(start='1700', end='2009', freq='A')\n        >>> res = sm.tsa.ARMA(dta, (3, 0)).fit()\n        >>> fig, ax = plt.subplots()\n        >>> ax = dta.loc['1950':].plot(ax=ax)\n        >>> fig = res.plot_predict('1990', '2012', dynamic=True, ax=ax,\n        ...                        plot_insample=False)\n        >>> plt.show()\n\n        .. plot:: plots/arma_predict_plot.py\n\"\"\"\n\n_plot_extras = \"\"\"alpha : float, optional\n            The confidence intervals for the forecasts are (1 - alpha)%\n        plot_insample : bool, optional\n            Whether to plot the in-sample series. 
Default is True.\n        ax : matplotlib.Axes, optional\n            Existing axes to plot with.\"\"\"\n\n_plot_predict = (\"\"\"\n        Plot forecasts\n                      \"\"\" + '\\n'.join(_predict.split('\\n')[2:])) % {\n                          \"params\" : \"\",\n                          \"extra_params\" : _plot_extras,\n                          \"returns\" : \"\"\"fig : matplotlib.Figure\n            The plotted Figure instance\"\"\",\n                          \"extra_section\" : ('\\n' + _arima_plot_predict_example +\n                                             '\\n' + _results_notes)\n                          }\n\n_arima_plot_predict = (\"\"\"\n        Plot forecasts\n                      \"\"\" + '\\n'.join(_predict.split('\\n')[2:])) % {\n                          \"params\" : \"\",\n                          \"extra_params\" : _plot_extras,\n                          \"returns\" : \"\"\"fig : matplotlib.Figure\n            The plotted Figure instance\"\"\",\n                          \"extra_section\" : ('\\n' + _arima_plot_predict_example +\n                                             '\\n' +\n                                             '\\n'.join(_results_notes.split('\\n')[:3]) +\n                                             (\"\"\"\n        This is hard-coded to only allow plotting of the forecasts in levels.\n\"\"\") +\n                                             '\\n'.join(_results_notes.split('\\n')[3:]))\n                          }\n\n\ndef cumsum_n(x, n):\n    for _ in range(n):\n        x = np.cumsum(x)\n\n    return x\n\n\ndef _check_arima_start(start, k_ar, k_diff, method, dynamic):\n    if start < 0:\n        raise ValueError(\"The start index %d of the original series \"\n                         \"has been differenced away\" % start)\n    elif (dynamic or 'mle' not in method) and start < k_ar:\n        raise ValueError(\"Start must be >= k_ar for conditional MLE \"\n                         \"or dynamic forecast. Got %d\" % start)\n\n\ndef _get_predict_out_of_sample(endog, p, q, k_trend, k_exog, start, errors,\n                               trendparam, exparams, arparams, maparams, steps,\n                               method, exog=None):\n    \"\"\"\n    Returns endog, resid, mu of appropriate length for out of sample\n    prediction.\n    \"\"\"\n    if q:\n        resid = np.zeros(q)\n        if start and 'mle' in method or (start == p and not start == 0):\n            resid[:q] = errors[start-q:start]\n        elif start:\n            resid[:q] = errors[start-q-p:start-p]\n        else:\n            resid[:q] = errors[-q:]\n    else:\n        resid = None\n\n    y = endog\n    if k_trend == 1:\n        # use expectation not constant\n        if k_exog > 0:\n            #TODO: technically should only hold for MLE not\n            # conditional model. 
See #274.\n            # ensure 2-d for conformability\n            if np.ndim(exog) == 1 and k_exog == 1:\n                # have a 1d series of observations -> 2d\n                exog = exog[:, None]\n            elif np.ndim(exog) == 1:\n                # should have a 1d row of exog -> 2d\n                if len(exog) != k_exog:\n                    raise ValueError(\"1d exog given and len(exog) != k_exog\")\n                exog = exog[None, :]\n            X = lagmat(np.dot(exog, exparams), p, original='in', trim='both')\n            mu = trendparam * (1 - arparams.sum())\n            # arparams were reversed in unpack for ease later\n            mu = mu + (np.r_[1, -arparams[::-1]] * X).sum(1)[:, None]\n        else:\n            mu = trendparam * (1 - arparams.sum())\n            mu = np.array([mu]*steps)\n    elif k_exog > 0:\n        X = np.dot(exog, exparams)\n        #NOTE: you shouldn't have to give in-sample exog!\n        X = lagmat(X, p, original='in', trim='both')\n        mu = (np.r_[1, -arparams[::-1]] * X).sum(1)[:, None]\n    else:\n        mu = np.zeros(steps)\n\n    endog = np.zeros(p + steps - 1)\n\n    if p and start:\n        endog[:p] = y[start-p:start]\n    elif p:\n        endog[:p] = y[-p:]\n\n    return endog, resid, mu\n\n\ndef _arma_predict_out_of_sample(params, steps, errors, p, q, k_trend, k_exog,\n                                endog, exog=None, start=0, method='mle'):\n    (trendparam, exparams,\n     arparams, maparams) = _unpack_params(params, (p, q), k_trend,\n                                          k_exog, reverse=True)\n    endog, resid, mu = _get_predict_out_of_sample(endog, p, q, k_trend, k_exog,\n                                                  start, errors, trendparam,\n                                                  exparams, arparams,\n                                                  maparams, steps, method,\n                                                  exog)\n\n    forecast = np.zeros(steps)\n    if steps == 1:\n        if q:\n            return mu[0] + np.dot(arparams, endog[:p]) + np.dot(maparams,\n                                                                resid[:q])\n        else:\n            return mu[0] + np.dot(arparams, endog[:p])\n\n    if q:\n        i = 0  # if q == 1\n    else:\n        i = -1\n\n    for i in range(min(q, steps - 1)):\n        fcast = (mu[i] + np.dot(arparams, endog[i:i + p]) +\n                 np.dot(maparams[:q - i], resid[i:i + q]))\n        forecast[i] = fcast\n        endog[i+p] = fcast\n\n    for i in range(i + 1, steps - 1):\n        fcast = mu[i] + np.dot(arparams, endog[i:i+p])\n        forecast[i] = fcast\n        endog[i+p] = fcast\n\n    #need to do one more without updating endog\n    forecast[steps - 1] = mu[steps - 1] + np.dot(arparams, endog[steps - 1:])\n    return forecast\n\n\ndef _arma_predict_in_sample(start, end, endog, resid, k_ar, method):\n    \"\"\"\n    Pre- and in-sample fitting for ARMA.\n    \"\"\"\n    if 'mle' in method:\n        fittedvalues = endog - resid  # get them all then trim\n    else:\n        fittedvalues = endog[k_ar:] - resid\n\n    fv_start = start\n    if 'mle' not in method:\n        fv_start -= k_ar  # start is in terms of endog index\n    fv_end = min(len(fittedvalues), end + 1)\n    return fittedvalues[fv_start:fv_end]\n\n\ndef _unpack_params(params, order, k_trend, k_exog, reverse=False):\n    p, q = order\n    k = k_trend + k_exog\n    maparams = params[k+p:]\n    arparams = params[k:k+p]\n    trend = params[:k_trend]\n    exparams = params[k_trend:k]\n    if reverse:\n        return trend, exparams, arparams[::-1], maparams[::-1]\n    return trend, exparams, arparams, maparams\n\n\ndef _unpack_order(order):\n    k_ar, k_ma, k = order\n    k_lags = max(k_ar, k_ma+1)\n    return k_ar, k_ma, order, k_lags\n\n\ndef _make_arma_names(data, k_trend, order, exog_names):\n    k_ar, k_ma = order\n    exog_names = exog_names or []\n    ar_lag_names = util.make_lag_names([data.ynames], k_ar, 0)\n    ar_lag_names = [''.join(('ar.', i)) for i in ar_lag_names]\n    ma_lag_names = util.make_lag_names([data.ynames], k_ma, 0)\n    ma_lag_names = [''.join(('ma.', i)) for i in ma_lag_names]\n    trend_name = util.make_lag_names('', 0, k_trend)\n\n    # ensure exog_names stays unchanged when the `fit` method\n    # is called multiple times.\n    if k_ma ==0 and k_ar ==0:\n        if len(exog_names) != 0:\n            return 
exog_names\n elif (exog_names[-k_ma:] == ma_lag_names ) and \\\n exog_names[-(k_ar+k_ma):-k_ma] == ar_lag_names and \\\n (not exog_names or not trend_name or trend_name[0] == exog_names[0]):\n return exog_names\n\n exog_names = trend_name + exog_names + ar_lag_names + ma_lag_names\n return exog_names\n\n\ndef _make_arma_exog(endog, exog, trend):\n k_trend = 1 # overwritten if no constant\n if exog is None and trend == 'c': # constant only\n exog = np.ones((len(endog), 1))\n elif exog is not None and trend == 'c': # constant plus exogenous\n exog = add_trend(exog, trend='c', prepend=True)\n elif exog is not None and trend == 'nc':\n # make sure it's not holding constant from last run\n if exog.var() == 0:\n exog = None\n k_trend = 0\n if trend == 'nc':\n k_trend = 0\n return k_trend, exog\n\n\ndef _check_estimable(nobs, n_params):\n if nobs <= n_params:\n raise ValueError(\"Insufficient degrees of freedom to estimate\")\n\n\nclass ARMA(tsbase.TimeSeriesModel):\n\n __doc__ = tsbase._tsa_doc % {\"model\" : _arma_model,\n \"params\" : _arma_params, \"extra_params\" : \"\",\n \"extra_sections\" : _armax_notes %\n {\"Model\" : \"ARMA\"}}\n\n def __init__(self, endog, order, exog=None, dates=None, freq=None,\n missing='none'):\n super(ARMA, self).__init__(endog, exog, dates, freq, missing=missing)\n # GH 2575\n _endog = endog if hasattr(endog, 'ndim') else np.asarray(endog)\n if (_endog.ndim == 2 and _endog.shape[1] != 1) or _endog.ndim > 2:\n raise ValueError('endog must be 1-d or 2-d with 1 column')\n exog = self.data.exog # get it after it's gone through processing\n _check_estimable(len(self.endog), sum(order))\n self.k_ar = k_ar = order[0]\n self.k_ma = k_ma = order[1]\n self.k_lags = max(k_ar, k_ma+1)\n if exog is not None:\n if exog.ndim == 1:\n exog = exog[:, None]\n k_exog = exog.shape[1] # number of exog. variables excl. const\n else:\n k_exog = 0\n self.k_exog = k_exog\n\n def _fit_start_params_hr(self, order, start_ar_lags=None):\n \"\"\"\n Get starting parameters for fit.\n\n Parameters\n ----------\n order : iterable\n (p,q,k) - AR lags, MA lags, and number of exogenous variables\n including the constant.\n start_ar_lags : int, optional\n If start_ar_lags is not None, rather than fitting an AR process\n according to best BIC, fits an AR process with a lag length equal\n to start_ar_lags.\n\n Returns\n -------\n start_params : array\n A first guess at the starting parameters.\n\n Notes\n -----\n If necessary, fits an AR process with the laglength start_ar_lags, or\n selected according to best BIC if start_ar_lags is None. Obtain the\n residuals. Then fit an ARMA(p,q) model via OLS using these residuals\n for a first approximation. Uses a separate OLS regression to find the\n coefficients of exogenous variables.\n\n References\n ----------\n Hannan, E.J. and Rissanen, J. 1982. \"Recursive estimation of mixed\n autoregressive-moving average order.\" `Biometrika`. 69.1.\n\n Durbin, J. 1960. \"The Fitting of Time-Series Models.\"\n `Review of the International Statistical Institute`. Vol. 28, No. 
3\n \"\"\"\n p, q, k = order\n start_params = zeros((p+q+k))\n # make copy of endog because overwritten\n endog = np.array(self.endog, np.float64)\n exog = self.exog\n if k != 0:\n ols_params = OLS(endog, exog).fit().params\n start_params[:k] = ols_params\n endog -= np.dot(exog, ols_params).squeeze()\n if q != 0:\n if p != 0:\n # make sure we don't run into small data problems in AR fit\n nobs = len(endog)\n if start_ar_lags is None:\n maxlag = int(round(12*(nobs/100.)**(1/4.)))\n if maxlag >= nobs:\n maxlag = nobs - 1\n armod = AR(endog).fit(ic='bic', trend='nc', maxlag=maxlag)\n else:\n if start_ar_lags >= nobs:\n start_ar_lags = nobs - 1\n armod = AR(endog).fit(trend='nc', maxlag=start_ar_lags)\n arcoefs_tmp = armod.params\n p_tmp = armod.k_ar\n # it's possible in small samples that optimal lag-order\n # doesn't leave enough obs. No consistent way to fix.\n if p_tmp + q >= len(endog):\n raise ValueError(\"Proper starting parameters cannot\"\n \" be found for this order with this \"\n \"number of observations. Use the \"\n \"start_params argument, or set \"\n \"start_ar_lags to an integer less than \"\n \"len(endog) - q.\")\n resid = endog[p_tmp:] - np.dot(lagmat(endog, p_tmp,\n trim='both'),\n arcoefs_tmp)\n if p < p_tmp + q:\n endog_start = p_tmp + q - p\n resid_start = 0\n else:\n endog_start = 0\n resid_start = p - p_tmp - q\n lag_endog = lagmat(endog, p, 'both')[endog_start:]\n lag_resid = lagmat(resid, q, 'both')[resid_start:]\n # stack ar lags and resids\n X = np.column_stack((lag_endog, lag_resid))\n coefs = OLS(endog[max(p_tmp + q, p):], X).fit().params\n start_params[k:k+p+q] = coefs\n else:\n start_params[k+p:k+p+q] = yule_walker(endog, order=q)[0]\n if q == 0 and p != 0:\n arcoefs = yule_walker(endog, order=p)[0]\n start_params[k:k+p] = arcoefs\n\n # check AR coefficients\n if p and not np.all(np.abs(np.roots(np.r_[1, -start_params[k:k + p]]\n )) < 1):\n raise ValueError(\"The computed initial AR coefficients are not \"\n \"stationary\\nYou should induce stationarity, \"\n \"choose a different model order, or you can\\n\"\n \"pass your own start_params.\")\n # check MA coefficients\n elif q and not np.all(np.abs(np.roots(np.r_[1, start_params[k + p:]]\n )) < 1):\n raise ValueError(\"The computed initial MA coefficients are not \"\n \"invertible\\nYou should induce invertibility, \"\n \"choose a different model order, or you can\\n\"\n \"pass your own start_params.\")\n\n # check MA coefficients\n return start_params\n\n def _fit_start_params(self, order, method, start_ar_lags=None):\n if method != 'css-mle': # use Hannan-Rissanen to get start params\n start_params = self._fit_start_params_hr(order, start_ar_lags)\n else: # use CSS to get start params\n func = lambda params: -self.loglike_css(params)\n #start_params = [.1]*(k_ar+k_ma+k_exog) # different one for k?\n start_params = self._fit_start_params_hr(order, start_ar_lags)\n if self.transparams:\n start_params = self._invtransparams(start_params)\n bounds = [(None,)*2]*sum(order)\n mlefit = optimize.fmin_l_bfgs_b(func, start_params,\n approx_grad=True, m=12,\n pgtol=1e-7, factr=1e3,\n bounds=bounds, iprint=-1)\n start_params = mlefit[0]\n if self.transparams:\n start_params = self._transparams(start_params)\n return start_params\n\n def score(self, params):\n \"\"\"\n Compute the score function at params.\n\n Notes\n -----\n This is a numerical approximation.\n \"\"\"\n return approx_fprime_cs(params, self.loglike, args=(False,))\n\n def hessian(self, params):\n \"\"\"\n Compute the Hessian at params,\n\n Notes\n -----\n 
This is a numerical approximation.\n \"\"\"\n return approx_hess_cs(params, self.loglike, args=(False,))\n\n def _transparams(self, params):\n \"\"\"\n Transforms params to induce stationarity/invertability.\n\n Reference\n ---------\n Jones(1980)\n \"\"\"\n k_ar, k_ma = self.k_ar, self.k_ma\n k = self.k_exog + self.k_trend\n newparams = np.zeros_like(params)\n\n # just copy exogenous parameters\n if k != 0:\n newparams[:k] = params[:k]\n\n # AR Coeffs\n if k_ar != 0:\n newparams[k:k+k_ar] = _ar_transparams(params[k:k+k_ar].copy())\n\n # MA Coeffs\n if k_ma != 0:\n newparams[k+k_ar:] = _ma_transparams(params[k+k_ar:].copy())\n return newparams\n\n def _invtransparams(self, start_params):\n \"\"\"\n Inverse of the Jones reparameterization\n \"\"\"\n k_ar, k_ma = self.k_ar, self.k_ma\n k = self.k_exog + self.k_trend\n newparams = start_params.copy()\n arcoefs = newparams[k:k+k_ar]\n macoefs = newparams[k+k_ar:]\n # AR coeffs\n if k_ar != 0:\n newparams[k:k+k_ar] = _ar_invtransparams(arcoefs)\n\n # MA coeffs\n if k_ma != 0:\n newparams[k+k_ar:k+k_ar+k_ma] = _ma_invtransparams(macoefs)\n return newparams\n\n def _get_prediction_index(self, start, end, dynamic, index=None):\n method = getattr(self, 'method', 'mle')\n k_ar = getattr(self, 'k_ar', 0)\n k_diff = getattr(self, 'k_diff', 0)\n\n if start is None:\n if 'mle' in method and not dynamic:\n start = 0\n else:\n start = k_ar\n start = self._index[start]\n\n start, end, out_of_sample, prediction_index = (\n super(ARMA, self)._get_prediction_index(start, end, index))\n\n # This replaces the _validate() call\n if 'mle' not in method and start < k_ar - k_diff:\n raise ValueError(\"Start must be >= k_ar for conditional \"\n \"MLE or dynamic forecast. Got %s\" % start)\n # Other validation\n _check_arima_start(start, k_ar, k_diff, method, dynamic)\n\n return start, end, out_of_sample, prediction_index\n\n def geterrors(self, params):\n \"\"\"\n Get the errors of the ARMA process.\n\n Parameters\n ----------\n params : array-like\n The fitted ARMA parameters\n order : array-like\n 3 item iterable, with the number of AR, MA, and exogenous\n parameters, including the trend\n \"\"\"\n\n # start, end, out_of_sample, prediction_index = (\n # self._get_prediction_index(start, end, index))\n params = np.asarray(params)\n k_ar, k_ma = self.k_ar, self.k_ma\n k = self.k_exog + self.k_trend\n\n method = getattr(self, 'method', 'mle')\n if 'mle' in method: # use KalmanFilter to get errors\n (y, k, nobs, k_ar, k_ma, k_lags, newparams, Z_mat, m, R_mat,\n T_mat, paramsdtype) = KalmanFilter._init_kalman_state(params,\n self)\n\n errors = KalmanFilter.geterrors(y, k, k_ar, k_ma, k_lags, nobs,\n Z_mat, m, R_mat, T_mat,\n paramsdtype)\n if isinstance(errors, tuple):\n errors = errors[0] # non-cython version returns a tuple\n else: # use scipy.signal.lfilter\n y = self.endog.copy()\n k = self.k_exog + self.k_trend\n if k > 0:\n y -= dot(self.exog, params[:k])\n\n k_ar = self.k_ar\n k_ma = self.k_ma\n\n (trendparams, exparams,\n arparams, maparams) = _unpack_params(params, (k_ar, k_ma),\n self.k_trend, self.k_exog,\n reverse=False)\n b, a = np.r_[1, -arparams], np.r_[1, maparams]\n zi = zeros((max(k_ar, k_ma)))\n for i in range(k_ar):\n zi[i] = sum(-b[:i+1][::-1]*y[:i+1])\n e = lfilter(b, a, y, zi=zi)\n errors = e[0][k_ar:]\n return errors.squeeze()\n\n def predict(self, params, start=None, end=None, exog=None, dynamic=False):\n method = getattr(self, 'method', 'mle') # don't assume fit\n #params = np.asarray(params)\n\n # will return an index of a date\n start, end, 
out_of_sample, _ = (\n self._get_prediction_index(start, end, dynamic))\n\n if out_of_sample and (exog is None and self.k_exog > 0):\n raise ValueError(\"You must provide exog for ARMAX\")\n\n endog = self.endog\n resid = self.geterrors(params)\n k_ar = self.k_ar\n\n if exog is not None:\n # Note: we ignore currently the index of exog if it is available\n exog = np.asarray(exog)\n if self.k_exog == 1 and exog.ndim == 1:\n exog = exog[:, None]\n\n if out_of_sample != 0 and self.k_exog > 0:\n # we need the last k_ar exog for the lag-polynomial\n if self.k_exog > 0 and k_ar > 0 and not dynamic:\n # need the last k_ar exog for the lag-polynomial\n exog = np.vstack((self.exog[-k_ar:, self.k_trend:], exog))\n\n if dynamic:\n if self.k_exog > 0:\n # need the last k_ar exog for the lag-polynomial\n exog_insample = self.exog[start - k_ar:, self.k_trend:]\n if exog is not None:\n exog = np.vstack((exog_insample, exog))\n else:\n exog = exog_insample\n #TODO: now that predict does dynamic in-sample it should\n # also return error estimates and confidence intervals\n # but how? len(endog) is not tot_obs\n out_of_sample += end - start + 1\n return _arma_predict_out_of_sample(params, out_of_sample, resid,\n k_ar, self.k_ma, self.k_trend,\n self.k_exog, endog, exog,\n start, method)\n\n predictedvalues = _arma_predict_in_sample(start, end, endog, resid,\n k_ar, method)\n if out_of_sample:\n forecastvalues = _arma_predict_out_of_sample(params, out_of_sample,\n resid, k_ar,\n self.k_ma,\n self.k_trend,\n self.k_exog, endog,\n exog, method=method)\n predictedvalues = np.r_[predictedvalues, forecastvalues]\n return predictedvalues\n predict.__doc__ = _arma_predict\n\n def loglike(self, params, set_sigma2=True):\n \"\"\"\n Compute the log-likelihood for ARMA(p,q) model\n\n Notes\n -----\n Likelihood used depends on the method set in fit\n \"\"\"\n method = self.method\n if method in ['mle', 'css-mle']:\n return self.loglike_kalman(params, set_sigma2)\n elif method == 'css':\n return self.loglike_css(params, set_sigma2)\n else:\n raise ValueError(\"Method %s not understood\" % method)\n\n def loglike_kalman(self, params, set_sigma2=True):\n \"\"\"\n Compute exact loglikelihood for ARMA(p,q) model by the Kalman Filter.\n \"\"\"\n return KalmanFilter.loglike(params, self, set_sigma2)\n\n def loglike_css(self, params, set_sigma2=True):\n \"\"\"\n Conditional Sum of Squares likelihood function.\n \"\"\"\n k_ar = self.k_ar\n k_ma = self.k_ma\n k = self.k_exog + self.k_trend\n y = self.endog.copy().astype(params.dtype)\n nobs = self.nobs\n # how to handle if empty?\n if self.transparams:\n newparams = self._transparams(params)\n else:\n newparams = params\n if k > 0:\n y -= dot(self.exog, newparams[:k])\n # the order of p determines how many zeros errors to set for lfilter\n b, a = np.r_[1, -newparams[k:k + k_ar]], np.r_[1, newparams[k + k_ar:]]\n zi = np.zeros((max(k_ar, k_ma)), dtype=params.dtype)\n for i in range(k_ar):\n zi[i] = sum(-b[:i + 1][::-1] * y[:i + 1])\n errors = lfilter(b, a, y, zi=zi)[0][k_ar:]\n\n ssr = np.dot(errors, errors)\n sigma2 = ssr/nobs\n if set_sigma2:\n self.sigma2 = sigma2\n llf = -nobs/2.*(log(2*pi) + log(sigma2)) - ssr/(2*sigma2)\n return llf\n\n def fit(self, start_params=None, trend='c', method=\"css-mle\",\n transparams=True, solver='lbfgs', maxiter=500, full_output=1,\n disp=5, callback=None, start_ar_lags=None, **kwargs):\n \"\"\"\n Fits ARMA(p,q) model using exact maximum likelihood via Kalman filter.\n\n Parameters\n ----------\n start_params : array-like, optional\n Starting 
parameters for ARMA(p,q). If None, the default is given\n by ARMA._fit_start_params. See there for more information.\n transparams : bool, optional\n Whether or not to transform the parameters to ensure stationarity.\n Uses the transformation suggested in Jones (1980). If False,\n no checking for stationarity or invertibility is done.\n method : str {'css-mle','mle','css'}\n This is the loglikelihood to maximize. If \"css-mle\", the\n conditional sum of squares likelihood is maximized and its values\n are used as starting values for the computation of the exact\n likelihood via the Kalman filter. If \"mle\", the exact likelihood\n is maximized via the Kalman Filter. If \"css\" the conditional sum\n of squares likelihood is maximized. All three methods use\n `start_params` as starting parameters. See above for more\n information.\n trend : str {'c','nc'}\n Whether to include a constant or not. 'c' includes constant,\n 'nc' no constant.\n solver : str or None, optional\n Solver to be used. The default is 'lbfgs' (limited memory\n Broyden-Fletcher-Goldfarb-Shanno). Other choices are 'bfgs',\n 'newton' (Newton-Raphson), 'nm' (Nelder-Mead), 'cg' -\n (conjugate gradient), 'ncg' (non-conjugate gradient), and\n 'powell'. By default, the limited memory BFGS uses m=12 to\n approximate the Hessian, projected gradient tolerance of 1e-8 and\n factr = 1e2. You can change these by using kwargs.\n maxiter : int, optional\n The maximum number of function evaluations. Default is 500.\n tol : float\n The convergence tolerance. Default is 1e-08.\n full_output : bool, optional\n If True, all output from solver will be available in\n the Results object's mle_retvals attribute. Output is dependent\n on the solver. See Notes for more information.\n disp : int, optional\n If True, convergence information is printed. For the default\n l_bfgs_b solver, disp controls the frequency of the output during\n the iterations. disp < 0 means no output in this case.\n callback : function, optional\n Called after each iteration as callback(xk) where xk is the current\n parameter vector.\n start_ar_lags : int, optional\n Parameter for fitting start_params. When fitting start_params,\n residuals are obtained from an AR fit, then an ARMA(p,q) model is\n fit via OLS using these residuals. If start_ar_lags is None, fit\n an AR process according to best BIC. 
If start_ar_lags is not None,\n fits an AR process with a lag length equal to start_ar_lags.\n See ARMA._fit_start_params_hr for more information.\n kwargs\n See Notes for keyword arguments that can be passed to fit.\n\n Returns\n -------\n statsmodels.tsa.arima_model.ARMAResults class\n\n See Also\n --------\n statsmodels.base.model.LikelihoodModel.fit : for more information\n on using the solvers.\n ARMAResults : results class returned by fit\n\n Notes\n -----\n If fit by 'mle', it is assumed for the Kalman Filter that the initial\n unknown state is zero, and that the initial variance is\n P = dot(inv(identity(m**2)-kron(T,T)),dot(R,R.T).ravel('F')).reshape(r,\n r, order = 'F')\n\n \"\"\"\n k_ar = self.k_ar\n k_ma = self.k_ma\n\n # enforce invertibility\n self.transparams = transparams\n\n endog, exog = self.endog, self.exog\n k_exog = self.k_exog\n self.nobs = len(endog) # this is overwritten if method is 'css'\n\n # (re)set trend and handle exogenous variables\n # always pass original exog\n k_trend, exog = _make_arma_exog(endog, self.exog, trend)\n\n # Check has something to estimate\n if k_ar == 0 and k_ma == 0 and k_trend == 0 and k_exog == 0:\n raise ValueError(\"Estimation requires the inclusion of least one \"\n \"AR term, MA term, a constant or an exogenous \"\n \"variable.\")\n\n # check again now that we know the trend\n _check_estimable(len(endog), k_ar + k_ma + k_exog + k_trend)\n\n self.k_trend = k_trend\n self.exog = exog # overwrites original exog from __init__\n\n # (re)set names for this model\n self.exog_names = _make_arma_names(self.data, k_trend, (k_ar, k_ma),\n self.exog_names)\n k = k_trend + k_exog\n\n # choose objective function\n if k_ma == 0 and k_ar == 0:\n method = \"css\" # Always CSS when no AR or MA terms\n\n self.method = method = method.lower()\n\n # adjust nobs for css\n if method == 'css':\n self.nobs = len(self.endog) - k_ar\n\n if start_params is not None:\n start_params = np.asarray(start_params)\n\n else: # estimate starting parameters\n start_params = self._fit_start_params((k_ar, k_ma, k), method,\n start_ar_lags)\n\n if transparams: # transform initial parameters to ensure invertibility\n start_params = self._invtransparams(start_params)\n\n if solver == 'lbfgs':\n kwargs.setdefault('pgtol', 1e-8)\n kwargs.setdefault('factr', 1e2)\n kwargs.setdefault('m', 12)\n kwargs.setdefault('approx_grad', True)\n mlefit = super(ARMA, self).fit(start_params, method=solver,\n maxiter=maxiter,\n full_output=full_output, disp=disp,\n callback=callback, **kwargs)\n params = mlefit.params\n\n if transparams: # transform parameters back\n params = self._transparams(params)\n\n self.transparams = False # so methods don't expect transf.\n\n normalized_cov_params = None # TODO: fix this\n armafit = ARMAResults(self, params, normalized_cov_params)\n armafit.mle_retvals = mlefit.mle_retvals\n armafit.mle_settings = mlefit.mle_settings\n return ARMAResultsWrapper(armafit)\n\n # base class of \"from_formula\" is \"class Model(object)\"\n @classmethod\n def from_formula(cls, formula, data, subset=None, drop_cols=None, *args, **kwargs):\n raise NotImplementedError(\"from_formula is not supported\"\n \" for ARMA models.\")\n\n\n#NOTE: the length of endog changes when we give a difference to fit\n#so model methods are not the same on unfit models as fit ones\n#starting to think that order of model should be put in instantiation...\nclass ARIMA(ARMA):\n __doc__ = tsbase._tsa_doc % {\"model\" : _arima_model,\n \"params\" : _arima_params, \"extra_params\" : \"\",\n 
\"extra_sections\" : _armax_notes %\n {\"Model\" : \"ARIMA\"}}\n\n def __new__(cls, endog, order, exog=None, dates=None, freq=None,\n missing='none'):\n p, d, q = order\n if d == 0: # then we just use an ARMA model\n return ARMA(endog, (p, q), exog, dates, freq, missing)\n else:\n mod = super(ARIMA, cls).__new__(cls)\n mod.__init__(endog, order, exog, dates, freq, missing)\n return mod\n\n def __getnewargs__(self):\n # using same defaults as in __init__\n dates = getattr(self, 'dates', None)\n freq = getattr(self, 'freq', None)\n missing = getattr(self, 'missing', 'none')\n return ((self.endog),\n (self.k_lags, self.k_diff, self.k_ma),\n self.exog, dates, freq, missing)\n\n def __init__(self, endog, order, exog=None, dates=None, freq=None,\n missing='none'):\n p, d, q = order\n if d > 2:\n #NOTE: to make more general, need to address the d == 2 stuff\n # in the predict method\n raise ValueError(\"d > 2 is not supported\")\n super(ARIMA, self).__init__(endog, (p, q), exog, dates, freq, missing)\n self.k_diff = d\n self._first_unintegrate = unintegrate_levels(self.endog[:d], d)\n self.endog = np.diff(self.endog, n=d)\n #NOTE: will check in ARMA but check again since differenced now\n _check_estimable(len(self.endog), p+q)\n if exog is not None:\n self.exog = self.exog[d:]\n if d == 1:\n self.data.ynames = 'D.' + self.endog_names\n else:\n self.data.ynames = 'D{0:d}.'.format(d) + self.endog_names\n # what about exog, should we difference it automatically before\n # super call?\n\n # Reset index\n orig_length = len(self._index)\n new_length = self.endog.shape[0]\n if self.data.row_labels is not None:\n self.data._cache['row_labels'] = (\n self.data.row_labels[orig_length - new_length:])\n if self._index is not None:\n if self._index_generated:\n self._index = self._index[:-(orig_length - new_length)]\n else:\n self._index = self._index[orig_length - new_length:]\n\n def _get_prediction_index(self, start, end, dynamic, index=None):\n method = getattr(self, 'method', 'mle')\n k_ar = getattr(self, 'k_ar', 0)\n k_diff = getattr(self, 'k_diff', 0)\n if start is None:\n if 'mle' in method and not dynamic:\n start = 0\n else:\n start = k_ar\n start = self._index[start]\n elif isinstance(start, (int, long, np.integer)):\n start -= k_diff\n if start < 0:\n raise ValueError('The start index %d of the original series '\n ' has been differenced away' % start)\n if isinstance(end, (int, long, np.integer)):\n end -= k_diff\n\n start, end, out_of_sample, prediction_index = (\n super(ARIMA, self)._get_prediction_index(start, end, index))\n\n # From _get_predict_end\n if 'mle' not in self.method and not dynamic:\n end -= k_ar\n\n # This replaces the _validate() call\n if 'mle' not in method and start < k_ar - k_diff:\n raise ValueError(\"Start must be >= k_ar for conditional \"\n \"MLE or dynamic forecast. Got %s\" % start)\n # Other validation\n _check_arima_start(start, k_ar, k_diff, method, dynamic)\n\n return start, end, out_of_sample, prediction_index\n\n def fit(self, start_params=None, trend='c', method=\"css-mle\",\n transparams=True, solver='lbfgs', maxiter=500, full_output=1,\n disp=5, callback=None, start_ar_lags=None, **kwargs):\n \"\"\"\n Fits ARIMA(p,d,q) model by exact maximum likelihood via Kalman filter.\n\n Parameters\n ----------\n start_params : array-like, optional\n Starting parameters for ARMA(p,q). If None, the default is given\n by ARMA._fit_start_params. 
See there for more information.\n transparams : bool, optional\n Whehter or not to transform the parameters to ensure stationarity.\n Uses the transformation suggested in Jones (1980). If False,\n no checking for stationarity or invertibility is done.\n method : str {'css-mle','mle','css'}\n This is the loglikelihood to maximize. If \"css-mle\", the\n conditional sum of squares likelihood is maximized and its values\n are used as starting values for the computation of the exact\n likelihood via the Kalman filter. If \"mle\", the exact likelihood\n is maximized via the Kalman Filter. If \"css\" the conditional sum\n of squares likelihood is maximized. All three methods use\n `start_params` as starting parameters. See above for more\n information.\n trend : str {'c','nc'}\n Whether to include a constant or not. 'c' includes constant,\n 'nc' no constant.\n solver : str or None, optional\n Solver to be used. The default is 'lbfgs' (limited memory\n Broyden-Fletcher-Goldfarb-Shanno). Other choices are 'bfgs',\n 'newton' (Newton-Raphson), 'nm' (Nelder-Mead), 'cg' -\n (conjugate gradient), 'ncg' (non-conjugate gradient), and\n 'powell'. By default, the limited memory BFGS uses m=12 to\n approximate the Hessian, projected gradient tolerance of 1e-8 and\n factr = 1e2. You can change these by using kwargs.\n maxiter : int, optional\n The maximum number of function evaluations. Default is 500.\n tol : float\n The convergence tolerance. Default is 1e-08.\n full_output : bool, optional\n If True, all output from solver will be available in\n the Results object's mle_retvals attribute. Output is dependent\n on the solver. See Notes for more information.\n disp : int, optional\n If True, convergence information is printed. For the default\n l_bfgs_b solver, disp controls the frequency of the output during\n the iterations. disp < 0 means no output in this case.\n callback : function, optional\n Called after each iteration as callback(xk) where xk is the current\n parameter vector.\n start_ar_lags : int, optional\n Parameter for fitting start_params. When fitting start_params,\n residuals are obtained from an AR fit, then an ARMA(p,q) model is\n fit via OLS using these residuals. If start_ar_lags is None, fit\n an AR process according to best BIC. 
If start_ar_lags is not None,\n fits an AR process with a lag length equal to start_ar_lags.\n See ARMA._fit_start_params_hr for more information.\n kwargs\n See Notes for keyword arguments that can be passed to fit.\n\n Returns\n -------\n `statsmodels.tsa.arima.ARIMAResults` class\n\n See Also\n --------\n statsmodels.base.model.LikelihoodModel.fit : for more information\n on using the solvers.\n ARIMAResults : results class returned by fit\n\n Notes\n -----\n If fit by 'mle', it is assumed for the Kalman Filter that the initial\n unknown state is zero, and that the initial variance is\n P = dot(inv(identity(m**2)-kron(T,T)),dot(R,R.T).ravel('F')).reshape(r,\n r, order = 'F')\n\n \"\"\"\n mlefit = super(ARIMA, self).fit(start_params, trend,\n method, transparams, solver,\n maxiter, full_output, disp,\n callback, start_ar_lags, **kwargs)\n normalized_cov_params = None # TODO: fix this?\n arima_fit = ARIMAResults(self, mlefit._results.params,\n normalized_cov_params)\n arima_fit.k_diff = self.k_diff\n\n arima_fit.mle_retvals = mlefit.mle_retvals\n arima_fit.mle_settings = mlefit.mle_settings\n\n return ARIMAResultsWrapper(arima_fit)\n\n def predict(self, params, start=None, end=None, exog=None, typ='linear',\n dynamic=False):\n # go ahead and convert to an index for easier checking\n if isinstance(start, (string_types, datetime)):\n # start = _index_date(start, self.data.dates)\n start, _, _ = self._get_index_label_loc(start)\n if isinstance(start, slice):\n start = start.start\n # Adjustment since _index was already changed to fit the\n # differenced endog.\n start += self.k_diff\n if typ == 'linear':\n if not dynamic or (start != self.k_ar + self.k_diff and\n start is not None):\n return super(ARIMA, self).predict(params, start, end, exog,\n dynamic)\n else:\n # need to assume pre-sample residuals are zero\n # do this by a hack\n q = self.k_ma\n self.k_ma = 0\n predictedvalues = super(ARIMA, self).predict(params, start,\n end, exog,\n dynamic)\n self.k_ma = q\n return predictedvalues\n elif typ == 'levels':\n endog = self.data.endog\n if not dynamic:\n predict = super(ARIMA, self).predict(params, start, end, exog,\n dynamic)\n\n start, end, out_of_sample, _ = (\n self._get_prediction_index(start, end, dynamic))\n\n d = self.k_diff\n if 'mle' in self.method:\n start += d - 1 # for case where d == 2\n end += d - 1\n # add each predicted diff to lagged endog\n if out_of_sample:\n fv = predict[:-out_of_sample] + endog[start:end+1]\n if d == 2: #TODO: make a general solution to this\n fv += np.diff(endog[start - 1:end + 1])\n levels = unintegrate_levels(endog[-d:], d)\n fv = np.r_[fv,\n unintegrate(predict[-out_of_sample:],\n levels)[d:]]\n else:\n fv = predict + endog[start:end + 1]\n if d == 2:\n fv += np.diff(endog[start - 1:end + 1])\n else:\n k_ar = self.k_ar\n if out_of_sample:\n fv = (predict[:-out_of_sample] +\n endog[max(start, self.k_ar-1):end+k_ar+1])\n if d == 2:\n fv += np.diff(endog[start - 1:end + 1])\n levels = unintegrate_levels(endog[-d:], d)\n fv = np.r_[fv,\n unintegrate(predict[-out_of_sample:],\n levels)[d:]]\n else:\n fv = predict + endog[max(start, k_ar):end+k_ar+1]\n if d == 2:\n fv += np.diff(endog[start - 1:end + 1])\n else:\n #IFF we need to use pre-sample values assume pre-sample\n # residuals are zero, do this by a hack\n if start == self.k_ar + self.k_diff or start is None:\n # do the first k_diff+1 separately\n p = self.k_ar\n q = self.k_ma\n k_exog = self.k_exog\n k_trend = self.k_trend\n k_diff = self.k_diff\n (trendparam, exparams,\n arparams, maparams) = 
_unpack_params(params, (p, q),\n k_trend,\n k_exog,\n reverse=True)\n # this is the hack\n self.k_ma = 0\n\n predict = super(ARIMA, self).predict(params, start, end,\n exog, dynamic)\n if not start:\n start, _, _, _ = self._get_prediction_index(\n start, end, dynamic)\n start += k_diff\n self.k_ma = q\n return endog[start-1] + np.cumsum(predict)\n else:\n predict = super(ARIMA, self).predict(params, start, end,\n exog, dynamic)\n return endog[start-1] + np.cumsum(predict)\n return fv\n\n else: # pragma : no cover\n raise ValueError(\"typ %s not understood\" % typ)\n\n predict.__doc__ = _arima_predict\n\n\nclass ARMAResults(tsbase.TimeSeriesModelResults):\n \"\"\"\n Class to hold results from fitting an ARMA model.\n\n Parameters\n ----------\n model : ARMA instance\n The fitted model instance\n params : array\n Fitted parameters\n normalized_cov_params : array, optional\n The normalized variance covariance matrix\n scale : float, optional\n Optional argument to scale the variance covariance matrix.\n\n Returns\n -------\n **Attributes**\n\n aic : float\n Akaike Information Criterion\n :math:`-2*llf+2* df_model`\n where `df_model` includes all AR parameters, MA parameters, constant\n terms parameters on constant terms and the variance.\n arparams : array\n The parameters associated with the AR coefficients in the model.\n arroots : array\n The roots of the AR coefficients are the solution to\n (1 - arparams[0]*z - arparams[1]*z**2 -...- arparams[p-1]*z**k_ar) = 0\n Stability requires that the roots in modulus lie outside the unit\n circle.\n bic : float\n Bayes Information Criterion\n -2*llf + log(nobs)*df_model\n Where if the model is fit using conditional sum of squares, the\n number of observations `nobs` does not include the `p` pre-sample\n observations.\n bse : array\n The standard errors of the parameters. These are computed using the\n numerical Hessian.\n df_model : array\n The model degrees of freedom = `k_exog` + `k_trend` + `k_ar` + `k_ma`\n df_resid : array\n The residual degrees of freedom = `nobs` - `df_model`\n fittedvalues : array\n The predicted values of the model.\n hqic : float\n Hannan-Quinn Information Criterion\n -2*llf + 2*(`df_model`)*log(log(nobs))\n Like `bic` if the model is fit using conditional sum of squares then\n the `k_ar` pre-sample observations are not counted in `nobs`.\n k_ar : int\n The number of AR coefficients in the model.\n k_exog : int\n The number of exogenous variables included in the model. Does not\n include the constant.\n k_ma : int\n The number of MA coefficients.\n k_trend : int\n This is 0 for no constant or 1 if a constant is included.\n llf : float\n The value of the log-likelihood function evaluated at `params`.\n maparams : array\n The value of the moving average coefficients.\n maroots : array\n The roots of the MA coefficients are the solution to\n (1 + maparams[0]*z + maparams[1]*z**2 + ... + maparams[q-1]*z**q) = 0\n Stability requires that the roots in modules lie outside the unit\n circle.\n model : ARMA instance\n A reference to the model that was fit.\n nobs : float\n The number of observations used to fit the model. If the model is fit\n using exact maximum likelihood this is equal to the total number of\n observations, `n_totobs`. If the model is fit using conditional\n maximum likelihood this is equal to `n_totobs` - `k_ar`.\n n_totobs : float\n The total number of observations for `endog`. 
This includes all\n observations, even pre-sample values if the model is fit using `css`.\n params : array\n The parameters of the model. The order of variables is the trend\n coefficients and the `k_exog` exognous coefficients, then the\n `k_ar` AR coefficients, and finally the `k_ma` MA coefficients.\n pvalues : array\n The p-values associated with the t-values of the coefficients. Note\n that the coefficients are assumed to have a Student's T distribution.\n resid : array\n The model residuals. If the model is fit using 'mle' then the\n residuals are created via the Kalman Filter. If the model is fit\n using 'css' then the residuals are obtained via `scipy.signal.lfilter`\n adjusted such that the first `k_ma` residuals are zero. These zero\n residuals are not returned.\n scale : float\n This is currently set to 1.0 and not used by the model or its results.\n sigma2 : float\n The variance of the residuals. If the model is fit by 'css',\n sigma2 = ssr/nobs, where ssr is the sum of squared residuals. If\n the model is fit by 'mle', then sigma2 = 1/nobs * sum(v**2 / F)\n where v is the one-step forecast error and F is the forecast error\n variance. See `nobs` for the difference in definitions depending on the\n fit.\n \"\"\"\n _cache = {}\n\n #TODO: use this for docstring when we fix nobs issue\n\n def __init__(self, model, params, normalized_cov_params=None, scale=1.):\n super(ARMAResults, self).__init__(model, params, normalized_cov_params,\n scale)\n self.sigma2 = model.sigma2\n nobs = model.nobs\n self.nobs = nobs\n k_exog = model.k_exog\n self.k_exog = k_exog\n k_trend = model.k_trend\n self.k_trend = k_trend\n k_ar = model.k_ar\n self.k_ar = k_ar\n self.n_totobs = len(model.endog)\n k_ma = model.k_ma\n self.k_ma = k_ma\n df_model = k_exog + k_trend + k_ar + k_ma\n self._ic_df_model = df_model + 1\n self.df_model = df_model\n self.df_resid = self.nobs - df_model\n self._cache = {}\n\n @cache_readonly\n def arroots(self):\n return np.roots(np.r_[1, -self.arparams])**-1\n\n @cache_readonly\n def maroots(self):\n return np.roots(np.r_[1, self.maparams])**-1\n\n @cache_readonly\n def arfreq(self):\n r\"\"\"\n Returns the frequency of the AR roots.\n\n This is the solution, x, to z = abs(z)*exp(2j*np.pi*x) where z are the\n roots.\n \"\"\"\n z = self.arroots\n return np.arctan2(z.imag, z.real) / (2*pi)\n\n @cache_readonly\n def mafreq(self):\n r\"\"\"\n Returns the frequency of the MA roots.\n\n This is the solution, x, to z = abs(z)*exp(2j*np.pi*x) where z are the\n roots.\n \"\"\"\n z = self.maroots\n return np.arctan2(z.imag, z.real) / (2*pi)\n\n @cache_readonly\n def arparams(self):\n k = self.k_exog + self.k_trend\n return self.params[k:k+self.k_ar]\n\n @cache_readonly\n def maparams(self):\n k = self.k_exog + self.k_trend\n k_ar = self.k_ar\n return self.params[k+k_ar:]\n\n @cache_readonly\n def llf(self):\n return self.model.loglike(self.params)\n\n @cache_readonly\n def bse(self):\n params = self.params\n hess = self.model.hessian(params)\n if len(params) == 1: # can't take an inverse, ensure 1d\n return np.sqrt(-1./hess[0])\n return np.sqrt(np.diag(-inv(hess)))\n\n def cov_params(self): # add scale argument?\n params = self.params\n hess = self.model.hessian(params)\n return -inv(hess)\n\n @cache_readonly\n def aic(self):\n return -2 * self.llf + 2 * self._ic_df_model\n\n @cache_readonly\n def bic(self):\n nobs = self.nobs\n return -2 * self.llf + np.log(nobs) * self._ic_df_model\n\n @cache_readonly\n def hqic(self):\n nobs = self.nobs\n return -2 * self.llf + 2 * 
np.log(np.log(nobs)) * self._ic_df_model\n\n @cache_readonly\n def fittedvalues(self):\n model = self.model\n endog = model.endog.copy()\n k_ar = self.k_ar\n exog = model.exog # this is a copy\n if exog is not None:\n if model.method == \"css\" and k_ar > 0:\n exog = exog[k_ar:]\n if model.method == \"css\" and k_ar > 0:\n endog = endog[k_ar:]\n fv = endog - self.resid\n # add deterministic part back in\n #k = self.k_exog + self.k_trend\n #TODO: this needs to be commented out for MLE with constant\n #if k != 0:\n # fv += dot(exog, self.params[:k])\n return fv\n\n @cache_readonly\n def resid(self):\n return self.model.geterrors(self.params)\n\n @cache_readonly\n def pvalues(self):\n #TODO: same for conditional and unconditional?\n df_resid = self.df_resid\n return t.sf(np.abs(self.tvalues), df_resid) * 2\n\n def predict(self, start=None, end=None, exog=None, dynamic=False):\n return self.model.predict(self.params, start, end, exog, dynamic)\n predict.__doc__ = _arma_results_predict\n\n def _forecast_error(self, steps):\n sigma2 = self.sigma2\n ma_rep = arma2ma(np.r_[1, -self.arparams],\n np.r_[1, self.maparams], lags=steps)\n\n fcasterr = np.sqrt(sigma2 * np.cumsum(ma_rep**2))\n return fcasterr\n\n def _forecast_conf_int(self, forecast, fcasterr, alpha):\n const = norm.ppf(1 - alpha / 2.)\n conf_int = np.c_[forecast - const * fcasterr,\n forecast + const * fcasterr]\n\n return conf_int\n\n def forecast(self, steps=1, exog=None, alpha=.05):\n \"\"\"\n Out-of-sample forecasts\n\n Parameters\n ----------\n steps : int\n The number of out of sample forecasts from the end of the\n sample.\n exog : array\n If the model is an ARMAX, you must provide out of sample\n values for the exogenous variables. This should not include\n the constant.\n alpha : float\n The confidence intervals for the forecasts are (1 - alpha) %\n\n Returns\n -------\n forecast : array\n Array of out of sample forecasts\n stderr : array\n Array of the standard error of the forecasts.\n conf_int : array\n 2d array of the confidence interval for the forecast\n \"\"\"\n if exog is not None:\n #TODO: make a convenience function for this. 
we're using the\n # pattern elsewhere in the codebase\n exog = np.asarray(exog)\n if self.k_exog == 1 and exog.ndim == 1:\n exog = exog[:, None]\n elif exog.ndim == 1:\n if len(exog) != self.k_exog:\n raise ValueError(\"1d exog given and len(exog) != k_exog\")\n exog = exog[None, :]\n if exog.shape[0] != steps:\n raise ValueError(\"new exog needed for each step\")\n if self.k_exog != exog.shape[1]:\n raise ValueError('exog must contain the same number of '\n 'variables as in the estimated model.')\n # prepend in-sample exog observations\n if self.k_ar > 0:\n exog = np.vstack((self.model.exog[-self.k_ar:, self.k_trend:],\n exog))\n else:\n if self.k_exog:\n raise ValueError('Forecast values for exog are required when '\n 'the model contains exogenous regressors.')\n\n\n\n forecast = _arma_predict_out_of_sample(self.params,\n steps, self.resid, self.k_ar,\n self.k_ma, self.k_trend,\n self.k_exog, self.model.endog,\n exog, method=self.model.method)\n\n # compute the standard errors\n fcasterr = self._forecast_error(steps)\n conf_int = self._forecast_conf_int(forecast, fcasterr, alpha)\n\n return forecast, fcasterr, conf_int\n\n def summary(self, alpha=.05):\n \"\"\"Summarize the Model\n\n Parameters\n ----------\n alpha : float, optional\n Significance level for the confidence intervals.\n\n Returns\n -------\n smry : Summary instance\n This holds the summary table and text, which can be printed or\n converted to various output formats.\n\n See Also\n --------\n statsmodels.iolib.summary.Summary\n \"\"\"\n from statsmodels.iolib.summary import Summary\n model = self.model\n title = model.__class__.__name__ + ' Model Results'\n method = model.method\n # get sample TODO: make better sample machinery for estimation\n k_diff = getattr(self, 'k_diff', 0)\n if 'mle' in method:\n start = k_diff\n else:\n start = k_diff + self.k_ar\n if self.data.dates is not None:\n dates = self.data.dates\n sample = [dates[start].strftime('%m-%d-%Y')]\n sample += ['- ' + dates[-1].strftime('%m-%d-%Y')]\n else:\n sample = str(start) + ' - ' + str(len(self.data.orig_endog))\n\n k_ar, k_ma = self.k_ar, self.k_ma\n if not k_diff:\n order = str((k_ar, k_ma))\n else:\n order = str((k_ar, k_diff, k_ma))\n top_left = [('Dep. Variable:', None),\n ('Model:', [model.__class__.__name__ + order]),\n ('Method:', [method]),\n ('Date:', None),\n ('Time:', None),\n ('Sample:', [sample[0]]),\n ('', [sample[1]])\n ]\n\n top_right = [\n ('No. Observations:', [str(len(self.model.endog))]),\n ('Log Likelihood', [\"%#5.3f\" % self.llf]),\n ('S.D. 
of innovations', [\"%#5.3f\" % self.sigma2**.5]),\n ('AIC', [\"%#5.3f\" % self.aic]),\n ('BIC', [\"%#5.3f\" % self.bic]),\n ('HQIC', [\"%#5.3f\" % self.hqic])]\n\n smry = Summary()\n smry.add_table_2cols(self, gleft=top_left, gright=top_right,\n title=title)\n smry.add_table_params(self, alpha=alpha, use_t=False)\n\n # Make the roots table\n from statsmodels.iolib.table import SimpleTable\n\n if k_ma and k_ar:\n arstubs = [\"AR.%d\" % i for i in range(1, k_ar + 1)]\n mastubs = [\"MA.%d\" % i for i in range(1, k_ma + 1)]\n stubs = arstubs + mastubs\n roots = np.r_[self.arroots, self.maroots]\n freq = np.r_[self.arfreq, self.mafreq]\n elif k_ma:\n mastubs = [\"MA.%d\" % i for i in range(1, k_ma + 1)]\n stubs = mastubs\n roots = self.maroots\n freq = self.mafreq\n elif k_ar:\n arstubs = [\"AR.%d\" % i for i in range(1, k_ar + 1)]\n stubs = arstubs\n roots = self.arroots\n freq = self.arfreq\n else: # 0,0 model\n stubs = []\n if len(stubs): # not 0, 0\n modulus = np.abs(roots)\n data = np.column_stack((roots.real, roots.imag, modulus, freq))\n roots_table = SimpleTable([('%17.4f' % row[0],\n '%+17.4fj' % row[1],\n '%17.4f' % row[2],\n '%17.4f' % row[3]) for row in data],\n headers=[' Real',\n ' Imaginary',\n ' Modulus',\n ' Frequency'],\n title=\"Roots\",\n stubs=stubs)\n\n smry.tables.append(roots_table)\n return smry\n\n def summary2(self, title=None, alpha=.05, float_format=\"%.4f\"):\n \"\"\"Experimental summary function for ARIMA Results\n\n Parameters\n ----------\n title : string, optional\n Title for the top table. If not None, then this replaces the\n default title\n alpha : float\n significance level for the confidence intervals\n float_format: string\n print format for floats in parameters summary\n\n Returns\n -------\n smry : Summary instance\n This holds the summary table and text, which can be printed or\n converted to various output formats.\n\n See Also\n --------\n statsmodels.iolib.summary2.Summary : class to hold summary\n results\n\n \"\"\"\n from pandas import DataFrame\n # get sample TODO: make better sample machinery for estimation\n k_diff = getattr(self, 'k_diff', 0)\n if 'mle' in self.model.method:\n start = k_diff\n else:\n start = k_diff + self.k_ar\n if self.data.dates is not None:\n dates = self.data.dates\n sample = [dates[start].strftime('%m-%d-%Y')]\n sample += [dates[-1].strftime('%m-%d-%Y')]\n else:\n sample = str(start) + ' - ' + str(len(self.data.orig_endog))\n\n k_ar, k_ma = self.k_ar, self.k_ma\n\n # Roots table\n if k_ma and k_ar:\n arstubs = [\"AR.%d\" % i for i in range(1, k_ar + 1)]\n mastubs = [\"MA.%d\" % i for i in range(1, k_ma + 1)]\n stubs = arstubs + mastubs\n roots = np.r_[self.arroots, self.maroots]\n freq = np.r_[self.arfreq, self.mafreq]\n elif k_ma:\n mastubs = [\"MA.%d\" % i for i in range(1, k_ma + 1)]\n stubs = mastubs\n roots = self.maroots\n freq = self.mafreq\n elif k_ar:\n arstubs = [\"AR.%d\" % i for i in range(1, k_ar + 1)]\n stubs = arstubs\n roots = self.arroots\n freq = self.arfreq\n else: # 0, 0 order\n stubs = []\n\n if len(stubs):\n modulus = np.abs(roots)\n data = np.column_stack((roots.real, roots.imag, modulus, freq))\n data = DataFrame(data)\n data.columns = ['Real', 'Imaginary', 'Modulus', 'Frequency']\n data.index = stubs\n\n # Summary\n from statsmodels.iolib import summary2\n smry = summary2.Summary()\n\n # Model info\n model_info = summary2.summary_model(self)\n model_info['Method:'] = self.model.method\n model_info['Sample:'] = sample[0]\n model_info[' '] = sample[-1]\n model_info['S.D. 
of innovations:'] = \"%#5.3f\" % self.sigma2**.5\n model_info['HQIC:'] = \"%#5.3f\" % self.hqic\n model_info['No. Observations:'] = str(len(self.model.endog))\n\n # Parameters\n params = summary2.summary_params(self)\n smry.add_dict(model_info)\n smry.add_df(params, float_format=float_format)\n if len(stubs):\n smry.add_df(data, float_format=\"%17.4f\")\n smry.add_title(results=self, title=title)\n\n return smry\n\n def plot_predict(self, start=None, end=None, exog=None, dynamic=False,\n alpha=.05, plot_insample=True, ax=None):\n from statsmodels.graphics.utils import _import_mpl, create_mpl_ax\n _ = _import_mpl()\n fig, ax = create_mpl_ax(ax)\n\n # use predict so you set dates\n forecast = self.predict(start, end, exog, dynamic)\n # doing this twice. just add a plot keyword to predict?\n start, end, out_of_sample, _ = (\n self.model._get_prediction_index(start, end, dynamic=False))\n\n if out_of_sample:\n steps = out_of_sample\n fc_error = self._forecast_error(steps)\n conf_int = self._forecast_conf_int(forecast[-steps:], fc_error,\n alpha)\n\n if hasattr(self.data, \"predict_dates\"):\n from pandas import Series\n forecast = Series(forecast, index=self.data.predict_dates)\n ax = forecast.plot(ax=ax, label='forecast')\n else:\n ax.plot(forecast)\n\n x = ax.get_lines()[-1].get_xdata()\n if out_of_sample:\n label = \"{0:.0%} confidence interval\".format(1 - alpha)\n ax.fill_between(x[-out_of_sample:], conf_int[:, 0], conf_int[:, 1],\n color='gray', alpha=.5, label=label)\n\n if plot_insample:\n ax.plot(x[:end + 1 - start], self.model.endog[start:end+1],\n label=self.model.endog_names)\n\n ax.legend(loc='best')\n\n return fig\n plot_predict.__doc__ = _plot_predict\n\n\nclass ARMAResultsWrapper(wrap.ResultsWrapper):\n _attrs = {}\n _wrap_attrs = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_attrs,\n _attrs)\n _methods = {}\n _wrap_methods = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_methods,\n _methods)\nwrap.populate_wrapper(ARMAResultsWrapper, ARMAResults) # noqa:E305\n\n\nclass ARIMAResults(ARMAResults):\n def predict(self, start=None, end=None, exog=None, typ='linear',\n dynamic=False):\n return self.model.predict(self.params, start, end, exog, typ, dynamic)\n predict.__doc__ = _arima_results_predict\n\n def _forecast_error(self, steps):\n sigma2 = self.sigma2\n ma_rep = arma2ma(np.r_[1, -self.arparams],\n np.r_[1, self.maparams], lags=steps)\n\n fcerr = np.sqrt(np.cumsum(cumsum_n(ma_rep, self.k_diff)**2)*sigma2)\n return fcerr\n\n def _forecast_conf_int(self, forecast, fcerr, alpha):\n const = norm.ppf(1 - alpha/2.)\n conf_int = np.c_[forecast - const*fcerr, forecast + const*fcerr]\n return conf_int\n\n def forecast(self, steps=1, exog=None, alpha=.05):\n \"\"\"\n Out-of-sample forecasts\n\n Parameters\n ----------\n steps : int\n The number of out of sample forecasts from the end of the\n sample.\n exog : array\n If the model is an ARIMAX, you must provide out of sample\n values for the exogenous variables. 
This should not include\n the constant.\n alpha : float\n The confidence intervals for the forecasts are (1 - alpha) %\n\n Returns\n -------\n forecast : array\n Array of out of sample forecasts\n stderr : array\n Array of the standard error of the forecasts.\n conf_int : array\n 2d array of the confidence interval for the forecast\n\n Notes\n -----\n Prediction is done in the levels of the original endogenous variable.\n If you would like prediction of differences in levels use `predict`.\n \"\"\"\n if exog is not None:\n if self.k_exog == 1 and exog.ndim == 1:\n exog = exog[:, None]\n if exog.shape[0] != steps:\n raise ValueError(\"new exog needed for each step\")\n if self.k_exog != exog.shape[1]:\n raise ValueError('exog must contain the same number of '\n 'variables as in the estimated model.')\n # prepend in-sample exog observations\n if self.k_ar > 0:\n exog = np.vstack((self.model.exog[-self.k_ar:, self.k_trend:],\n exog))\n else:\n if self.k_exog:\n raise ValueError('Forecast values for exog are required when '\n 'the model contains exogenous regressors.')\n\n forecast = _arma_predict_out_of_sample(self.params, steps, self.resid,\n self.k_ar, self.k_ma,\n self.k_trend, self.k_exog,\n self.model.endog,\n exog, method=self.model.method)\n\n d = self.k_diff\n endog = self.model.data.endog[-d:]\n forecast = unintegrate(forecast, unintegrate_levels(endog, d))[d:]\n\n # get forecast errors\n fcerr = self._forecast_error(steps)\n conf_int = self._forecast_conf_int(forecast, fcerr, alpha)\n return forecast, fcerr, conf_int\n\n def plot_predict(self, start=None, end=None, exog=None, dynamic=False,\n alpha=.05, plot_insample=True, ax=None):\n from statsmodels.graphics.utils import _import_mpl, create_mpl_ax\n _ = _import_mpl()\n fig, ax = create_mpl_ax(ax)\n\n # use predict so you set dates\n forecast = self.predict(start, end, exog, 'levels', dynamic)\n # doing this twice. 
just add a plot keyword to predict?\n start, end, out_of_sample, _ = (\n self.model._get_prediction_index(start, end, dynamic))\n\n if out_of_sample:\n steps = out_of_sample\n fc_error = self._forecast_error(steps)\n conf_int = self._forecast_conf_int(forecast[-steps:], fc_error,\n alpha)\n\n if hasattr(self.data, \"predict_dates\"):\n from pandas import Series\n forecast = Series(forecast, index=self.data.predict_dates)\n ax = forecast.plot(ax=ax, label='forecast')\n else:\n ax.plot(forecast)\n\n x = ax.get_lines()[-1].get_xdata()\n if out_of_sample:\n label = \"{0:.0%} confidence interval\".format(1 - alpha)\n ax.fill_between(x[-out_of_sample:], conf_int[:, 0], conf_int[:, 1],\n color='gray', alpha=.5, label=label)\n\n if plot_insample:\n import re\n k_diff = self.k_diff\n label = re.sub(r\"D\\d*\\.\", \"\", self.model.endog_names)\n levels = unintegrate(self.model.endog,\n self.model._first_unintegrate)\n ax.plot(x[:end + 1 - start],\n levels[start + k_diff:end + k_diff + 1], label=label)\n\n ax.legend(loc='best')\n\n return fig\n\n plot_predict.__doc__ = _arima_plot_predict\n\n\nclass ARIMAResultsWrapper(ARMAResultsWrapper):\n pass\nwrap.populate_wrapper(ARIMAResultsWrapper, ARIMAResults) # noqa:E305\n\n\nif __name__ == \"__main__\":\n import statsmodels.api as sm\n\n # simulate arma process\n from statsmodels.tsa.arima_process import arma_generate_sample\n y = arma_generate_sample([1., -.75], [1., .25], nsample=1000)\n arma = ARMA(y)\n res = arma.fit(trend='nc', order=(1, 1))\n\n np.random.seed(12345)\n y_arma22 = arma_generate_sample([1., -.85, .35], [1, .25, -.9],\n nsample=1000)\n arma22 = ARMA(y_arma22)\n res22 = arma22.fit(trend='nc', order=(2, 2))\n\n # test CSS\n arma22_css = ARMA(y_arma22)\n res22css = arma22_css.fit(trend='nc', order=(2, 2), method='css')\n\n data = sm.datasets.sunspots.load(as_pandas=False)\n ar = ARMA(data.endog)\n resar = ar.fit(trend='nc', order=(9, 0))\n\n y_arma31 = arma_generate_sample([1, -.75, -.35, .25], [.1],\n nsample=1000)\n\n arma31css = ARMA(y_arma31)\n res31css = arma31css.fit(order=(3, 1), method=\"css\", trend=\"nc\",\n transparams=True)\n\n y_arma13 = arma_generate_sample([1., -.75], [1, .25, -.5, .8],\n nsample=1000)\n arma13css = ARMA(y_arma13)\n res13css = arma13css.fit(order=(1, 3), method='css', trend='nc')\n\n# check css for p < q and q < p\n y_arma41 = arma_generate_sample([1., -.75, .35, .25, -.3], [1, -.35],\n nsample=1000)\n arma41css = ARMA(y_arma41)\n res41css = arma41css.fit(order=(4, 1), trend='nc', method='css')\n\n y_arma14 = arma_generate_sample([1, -.25], [1., -.75, .35, .25, -.3],\n nsample=1000)\n arma14css = ARMA(y_arma14)\n res14css = arma14css.fit(order=(4, 1), trend='nc', method='css')\n\n # ARIMA Model\n from statsmodels.datasets import webuse\n dta = webuse('wpi1')\n wpi = dta['wpi']\n\n mod = ARIMA(wpi, (1, 1, 1)).fit()\n" ]
[ [ "numpy.dot", "scipy.stats.norm.ppf", "numpy.sqrt", "pandas.Series", "scipy.optimize.fmin_l_bfgs_b", "numpy.asarray", "numpy.cumsum", "pandas.DataFrame", "numpy.arctan2", "numpy.zeros_like", "numpy.roots", "numpy.diff", "numpy.column_stack", "scipy.signal.lfilter", "numpy.zeros", "numpy.log", "numpy.linalg.inv", "numpy.ndim", "numpy.array", "numpy.abs", "numpy.random.seed", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
mraspaud/rioxarray
[ "1f3ed6c5db2475a0d9b9aa9c4985c1b0c558c6e6" ]
[ "test/integration/test_integration_rioxarray.py" ]
[ "import json\nimport os\nimport platform\nimport threading\nfrom functools import partial\n\nimport dask.array as da\nimport numpy\nimport pytest\nimport rasterio\nimport xarray\nfrom affine import Affine\nfrom dask.delayed import Delayed\nfrom numpy.testing import assert_almost_equal, assert_array_equal\nfrom packaging import version\nfrom pyproj import CRS as pCRS\nfrom pyproj import Transformer\nfrom rasterio.control import GroundControlPoint\nfrom rasterio.crs import CRS\nfrom rasterio.windows import Window\n\nimport rioxarray\nfrom rioxarray.exceptions import (\n DimensionMissingCoordinateError,\n MissingCRS,\n MissingSpatialDimensionError,\n NoDataInBounds,\n OneDimensionalRaster,\n RioXarrayError,\n)\nfrom rioxarray.rioxarray import _make_coords\nfrom test.conftest import (\n PYPROJ_LT_3,\n RASTERIO_EQ_122,\n RASTERIO_LT_122,\n TEST_COMPARE_DATA_DIR,\n TEST_INPUT_DATA_DIR,\n _assert_xarrays_equal,\n open_rasterio_engine,\n)\n\ntry:\n import scipy\n\n SCIPY_VERSION = scipy.__version__\n SCIPY_INSTALLED = True\nexcept ModuleNotFoundError:\n SCIPY_VERSION = \"0.0.0\"\n SCIPY_INSTALLED = False\n\n\[email protected](\n params=[\n xarray.open_dataset,\n xarray.open_dataarray,\n open_rasterio_engine,\n ]\n)\ndef modis_reproject(request):\n return dict(\n input=os.path.join(TEST_INPUT_DATA_DIR, \"MODIS_ARRAY.nc\"),\n compare=os.path.join(TEST_COMPARE_DATA_DIR, \"MODIS_ARRAY_UTM.nc\"),\n to_proj=\"+datum=WGS84 +no_defs +proj=utm +units=m +zone=15\",\n open=request.param,\n )\n\n\[email protected]\ndef modis_reproject_3d():\n return dict(\n input=os.path.join(TEST_INPUT_DATA_DIR, \"PLANET_SCOPE_3D.nc\"),\n compare=os.path.join(TEST_COMPARE_DATA_DIR, \"PLANET_SCOPE_WGS84.nc\"),\n to_proj=\"+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs\",\n )\n\n\[email protected](\n params=[\n xarray.open_dataset,\n xarray.open_dataarray,\n open_rasterio_engine,\n ]\n)\ndef interpolate_na(request):\n pytest.importorskip(\"scipy\")\n return dict(\n input=os.path.join(TEST_INPUT_DATA_DIR, \"MODIS_ARRAY.nc\"),\n compare=os.path.join(TEST_COMPARE_DATA_DIR, \"MODIS_ARRAY_INTERPOLATE.nc\"),\n open=request.param,\n )\n\n\[email protected]\ndef interpolate_na_3d():\n pytest.importorskip(\"scipy\")\n return dict(\n input=os.path.join(TEST_INPUT_DATA_DIR, \"PLANET_SCOPE_3D.nc\"),\n compare=os.path.join(TEST_COMPARE_DATA_DIR, \"PLANET_SCOPE_3D_INTERPOLATE.nc\"),\n )\n\n\[email protected](\n params=[\n xarray.open_dataset,\n xarray.open_dataarray,\n open_rasterio_engine,\n ]\n)\ndef interpolate_na_filled(request):\n pytest.importorskip(\"scipy\")\n return dict(\n input=os.path.join(TEST_INPUT_DATA_DIR, \"MODIS_ARRAY.nc\"),\n compare=os.path.join(\n TEST_COMPARE_DATA_DIR, \"MODIS_ARRAY_INTERPOLATE_FILLED.nc\"\n ),\n open=request.param,\n )\n\n\[email protected]\ndef interpolate_na_veris():\n pytest.importorskip(\"scipy\")\n return dict(\n input=os.path.join(TEST_INPUT_DATA_DIR, \"veris.nc\"),\n compare=os.path.join(TEST_COMPARE_DATA_DIR, \"veris_interpolate.nc\"),\n )\n\n\[email protected](\n params=[\n xarray.open_dataarray,\n rioxarray.open_rasterio,\n xarray.open_dataset,\n open_rasterio_engine,\n ]\n)\ndef interpolate_na_nan(request):\n pytest.importorskip(\"scipy\")\n return dict(\n input=os.path.join(TEST_INPUT_DATA_DIR, \"MODIS_ARRAY.nc\"),\n compare=os.path.join(TEST_COMPARE_DATA_DIR, \"MODIS_ARRAY_INTERPOLATE_NAN.nc\"),\n open=request.param,\n )\n\n\[email protected](\n params=[\n xarray.open_dataset,\n xarray.open_dataarray,\n rioxarray.open_rasterio,\n partial(rioxarray.open_rasterio, 
parse_coordinates=False),\n open_rasterio_engine,\n ]\n)\ndef modis_reproject_match(request):\n return dict(\n input=os.path.join(TEST_INPUT_DATA_DIR, \"MODIS_ARRAY.nc\"),\n compare=os.path.join(TEST_COMPARE_DATA_DIR, \"MODIS_ARRAY_MATCH_UTM.nc\"),\n match=os.path.join(TEST_INPUT_DATA_DIR, \"MODIS_ARRAY_MATCH.nc\"),\n open=request.param,\n )\n\n\[email protected](\n params=[\n xarray.open_dataset,\n xarray.open_dataarray,\n rioxarray.open_rasterio,\n open_rasterio_engine,\n ]\n)\ndef modis_reproject_match_coords(request):\n return dict(\n input=os.path.join(TEST_INPUT_DATA_DIR, \"MODIS_ARRAY.nc\"),\n compare=os.path.join(TEST_COMPARE_DATA_DIR, \"MODIS_ARRAY_MATCH_UTM.nc\"),\n match=os.path.join(TEST_INPUT_DATA_DIR, \"MODIS_ARRAY_MATCH.nc\"),\n open=request.param,\n )\n\n\[email protected](\n params=[\n xarray.open_dataset,\n xarray.open_dataarray,\n rioxarray.open_rasterio,\n partial(rioxarray.open_rasterio, parse_coordinates=False),\n open_rasterio_engine,\n ]\n)\ndef modis_reproject_match__passed_nodata(request):\n return dict(\n input=os.path.join(TEST_INPUT_DATA_DIR, \"MODIS_ARRAY.nc\"),\n compare=os.path.join(\n TEST_COMPARE_DATA_DIR, \"MODIS_ARRAY_MATCH_PASSED_NODATA.nc\"\n ),\n match=os.path.join(TEST_INPUT_DATA_DIR, \"MODIS_ARRAY_MATCH.nc\"),\n open=request.param,\n )\n\n\ndef _mod_attr(input_xr, attr, val=None, remove=False):\n if hasattr(input_xr, \"variables\"):\n for var in input_xr.rio.vars:\n _mod_attr(input_xr[var], attr, val=val, remove=remove)\n else:\n if remove:\n input_xr.attrs.pop(attr, None)\n else:\n input_xr.attrs[attr] = val\n\n\ndef _get_attr(input_xr, attr):\n if hasattr(input_xr, \"variables\"):\n return input_xr[input_xr.rio.vars.pop()].attrs[attr]\n return input_xr.attrs[attr]\n\n\ndef _del_attr(input_xr, attr):\n _mod_attr(input_xr, attr, remove=True)\n\n\[email protected](\n params=[\n xarray.open_dataset,\n xarray.open_dataarray,\n rioxarray.open_rasterio,\n partial(rioxarray.open_rasterio, parse_coordinates=False),\n open_rasterio_engine,\n ]\n)\ndef modis_clip(request, tmpdir):\n return dict(\n input=os.path.join(TEST_INPUT_DATA_DIR, \"MODIS_ARRAY.nc\"),\n compare=os.path.join(TEST_COMPARE_DATA_DIR, \"MODIS_ARRAY_CLIP.nc\"),\n compare_expand=os.path.join(\n TEST_COMPARE_DATA_DIR, \"MODIS_ARRAY_CLIP_EXPAND.nc\"\n ),\n open=request.param,\n output=str(tmpdir.join(\"MODIS_CLIP_DUMP.nc\")),\n )\n\n\ndef test_pad_box(modis_clip):\n if isinstance(modis_clip[\"open\"], partial):\n # SKIP: parse_coodinates=False is not supported\n return\n with modis_clip[\"open\"](modis_clip[\"input\"]) as xdi:\n # first, clip\n clipped_ds = xdi.rio.clip_box(\n minx=xdi.x[4].values,\n miny=xdi.y[6].values,\n maxx=xdi.x[6].values,\n maxy=xdi.y[4].values,\n )\n # then, extend back to original\n padded_ds = clipped_ds.rio.pad_box(\n minx=xdi.x[0].values,\n miny=xdi.y[-1].values,\n maxx=xdi.x[-1].values,\n maxy=xdi.y[0].values,\n )\n # check the nodata value\n try:\n nodata = padded_ds[padded_ds.rio.vars[0]].rio.nodata\n if nodata is not None and not numpy.isnan(nodata):\n assert all(\n [padded_ds[padded_ds.rio.vars[0]].isel(x=0, y=0).values == nodata]\n )\n else:\n assert all(\n numpy.isnan(\n [padded_ds[padded_ds.rio.vars[0]].isel(x=0, y=0).values]\n )\n )\n except AttributeError:\n if padded_ds.rio.nodata is not None and not numpy.isnan(\n padded_ds.rio.nodata\n ):\n assert all([padded_ds.isel(x=0, y=0).values == padded_ds.rio.nodata])\n else:\n assert all(numpy.isnan([padded_ds.isel(x=0, y=0).values]))\n # finally, clip again\n clipped_ds2 = padded_ds.rio.clip_box(\n 
minx=xdi.x[4].values,\n miny=xdi.y[6].values,\n maxx=xdi.x[6].values,\n maxy=xdi.y[4].values,\n )\n _assert_xarrays_equal(clipped_ds, clipped_ds2)\n # padded data should have the same size as original data\n if hasattr(xdi, \"variables\"):\n for var in xdi.rio.vars:\n assert_almost_equal(\n xdi[var].rio._cached_transform(),\n padded_ds[var].rio._cached_transform(),\n )\n for padded_size, original_size in zip(\n padded_ds[var].shape, xdi[var].shape\n ):\n assert padded_size == original_size\n else:\n assert_almost_equal(\n xdi.rio._cached_transform(), padded_ds.rio._cached_transform()\n )\n for padded_size, original_size in zip(padded_ds.shape, xdi.shape):\n assert padded_size == original_size\n # make sure it safely writes to netcdf\n padded_ds.to_netcdf(modis_clip[\"output\"])\n\n\ndef test_clip_box(modis_clip):\n with modis_clip[\"open\"](modis_clip[\"input\"]) as xdi, modis_clip[\"open\"](\n modis_clip[\"compare\"]\n ) as xdc:\n clipped_ds = xdi.rio.clip_box(\n minx=-7272967.195874103, # xdi.x[4].values,\n miny=5048602.8438240355, # xdi.y[6].values,\n maxx=-7272503.8831575755, # xdi.x[6].values,\n maxy=5049066.156540562, # xdi.y[4].values,\n )\n assert xdi.rio._cached_transform() != clipped_ds.rio._cached_transform()\n var = \"__xarray_dataarray_variable__\"\n try:\n clipped_ds_values = clipped_ds[var].values\n except KeyError:\n clipped_ds_values = clipped_ds.values\n try:\n xdc_values = xdc[var].values\n except KeyError:\n xdc_values = xdc.values\n assert_almost_equal(clipped_ds_values, xdc_values)\n assert_almost_equal(clipped_ds.rio.transform(), xdc.rio.transform())\n # make sure it safely writes to netcdf\n clipped_ds.to_netcdf(modis_clip[\"output\"])\n\n\ndef test_clip_box__auto_expand(modis_clip):\n with modis_clip[\"open\"](modis_clip[\"input\"]) as xdi, modis_clip[\"open\"](\n modis_clip[\"compare_expand\"]\n ) as xdc:\n clipped_ds = xdi.rio.clip_box(\n minx=-7272735.53951584, # xdi.x[5].values\n miny=5048834.500182299, # xdi.y[5].values\n maxx=-7272735.53951584, # xdi.x[5].values\n maxy=5048834.500182299, # xdi.y[5].values\n auto_expand=True,\n )\n assert xdi.rio._cached_transform() != clipped_ds.rio._cached_transform()\n var = \"__xarray_dataarray_variable__\"\n try:\n clipped_ds_values = clipped_ds[var].values\n except KeyError:\n clipped_ds_values = clipped_ds.values\n try:\n xdc_values = xdc[var].values\n except KeyError:\n xdc_values = xdc.values\n assert_almost_equal(clipped_ds_values, xdc_values)\n assert_almost_equal(clipped_ds.rio.transform(), xdc.rio.transform())\n # make sure it safely writes to netcdf\n clipped_ds.to_netcdf(modis_clip[\"output\"])\n\n\ndef test_clip_box__nodata_error(modis_clip):\n with modis_clip[\"open\"](modis_clip[\"input\"]) as xdi:\n var_match = \"\"\n if hasattr(xdi, \"name\") and xdi.name:\n var_match = \" Data variable: __xarray_dataarray_variable__\"\n if RASTERIO_LT_122:\n expected_exception = NoDataInBounds\n else:\n expected_exception = rasterio.errors.WindowError\n var_match = \"Bounds and transform are inconsistent\"\n\n with pytest.raises(expected_exception, match=var_match):\n xdi.rio.clip_box(\n minx=-8272735.53951584, # xdi.x[5].values\n miny=8048371.187465771, # xdi.y[7].values\n maxx=-8272967.195874103, # xdi.x[4].values\n maxy=8048834.500182299, # xdi.y[5].values\n )\n\n\ndef test_clip_box__one_dimension_error(modis_clip):\n with modis_clip[\"open\"](modis_clip[\"input\"]) as xdi:\n var_match = \"\"\n if hasattr(xdi, \"name\") and xdi.name:\n var_match = \" Data variable: __xarray_dataarray_variable__\"\n if RASTERIO_EQ_122:\n 
expected_exception = rasterio.errors.WindowError\n            var_match = \"Bounds and transform are inconsistent\"\n        else:\n            expected_exception = OneDimensionalRaster\n            var_match = (\n                \"At least one of the clipped raster x,y coordinates has \"\n                f\"only one point.{var_match}\"\n            )\n        # test exception after raster clipped\n        with pytest.raises(\n            expected_exception,\n            match=var_match,\n        ):\n            xdi.rio.clip_box(\n                minx=-7272735.53951584,  # xdi.x[5].values\n                miny=5048834.500182299,  # xdi.y[5].values\n                maxx=-7272735.53951584,  # xdi.x[5].values\n                maxy=5048834.500182299,  # xdi.y[5].values\n            )\n        # test exception before raster clipped\n        with pytest.raises(OneDimensionalRaster):\n            xdi.isel(x=slice(5, 6), y=slice(5, 6)).rio.clip_box(\n                minx=-7272735.53951584,  # xdi.x[5].values\n                miny=5048371.187465771,  # xdi.y[7].values\n                maxx=-7272272.226799311,  # xdi.x[7].values\n                maxy=5048834.500182299,  # xdi.y[5].values\n            )\n\n\ndef test_clip_box__nodata_in_bounds():\n    xds = rioxarray.open_rasterio(\n        os.path.join(TEST_INPUT_DATA_DIR, \"clip_bbox__out_of_bounds.tif\")\n    )\n    with pytest.raises(NoDataInBounds):\n        xds.rio.clip_box(\n            *Transformer.from_crs(\n                xds.rio.crs, \"EPSG:4326\", always_xy=True\n            ).transform_bounds(\n                135.7821043660001123,\n                -17.1608065079999506,\n                135.7849362810001139,\n                -17.1580839999999739,\n            )\n        )\n\n    with pytest.raises(NoDataInBounds):\n        xds.rio.reproject(\"EPSG:4326\").rio.clip_box(\n            135.7821043660001123,\n            -17.1608065079999506,\n            135.7849362810001139,\n            -17.1580839999999739,\n        )\n\n\ndef test_slice_xy(modis_clip):\n    if isinstance(modis_clip[\"open\"], partial):\n        # SKIP: parse_coodinates=False is not supported\n        return\n    with modis_clip[\"open\"](modis_clip[\"input\"]) as xdi, modis_clip[\"open\"](\n        modis_clip[\"compare\"]\n    ) as xdc:\n        clipped_ds = xdi.rio.slice_xy(\n            minx=-7272967.195874103,  # xdi.x[4].values,\n            miny=5048602.8438240355,  # xdi.y[6].values,\n            maxx=-7272503.8831575755,  # xdi.x[6].values,\n            maxy=5049297.812898826,  # xdi.y[4].values - resolution_y,\n        )\n        assert xdi.rio._cached_transform() != clipped_ds.rio._cached_transform()\n        var = \"__xarray_dataarray_variable__\"\n        try:\n            clipped_ds_values = clipped_ds[var].values\n        except KeyError:\n            clipped_ds_values = clipped_ds.values\n        try:\n            xdc_values = xdc[var].values\n        except KeyError:\n            xdc_values = xdc.values\n        assert_almost_equal(clipped_ds_values, xdc_values)\n        assert_almost_equal(clipped_ds.rio.transform(), xdc.rio.transform())\n        # make sure it safely writes to netcdf\n        clipped_ds.to_netcdf(modis_clip[\"output\"])\n\n\n@pytest.mark.parametrize(\"from_disk\", [True, False])\n@pytest.mark.parametrize(\n    \"open_func\",\n    [\n        rioxarray.open_rasterio,\n        partial(rioxarray.open_rasterio, parse_coordinates=False),\n        partial(rioxarray.open_rasterio, parse_coordinates=False, masked=True),\n        open_rasterio_engine,\n        partial(open_rasterio_engine, parse_coordinates=False),\n        partial(open_rasterio_engine, parse_coordinates=False, mask_and_scale=False),\n    ],\n)\ndef test_clip_geojson(open_func, from_disk):\n    with open_func(\n        os.path.join(TEST_COMPARE_DATA_DIR, \"small_dem_3m_merged.tif\"),\n    ) as xdi:\n        # get subset for testing\n        subset = xdi.isel(x=slice(150, 160), y=slice(100, 150))\n        comp_subset = subset.isel(x=slice(1, None), y=slice(1, None))\n        if isinstance(comp_subset, xarray.Dataset):\n            comp_subset = comp_subset.band_data\n        # add transform for test\n        comp_subset.rio.write_transform(inplace=True)\n        # add grid mapping for test\n        comp_subset.rio.write_crs(subset.rio.crs, inplace=True)\n        if comp_subset.rio.encoded_nodata is None:\n            comp_subset.rio.write_nodata(comp_subset.rio.nodata, inplace=True)\n\n    geometries = [\n        {\n            \"type\": \"Polygon\",\n            \"coordinates\": [\n                [\n                    [425499.18381405267, 4615331.540546387],\n                    [425499.18381405267, 4615478.540546387],\n                    [425526.18381405267, 4615478.540546387],\n                    [425526.18381405267, 4615331.540546387],\n                    [425499.18381405267, 4615331.540546387],\n                ]\n            ],\n        }\n    ]\n    # test data array\n    clipped = xdi.rio.clip(geometries, from_disk=from_disk)\n    if from_disk and not isinstance(clipped, xarray.Dataset):\n        _assert_xarrays_equal(clipped[:, 1:, 1:], comp_subset)\n        if comp_subset.rio.encoded_nodata is not None:\n            assert numpy.isnan(clipped.values[:, 0, :]).all()\n            assert numpy.isnan(clipped.values[:, :, 0]).all()\n        else:\n            assert (clipped.values[:, 0, :] == comp_subset.rio.nodata).all()\n            assert (clipped.values[:, :, 0] == comp_subset.rio.nodata).all()\n    else:\n        if isinstance(clipped, xarray.Dataset):\n            clipped = clipped.band_data\n        _assert_xarrays_equal(clipped, comp_subset)\n\n    # test dataset\n    if isinstance(xdi, xarray.DataArray):\n        clipped_ds = xdi.to_dataset(name=\"band_data\").rio.clip(geometries)\n    else:\n        clipped_ds = xdi.rio.clip(geometries)\n    comp_subset_ds = comp_subset.to_dataset(name=\"band_data\")\n    # This coordinate checking is skipped when parse_coordinates=False\n    # as the auto-generated coordinates differ and can be ignored\n    _assert_xarrays_equal(\n        clipped_ds, comp_subset_ds, skip_xy_check=isinstance(open_func, partial)\n    )\n    # check the transform\n    assert_almost_equal(\n        clipped_ds.rio.transform(),\n        (3.0, 0.0, 425500.68381405267, 0.0, -3.0, 4615477.040546387, 0.0, 0.0, 1.0),\n    )\n\n\n@pytest.mark.parametrize(\n    \"invert, from_disk, expected_sum\",\n    [\n        (False, False, 2150837592),\n        (True, False, 535691205),\n        (False, True, 2150837592),\n        (True, True, 535691205),\n    ],\n)\n@pytest.mark.parametrize(\n    \"open_func\",\n    [\n        rioxarray.open_rasterio,\n        partial(rioxarray.open_rasterio, parse_coordinates=False),\n    ],\n)\ndef test_clip_geojson__no_drop(open_func, invert, from_disk, expected_sum):\n    with open_func(\n        os.path.join(TEST_COMPARE_DATA_DIR, \"small_dem_3m_merged.tif\")\n    ) as xdi:\n        geometries = [\n            {\n                \"type\": \"Polygon\",\n                \"coordinates\": [\n                    [\n                        [-93.880889448126, 41.68465068553298],\n                        [-93.89966980835203, 41.68465068553298],\n                        [-93.89966980835203, 41.689430423525266],\n                        [-93.880889448126, 41.689430423525266],\n                        [-93.880889448126, 41.68465068553298],\n                    ]\n                ],\n            }\n        ]\n        # test data array\n        clipped = xdi.rio.clip(\n            geometries, \"epsg:4326\", drop=False, invert=invert, from_disk=from_disk\n        )\n        assert clipped.rio.crs == xdi.rio.crs\n        assert clipped.shape == xdi.shape\n        assert clipped.sum().item() == expected_sum\n        assert clipped.rio.nodata == 0.0\n        assert clipped.rio.nodata == xdi.rio.nodata\n\n        # test dataset\n        clipped_ds = xdi.to_dataset(name=\"test_data\").rio.clip(\n            geometries, \"epsg:4326\", drop=False, invert=invert\n        )\n        assert clipped_ds.rio.crs == xdi.rio.crs\n        assert clipped_ds.test_data.shape == xdi.shape\n        assert clipped_ds.test_data.sum().item() == expected_sum\n        assert clipped_ds.test_data.rio.nodata == xdi.rio.nodata\n\n\n@pytest.fixture\ndef dummy_dataset_non_geospatial():\n    ds = xarray.Dataset(\n        {\n            \"stuff\": xarray.DataArray(\n                data=numpy.zeros((6, 6, 6), dtype=float),\n                dims=[\"time\", \"y\", \"x\"],\n                coords={\n                    \"time\": numpy.arange(6),\n                    \"y\": numpy.linspace(4615514.54054639, 4615295.54054639, num=6),\n                    \"x\": numpy.linspace(425493.18381405, 425532.18381405, num=6),\n                },\n                attrs={\n                    \"_FillValue\": -1,\n                },\n            ),\n            \"non_geo_stuff\": xarray.DataArray(\n 
data=numpy.zeros((6, 6), dtype=float),\n dims=[\"time\", \"bb\"],\n coords={\n \"time\": numpy.arange(6),\n \"bb\": numpy.arange(6),\n },\n ),\n \"meta\": (\"time\", numpy.random.random(6)),\n }\n )\n ds.rio.set_spatial_dims(x_dim=\"x\", y_dim=\"y\", inplace=True)\n ds.rio.write_crs(\"EPSG:26915\", inplace=True)\n return ds\n\n\ndef test_clip__non_geospatial(dummy_dataset_non_geospatial):\n # check for variables without spatial dims\n geometries = [\n {\n \"type\": \"Polygon\",\n \"coordinates\": [\n [\n [425499.18381405267, 4615331.540546387],\n [425499.18381405267, 4615478.540546387],\n [425526.18381405267, 4615478.540546387],\n [425526.18381405267, 4615331.540546387],\n [425499.18381405267, 4615331.540546387],\n ]\n ],\n }\n ]\n ds = dummy_dataset_non_geospatial\n assert ds.stuff.shape == (6, 6, 6)\n with pytest.raises(MissingSpatialDimensionError):\n ds.rio.clip(geometries)\n\n ds_clip = ds[[\"stuff\", \"meta\"]].rio.clip(geometries)\n assert ds_clip.stuff.shape == (6, 4, 4)\n assert \"meta\" in ds_clip.data_vars\n\n with rioxarray.set_options(skip_missing_spatial_dims=True):\n ds_clip = ds.rio.clip(geometries)\n assert ds_clip.stuff.shape == (6, 4, 4)\n assert \"meta\" in ds_clip.data_vars\n assert \"non_geo_stuff\" in ds_clip.data_vars\n\n\ndef test_clip_box__non_geospatial(dummy_dataset_non_geospatial):\n ds = dummy_dataset_non_geospatial\n assert ds.stuff.shape == (6, 6, 6)\n with pytest.raises(MissingSpatialDimensionError):\n ds.rio.clip_box(\n minx=425500.18381405,\n miny=4615395.54054639,\n maxx=425520.18381405,\n maxy=4615414.54054639,\n )\n\n ds_clip_box = ds[[\"stuff\", \"meta\"]].rio.clip_box(\n minx=425500.18381405,\n miny=4615395.54054639,\n maxx=425520.18381405,\n maxy=4615414.54054639,\n )\n assert ds_clip_box.stuff.shape == (6, 2, 3)\n assert \"meta\" in ds_clip_box.data_vars\n\n with rioxarray.set_options(skip_missing_spatial_dims=True):\n ds_clip_box = ds.rio.clip_box(\n minx=425500.18381405,\n miny=4615395.54054639,\n maxx=425520.18381405,\n maxy=4615414.54054639,\n )\n assert ds_clip_box.stuff.shape == (6, 2, 3)\n assert \"meta\" in ds_clip_box.data_vars\n assert \"non_geo_stuff\" in ds_clip_box.data_vars\n\n\ndef test_reproject__non_geospatial(dummy_dataset_non_geospatial):\n ds = dummy_dataset_non_geospatial\n assert ds.stuff.shape == (6, 6, 6)\n with pytest.raises(MissingSpatialDimensionError):\n ds.rio.reproject(\"EPSG:4326\")\n\n ds_reproject = ds[[\"stuff\", \"meta\"]].rio.reproject(\"EPSG:4326\")\n assert ds_reproject.stuff.shape == (6, 8, 2)\n assert \"meta\" in ds_reproject.data_vars\n\n with rioxarray.set_options(skip_missing_spatial_dims=True):\n ds_reproject = ds.rio.reproject(\"EPSG:4326\")\n assert ds_reproject.stuff.shape == (6, 8, 2)\n assert \"meta\" in ds_reproject.data_vars\n assert \"non_geo_stuff\" in ds_reproject.data_vars\n\n\ndef test_reproject_match__non_geospatial(dummy_dataset_non_geospatial):\n ds = dummy_dataset_non_geospatial\n assert ds.stuff.shape == (6, 6, 6)\n with pytest.raises(MissingSpatialDimensionError):\n ds.rio.reproject_match(ds)\n\n ds_reproject = ds[[\"stuff\", \"meta\"]].rio.reproject_match(ds)\n assert ds_reproject.stuff.shape == (6, 6, 6)\n assert \"meta\" in ds_reproject.data_vars\n\n with rioxarray.set_options(skip_missing_spatial_dims=True):\n ds_reproject = ds.rio.reproject_match(ds)\n assert ds_reproject.stuff.shape == (6, 6, 6)\n assert \"meta\" in ds_reproject.data_vars\n assert \"non_geo_stuff\" in ds_reproject.data_vars\n\n\ndef test_interpolate_na__non_geospatial(dummy_dataset_non_geospatial):\n 
pytest.importorskip(\"scipy\")\n    ds = dummy_dataset_non_geospatial\n    assert ds.stuff.shape == (6, 6, 6)\n    with pytest.raises(MissingSpatialDimensionError):\n        ds.rio.interpolate_na()\n\n    ds_interp = ds[[\"stuff\", \"meta\"]].rio.interpolate_na()\n    assert ds_interp.stuff.shape == (6, 6, 6)\n    assert \"meta\" in ds_interp.data_vars\n\n    with rioxarray.set_options(skip_missing_spatial_dims=True):\n        ds_interp = ds.rio.interpolate_na()\n        assert ds_interp.stuff.shape == (6, 6, 6)\n        assert \"meta\" in ds_interp.data_vars\n        assert \"non_geo_stuff\" in ds_interp.data_vars\n\n\ndef test_pad_box__non_geospatial(dummy_dataset_non_geospatial):\n    ds = dummy_dataset_non_geospatial\n    assert ds.stuff.shape == (6, 6, 6)\n    with pytest.raises(MissingSpatialDimensionError):\n        ds.rio.pad_box(*ds.rio.bounds())\n\n    ds_pad_box = (\n        ds[[\"stuff\", \"meta\"]]\n        .rio.clip_box(\n            minx=425500.18381405,\n            miny=4615395.54054639,\n            maxx=425520.18381405,\n            maxy=4615414.54054639,\n        )\n        .rio.pad_box(*ds.rio.bounds())\n    )\n    assert ds_pad_box.stuff.shape == (6, 6, 6)\n    assert \"meta\" in ds_pad_box.data_vars\n\n    with rioxarray.set_options(skip_missing_spatial_dims=True):\n        ds_pad_box = ds.rio.clip_box(\n            minx=425500.18381405,\n            miny=4615395.54054639,\n            maxx=425520.18381405,\n            maxy=4615414.54054639,\n        ).rio.pad_box(*ds.rio.bounds())\n        assert ds_pad_box.stuff.shape == (6, 6, 6)\n        assert \"meta\" in ds_pad_box.data_vars\n        assert \"non_geo_stuff\" in ds_pad_box.data_vars\n\n\n@pytest.mark.parametrize(\n    \"open_func\",\n    [\n        xarray.open_dataset,\n        xarray.open_dataarray,\n        rioxarray.open_rasterio,\n        partial(rioxarray.open_rasterio, parse_coordinates=False),\n        open_rasterio_engine,\n        partial(open_rasterio_engine, parse_coordinates=False),\n    ],\n)\ndef test_transform_bounds(open_func):\n    with open_func(os.path.join(TEST_INPUT_DATA_DIR, \"MODIS_ARRAY.nc\")) as xdi:\n        bounds = xdi.rio.transform_bounds(\n            \"+proj=merc +lon_0=0 +k=1 +x_0=0 +y_0=0 +ellps=WGS84\"\n            \" +datum=WGS84 +units=m +no_defs\",\n            densify_pts=100,\n        )\n        assert_almost_equal(\n            bounds,\n            (\n                -10374232.525903117,\n                5591295.917919335,\n                -10232919.684719983,\n                5656912.314724255,\n            ),\n        )\n\n\ndef test_reproject_with_shape(modis_reproject):\n    new_shape = (9, 10)\n    mask_args = (\n        dict(masked=False, mask_and_scale=False)\n        if \"rasterio\" in str(modis_reproject[\"open\"])\n        else dict(mask_and_scale=False)\n    )\n    with modis_reproject[\"open\"](modis_reproject[\"input\"], **mask_args) as mda:\n        mds_repr = mda.rio.reproject(modis_reproject[\"to_proj\"], shape=new_shape)\n        # test\n        if hasattr(mds_repr, \"variables\"):\n            for var in mds_repr.rio.vars:\n                assert mds_repr[var].rio.shape == new_shape\n        else:\n            assert mds_repr.rio.shape == new_shape\n\n\ndef test_reproject(modis_reproject):\n    mask_args = (\n        dict(masked=False, mask_and_scale=False)\n        if \"rasterio\" in str(modis_reproject[\"open\"])\n        else dict(mask_and_scale=False)\n    )\n    with modis_reproject[\"open\"](\n        modis_reproject[\"input\"], **mask_args\n    ) as mda, modis_reproject[\"open\"](modis_reproject[\"compare\"], **mask_args) as mdc:\n        mds_repr = mda.rio.reproject(modis_reproject[\"to_proj\"])\n        # test\n        _assert_xarrays_equal(mds_repr, mdc)\n        assert mds_repr.coords[mds_repr.rio.x_dim].attrs == {\n            \"axis\": \"X\",\n            \"long_name\": \"x coordinate of projection\",\n            \"standard_name\": \"projection_x_coordinate\",\n            \"units\": \"metre\",\n        }\n        assert mds_repr.coords[mds_repr.rio.y_dim].attrs == {\n            \"axis\": \"Y\",\n            \"long_name\": \"y coordinate of projection\",\n            \"standard_name\": \"projection_y_coordinate\",\n            \"units\": \"metre\",\n        }\n\n\n@pytest.mark.parametrize(\n    \"open_func\",\n    [\n        rioxarray.open_rasterio,\n        partial(rioxarray.open_rasterio, parse_coordinates=False),\n        open_rasterio_engine,\n        partial(open_rasterio_engine, parse_coordinates=False),\n    ],\n)\ndef test_reproject_3d(open_func, modis_reproject_3d):\n    with open_func(modis_reproject_3d[\"input\"]) as mda, open_func(\n        modis_reproject_3d[\"compare\"]\n    ) as mdc:\n        mds_repr = mda.rio.reproject(modis_reproject_3d[\"to_proj\"])\n        # test\n        _assert_xarrays_equal(mds_repr, mdc)\n        if mdc.rio.x_dim in mdc.coords:\n            assert mds_repr.coords[mds_repr.rio.x_dim].attrs == {\n                \"long_name\": \"longitude\",\n                \"standard_name\": \"longitude\",\n                \"units\": \"degrees_east\",\n                \"axis\": \"X\",\n            }\n            assert mds_repr.coords[mds_repr.rio.y_dim].attrs == {\n                \"long_name\": \"latitude\",\n                \"standard_name\": \"latitude\",\n                \"units\": \"degrees_north\",\n                \"axis\": \"Y\",\n            }\n\n\ndef test_reproject__grid_mapping(modis_reproject):\n    mask_args = (\n        dict(masked=False, mask_and_scale=False)\n        if \"rasterio\" in str(modis_reproject[\"open\"])\n        else dict(mask_and_scale=False)\n    )\n    with modis_reproject[\"open\"](\n        modis_reproject[\"input\"], **mask_args\n    ) as mda, modis_reproject[\"open\"](modis_reproject[\"compare\"], **mask_args) as mdc:\n\n        # remove 'crs' attribute and add grid mapping\n        mda.coords[\"spatial_ref\"] = 0\n        mda.coords[\"spatial_ref\"].attrs[\"spatial_ref\"] = CRS.from_user_input(\n            _get_attr(mda, \"crs\")\n        ).wkt\n        _mod_attr(mda, \"grid_mapping\", val=\"spatial_ref\")\n        _del_attr(mda, \"crs\")\n        mdc.coords[\"spatial_ref\"] = 0\n        mdc.coords[\"spatial_ref\"].attrs[\"spatial_ref\"] = CRS.from_user_input(\n            _get_attr(mdc, \"crs\")\n        ).wkt\n        _mod_attr(mdc, \"grid_mapping\", val=\"spatial_ref\")\n        _del_attr(mdc, \"crs\")\n\n        # reproject\n        mds_repr = mda.rio.reproject(modis_reproject[\"to_proj\"])\n        # test\n        _assert_xarrays_equal(mds_repr, mdc)\n\n\ndef test_reproject__masked(modis_reproject):\n    with modis_reproject[\"open\"](modis_reproject[\"input\"]) as mda, modis_reproject[\n        \"open\"\n    ](modis_reproject[\"compare\"]) as mdc:\n        # reproject\n        mds_repr = mda.rio.reproject(modis_reproject[\"to_proj\"])\n        # test\n        _assert_xarrays_equal(mds_repr, mdc)\n\n\ndef test_reproject__no_transform(modis_reproject):\n    with modis_reproject[\"open\"](modis_reproject[\"input\"]) as mda, modis_reproject[\n        \"open\"\n    ](modis_reproject[\"compare\"]) as mdc:\n        orig_trans = mda.rio.transform()\n        _del_attr(mda, \"transform\")\n        # reproject\n        mds_repr = mda.rio.reproject(modis_reproject[\"to_proj\"])\n        # test\n        if hasattr(mds_repr, \"variables\"):\n            for var in mds_repr.rio.vars:\n                assert_array_equal(orig_trans, tuple(mda[var].rio.transform()))\n        else:\n            assert_array_equal(orig_trans, tuple(mda.rio.transform()))\n        _assert_xarrays_equal(mds_repr, mdc)\n\n\n@pytest.mark.parametrize(\"nodata\", [None, -9999])\ndef test_reproject__no_nodata(nodata, modis_reproject):\n    mask_args = (\n        dict(masked=False, mask_and_scale=False)\n        if \"rasterio\" in str(modis_reproject[\"open\"])\n        else dict(mask_and_scale=False)\n    )\n    with modis_reproject[\"open\"](\n        modis_reproject[\"input\"], **mask_args\n    ) as mda, modis_reproject[\"open\"](modis_reproject[\"compare\"], **mask_args) as mdc:\n        orig_fill = _get_attr(mda, \"_FillValue\")\n        _del_attr(mda, \"_FillValue\")\n        _del_attr(mda, \"nodata\")\n        # reproject\n        mds_repr = mda.rio.reproject(modis_reproject[\"to_proj\"], nodata=nodata)\n\n        # overwrite test dataset\n        # if isinstance(modis_reproject['open'], xarray.DataArray):\n        #     mds_repr.to_netcdf(modis_reproject['compare'])\n\n        # replace -9999 with original _FillValue for testing\n        fill_nodata = -32768 if nodata is None else nodata\n        if hasattr(mds_repr, \"variables\"):\n            for var in mds_repr.rio.vars:\n                mds_repr[var].values[mds_repr[var].values == fill_nodata] = orig_fill\n        else:\n            mds_repr.values[mds_repr.values == fill_nodata] = orig_fill\n        _mod_attr(mdc, \"_FillValue\", val=fill_nodata)\n        # test\n        _assert_xarrays_equal(mds_repr, mdc)\n\n\n@pytest.mark.parametrize(\"open_func\", [rioxarray.open_rasterio, open_rasterio_engine])\ndef test_reproject__scalar_coord(open_func):\n    with open_func(\n        os.path.join(TEST_COMPARE_DATA_DIR, \"small_dem_3m_merged.tif\")\n    ) as xdi:\n        xdi_repr = xdi.squeeze().rio.reproject(\"epsg:3395\")\n        for coord in xdi.coords:\n            assert coord in xdi_repr.coords\n\n\ndef test_reproject__no_nodata_masked(modis_reproject):\n    with modis_reproject[\"open\"](modis_reproject[\"input\"]) as mda, modis_reproject[\n        \"open\"\n    ](modis_reproject[\"compare\"]) as mdc:\n        _del_attr(mda, \"nodata\")\n        # reproject\n        mds_repr = mda.rio.reproject(modis_reproject[\"to_proj\"])\n        # test\n        _assert_xarrays_equal(mds_repr, mdc)\n\n\ndef test_reproject__gcps_kwargs(tmp_path):\n    tiffname = tmp_path / \"test.tif\"\n    src_gcps = [\n        GroundControlPoint(row=0, col=0, x=156113, y=2818720, z=0),\n        GroundControlPoint(row=0, col=800, x=338353, y=2785790, z=0),\n        GroundControlPoint(row=800, col=800, x=297939, y=2618518, z=0),\n        GroundControlPoint(row=800, col=0, x=115698, y=2651448, z=0),\n    ]\n    crs = CRS.from_epsg(32618)\n    with rasterio.open(\n        tiffname,\n        mode=\"w\",\n        height=800,\n        width=800,\n        count=3,\n        dtype=numpy.uint8,\n        driver=\"GTiff\",\n    ) as source:\n        source.gcps = (src_gcps, crs)\n\n    rds = rioxarray.open_rasterio(tiffname)\n    rds.rio.write_crs(crs, inplace=True)\n    rds = rds.rio.reproject(\n        crs,\n        gcps=src_gcps,\n    )\n    assert rds.rio.height == 923\n    assert rds.rio.width == 1027\n    assert rds.rio.crs == crs\n    assert rds.rio.transform().almost_equals(\n        Affine(\n            216.8587081056465,\n            0.0,\n            115698.25,\n            0.0,\n            -216.8587081056465,\n            2818720.0,\n        )\n    )\n\n\ndef test_reproject_match(modis_reproject_match):\n    mask_args = (\n        dict(masked=False, mask_and_scale=False)\n        if \"rasterio\" in str(modis_reproject_match[\"open\"])\n        else dict(mask_and_scale=False)\n    )\n    with modis_reproject_match[\"open\"](\n        modis_reproject_match[\"input\"], **mask_args\n    ) as mda, modis_reproject_match[\"open\"](\n        modis_reproject_match[\"compare\"], **mask_args\n    ) as mdc, xarray.open_dataarray(\n        modis_reproject_match[\"match\"]\n    ) as mdm:\n        # reproject\n        mds_repr = mda.rio.reproject_match(mdm)\n        # test\n        _assert_xarrays_equal(mds_repr, mdc)\n        if mdc.rio.x_dim in mdc.coords:\n            assert mds_repr.coords[mds_repr.rio.x_dim].attrs == {\n                \"axis\": \"X\",\n                \"long_name\": \"x coordinate of projection\",\n                \"standard_name\": \"projection_x_coordinate\",\n                \"units\": \"metre\",\n            }\n            assert mds_repr.coords[mds_repr.rio.y_dim].attrs == {\n                \"axis\": \"Y\",\n                \"long_name\": \"y coordinate of projection\",\n                \"standard_name\": \"projection_y_coordinate\",\n                \"units\": \"metre\",\n            }\n\n\ndef test_reproject_match__masked(modis_reproject_match):\n    mask_args = (\n        dict(masked=True)\n        if \"rasterio\" in str(modis_reproject_match[\"open\"])\n        else dict(mask_and_scale=True)\n    )\n    with modis_reproject_match[\"open\"](\n        modis_reproject_match[\"input\"], **mask_args\n    ) as mda, modis_reproject_match[\"open\"](\n        modis_reproject_match[\"compare\"], **mask_args\n    ) as mdc, xarray.open_dataarray(\n        modis_reproject_match[\"match\"]\n    ) as mdm:\n        # reproject\n        mds_repr = mda.rio.reproject_match(mdm)\n        # test\n        _assert_xarrays_equal(mds_repr, mdc)\n\n\ndef test_reproject_match__no_transform_nodata(modis_reproject_match_coords):\n    mask_args = (\n        dict(masked=True)\n        if \"rasterio\" in str(modis_reproject_match_coords[\"open\"])\n        else dict(mask_and_scale=True)\n    )\n    with modis_reproject_match_coords[\"open\"](\n        modis_reproject_match_coords[\"input\"], **mask_args\n    ) as mda, modis_reproject_match_coords[\"open\"](\n        modis_reproject_match_coords[\"compare\"], **mask_args\n    ) as mdc, xarray.open_dataarray(\n        modis_reproject_match_coords[\"match\"]\n    ) as mdm:\n        _del_attr(mda, \"transform\")\n        _del_attr(mda, \"nodata\")\n        # reproject\n        mds_repr = mda.rio.reproject_match(mdm)\n        # test\n        _assert_xarrays_equal(mds_repr, mdc)\n\n\ndef test_reproject_match__pass_nodata(modis_reproject_match__passed_nodata):\n    mask_args = (\n        dict(masked=False, mask_and_scale=False)\n        if \"rasterio\" in str(modis_reproject_match__passed_nodata[\"open\"])\n        else dict(mask_and_scale=False, decode_coords=\"all\")\n    )\n    with modis_reproject_match__passed_nodata[\"open\"](\n        modis_reproject_match__passed_nodata[\"input\"], **mask_args\n    ) as mda, modis_reproject_match__passed_nodata[\"open\"](\n        modis_reproject_match__passed_nodata[\"compare\"], **mask_args\n    ) as mdc, xarray.open_dataarray(\n        modis_reproject_match__passed_nodata[\"match\"]\n    ) as mdm:\n        # reproject\n        mds_repr = mda.rio.reproject_match(mdm, nodata=-9999)\n        # test\n        _assert_xarrays_equal(mds_repr, mdc)\n\n\n@pytest.mark.parametrize(\"open_func\", [rioxarray.open_rasterio, open_rasterio_engine])\ndef test_make_src_affine(open_func, modis_reproject):\n    with xarray.open_dataarray(modis_reproject[\"input\"]) as xdi, open_func(\n        modis_reproject[\"input\"]\n    ) as xri:\n\n        # check the transform\n        attribute_transform = tuple(xdi.attrs[\"transform\"])\n        attribute_transform_func = tuple(xdi.rio.transform())\n        calculated_transform = tuple(xdi.rio.transform(recalc=True))\n        # delete the transform to ensure it is not being used\n        del xdi.attrs[\"transform\"]\n        calculated_transform_check = tuple(xdi.rio.transform())\n        calculated_transform_check2 = tuple(xdi.rio.transform())\n        rio_transform = tuple(xri.rio._cached_transform())\n\n        assert_array_equal(attribute_transform, attribute_transform_func)\n        assert_array_equal(calculated_transform, calculated_transform_check)\n        assert_array_equal(calculated_transform, calculated_transform_check2)\n        assert_array_equal(attribute_transform, calculated_transform)\n        assert_array_equal(calculated_transform, rio_transform)\n\n\ndef test_make_src_affine__single_point():\n    point_input = os.path.join(TEST_INPUT_DATA_DIR, \"MODIS_POINT.nc\")\n    with xarray.open_dataarray(point_input) as xdi:\n        # check the transform\n        attribute_transform = tuple(xdi.attrs[\"transform\"])\n        attribute_transform_func = tuple(xdi.rio.transform())\n        calculated_transform = tuple(xdi.rio.transform(recalc=True))\n        # delete the transform to ensure it is not being used\n        del xdi.attrs[\"transform\"]\n        assert xdi.rio.transform(recalc=True) == Affine.identity()\n        assert xdi.rio.transform() == Affine.identity()\n\n        assert_array_equal(attribute_transform, attribute_transform_func)\n        assert_array_equal(attribute_transform, calculated_transform)\n\n\n@pytest.mark.parametrize(\n    \"open_func\",\n    [\n        xarray.open_dataset,\n        open_rasterio_engine,\n        rioxarray.open_rasterio,\n        partial(rioxarray.open_rasterio, parse_coordinates=False),\n        partial(open_rasterio_engine, parse_coordinates=False),\n    ],\n)\ndef test_make_coords__calc_trans(open_func, modis_reproject):\n    with xarray.open_dataarray(modis_reproject[\"input\"]) as xdi, open_func(\n        modis_reproject[\"input\"]\n    ) as xri:\n        # calculate coordinates from the calculated transform\n        width, height = xdi.rio.shape\n        calculated_transform = xdi.rio.transform(recalc=True)\n        calc_coords_calc_trans = _make_coords(xdi, calculated_transform, width, height)\n        widthr, heightr = xri.rio.shape\n        calculated_transformr = xri.rio.transform(recalc=True)\n        calc_coords_calc_transr = _make_coords(\n            xri, calculated_transformr, widthr, heightr\n        )\n\n        assert_almost_equal(calculated_transform, calculated_transformr)\n        # check to see if they all match\n        if not isinstance(open_func, partial):\n            assert_almost_equal(\n                xri.coords[\"x\"].values, calc_coords_calc_trans[\"x\"].values, decimal=9\n            )\n            assert_almost_equal(\n                xri.coords[\"y\"].values, calc_coords_calc_trans[\"y\"].values, decimal=9\n            )\n            assert_almost_equal(\n                xri.coords[\"x\"].values, calc_coords_calc_transr[\"x\"].values, decimal=9\n            )\n            assert_almost_equal(\n                xri.coords[\"y\"].values, calc_coords_calc_transr[\"y\"].values, decimal=9\n            )\n\n\n@pytest.mark.parametrize(\n    \"open_func\",\n    [\n        xarray.open_dataset,\n        rioxarray.open_rasterio,\n        partial(rioxarray.open_rasterio, parse_coordinates=False),\n        open_rasterio_engine,\n        partial(open_rasterio_engine, parse_coordinates=False),\n    ],\n)\ndef test_make_coords__attr_trans(open_func, modis_reproject):\n    with xarray.open_dataarray(modis_reproject[\"input\"]) as xdi, open_func(\n        modis_reproject[\"input\"]\n    ) as xri:\n        # calculate coordinates from the attribute transform\n        width, height = xdi.rio.shape\n        attr_transform = xdi.rio.transform()\n        calc_coords_attr_trans = _make_coords(xdi, attr_transform, width, height)\n        widthr, heightr = xri.rio.shape\n        calculated_transformr = xri.rio.transform()\n        calc_coords_calc_transr = _make_coords(\n            xri, calculated_transformr, widthr, heightr\n        )\n        assert_almost_equal(attr_transform, calculated_transformr)\n        # check to see if they all match\n        if not isinstance(open_func, partial):\n            assert_almost_equal(\n                xri.coords[\"x\"].values, calc_coords_calc_transr[\"x\"].values, decimal=9\n            )\n            assert_almost_equal(\n                xri.coords[\"y\"].values, calc_coords_calc_transr[\"y\"].values, decimal=9\n            )\n            assert_almost_equal(\n                xri.coords[\"x\"].values, calc_coords_attr_trans[\"x\"].values, decimal=9\n            )\n            assert_almost_equal(\n                xri.coords[\"y\"].values, calc_coords_attr_trans[\"y\"].values, decimal=9\n            )\n            assert_almost_equal(\n                xdi.coords[\"x\"].values, xri.coords[\"x\"].values, decimal=9\n            )\n            assert_almost_equal(\n                xdi.coords[\"y\"].values, xri.coords[\"y\"].values, decimal=9\n            )\n\n\ndef test_interpolate_na(interpolate_na):\n    mask_args = (\n        dict(masked=False, mask_and_scale=False)\n        if \"rasterio\" in str(interpolate_na[\"open\"])\n        else dict(mask_and_scale=False)\n    )\n    with interpolate_na[\"open\"](\n        interpolate_na[\"input\"], **mask_args\n    ) as mda, interpolate_na[\"open\"](interpolate_na[\"compare\"], **mask_args) as mdc:\n        interpolated_ds = mda.rio.interpolate_na()\n        # test\n        _assert_xarrays_equal(interpolated_ds, mdc)\n\n\n@pytest.mark.skipif(SCIPY_INSTALLED, reason=\"Only test if scipy is not installed.\")\ndef test_interpolate_na__missing_scipy():\n    xds = xarray.open_dataarray(os.path.join(TEST_INPUT_DATA_DIR, \"MODIS_ARRAY.nc\"))\n    with pytest.raises(ModuleNotFoundError, match=r\"rioxarray\\[interp\\]\"):\n        xds.rio.interpolate_na()\n\n\n@pytest.mark.skipif(\n    version.parse(SCIPY_VERSION) < version.parse(\"1.7.0\")\n    or 
platform.system() != \"Linux\",\n reason=\"griddata behaves differently across versions and platforms\",\n)\ndef test_interpolate_na_veris(interpolate_na_veris):\n with xarray.open_dataset(interpolate_na_veris[\"input\"]) as mda, xarray.open_dataset(\n interpolate_na_veris[\"compare\"]\n ) as mdc:\n interpolated_ds = mda.rio.interpolate_na()\n # test\n _assert_xarrays_equal(interpolated_ds, mdc)\n\n\ndef test_interpolate_na_3d(interpolate_na_3d):\n with xarray.open_dataset(interpolate_na_3d[\"input\"]) as mda, xarray.open_dataset(\n interpolate_na_3d[\"compare\"]\n ) as mdc:\n interpolated_ds = mda.rio.interpolate_na()\n # test\n _assert_xarrays_equal(interpolated_ds, mdc)\n\n\ndef test_interpolate_na__nodata_filled(interpolate_na_filled):\n mask_args = (\n dict(masked=False, mask_and_scale=False)\n if \"rasterio\" in str(interpolate_na_filled[\"open\"])\n else dict(mask_and_scale=False)\n )\n with interpolate_na_filled[\"open\"](\n interpolate_na_filled[\"input\"], **mask_args\n ) as mda, interpolate_na_filled[\"open\"](\n interpolate_na_filled[\"compare\"], **mask_args\n ) as mdc:\n if hasattr(mda, \"variables\"):\n for var in mda.rio.vars:\n mda[var].values[mda[var].values == mda[var].rio.nodata] = 400\n else:\n mda.values[mda.values == mda.rio.nodata] = 400\n\n interpolated_ds = mda.rio.interpolate_na()\n # test\n _assert_xarrays_equal(interpolated_ds, mdc)\n\n\ndef test_interpolate_na__all_nodata(interpolate_na_nan):\n rio_opened = \"open_rasterio \" in str(interpolate_na_nan[\"open\"])\n with interpolate_na_nan[\"open\"](\n interpolate_na_nan[\"input\"], mask_and_scale=True\n ) as mda, interpolate_na_nan[\"open\"](\n interpolate_na_nan[\"compare\"], mask_and_scale=True\n ) as mdc:\n if hasattr(mda, \"variables\"):\n for var in mda.rio.vars:\n mda[var].values[~numpy.isnan(mda[var].values)] = numpy.nan\n else:\n mda.values[~numpy.isnan(mda.values)] = numpy.nan\n\n interpolated_ds = mda.rio.interpolate_na()\n if rio_opened and \"__xarray_dataarray_variable__\" in mdc:\n mdc = mdc[\"__xarray_dataarray_variable__\"]\n # test\n _assert_xarrays_equal(interpolated_ds, mdc)\n\n\ndef test_load_in_geographic_dimensions():\n sentinel_2_geographic = os.path.join(\n TEST_INPUT_DATA_DIR, \"sentinel_2_L1C_geographic.nc\"\n )\n with xarray.open_dataset(sentinel_2_geographic) as mda:\n assert mda.rio.x_dim == \"longitude\"\n assert mda.rio.y_dim == \"latitude\"\n assert mda.rio.crs.to_epsg() == 4326\n assert mda.red.rio.x_dim == \"longitude\"\n assert mda.red.rio.y_dim == \"latitude\"\n assert mda.red.rio.crs.to_epsg() == 4326\n\n\ndef test_geographic_reproject():\n sentinel_2_geographic = os.path.join(\n TEST_INPUT_DATA_DIR, \"sentinel_2_L1C_geographic.nc\"\n )\n sentinel_2_utm = os.path.join(TEST_COMPARE_DATA_DIR, \"sentinel_2_L1C_utm.nc\")\n with xarray.open_dataset(sentinel_2_geographic) as mda, xarray.open_dataset(\n sentinel_2_utm\n ) as mdc:\n mds_repr = mda.rio.reproject(\"epsg:32721\")\n # mds_repr.to_netcdf(sentinel_2_utm)\n # test\n _assert_xarrays_equal(mds_repr, mdc, precision=4)\n\n\ndef test_geographic_reproject__missing_nodata():\n sentinel_2_geographic = os.path.join(\n TEST_INPUT_DATA_DIR, \"sentinel_2_L1C_geographic.nc\"\n )\n sentinel_2_utm = os.path.join(\n TEST_COMPARE_DATA_DIR, \"sentinel_2_L1C_utm__auto_nodata.nc\"\n )\n with xarray.open_dataset(sentinel_2_geographic) as mda, xarray.open_dataset(\n sentinel_2_utm\n ) as mdc:\n mda.red.attrs.pop(\"nodata\")\n mda.nir.attrs.pop(\"nodata\")\n mds_repr = mda.rio.reproject(\"epsg:32721\")\n # mds_repr.to_netcdf(sentinel_2_utm)\n # 
test\n        _mod_attr(mdc, \"_FillValue\", val=65535)\n        _assert_xarrays_equal(mds_repr, mdc, precision=4)\n\n\ndef test_geographic_resample_integer():\n    pytest.importorskip(\"scipy\")\n    sentinel_2_geographic = os.path.join(\n        TEST_INPUT_DATA_DIR, \"sentinel_2_L1C_geographic.nc\"\n    )\n    sentinel_2_interp = os.path.join(\n        TEST_COMPARE_DATA_DIR, \"sentinel_2_L1C_interpolate_na.nc\"\n    )\n    with xarray.open_dataset(sentinel_2_geographic) as mda, xarray.open_dataset(\n        sentinel_2_interp\n    ) as mdc:\n        mds_interp = mda.rio.interpolate_na()\n        # mds_interp.to_netcdf(sentinel_2_interp)\n        # test\n        _assert_xarrays_equal(mds_interp, mdc)\n\n\n@pytest.mark.parametrize(\n    \"open_method\",\n    [\n        xarray.open_dataarray,\n        partial(rioxarray.open_rasterio, masked=True),\n        partial(rioxarray.open_rasterio, masked=True, chunks=True),\n        partial(\n            rioxarray.open_rasterio, masked=True, chunks=True, lock=threading.Lock()\n        ),\n        open_rasterio_engine,\n    ],\n)\n@pytest.mark.parametrize(\n    \"windowed, recalc_transform\",\n    [\n        (True, True),\n        (False, False),\n    ],\n)\n@pytest.mark.parametrize(\n    \"write_lock, compute\",\n    [\n        (None, False),\n        (threading.Lock(), False),\n        (threading.Lock(), True),\n    ],\n)\ndef test_to_raster(\n    open_method, windowed, recalc_transform, write_lock, compute, tmpdir\n):\n    tmp_raster = tmpdir.join(\"modis_raster.tif\")\n    test_tags = {\"test\": \"1\"}\n    with open_method(os.path.join(TEST_INPUT_DATA_DIR, \"MODIS_ARRAY.nc\")) as mda:\n        if isinstance(mda, xarray.Dataset):\n            mda = mda.__xarray_dataarray_variable__\n        delayed = mda.rio.to_raster(\n            str(tmp_raster),\n            windowed=windowed,\n            recalc_transform=recalc_transform,\n            tags=test_tags,\n            lock=write_lock,\n            compute=compute,\n        )\n        xds = mda.copy().squeeze()\n        xds_attrs = {\n            key: str(value)\n            for key, value in mda.attrs.items()\n            if key\n            not in (\n                \"add_offset\",\n                \"crs\",\n                \"is_tiled\",\n                \"nodata\",\n                \"res\",\n                \"scale_factor\",\n                \"transform\",\n                \"grid_mapping\",\n            )\n        }\n\n        if write_lock is None or not isinstance(xds.data, da.Array) or compute:\n            assert delayed is None\n        else:\n            assert isinstance(delayed, Delayed)\n            delayed.compute()\n\n        with rasterio.open(str(tmp_raster)) as rds:\n            assert rds.count == 1\n            assert rds.crs == xds.rio.crs\n            assert_array_equal(rds.transform, xds.rio.transform())\n            assert_array_equal(rds.nodata, xds.rio.encoded_nodata)\n            assert_array_equal(rds.read(1), xds.fillna(xds.rio.encoded_nodata).values)\n            assert rds.count == 1\n            assert rds.tags() == {\"AREA_OR_POINT\": \"Area\", **test_tags, **xds_attrs}\n            assert rds.dtypes == (\"int16\",)\n\n\n@pytest.mark.parametrize(\n    \"open_method\",\n    [\n        partial(xarray.open_dataset, decode_coords=\"all\"),\n        partial(rioxarray.open_rasterio, masked=True),\n        partial(rioxarray.open_rasterio, masked=True, chunks=True),\n        partial(\n            rioxarray.open_rasterio, masked=True, chunks=True, lock=threading.Lock()\n        ),\n        open_rasterio_engine,\n    ],\n)\n@pytest.mark.parametrize(\"windowed\", [True, False])\n@pytest.mark.parametrize(\n    \"write_lock, compute\",\n    [\n        (None, False),\n        (threading.Lock(), False),\n        (threading.Lock(), True),\n    ],\n)\ndef test_to_raster_3d(open_method, windowed, write_lock, compute, tmpdir):\n    tmp_raster = tmpdir.join(\"planet_3d_raster.tif\")\n    with open_method(os.path.join(TEST_INPUT_DATA_DIR, \"PLANET_SCOPE_3D.nc\")) as mda:\n        xds = mda.green.fillna(mda.green.rio.encoded_nodata)\n        xds.rio._nodata = mda.green.rio.encoded_nodata\n        delayed = xds.rio.to_raster(\n            str(tmp_raster), windowed=windowed, lock=write_lock, compute=compute\n        )\n        xds_attrs = {\n            key: str(value)\n            for key, value in xds.attrs.items()\n            if key\n            not in (\"add_offset\", \"nodata\", \"scale_factor\", \"transform\", \"grid_mapping\")\n        }\n\n        if write_lock is None or not isinstance(xds.data, da.Array) or compute:\n            assert delayed is None\n        else:\n            assert isinstance(delayed, Delayed)\n            delayed.compute()\n\n        with rasterio.open(str(tmp_raster)) as rds:\n            assert rds.crs == xds.rio.crs\n            assert_array_equal(rds.transform, xds.rio.transform())\n            assert_array_equal(rds.nodata, xds.rio.nodata)\n            assert_array_equal(rds.read(), xds.values)\n            assert rds.tags() == {\"AREA_OR_POINT\": \"Area\", **xds_attrs}\n            assert rds.descriptions == (\"green\", \"green\")\n\n        # test roundtrip\n        with rioxarray.open_rasterio(str(tmp_raster)) as rds:\n            assert rds.attrs[\"long_name\"] == \"green\"\n            assert numpy.isnan(rds.rio.nodata)\n\n\ndef test_to_raster__custom_description(tmpdir):\n    tmp_raster = tmpdir.join(\"planet_3d_raster.tif\")\n    with xarray.open_dataset(\n        os.path.join(TEST_INPUT_DATA_DIR, \"PLANET_SCOPE_3D.nc\")\n    ) as mda:\n        xds = mda.green.fillna(mda.green.rio.encoded_nodata)\n        xds.attrs[\"long_name\"] = (\"one\", \"two\")\n        xds.rio.to_raster(str(tmp_raster))\n        xds_attrs = {\n            key: str(value)\n            for key, value in xds.attrs.items()\n            if key not in (\"nodata\", \"long_name\", \"grid_mapping\")\n        }\n\n        with rasterio.open(str(tmp_raster)) as rds:\n            assert rds.tags() == {\"AREA_OR_POINT\": \"Area\", **xds_attrs}\n            assert rds.descriptions == (\"one\", \"two\")\n\n        # test roundtrip\n        with rioxarray.open_rasterio(str(tmp_raster)) as rds:\n            assert rds.attrs[\"long_name\"] == (\"one\", \"two\")\n            assert rds.rio.nodata == 0.0\n\n\n@pytest.mark.parametrize(\"chunks\", [True, None])\ndef test_to_raster__scale_factor_and_add_offset(chunks, tmpdir):\n    tmp_raster = tmpdir.join(\"air_temp_offset.tif\")\n\n    with rioxarray.open_rasterio(\n        os.path.join(TEST_INPUT_DATA_DIR, \"tmmx_20190121.nc\"), chunks=chunks\n    ) as rds:\n        assert rds.air_temperature.scale_factor == 0.1\n        assert rds.air_temperature.add_offset == 220.0\n        rds.air_temperature.rio.to_raster(str(tmp_raster))\n\n    with rasterio.open(str(tmp_raster)) as rds:\n        assert rds.scales == (0.1,)\n        assert rds.offsets == (220.0,)\n\n    # test roundtrip\n    with rioxarray.open_rasterio(str(tmp_raster)) as rds:\n        assert rds.scale_factor == 0.1\n        assert rds.add_offset == 220.0\n        assert rds.rio.nodata == 32767.0\n\n\n@pytest.mark.parametrize(\"chunks\", [True, None])\ndef test_to_raster__offsets_and_scales(chunks, tmpdir):\n    tmp_raster = tmpdir.join(\"air_temp_offset.tif\")\n\n    with rioxarray.open_rasterio(\n        os.path.join(TEST_INPUT_DATA_DIR, \"tmmx_20190121.nc\"), chunks=chunks\n    ) as rds:\n        attrs = dict(rds.air_temperature.attrs)\n        attrs[\"scales\"] = [0.1]\n        attrs[\"offsets\"] = [220.0]\n        attrs.pop(\"scale_factor\")\n        attrs.pop(\"add_offset\")\n        rds.air_temperature.attrs = attrs\n        assert rds.air_temperature.scales == [0.1]\n        assert rds.air_temperature.offsets == [220.0]\n        rds.air_temperature.rio.to_raster(str(tmp_raster))\n\n    with rasterio.open(str(tmp_raster)) as rds:\n        assert rds.scales == (0.1,)\n        assert rds.offsets == (220.0,)\n\n    # test roundtrip\n    with rioxarray.open_rasterio(str(tmp_raster)) as rds:\n        assert rds.scale_factor == 0.1\n        assert rds.add_offset == 220.0\n        assert rds.rio.nodata == 32767.0\n\n\ndef test_to_raster__custom_description__wrong(tmpdir):\n    tmp_raster = tmpdir.join(\"planet_3d_raster.tif\")\n    with xarray.open_dataset(\n        os.path.join(TEST_INPUT_DATA_DIR, \"PLANET_SCOPE_3D.nc\")\n    ) as mda:\n        xds = mda.green.fillna(mda.green.rio.encoded_nodata)\n        xds.attrs[\"long_name\"] = (\"one\", \"two\", \"three\")\n        with pytest.raises(RioXarrayError):\n            xds.rio.to_raster(str(tmp_raster))\n\n\n@pytest.mark.xfail(reason=\"Precision issues with windowed writing on python 3.6\")\n@pytest.mark.parametrize(\"windowed\", [True, False])\ndef test_to_raster__preserve_profile__none_nodata(windowed, tmpdir):\n    tmp_raster = tmpdir.join(\"output_profile.tif\")\n    input_raster = tmpdir.join(\"input_profile.tif\")\n\n    transform = Affine.from_gdal(0, 512, 0, 0, 0, 512)\n    with rasterio.open(\n        str(input_raster),\n        \"w\",\n        driver=\"GTiff\",\n        height=512,\n        width=512,\n        count=1,\n        crs=\"epsg:4326\",\n        transform=transform,\n        dtype=rasterio.float32,\n        tiled=True,\n        tilexsize=256,\n        tileysize=256,\n    ) as rds:\n        rds.write(numpy.empty((1, 512, 512), dtype=numpy.float32))\n\n    with rioxarray.open_rasterio(str(input_raster)) as mda:\n        mda.rio.to_raster(str(tmp_raster), windowed=windowed)\n\n    with rasterio.open(str(tmp_raster)) as rds, rasterio.open(str(input_raster)) as rdc:\n        assert rds.count == rdc.count\n        assert rds.crs == rdc.crs\n        assert_array_equal(rds.transform, rdc.transform)\n        assert_array_equal(rds.nodata, rdc.nodata)\n        assert_almost_equal(rds.read(), rdc.read())\n        assert rds.profile == rdc.profile\n        assert rds.nodata is None\n\n\ndef test_to_raster__dataset(tmpdir):\n    tmp_raster = tmpdir.join(\"planet_3d_raster.tif\")\n    with xarray.open_dataset(\n        os.path.join(TEST_INPUT_DATA_DIR, \"PLANET_SCOPE_3D.nc\")\n    ) as mda:\n        mda.isel(time=0).rio.to_raster(str(tmp_raster))\n\n    with rioxarray.open_rasterio(str(tmp_raster)) as rdscompare:\n        assert rdscompare.scale_factor == 1.0\n        assert rdscompare.add_offset == 0.0\n        assert rdscompare.long_name == (\"blue\", \"green\")\n        assert rdscompare.rio.crs == mda.rio.crs\n        assert numpy.isnan(rdscompare.rio.nodata)\n\n\n@pytest.mark.parametrize(\"chunks\", [True, None])\ndef test_to_raster__dataset__mask_and_scale(chunks, tmpdir):\n    output_raster = tmpdir.join(\"tmmx_20190121.tif\")\n    with rioxarray.open_rasterio(\n        os.path.join(TEST_INPUT_DATA_DIR, \"tmmx_20190121.nc\"), chunks=chunks\n    ) as rds:\n        rds.isel(band=0).rio.to_raster(str(output_raster))\n\n    with rioxarray.open_rasterio(str(output_raster)) as rdscompare:\n        assert rdscompare.scale_factor == 0.1\n        assert rdscompare.add_offset == 220.0\n        assert rdscompare.long_name == \"air_temperature\"\n        assert rdscompare.rio.crs == rds.rio.crs\n        assert rdscompare.rio.nodata == rds.air_temperature.rio.nodata\n\n\ndef test_to_raster__dataset__different_crs(tmpdir):\n    tmp_raster = tmpdir.join(\"planet_3d_raster.tif\")\n    with xarray.open_dataset(\n        os.path.join(TEST_INPUT_DATA_DIR, \"PLANET_SCOPE_3D.nc\")\n    ) as mda:\n        rds = mda.isel(time=0)\n        attrs = rds.green.attrs\n        attrs[\"crs\"] = \"EPSG:4326\"\n        attrs.pop(\"grid_mapping\")\n        rds.green.attrs = attrs\n        attrs = rds.blue.attrs\n        attrs[\"crs\"] = \"EPSG:32722\"\n        attrs.pop(\"grid_mapping\")\n        rds.blue.attrs = attrs\n        rds = rds.drop_vars(\"spatial_ref\")\n        with pytest.raises(\n            RioXarrayError, match=\"CRS in DataArrays differ in the Dataset\"\n        ):\n            rds.rio.to_raster(str(tmp_raster))\n\n\ndef test_to_raster__dataset__different_nodata(tmpdir):\n    tmp_raster = tmpdir.join(\"planet_3d_raster.tif\")\n    with xarray.open_dataset(\n        os.path.join(TEST_INPUT_DATA_DIR, \"PLANET_SCOPE_3D.nc\"), mask_and_scale=False\n    ) as mda:\n        rds = mda.isel(time=0)\n        rds.green.rio.write_nodata(1234, inplace=True)\n        rds.blue.rio.write_nodata(2345, inplace=True)\n        with pytest.raises(\n            RioXarrayError,\n            match=\"All nodata values must be the same when exporting to raster.\",\n        ):\n            rds.rio.to_raster(str(tmp_raster))\n\n\n@pytest.mark.parametrize(\"windowed\", [True, False])\ndef test_to_raster__different_dtype(tmp_path, windowed):\n    test_da = xarray.DataArray(\n        numpy.zeros((5, 5)),\n        dims=(\"y\", \"x\"),\n        coords={\"y\": numpy.arange(1, 6), \"x\": numpy.arange(2, 7)},\n    )\n    test_da.values[1, 1] = -1.1\n    test_nd = test_da.rio.write_nodata(-1.1)\n    test_nd.rio.write_transform(\n        Affine.from_gdal(425047, 3.0, 0.0, 4615780, 0.0, -3.0), inplace=True\n    )\n    test_nd.rio.write_crs(\"EPSG:4326\", inplace=True)\n    tmpfile = tmp_path / \"dtype.tif\"\n    with pytest.warns(\n        UserWarning,\n        match=(\n            r\"The nodata value \\(-1.1\\) has been automatically changed to \"\n            r\"\\(255\\) to match the dtype of the data.\"\n        ),\n    ):\n        test_nd.rio.to_raster(tmpfile, dtype=numpy.uint8, windowed=windowed)\n    xds = rioxarray.open_rasterio(tmpfile)\n    assert str(xds.dtype) == \"uint8\"\n    assert xds.attrs[\"_FillValue\"] == 255\n    assert xds.rio.nodata == 255\n    assert xds.squeeze().values[1, 1] == 255\n\n\ndef test_missing_spatial_dimensions():\n    test_ds = xarray.Dataset()\n    with pytest.raises(MissingSpatialDimensionError):\n        test_ds.rio.shape\n    with pytest.raises(MissingSpatialDimensionError):\n        test_ds.rio.width\n    with pytest.raises(MissingSpatialDimensionError):\n        test_ds.rio.height\n    test_da = xarray.DataArray(1)\n    with pytest.raises(MissingSpatialDimensionError):\n        test_da.rio.shape\n    with pytest.raises(MissingSpatialDimensionError):\n        test_da.rio.width\n    with pytest.raises(MissingSpatialDimensionError):\n        test_da.rio.height\n\n\ndef test_set_spatial_dims():\n    test_da = xarray.DataArray(\n        numpy.zeros((5, 5)),\n        dims=(\"lat\", \"lon\"),\n        coords={\"lat\": numpy.arange(1, 6), \"lon\": numpy.arange(2, 7)},\n    )\n    test_da_copy = test_da.rio.set_spatial_dims(x_dim=\"lon\", y_dim=\"lat\", inplace=False)\n    assert test_da_copy.rio.x_dim == \"lon\"\n    assert test_da_copy.rio.y_dim == \"lat\"\n    assert test_da_copy.rio.width == 5\n    assert test_da_copy.rio.height == 5\n    assert test_da_copy.rio.shape == (5, 5)\n    with pytest.raises(MissingSpatialDimensionError):\n        test_da.rio.shape\n    with pytest.raises(MissingSpatialDimensionError):\n        test_da.rio.width\n    with pytest.raises(MissingSpatialDimensionError):\n        test_da.rio.height\n\n    test_da.rio.set_spatial_dims(x_dim=\"lon\", y_dim=\"lat\", inplace=True)\n    assert test_da.rio.x_dim == \"lon\"\n    assert test_da.rio.y_dim == \"lat\"\n    assert test_da.rio.width == 5\n    assert test_da.rio.height == 5\n    assert test_da.rio.shape == (5, 5)\n\n\ndef test_set_spatial_dims__missing():\n    test_ds = xarray.Dataset()\n    with pytest.raises(MissingSpatialDimensionError):\n        test_ds.rio.set_spatial_dims(x_dim=\"lon\", y_dim=\"lat\")\n    test_da = xarray.DataArray(\n        numpy.zeros((5, 5)),\n        dims=(\"lat\", \"lon\"),\n        coords={\"lat\": numpy.arange(1, 6), \"lon\": numpy.arange(2, 7)},\n    )\n    with pytest.raises(MissingSpatialDimensionError):\n        test_da.rio.set_spatial_dims(x_dim=\"long\", y_dim=\"lati\")\n\n\ndef test_crs_empty_dataset():\n    assert xarray.Dataset().rio.crs is None\n\n\ndef test_crs_setter():\n    test_da = xarray.DataArray(\n        numpy.zeros((5, 5)),\n        dims=(\"y\", \"x\"),\n        coords={\"y\": numpy.arange(1, 6), \"x\": numpy.arange(2, 7)},\n    )\n    assert test_da.rio.crs is None\n    out_ds = test_da.rio.set_crs(4326)\n    assert test_da.rio.crs.to_epsg() == 4326\n    assert out_ds.rio.crs.to_epsg() == 4326\n    test_ds = test_da.to_dataset(name=\"test\")\n    assert test_ds.rio.crs is None\n    out_ds = test_ds.rio.set_crs(4326)\n    assert test_ds.rio.crs.to_epsg() == 4326\n    assert out_ds.rio.crs.to_epsg() == 4326\n\n\ndef test_crs_setter__copy():\n    test_da = 
xarray.DataArray(\n numpy.zeros((5, 5)),\n dims=(\"y\", \"x\"),\n coords={\"y\": numpy.arange(1, 6), \"x\": numpy.arange(2, 7)},\n )\n assert test_da.rio.crs is None\n out_ds = test_da.rio.set_crs(4326, inplace=False)\n assert test_da.rio.crs is None\n assert out_ds.rio.crs.to_epsg() == 4326\n test_ds = test_da.to_dataset(name=\"test\")\n assert test_ds.rio.crs is None\n out_ds = test_ds.rio.set_crs(4326, inplace=False)\n assert test_ds.rio.crs is None\n assert out_ds.rio.crs.to_epsg() == 4326\n\n\ndef test_crs_writer__array__copy():\n test_da = xarray.DataArray(\n numpy.zeros((5, 5)),\n dims=(\"y\", \"x\"),\n coords={\"y\": numpy.arange(1, 6), \"x\": numpy.arange(2, 7)},\n )\n assert test_da.rio.crs is None\n out_da = test_da.rio.write_crs(4326, grid_mapping_name=\"crs\")\n assert \"crs_wkt\" in out_da.coords[\"crs\"].attrs\n assert \"spatial_ref\" in out_da.coords[\"crs\"].attrs\n out_da.rio._crs = None\n assert out_da.rio.crs.to_epsg() == 4326\n test_da.rio._crs = None\n assert test_da.rio.crs is None\n assert \"crs\" not in test_da.coords\n assert out_da.encoding[\"grid_mapping\"] == \"crs\"\n\n\ndef test_crs_writer__array__inplace():\n test_da = xarray.DataArray(\n numpy.zeros((5, 5)),\n dims=(\"y\", \"x\"),\n coords={\"y\": numpy.arange(1, 6), \"x\": numpy.arange(2, 7)},\n )\n assert test_da.rio.crs is None\n out_da = test_da.rio.write_crs(4326, inplace=True)\n assert \"crs_wkt\" in test_da.coords[\"spatial_ref\"].attrs\n assert \"spatial_ref\" in test_da.coords[\"spatial_ref\"].attrs\n assert out_da.coords[\"spatial_ref\"] == test_da.coords[\"spatial_ref\"]\n test_da.rio._crs = None\n assert test_da.rio.crs.to_epsg() == 4326\n assert test_da.encoding[\"grid_mapping\"] == \"spatial_ref\"\n assert out_da.attrs == test_da.attrs\n out_da.rio._crs = None\n assert out_da.rio.crs.to_epsg() == 4326\n\n\ndef test_crs_writer__dataset__copy():\n test_da = xarray.DataArray(\n numpy.zeros((5, 5)),\n dims=(\"y\", \"x\"),\n coords={\"y\": numpy.arange(1, 6), \"x\": numpy.arange(2, 7)},\n )\n test_da = test_da.to_dataset(name=\"test\")\n assert test_da.rio.crs is None\n out_da = test_da.rio.write_crs(4326, grid_mapping_name=\"crs\")\n assert \"crs_wkt\" in out_da.coords[\"crs\"].attrs\n assert \"spatial_ref\" in out_da.coords[\"crs\"].attrs\n out_da.test.rio._crs = None\n assert out_da.rio.crs.to_epsg() == 4326\n assert out_da.test.encoding[\"grid_mapping\"] == \"crs\"\n # make sure input did not change the dataset\n test_da.test.rio._crs = None\n test_da.rio._crs = None\n assert test_da.rio.crs is None\n assert \"crs\" not in test_da.coords\n\n\ndef test_crs_writer__dataset__inplace():\n test_da = xarray.DataArray(\n numpy.zeros((5, 5)),\n dims=(\"y\", \"x\"),\n coords={\"y\": numpy.arange(1, 6), \"x\": numpy.arange(2, 7)},\n )\n test_da = test_da.to_dataset(name=\"test\")\n assert test_da.rio.crs is None\n out_da = test_da.rio.write_crs(4326, inplace=True)\n assert \"crs_wkt\" in test_da.coords[\"spatial_ref\"].attrs\n assert \"spatial_ref\" in test_da.coords[\"spatial_ref\"].attrs\n assert out_da.coords[\"spatial_ref\"] == test_da.coords[\"spatial_ref\"]\n out_da.test.rio._crs = None\n assert out_da.rio.crs.to_epsg() == 4326\n test_da.test.rio._crs = None\n test_da.rio._crs = None\n assert test_da.rio.crs.to_epsg() == 4326\n assert out_da.test.encoding[\"grid_mapping\"] == \"spatial_ref\"\n assert out_da.test.attrs == test_da.test.attrs\n\n\ndef test_crs_writer__missing():\n test_da = xarray.DataArray(\n numpy.zeros((5, 5)),\n dims=(\"y\", \"x\"),\n coords={\"y\": numpy.arange(1, 6), \"x\": 
numpy.arange(2, 7)},\n )\n with pytest.raises(MissingCRS):\n test_da.rio.write_crs()\n with pytest.raises(MissingCRS):\n test_da.to_dataset(name=\"test\").rio.write_crs()\n\n\ndef test_crs__dataset__different_crs(tmpdir):\n green = xarray.DataArray(\n numpy.zeros((5, 5)),\n dims=(\"y\", \"x\"),\n coords={\"y\": numpy.arange(1, 6), \"x\": numpy.arange(2, 7)},\n attrs={\"crs\": \"EPSG:4326\"},\n )\n blue = green.copy(deep=True)\n blue.attrs = {\"crs\": \"EPSG:32722\"}\n\n with pytest.raises(RioXarrayError, match=\"CRS in DataArrays differ in the Dataset\"):\n xarray.Dataset({\"green\": green, \"blue\": blue}).rio.crs\n\n\ndef test_clip_missing_crs():\n test_da = xarray.DataArray(\n numpy.zeros((5, 5)),\n dims=(\"y\", \"x\"),\n coords={\"y\": numpy.arange(1, 6), \"x\": numpy.arange(2, 7)},\n )\n with pytest.raises(MissingCRS):\n test_da.rio.clip({}, 4326)\n\n\ndef test_reproject_missing_crs():\n test_da = xarray.DataArray(\n numpy.zeros((5, 5)),\n dims=(\"y\", \"x\"),\n coords={\"y\": numpy.arange(1, 6), \"x\": numpy.arange(2, 7)},\n )\n with pytest.raises(MissingCRS):\n test_da.rio.reproject(4326)\n\n\ndef test_reproject_resolution_and_shape_transform():\n test_da = xarray.DataArray(\n numpy.zeros((5, 5)),\n dims=(\"y\", \"x\"),\n coords={\"y\": numpy.arange(1, 6), \"x\": numpy.arange(2, 7)},\n attrs={\"crs\": \"epsg:3857\"},\n )\n affine = Affine.from_gdal(0, 0.005, 0, 0, 0, 0.005)\n with pytest.raises(RioXarrayError):\n test_da.rio.reproject(4326, resolution=1, shape=(1, 1))\n with pytest.raises(RioXarrayError):\n test_da.rio.reproject(4326, resolution=1, transform=affine)\n with pytest.raises(RioXarrayError):\n test_da.rio.reproject(4326, resolution=1, shape=(1, 1), transform=affine)\n\n\ndef test_reproject_transform_missing_shape():\n test_da = xarray.DataArray(\n numpy.zeros((5, 5)),\n dims=(\"y\", \"x\"),\n coords={\"y\": numpy.arange(1, 6), \"x\": numpy.arange(2, 7)},\n attrs={\"crs\": \"epsg:3857\"},\n )\n affine = Affine.from_gdal(0, 0.005, 0, 0, 0, 0.005)\n reprojected = test_da.rio.reproject(4326, transform=affine)\n assert reprojected.rio.shape == (5, 5)\n assert reprojected.rio.transform() == affine\n\n\nclass CustomCRS:\n @property\n def wkt(self):\n return CRS.from_epsg(4326).to_wkt()\n\n def __str__(self):\n return self.wkt\n\n\ndef test_crs_get_custom():\n test_da = xarray.DataArray(\n numpy.zeros((5, 5)),\n dims=(\"y\", \"x\"),\n coords={\"y\": numpy.arange(1, 6), \"x\": numpy.arange(2, 7)},\n attrs={\"crs\": CustomCRS()},\n )\n assert test_da.rio.crs.to_epsg() == 4326\n test_ds = xarray.Dataset({\"test\": test_da})\n assert test_ds.rio.crs.to_epsg() == 4326\n\n\ndef test_get_crs_dataset():\n test_ds = xarray.Dataset()\n test_ds = test_ds.rio.write_crs(4326)\n assert test_ds.encoding[\"grid_mapping\"] == \"spatial_ref\"\n assert test_ds.rio.crs.to_epsg() == 4326\n\n\ndef test_write_crs_cf():\n test_da = xarray.DataArray(1)\n test_da = test_da.rio.write_crs(4326)\n assert test_da.encoding[\"grid_mapping\"] == \"spatial_ref\"\n assert test_da.rio.crs.to_epsg() == 4326\n assert \"spatial_ref\" in test_da.spatial_ref.attrs\n assert \"crs_wkt\" in test_da.spatial_ref.attrs\n assert test_da.spatial_ref.attrs[\"grid_mapping_name\"] == \"latitude_longitude\"\n\n\ndef test_write_crs_cf__disable_grid_mapping():\n test_da = xarray.DataArray(1)\n with rioxarray.set_options(export_grid_mapping=False):\n test_da = test_da.rio.write_crs(4326)\n assert test_da.encoding[\"grid_mapping\"] == \"spatial_ref\"\n assert test_da.rio.crs.to_epsg() == 4326\n assert \"spatial_ref\" in 
test_da.spatial_ref.attrs\n assert \"crs_wkt\" in test_da.spatial_ref.attrs\n assert \"grid_mapping_name\" not in test_da.spatial_ref.attrs\n\n\ndef test_write_crs__missing_geospatial_dims():\n test_da = xarray.DataArray(\n [1],\n name=\"data\",\n dims=(\"time\",),\n coords={\"time\": [1]},\n )\n assert test_da.copy().rio.write_crs(3857).rio.crs.to_epsg() == 3857\n assert test_da.to_dataset().rio.write_crs(3857).rio.crs.to_epsg() == 3857\n\n\ndef test_read_crs_cf():\n test_da = xarray.DataArray(1)\n test_da = test_da.rio.write_crs(4326)\n assert test_da.encoding[\"grid_mapping\"] == \"spatial_ref\"\n attrs = test_da.spatial_ref.attrs\n attrs.pop(\"spatial_ref\")\n attrs.pop(\"crs_wkt\")\n assert test_da.rio.crs.is_geographic\n\n\ndef test_get_crs_dataset__nonstandard_grid_mapping():\n test_ds = xarray.Dataset()\n test_ds = test_ds.rio.write_crs(4326, grid_mapping_name=\"frank\")\n assert test_ds.encoding[\"grid_mapping\"] == \"frank\"\n assert test_ds.rio.crs.to_epsg() == 4326\n\n\ndef test_get_crs_dataset__missing_grid_mapping_default():\n test_ds = xarray.open_dataset(os.path.join(TEST_INPUT_DATA_DIR, \"test_find_crs.nc\"))\n assert test_ds.rio.crs.to_epsg() == 32614\n\n\ndef test_nodata_setter():\n test_da = xarray.DataArray(\n numpy.zeros((5, 5)),\n dims=(\"y\", \"x\"),\n coords={\"y\": numpy.arange(1, 6), \"x\": numpy.arange(2, 7)},\n )\n assert test_da.rio.nodata is None\n out_ds = test_da.rio.set_nodata(-1)\n assert out_ds.rio.nodata == -1\n\n\ndef test_nodata_setter__copy():\n test_da = xarray.DataArray(\n numpy.zeros((5, 5)),\n dims=(\"y\", \"x\"),\n coords={\"y\": numpy.arange(1, 6), \"x\": numpy.arange(2, 7)},\n )\n assert test_da.rio.nodata is None\n out_ds = test_da.rio.set_nodata(-1, inplace=False)\n assert test_da.rio.nodata is None\n assert out_ds.rio.nodata == -1\n\n\ndef test_write_nodata__array__copy():\n test_da = xarray.DataArray(\n numpy.zeros((5, 5)),\n dims=(\"y\", \"x\"),\n coords={\"y\": numpy.arange(1, 6), \"x\": numpy.arange(2, 7)},\n )\n assert test_da.rio.nodata is None\n out_da = test_da.rio.write_nodata(-1)\n assert test_da.rio.nodata is None\n assert out_da.attrs[\"_FillValue\"] == -1\n assert out_da.rio.nodata == -1\n out_da.rio._nodata = None\n assert out_da.rio.nodata == -1\n test_da.rio._nodata = None\n assert test_da.rio.nodata is None\n assert \"_FillValue\" not in test_da.attrs\n\n\ndef test_write_nodata__array__inplace():\n test_da = xarray.DataArray(\n numpy.zeros((5, 5)),\n dims=(\"y\", \"x\"),\n coords={\"y\": numpy.arange(1, 6), \"x\": numpy.arange(2, 7)},\n )\n assert test_da.rio.nodata is None\n out_da = test_da.rio.write_nodata(-1, inplace=True)\n assert \"_FillValue\" in test_da.attrs\n assert out_da.attrs[\"_FillValue\"] == test_da.attrs[\"_FillValue\"]\n test_da.rio._nodata = None\n assert test_da.rio.nodata == -1\n assert out_da.attrs == test_da.attrs\n out_da.rio._nodata = None\n assert out_da.rio.nodata == -1\n\n\ndef test_write_nodata__missing():\n test_da = xarray.DataArray(\n numpy.zeros((5, 5)),\n dims=(\"y\", \"x\"),\n coords={\"y\": numpy.arange(1, 6), \"x\": numpy.arange(2, 7)},\n )\n test_da.rio.write_nodata(None)\n assert not test_da.attrs\n assert not test_da.encoding\n\n\ndef test_write_nodata__remove():\n test_da = xarray.DataArray(\n numpy.zeros((5, 5)),\n dims=(\"y\", \"x\"),\n coords={\"y\": numpy.arange(1, 6), \"x\": numpy.arange(2, 7)},\n )\n test_nd = test_da.rio.write_nodata(-1)\n assert not test_da.attrs\n assert test_nd.attrs[\"_FillValue\"] == -1\n test_nd.rio.write_nodata(None, inplace=True)\n assert not 
test_nd.attrs\n\n\ndef test_write_nodata__encoded():\n test_da = xarray.DataArray(\n numpy.zeros((5, 5)),\n dims=(\"y\", \"x\"),\n coords={\"y\": numpy.arange(1, 6), \"x\": numpy.arange(2, 7)},\n )\n test_nd = test_da.rio.write_nodata(-1, inplace=True)\n assert test_nd.attrs[\"_FillValue\"] == -1\n test_nd = test_da.rio.write_nodata(-1, encoded=True, inplace=True)\n assert not test_nd.attrs\n assert test_nd.encoding[\"_FillValue\"] == -1\n assert test_nd.rio.encoded_nodata == -1\n assert numpy.isnan(test_nd.rio.nodata)\n test_nd.rio.write_nodata(None, encoded=True, inplace=True)\n assert not test_nd.attrs\n assert not test_nd.encoding\n assert test_nd.rio.encoded_nodata is None\n assert test_nd.rio.nodata is None\n\n\[email protected](\"nodata\", [-1.1, \"-1.1\"])\ndef test_write_nodata__different_dtype(nodata):\n test_da = xarray.DataArray(\n numpy.zeros((5, 5), dtype=int),\n dims=(\"y\", \"x\"),\n coords={\"y\": numpy.arange(1, 6), \"x\": numpy.arange(2, 7)},\n )\n with pytest.warns(\n UserWarning,\n match=(\n r\"The nodata value \\(-1.1\\) has been automatically changed to \"\n r\"\\(-1\\) to match the dtype of the data.\"\n ),\n ):\n test_nd = test_da.rio.write_nodata(nodata)\n assert not test_da.attrs\n assert test_nd.attrs[\"_FillValue\"] == -1\n assert test_nd.rio.nodata == -1\n\n\[email protected](\"nodata\", [-1.1, \"-1.1\"])\ndef test_nodata_reader__different_dtype(nodata):\n test_da = xarray.DataArray(\n numpy.zeros((5, 5), dtype=numpy.uint8),\n dims=(\"y\", \"x\"),\n coords={\"y\": numpy.arange(1, 6), \"x\": numpy.arange(2, 7)},\n attrs={\"_FillValue\": nodata},\n )\n assert test_da.attrs[\"_FillValue\"] == nodata\n with pytest.warns(\n UserWarning,\n match=(\n r\"The nodata value \\(-1.1\\) has been automatically changed to \"\n r\"\\(255\\) to match the dtype of the data.\"\n ),\n ):\n assert test_da.rio.nodata == 255\n\n\ndef test_isel_window():\n with rioxarray.open_rasterio(\n os.path.join(TEST_INPUT_DATA_DIR, \"MODIS_ARRAY.nc\")\n ) as mda:\n assert (\n mda.rio.isel_window(Window.from_slices(slice(9, 12), slice(10, 12)))\n == mda.isel(x=slice(10, 12), y=slice(9, 12))\n ).all()\n\n\ndef test_isel_window_wpad():\n with rioxarray.open_rasterio(\n os.path.join(TEST_INPUT_DATA_DIR, \"MODIS_ARRAY.nc\")\n ) as mda:\n w1 = Window.from_slices(\n slice(-5, 10), slice(-5, 10), height=15, width=15, boundless=True\n )\n wb1 = rasterio.windows.bounds(w1, mda.rio.transform(recalc=True))\n res1 = mda.rio.isel_window(w1, pad=True)\n exp1 = mda.rio.pad_box(*wb1).isel(x=slice(0, 15), y=slice(0, 15))\n assert (res1 == exp1).all()\n w2 = Window.from_slices(\n slice(195, 210), slice(195, 210), height=15, width=15, boundless=True\n )\n wb2 = rasterio.windows.bounds(w2, mda.rio.transform(recalc=True))\n res2 = mda.rio.isel_window(w2, pad=True)\n exp2 = mda.rio.pad_box(*wb2).isel(x=slice(195, 210), y=slice(195, 210))\n assert (res2 == exp2).all()\n\n\ndef test_write_pyproj_crs_dataset():\n test_ds = xarray.Dataset()\n test_ds = test_ds.rio.write_crs(pCRS(4326))\n assert test_ds.encoding[\"grid_mapping\"] == \"spatial_ref\"\n assert test_ds.rio.crs.to_epsg() == 4326\n\n\ndef test_nonstandard_dims_clip__dataset():\n with open(os.path.join(TEST_INPUT_DATA_DIR, \"nonstandard_dim_geom.json\")) as ndj:\n geom = json.load(ndj)\n with xarray.open_dataset(\n os.path.join(TEST_INPUT_DATA_DIR, \"nonstandard_dim.nc\")\n ) as xds:\n xds.coords[\"lon\"].attrs = {}\n xds.coords[\"lat\"].attrs = {}\n clipped = xds.rio.set_spatial_dims(x_dim=\"lon\", y_dim=\"lat\").rio.clip([geom])\n assert clipped.rio.width == 
6\n assert clipped.rio.height == 5\n\n\ndef test_nonstandard_dims_clip__array():\n with open(os.path.join(TEST_INPUT_DATA_DIR, \"nonstandard_dim_geom.json\")) as ndj:\n geom = json.load(ndj)\n with xarray.open_dataset(\n os.path.join(TEST_INPUT_DATA_DIR, \"nonstandard_dim.nc\")\n ) as xds:\n xds.coords[\"lon\"].attrs = {}\n xds.coords[\"lat\"].attrs = {}\n clipped = xds.analysed_sst.rio.set_spatial_dims(\n x_dim=\"lon\", y_dim=\"lat\"\n ).rio.clip([geom])\n assert clipped.rio.width == 6\n assert clipped.rio.height == 5\n\n\ndef test_nonstandard_dims_clip_box__dataset():\n with xarray.open_dataset(\n os.path.join(TEST_INPUT_DATA_DIR, \"nonstandard_dim.nc\")\n ) as xds:\n xds.coords[\"lon\"].attrs = {}\n xds.coords[\"lat\"].attrs = {}\n clipped = xds.rio.set_spatial_dims(x_dim=\"lon\", y_dim=\"lat\").rio.clip_box(\n -70.51367964678269,\n -23.780199727400767,\n -70.44589567737998,\n -23.71896017814794,\n )\n assert clipped.rio.width == 7\n assert clipped.rio.height == 7\n\n\ndef test_nonstandard_dims_clip_box_array():\n with xarray.open_dataset(\n os.path.join(TEST_INPUT_DATA_DIR, \"nonstandard_dim.nc\")\n ) as xds:\n xds.coords[\"lon\"].attrs = {}\n xds.coords[\"lat\"].attrs = {}\n clipped = xds.analysed_sst.rio.set_spatial_dims(\n x_dim=\"lon\", y_dim=\"lat\"\n ).rio.clip_box(\n -70.51367964678269,\n -23.780199727400767,\n -70.44589567737998,\n -23.71896017814794,\n )\n assert clipped.rio.width == 7\n assert clipped.rio.height == 7\n\n\ndef test_nonstandard_dims_slice_xy_array():\n with xarray.open_dataset(\n os.path.join(TEST_INPUT_DATA_DIR, \"nonstandard_dim.nc\")\n ) as xds:\n xds.coords[\"lon\"].attrs = {}\n xds.coords[\"lat\"].attrs = {}\n clipped = xds.analysed_sst.rio.set_spatial_dims(\n x_dim=\"lon\", y_dim=\"lat\"\n ).rio.slice_xy(\n -70.51367964678269,\n -23.780199727400767,\n -70.44589567737998,\n -23.71896017814794,\n )\n assert clipped.rio.width == 7\n assert clipped.rio.height == 7\n\n\ndef test_nonstandard_dims_reproject__dataset():\n with xarray.open_dataset(\n os.path.join(TEST_INPUT_DATA_DIR, \"nonstandard_dim.nc\")\n ) as xds:\n xds.coords[\"lon\"].attrs = {}\n xds.coords[\"lat\"].attrs = {}\n xds = xds.rio.set_spatial_dims(x_dim=\"lon\", y_dim=\"lat\")\n reprojected = xds.rio.reproject(\"epsg:3857\")\n assert reprojected.rio.width == 11\n assert reprojected.rio.height == 11\n assert reprojected.rio.crs.to_epsg() == 3857\n\n\ndef test_nonstandard_dims_reproject__array():\n with xarray.open_dataset(\n os.path.join(TEST_INPUT_DATA_DIR, \"nonstandard_dim.nc\")\n ) as xds:\n xds.coords[\"lon\"].attrs = {}\n xds.coords[\"lat\"].attrs = {}\n reprojected = xds.analysed_sst.rio.set_spatial_dims(\n x_dim=\"lon\", y_dim=\"lat\"\n ).rio.reproject(\"epsg:3857\")\n assert reprojected.rio.width == 11\n assert reprojected.rio.height == 11\n assert reprojected.rio.crs.to_epsg() == 3857\n\n\ndef test_nonstandard_dims_interpolate_na__dataset():\n pytest.importorskip(\"scipy\")\n with xarray.open_dataset(\n os.path.join(TEST_INPUT_DATA_DIR, \"nonstandard_dim.nc\")\n ) as xds:\n xds.coords[\"lon\"].attrs = {}\n xds.coords[\"lat\"].attrs = {}\n reprojected = xds.rio.set_spatial_dims(\n x_dim=\"lon\", y_dim=\"lat\"\n ).rio.interpolate_na()\n assert reprojected.rio.width == 11\n assert reprojected.rio.height == 11\n\n\ndef test_nonstandard_dims_interpolate_na__array():\n pytest.importorskip(\"scipy\")\n with xarray.open_dataset(\n os.path.join(TEST_INPUT_DATA_DIR, \"nonstandard_dim.nc\")\n ) as xds:\n xds.coords[\"lon\"].attrs = {}\n xds.coords[\"lat\"].attrs = {}\n reprojected = 
xds.analysed_sst.rio.set_spatial_dims(\n x_dim=\"lon\", y_dim=\"lat\"\n ).rio.interpolate_na()\n assert reprojected.rio.width == 11\n assert reprojected.rio.height == 11\n\n\ndef test_nonstandard_dims_write_nodata__array():\n with xarray.open_dataset(\n os.path.join(TEST_INPUT_DATA_DIR, \"nonstandard_dim.nc\")\n ) as xds:\n xds.coords[\"lon\"].attrs = {}\n xds.coords[\"lat\"].attrs = {}\n reprojected = xds.analysed_sst.rio.set_spatial_dims(\n x_dim=\"lon\", y_dim=\"lat\"\n ).rio.write_nodata(-999)\n assert reprojected.rio.width == 11\n assert reprojected.rio.height == 11\n assert reprojected.rio.nodata == -999\n\n\ndef test_nonstandard_dims_isel_window():\n with xarray.open_dataset(\n os.path.join(TEST_INPUT_DATA_DIR, \"nonstandard_dim.nc\")\n ) as xds:\n xds.coords[\"lon\"].attrs = {}\n xds.coords[\"lat\"].attrs = {}\n reprojected = xds.rio.set_spatial_dims(\n x_dim=\"lon\", y_dim=\"lat\"\n ).rio.isel_window(Window.from_slices(slice(4), slice(5)))\n assert reprojected.rio.width == 5\n assert reprojected.rio.height == 4\n\n\ndef test_nonstandard_dims_error_msg():\n with xarray.open_dataset(\n os.path.join(TEST_INPUT_DATA_DIR, \"nonstandard_dim.nc\")\n ) as xds:\n xds.coords[\"lon\"].attrs = {}\n xds.coords[\"lat\"].attrs = {}\n with pytest.raises(MissingSpatialDimensionError, match=\"x dimension not found\"):\n xds.rio.width\n with pytest.raises(\n MissingSpatialDimensionError, match=\"Data variable: analysed_sst\"\n ):\n xds.analysed_sst.rio.width\n\n\ndef test_nonstandard_dims_find_dims():\n with xarray.open_dataset(\n os.path.join(TEST_INPUT_DATA_DIR, \"nonstandard_dim.nc\")\n ) as xds:\n assert xds.rio.x_dim == \"lon\"\n assert xds.rio.y_dim == \"lat\"\n\n\ndef test_nonstandard_dims_find_dims__standard_name():\n with xarray.open_dataset(\n os.path.join(TEST_INPUT_DATA_DIR, \"nonstandard_dim.nc\")\n ) as xds:\n xds.coords[\"lon\"].attrs = {\"standard_name\": \"longitude\"}\n xds.coords[\"lat\"].attrs = {\"standard_name\": \"latitude\"}\n assert xds.rio.x_dim == \"lon\"\n assert xds.rio.y_dim == \"lat\"\n\n\ndef test_nonstandard_dims_find_dims__standard_name__projected():\n with xarray.open_dataset(\n os.path.join(TEST_INPUT_DATA_DIR, \"nonstandard_dim.nc\")\n ) as xds:\n xds.coords[\"lon\"].attrs = {\"standard_name\": \"projection_x_coordinate\"}\n xds.coords[\"lat\"].attrs = {\"standard_name\": \"projection_y_coordinate\"}\n assert xds.rio.x_dim == \"lon\"\n assert xds.rio.y_dim == \"lat\"\n\n\ndef test_nonstandard_dims_find_dims__axis():\n with xarray.open_dataset(\n os.path.join(TEST_INPUT_DATA_DIR, \"nonstandard_dim.nc\")\n ) as xds:\n xds.coords[\"lon\"].attrs = {\"axis\": \"X\"}\n xds.coords[\"lat\"].attrs = {\"axis\": \"Y\"}\n assert xds.rio.x_dim == \"lon\"\n assert xds.rio.y_dim == \"lat\"\n\n\ndef test_nonstandard_dims_to_raster__dataset(tmp_path):\n with xarray.open_dataset(\n os.path.join(TEST_INPUT_DATA_DIR, \"nonstandard_dim.nc\"), decode_coords=\"all\"\n ) as xds:\n xds.attrs.pop(\"grid_mapping\")\n xds.coords[\"lon\"].attrs = {}\n xds.coords[\"lat\"].attrs = {}\n xds.squeeze().rio.set_spatial_dims(x_dim=\"lon\", y_dim=\"lat\").rio.to_raster(\n tmp_path / \"test.tif\"\n )\n\n\ndef test_missing_crs_error_msg():\n with xarray.open_dataset(\n os.path.join(TEST_INPUT_DATA_DIR, \"nonstandard_dim.nc\")\n ) as xds:\n xds = xds.drop_vars(\"spatial_ref\")\n xds.attrs.pop(\"grid_mapping\")\n with pytest.raises(MissingCRS, match=\"Data variable: analysed_sst\"):\n xds.rio.set_spatial_dims(x_dim=\"lon\", y_dim=\"lat\").rio.reproject(\n \"EPSG:4326\"\n )\n with 
pytest.raises(MissingCRS, match=\"Data variable: analysed_sst\"):\n xds.rio.set_spatial_dims(\n x_dim=\"lon\", y_dim=\"lat\"\n ).analysed_sst.rio.reproject(\"EPSG:4326\")\n\n\ndef test_missing_transform_bounds():\n xds = rioxarray.open_rasterio(\n os.path.join(TEST_COMPARE_DATA_DIR, \"small_dem_3m_merged.tif\"),\n parse_coordinates=False,\n )\n xds.coords[\"spatial_ref\"].attrs.pop(\"GeoTransform\")\n with pytest.raises(DimensionMissingCoordinateError):\n xds.rio.bounds()\n\n\ndef test_missing_transform_resolution():\n xds = rioxarray.open_rasterio(\n os.path.join(TEST_COMPARE_DATA_DIR, \"small_dem_3m_merged.tif\"),\n parse_coordinates=False,\n )\n xds.coords[\"spatial_ref\"].attrs.pop(\"GeoTransform\")\n with pytest.raises(DimensionMissingCoordinateError):\n xds.rio.resolution()\n\n\ndef test_shape_order():\n rds = rioxarray.open_rasterio(os.path.join(TEST_INPUT_DATA_DIR, \"tmmx_20190121.nc\"))\n assert rds.air_temperature.rio.shape == (585, 1386)\n\n\ndef test_write_transform__from_read(tmp_path):\n xds = rioxarray.open_rasterio(\n os.path.join(TEST_COMPARE_DATA_DIR, \"small_dem_3m_merged.tif\"),\n parse_coordinates=False,\n )\n out_file = tmp_path / \"test_geotransform.nc\"\n xds.to_netcdf(out_file)\n xds2 = rioxarray.open_rasterio(out_file, parse_coordinates=False)\n assert_almost_equal(tuple(xds2.rio.transform()), tuple(xds.rio.transform()))\n assert xds.spatial_ref.GeoTransform == xds2.spatial_ref.GeoTransform\n\n\ndef test_write_transform():\n test_affine = Affine.from_gdal(425047, 3.0, 0.0, 4615780, 0.0, -3.0)\n ds = xarray.Dataset()\n ds.rio.write_transform(test_affine, inplace=True)\n assert ds.spatial_ref.GeoTransform == \"425047.0 3.0 0.0 4615780.0 0.0 -3.0\"\n assert ds.rio._cached_transform() == test_affine\n assert ds.rio.transform() == test_affine\n assert ds.rio.grid_mapping == \"spatial_ref\"\n da = xarray.DataArray(1)\n da.rio.write_transform(test_affine, inplace=True)\n assert da.rio._cached_transform() == test_affine\n assert da.rio.transform() == test_affine\n assert da.spatial_ref.GeoTransform == \"425047.0 3.0 0.0 4615780.0 0.0 -3.0\"\n assert da.rio.grid_mapping == \"spatial_ref\"\n\n\ndef test_write_read_transform__non_rectilinear():\n test_affine = Affine.from_gdal(305827, 14, 9, 5223236, 9, -14)\n ds = xarray.Dataset()\n ds.rio.write_transform(test_affine, inplace=True)\n assert ds.spatial_ref.GeoTransform == \"305827.0 14.0 9.0 5223236.0 9.0 -14.0\"\n assert ds.rio._cached_transform() == test_affine\n assert ds.rio.transform() == test_affine\n assert ds.rio.grid_mapping == \"spatial_ref\"\n da = xarray.DataArray(1)\n da.rio.write_transform(test_affine, inplace=True)\n assert ds.rio._cached_transform() == test_affine\n assert ds.rio.transform() == test_affine\n assert da.spatial_ref.GeoTransform == \"305827.0 14.0 9.0 5223236.0 9.0 -14.0\"\n assert da.rio.grid_mapping == \"spatial_ref\"\n\n\ndef test_write_read_transform__non_rectilinear__rotation__warning():\n test_affine = Affine.from_gdal(305827, 14, 9, 5223236, 9, -14)\n ds = xarray.Dataset()\n ds.rio.write_transform(test_affine, inplace=True)\n with pytest.warns(\n UserWarning, match=r\"Transform that is non\\-rectilinear or with rotation found\"\n ):\n assert ds.rio.transform(recalc=True) == test_affine\n da = xarray.DataArray(1)\n da.rio.write_transform(test_affine, inplace=True)\n with pytest.warns(\n UserWarning, match=r\"Transform that is non\\-rectilinear or with rotation found\"\n ):\n assert ds.rio.transform(recalc=True) == test_affine\n\n\ndef test_missing_transform():\n ds = xarray.Dataset()\n 
assert ds.rio.transform() == Affine.identity()\n da = xarray.DataArray(1)\n assert da.rio.transform() == Affine.identity()\n\n\ndef test_nonstandard_dims_write_coordinate_system__geographic():\n with xarray.open_dataset(\n os.path.join(TEST_INPUT_DATA_DIR, \"nonstandard_dim.nc\")\n ) as xds:\n xda = xds.analysed_sst.rio.set_spatial_dims(x_dim=\"lon\", y_dim=\"lat\")\n xda.coords[xda.rio.x_dim].attrs = {}\n xda.coords[xda.rio.y_dim].attrs = {}\n cs_array = xda.rio.write_crs(\"EPSG:4326\").rio.write_coordinate_system()\n assert cs_array.coords[cs_array.rio.x_dim].attrs == {\n \"long_name\": \"longitude\",\n \"standard_name\": \"longitude\",\n \"units\": \"degrees_east\",\n \"axis\": \"X\",\n }\n assert cs_array.coords[cs_array.rio.y_dim].attrs == {\n \"long_name\": \"latitude\",\n \"standard_name\": \"latitude\",\n \"units\": \"degrees_north\",\n \"axis\": \"Y\",\n }\n\n\ndef test_nonstandard_dims_write_coordinate_system__geographic__preserve_attrs():\n with xarray.open_dataset(\n os.path.join(TEST_INPUT_DATA_DIR, \"nonstandard_dim.nc\")\n ) as xds:\n cs_array = (\n xds.analysed_sst.rio.set_spatial_dims(x_dim=\"lon\", y_dim=\"lat\")\n .rio.write_crs(\"EPSG:4326\")\n .rio.write_coordinate_system()\n )\n assert cs_array.coords[cs_array.rio.x_dim].attrs == {\n \"long_name\": \"longitude\",\n \"standard_name\": \"longitude\",\n \"units\": \"degrees_east\",\n \"axis\": \"X\",\n \"comment\": \"geolocations inherited from the input data without correction\",\n \"valid_max\": 180.0,\n \"valid_min\": -180.0,\n }\n assert cs_array.coords[cs_array.rio.y_dim].attrs == {\n \"long_name\": \"latitude\",\n \"standard_name\": \"latitude\",\n \"units\": \"degrees_north\",\n \"axis\": \"Y\",\n \"comment\": \"geolocations inherited from the input data without correction\",\n \"valid_max\": 90.0,\n \"valid_min\": -90.0,\n }\n\n\ndef test_nonstandard_dims_write_coordinate_system__projected_ft():\n with xarray.open_dataset(\n os.path.join(TEST_INPUT_DATA_DIR, \"nonstandard_dim.nc\")\n ) as xds:\n xda = xds.analysed_sst.rio.set_spatial_dims(x_dim=\"lon\", y_dim=\"lat\")\n xda.coords[xda.rio.x_dim].attrs = {}\n xda.coords[xda.rio.y_dim].attrs = {}\n cs_array = xda.rio.write_crs(\"EPSG:3418\").rio.write_coordinate_system()\n assert cs_array.coords[cs_array.rio.x_dim].attrs == {\n \"axis\": \"X\",\n \"long_name\": \"x coordinate of projection\",\n \"standard_name\": \"projection_x_coordinate\",\n \"units\": \"0.30480060960121924 metre\",\n }\n assert cs_array.coords[cs_array.rio.y_dim].attrs == {\n \"axis\": \"Y\",\n \"long_name\": \"y coordinate of projection\",\n \"standard_name\": \"projection_y_coordinate\",\n \"units\": \"0.30480060960121924 metre\",\n }\n\n\ndef test_nonstandard_dims_write_coordinate_system__no_crs():\n with xarray.open_dataset(\n os.path.join(TEST_INPUT_DATA_DIR, \"nonstandard_dim.nc\")\n ) as xds:\n xda = xds.analysed_sst.rio.set_spatial_dims(x_dim=\"lon\", y_dim=\"lat\")\n xda.coords[xda.rio.x_dim].attrs = {}\n xda.coords[xda.rio.y_dim].attrs = {}\n xda.coords[\"spatial_ref\"].attrs = {}\n cs_array = xda.rio.write_coordinate_system()\n assert cs_array.coords[cs_array.rio.x_dim].attrs == {\n \"axis\": \"X\",\n }\n assert cs_array.coords[cs_array.rio.y_dim].attrs == {\n \"axis\": \"Y\",\n }\n\n\[email protected](\n \"open_func\",\n [partial(xarray.open_dataset, mask_and_scale=False), rioxarray.open_rasterio],\n)\ndef test_grid_mapping__pre_existing(open_func):\n with open_func(os.path.join(TEST_INPUT_DATA_DIR, \"tmmx_20190121.nc\")) as xdi:\n assert xdi.rio.grid_mapping == \"crs\"\n assert 
xdi.air_temperature.rio.grid_mapping == \"crs\"\n\n\[email protected](\n \"open_func\",\n [partial(xarray.open_dataset, mask_and_scale=False), rioxarray.open_rasterio],\n)\ndef test_grid_mapping__change(open_func):\n with open_func(os.path.join(TEST_INPUT_DATA_DIR, \"tmmx_20190121.nc\")) as xdi:\n # part 1: check changing the data var grid mapping\n xdi[\"dummy\"] = xdi.air_temperature.copy()\n xdi.dummy.rio.write_grid_mapping(\"different_crs\", inplace=True)\n assert xdi.air_temperature.rio.grid_mapping == \"crs\"\n assert xdi.dummy.rio.grid_mapping == \"different_crs\"\n # part 2: ensure error raised when multiple exist\n with pytest.raises(RioXarrayError, match=\"Multiple grid mappings exist.\"):\n xdi.rio.grid_mapping\n # part 3: ensure that writing the grid mapping on the dataset fixes it\n xdi.rio.write_grid_mapping(\"final_crs\", inplace=True)\n assert xdi.air_temperature.rio.grid_mapping == \"final_crs\"\n assert xdi.dummy.rio.grid_mapping == \"final_crs\"\n assert xdi.rio.grid_mapping == \"final_crs\"\n\n\ndef test_grid_mapping_default():\n xarray.Dataset().rio.grid_mapping == \"spatial_ref\"\n xarray.DataArray().rio.grid_mapping == \"spatial_ref\"\n\n\ndef test_estimate_utm_crs():\n xds = rioxarray.open_rasterio(\n os.path.join(TEST_INPUT_DATA_DIR, \"cog.tif\"),\n )\n if PYPROJ_LT_3:\n with pytest.raises(RuntimeError, match=r\"pyproj 3\\+ required\"):\n xds.rio.estimate_utm_crs()\n else:\n assert xds.rio.estimate_utm_crs().to_epsg() in (32618, 32655)\n assert xds.rio.reproject(\"EPSG:4326\").rio.estimate_utm_crs() == CRS.from_epsg(\n 32618\n )\n assert xds.rio.estimate_utm_crs(\"WGS 72\") in (32218, 32255)\n\n\[email protected](PYPROJ_LT_3, reason=\"pyproj 3+ required\")\ndef test_estimate_utm_crs__missing_crs():\n with pytest.raises(RuntimeError, match=r\"crs must be set to estimate UTM CRS\"):\n xarray.Dataset().rio.estimate_utm_crs(\"NAD83\")\n\n\ndef test_estimate_utm_crs__out_of_bounds():\n xds = xarray.DataArray(\n numpy.zeros((2, 2)),\n dims=(\"latitude\", \"longitude\"),\n coords={\n \"latitude\": [-90.0, -90.0],\n \"longitude\": [-5.0, 5.0],\n },\n )\n xds.rio.write_crs(\"EPSG:4326\", inplace=True)\n if PYPROJ_LT_3:\n with pytest.raises(RuntimeError, match=r\"pyproj 3\\+ required\"):\n xds.rio.estimate_utm_crs()\n else:\n with pytest.raises(RuntimeError, match=r\"Unable to determine UTM CRS\"):\n xds.rio.estimate_utm_crs()\n\n\ndef test_interpolate_na_missing_nodata():\n pytest.importorskip(\"scipy\")\n test_da = xarray.DataArray(\n name=\"missing\",\n data=numpy.zeros((5, 5)),\n dims=(\"y\", \"x\"),\n coords={\"y\": numpy.arange(1, 6), \"x\": numpy.arange(2, 7)},\n )\n match = (\n r\"nodata not found\\. Please set the nodata with \"\n r\"'rio\\.write_nodata\\(\\)'\\. Data variable: missing\"\n )\n with pytest.raises(RioXarrayError, match=match):\n test_da.rio.interpolate_na()\n with pytest.raises(RioXarrayError, match=match):\n test_da.to_dataset().rio.interpolate_na()\n" ]
[ [ "numpy.random.random", "numpy.linspace", "numpy.isnan", "numpy.arange", "numpy.testing.assert_array_equal", "numpy.testing.assert_almost_equal", "numpy.zeros", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
amitkarn3/PythnSyft
[ "8eaa637e1ca54c963281e847556cb14b4a76b46b", "8eaa637e1ca54c963281e847556cb14b4a76b46b" ]
[ "test/test_serde.py", "syft/frameworks/torch/tensors/interpreters/precision.py" ]
[ "\"\"\"\nThis file tests the ability for serde.py to convert complex types into\nsimple python types which are serializable by standard serialization tools.\nFor more on how/why this works, see serde.py directly.\n\"\"\"\nimport warnings\n\nfrom syft.serde import (\n _simplify,\n apply_lz4_compression,\n apply_no_compression,\n apply_zstd_compression,\n)\nfrom syft.serde import serialize\nfrom syft.serde import deserialize\nfrom syft.serde import _compress\nfrom syft.serde import _decompress\nfrom syft.serde import LZ4\nfrom syft.serde import NO_COMPRESSION\nfrom syft.serde import ZSTD\n\n\nimport syft\nfrom syft.exceptions import CompressionNotFoundException\nfrom syft.frameworks.torch.tensors.interpreters import PointerTensor\n\nimport msgpack\nimport numpy\nimport pytest\nimport torch\nfrom torch import Tensor\n\n\ndef test_tuple_simplify():\n \"\"\"This tests our ability to simplify tuple types.\n\n This test is pretty simple since tuples just serialize to\n themselves, with a tuple wrapper with the correct ID (1)\n for tuples so that the detailer knows how to interpret it.\"\"\"\n\n input = (\"hello\", \"world\")\n target = (2, (\"hello\", \"world\"))\n assert _simplify(input) == target\n\n\ndef test_list_simplify():\n \"\"\"This tests our ability to simplify list types.\n\n This test is pretty simple since lists just serialize to\n themselves, with a tuple wrapper with the correct ID (2)\n for lists so that the detailer knows how to interpret it.\"\"\"\n\n input = [\"hello\", \"world\"]\n target = (3, [\"hello\", \"world\"])\n assert _simplify(input) == target\n\n\ndef test_set_simplify():\n \"\"\"This tests our ability to simplify set objects.\n\n This test is pretty simple since sets just serialize to\n lists, with a tuple wrapper with the correct ID (3)\n for sets so that the detailer knows how to interpret it.\"\"\"\n\n input = set([\"hello\", \"world\"])\n target = (4, [\"hello\", \"world\"])\n assert _simplify(input)[0] == target[0]\n assert set(_simplify(input)[1]) == set(target[1])\n\n\ndef test_float_simplify():\n \"\"\"This tests our ability to simplify float objects.\n\n This test is pretty simple since floats just serialize to\n themselves, with no tuple/id necessary.\"\"\"\n\n input = 5.6\n target = 5.6\n assert _simplify(input) == target\n\n\ndef test_int_simplify():\n \"\"\"This tests our ability to simplify int objects.\n\n This test is pretty simple since ints just serialize to\n themselves, with no tuple/id necessary.\"\"\"\n\n input = 5\n target = 5\n assert _simplify(input) == target\n\n\ndef test_string_simplify():\n \"\"\"This tests our ability to simplify string objects.\n\n This test is pretty simple since strings just serialize to\n themselves, with no tuple/id necessary.\"\"\"\n\n input = \"hello\"\n target = \"hello\"\n assert _simplify(input) == target\n\n\ndef test_dict_simplify():\n \"\"\"This tests our ability to simplify dict objects.\n\n This test is pretty simple since dicts just serialize to\n themselves, with a tuple wrapper with the correct ID (4)\n for dicts so that the detailer knows how to interpret it.\"\"\"\n\n input = {\"hello\": \"world\"}\n target = (5, {\"hello\": \"world\"})\n assert _simplify(input) == target\n\n\ndef test_range_simplify():\n \"\"\"This tests our ability to simplify range objects.\n\n This test is pretty simple since range objs just serialize to\n themselves, with a tuple wrapper with the correct ID (5)\n for dicts so that the detailer knows how to interpret it.\"\"\"\n\n input = range(1, 3, 4)\n target = (6, (1, 3, 
4))\n assert _simplify(input) == target\n\n\ndef test_torch_tensor_simplify():\n \"\"\"This tests our ability to simplify torch.Tensor objects\n\n At the time of writing, tensors simplify to a tuple where the\n first value in the tuple is the tensor's ID and the second\n value is a serialized version of the Tensor (serialized\n by PyTorch's torch.save method)\n \"\"\"\n\n # create a tensor\n input = Tensor(numpy.random.random((100, 100)))\n\n # simplify the tnesor\n output = _simplify(input)\n\n # make sure outer type is correct\n assert type(output) == tuple\n\n # make sure the object type ID is correct\n # (0 for torch.Tensor)\n assert output[0] == 0\n\n # make sure inner type is correct\n assert type(output[1]) == tuple\n\n # make sure ID is correctly encoded\n assert output[1][0] == input.id\n\n # make sure tensor data type is correct\n assert type(output[1][1]) == bytes\n\n\ndef test_ndarray_simplify():\n \"\"\"This tests our ability to simplify numpy.array objects\n\n At the time of writing, arrays simplify to an object inside\n of a tuple which specifies the ID for the np.array type (6) so\n that the detailer knows to turn the simplifed form to a np.array\n \"\"\"\n\n input = numpy.random.random((100, 100))\n output = _simplify(input)\n\n # make sure simplified type ID is correct\n assert output[0] == 7\n\n # make sure serialized form is correct\n assert type(output[1][0]) == bytes\n assert output[1][1] == input.shape\n assert output[1][2] == input.dtype.name\n\n\ndef test_ellipsis_simplify():\n \"\"\"Make sure ellipsis simplifies correctly.\"\"\"\n\n # the id indicating an ellipsis is here\n assert _simplify(Ellipsis)[0] == 9\n\n # the simplified ellipsis (empty object)\n assert _simplify(Ellipsis)[1] == b\"\"\n\n\ndef test_torch_device_simplify():\n \"\"\"Test the simplification of torch.device\"\"\"\n device = torch.device(\"cpu\")\n\n # the id indicating an torch.device is here\n assert _simplify(device)[0] == 10\n\n # the simplified torch.device\n assert _simplify(device)[1] == \"cpu\"\n\n\ndef test_pointer_tensor_simplify():\n \"\"\"Test the simplification of PointerTensor\"\"\"\n\n alice = syft.VirtualWorker(syft.torch.hook, id=\"alice\")\n input_tensor = PointerTensor(id=1000, location=alice, owner=alice)\n\n output = _simplify(input_tensor)\n\n assert output[1][0] == input_tensor.id\n assert output[1][1] == input_tensor.id_at_location\n assert output[1][2] == input_tensor.owner.id\n\n\[email protected](\"compress\", [True, False])\ndef test_torch_Tensor(compress):\n if compress:\n syft.serde._apply_compress_scheme = apply_lz4_compression\n else:\n syft.serde._apply_compress_scheme = apply_no_compression\n\n t = Tensor(numpy.random.random((100, 100)))\n t_serialized = serialize(t)\n t_serialized_deserialized = deserialize(t_serialized)\n assert (t == t_serialized_deserialized).all()\n\n\[email protected](\"compress\", [True, False])\ndef test_torch_Tensor_convenience(compress):\n \"\"\"This test evaluates torch.Tensor.serialize()\n\n As opposed to using syft.serde.serialize(), torch objects\n have a convenience function which lets you call .serialize()\n directly on the tensor itself. 
This tests to makes sure it\n works correctly.\"\"\"\n if compress:\n syft.serde._apply_compress_scheme = apply_lz4_compression\n else:\n syft.serde._apply_compress_scheme = apply_no_compression\n\n t = Tensor(numpy.random.random((100, 100)))\n t_serialized = t.serialize()\n t_serialized_deserialized = deserialize(t_serialized)\n assert (t == t_serialized_deserialized).all()\n\n\[email protected](\"compress\", [True, False])\ndef test_tuple(compress):\n # Test with a simple datatype\n if compress:\n syft.serde._apply_compress_scheme = apply_lz4_compression\n else:\n syft.serde._apply_compress_scheme = apply_no_compression\n\n tuple = (1, 2)\n tuple_serialized = serialize(tuple)\n tuple_serialized_deserialized = deserialize(tuple_serialized)\n assert tuple == tuple_serialized_deserialized\n\n # Test with a complex data structure\n tensor_one = Tensor(numpy.random.random((100, 100)))\n tensor_two = Tensor(numpy.random.random((100, 100)))\n tuple = (tensor_one, tensor_two)\n tuple_serialized = serialize(tuple)\n tuple_serialized_deserialized = deserialize(tuple_serialized)\n # `assert tuple_serialized_deserialized == tuple` does not work, therefore it's split\n # into 3 assertions\n assert type(tuple_serialized_deserialized) == type(tuple)\n assert (tuple_serialized_deserialized[0] == tensor_one).all()\n assert (tuple_serialized_deserialized[1] == tensor_two).all()\n\n\[email protected](\"compress\", [True, False])\ndef test_bytearray(compress):\n if compress:\n syft.serde._apply_compress_scheme = apply_lz4_compression\n else:\n syft.serde._apply_compress_scheme = apply_no_compression\n\n bytearr = bytearray(\"This is a teststring\", \"utf-8\")\n bytearr_serialized = serialize(bytearr)\n bytearr_serialized_desirialized = deserialize(bytearr_serialized)\n assert bytearr == bytearr_serialized_desirialized\n\n bytearr = bytearray(numpy.random.random((100, 100)))\n bytearr_serialized = serialize(bytearr)\n bytearr_serialized_desirialized = deserialize(bytearr_serialized)\n assert bytearr == bytearr_serialized_desirialized\n\n\[email protected](\"compress\", [True, False])\ndef test_ndarray_serde(compress):\n if compress:\n syft.serde._apply_compress_scheme = apply_lz4_compression\n else:\n syft.serde._apply_compress_scheme = apply_no_compression\n arr = numpy.random.random((100, 100))\n arr_serialized = serialize(arr)\n\n arr_serialized_deserialized = deserialize(arr_serialized)\n\n assert numpy.array_equal(arr, arr_serialized_deserialized)\n\n\[email protected](\"compress_scheme\", [LZ4, ZSTD, NO_COMPRESSION])\ndef test_compress_decompress(compress_scheme):\n if compress_scheme == LZ4:\n syft.serde._apply_compress_scheme = apply_lz4_compression\n elif compress_scheme == ZSTD:\n syft.serde._apply_compress_scheme = apply_zstd_compression\n else:\n syft.serde._apply_compress_scheme = apply_no_compression\n\n original = msgpack.dumps([1, 2, 3])\n compressed = _compress(original)\n decompressed = _decompress(compressed)\n assert type(compressed) == bytes\n assert original == decompressed\n\n\[email protected](\"compress_scheme\", [LZ4, ZSTD, NO_COMPRESSION])\ndef test_compressed_serde(compress_scheme):\n if compress_scheme == LZ4:\n syft.serde._apply_compress_scheme = apply_lz4_compression\n elif compress_scheme == ZSTD:\n syft.serde._apply_compress_scheme = apply_zstd_compression\n else:\n syft.serde._apply_compress_scheme = apply_no_compression\n\n arr = numpy.random.random((100, 100))\n arr_serialized = serialize(arr)\n\n arr_serialized_deserialized = deserialize(arr_serialized)\n assert 
numpy.array_equal(arr, arr_serialized_deserialized)\n\n\[email protected](\"compress_scheme\", [1, 2, 3, 100])\ndef test_invalid_decompression_scheme(compress_scheme):\n # using numpy.ones because numpy.random.random is not compressed.\n arr = numpy.ones((100, 100))\n\n def some_other_compression_scheme(decompressed_input):\n # Simulate compression by removing some values\n return decompressed_input[:10], compress_scheme\n\n syft.serde._apply_compress_scheme = some_other_compression_scheme\n arr_serialized = serialize(arr)\n with pytest.raises(CompressionNotFoundException):\n _ = deserialize(arr_serialized)\n\n\[email protected](\"compress\", [True, False])\ndef test_dict(compress):\n # Test with integers\n if compress:\n syft.serde._apply_compress_scheme = apply_lz4_compression\n else:\n syft.serde._apply_compress_scheme = apply_no_compression\n _dict = {1: 1, 2: 2, 3: 3}\n dict_serialized = serialize(_dict)\n dict_serialized_deserialized = deserialize(dict_serialized)\n assert _dict == dict_serialized_deserialized\n\n # Test with strings\n _dict = {\"one\": 1, \"two\": 2, \"three\": 3}\n dict_serialized = serialize(_dict)\n dict_serialized_deserialized = deserialize(dict_serialized)\n assert _dict == dict_serialized_deserialized\n\n # Test with a complex data structure\n tensor_one = Tensor(numpy.random.random((100, 100)))\n tensor_two = Tensor(numpy.random.random((100, 100)))\n _dict = {0: tensor_one, 1: tensor_two}\n dict_serialized = serialize(_dict)\n dict_serialized_deserialized = deserialize(dict_serialized)\n # `assert dict_serialized_deserialized == _dict` does not work, therefore it's split\n # into 3 assertions\n assert type(dict_serialized_deserialized) == type(_dict)\n assert (dict_serialized_deserialized[0] == tensor_one).all()\n assert (dict_serialized_deserialized[1] == tensor_two).all()\n\n\[email protected](\"compress\", [True, False])\ndef test_range_serde(compress):\n if compress:\n syft.serde._apply_compress_scheme = apply_lz4_compression\n else:\n syft.serde._apply_compress_scheme = apply_no_compression\n\n _range = range(1, 2, 3)\n\n range_serialized = serialize(_range)\n\n range_serialized_deserialized = deserialize(range_serialized)\n\n assert _range == range_serialized_deserialized\n\n\[email protected](\"compress\", [True, False])\ndef test_list(compress):\n if compress:\n syft.serde._apply_compress_scheme = apply_lz4_compression\n else:\n syft.serde._apply_compress_scheme = apply_no_compression\n\n # Test with integers\n _list = [1, 2]\n list_serialized = serialize(_list)\n list_serialized_deserialized = deserialize(list_serialized)\n assert _list == list_serialized_deserialized\n\n # Test with strings\n _list = [\"hello\", \"world\"]\n list_serialized = serialize(_list)\n list_serialized_deserialized = deserialize(list_serialized)\n assert _list == list_serialized_deserialized\n\n # Test with a complex data structure\n tensor_one = Tensor(numpy.random.random((100, 100)))\n tensor_two = Tensor(numpy.random.random((100, 100)))\n _list = (tensor_one, tensor_two)\n list_serialized = serialize(_list)\n list_serialized_deserialized = deserialize(list_serialized)\n # `assert list_serialized_deserialized == _list` does not work, therefore it's split\n # into 3 assertions\n assert type(list_serialized_deserialized) == type(_list)\n assert (list_serialized_deserialized[0] == tensor_one).all()\n assert (list_serialized_deserialized[1] == tensor_two).all()\n\n\[email protected](\"compress\", [True, False])\ndef test_set(compress):\n if compress:\n 
syft.serde._apply_compress_scheme = apply_lz4_compression\n else:\n syft.serde._apply_compress_scheme = apply_no_compression\n\n # Test with integers\n _set = set([1, 2])\n set_serialized = serialize(_set)\n set_serialized_deserialized = deserialize(set_serialized)\n assert _set == set_serialized_deserialized\n\n # Test with strings\n _set = set([\"hello\", \"world\"])\n set_serialized = serialize(_set)\n set_serialized_deserialized = deserialize(set_serialized)\n assert _set == set_serialized_deserialized\n\n # Test with a complex data structure\n tensor_one = Tensor(numpy.random.random((100, 100)))\n tensor_two = Tensor(numpy.random.random((100, 100)))\n _set = (tensor_one, tensor_two)\n set_serialized = serialize(_set)\n set_serialized_deserialized = deserialize(set_serialized)\n # `assert set_serialized_deserialized == _set` does not work, therefore it's split\n # into 3 assertions\n assert type(set_serialized_deserialized) == type(_set)\n assert (set_serialized_deserialized[0] == tensor_one).all()\n assert (set_serialized_deserialized[1] == tensor_two).all()\n\n\[email protected](\"compress\", [True, False])\ndef test_slice(compress):\n if compress:\n syft.serde._apply_compress_scheme = apply_lz4_compression\n else:\n syft.serde._apply_compress_scheme = apply_no_compression\n\n s = slice(0, 100, 2)\n x = numpy.random.rand(100)\n s_serialized = serialize(s)\n s_serialized_deserialized = deserialize(s_serialized)\n\n assert type(s) == type(s_serialized_deserialized)\n assert (x[s] == x[s_serialized_deserialized]).all()\n\n s = slice(40, 50)\n x = numpy.random.rand(100)\n s_serialized = serialize(s)\n s_serialized_deserialized = deserialize(s_serialized)\n\n assert type(s) == type(s_serialized_deserialized)\n assert (x[s] == x[s_serialized_deserialized]).all()\n\n\[email protected](\"compress\", [True, False])\ndef test_float(compress):\n if compress:\n syft.serde._apply_compress_scheme = apply_lz4_compression\n else:\n syft.serde._apply_compress_scheme = apply_no_compression\n\n x = 0.5\n y = 1.5\n\n x_serialized = serialize(x)\n x_serialized_deserialized = deserialize(x_serialized)\n\n y_serialized = serialize(y)\n y_serialized_deserialized = deserialize(y_serialized)\n\n assert x_serialized_deserialized == x\n assert y_serialized_deserialized == y\n\n\ndef test_compressed_float():\n x = 0.5\n y = 1.5\n\n x_serialized = serialize(x)\n x_serialized_deserialized = deserialize(x_serialized)\n\n y_serialized = serialize(y)\n y_serialized_deserialized = deserialize(y_serialized)\n\n assert x_serialized_deserialized == x\n assert y_serialized_deserialized == y\n\n\[email protected](\n \"compress, compress_scheme\",\n [\n (True, LZ4),\n (False, LZ4),\n (True, ZSTD),\n (False, ZSTD),\n (True, NO_COMPRESSION),\n (False, NO_COMPRESSION),\n ],\n)\ndef test_hooked_tensor(compress, compress_scheme):\n if compress:\n if compress_scheme == LZ4:\n syft.serde._apply_compress_scheme = apply_lz4_compression\n elif compress_scheme == ZSTD:\n syft.serde._apply_compress_scheme = apply_zstd_compression\n else:\n syft.serde._apply_compress_scheme = apply_no_compression\n else:\n syft.serde._apply_compress_scheme = apply_no_compression\n\n t = Tensor(numpy.random.random((100, 100)))\n t_serialized = serialize(t)\n t_serialized_deserialized = deserialize(t_serialized)\n assert (t == t_serialized_deserialized).all()\n\n\ndef test_PointerTensor(hook, workers):\n syft.serde._apply_compress_scheme = apply_no_compression\n t = PointerTensor(\n id=1000, location=workers[\"alice\"], owner=workers[\"alice\"], 
id_at_location=12345\n )\n t_serialized = serialize(t)\n t_serialized_deserialized = deserialize(t_serialized)\n print(f\"t.location - {t.location}\")\n print(f\"t_serialized_deserialized.location - {t_serialized_deserialized.location}\")\n assert t.id == t_serialized_deserialized.id\n assert t.location.id == t_serialized_deserialized.location.id\n assert t.id_at_location == t_serialized_deserialized.id_at_location\n\n\[email protected](\"id\", [1000, \"1000\"])\ndef test_pointer_tensor_detail(id):\n alice = syft.VirtualWorker(syft.torch.hook, id=id)\n x = torch.tensor([1, -1, 3, 4])\n x_ptr = x.send(alice)\n x_ptr = 2 * x_ptr\n x_back = x_ptr.get()\n assert (x_back == 2 * x).all()\n\n\ndef test_numpy_tensor_serde():\n syft.serde._serialize_tensor = syft.serde.numpy_tensor_serializer\n syft.serde._deserialize_tensor = syft.serde.numpy_tensor_deserializer\n\n tensor = torch.tensor(numpy.random.random((10, 10)), requires_grad=False)\n\n tensor_serialized = serialize(tensor)\n tensor_deserialized = deserialize(tensor_serialized)\n\n # Back to Pytorch serializer\n syft.serde._serialize_tensor = syft.serde.torch_tensor_serializer\n syft.serde._deserialize_tensor = syft.serde.torch_tensor_deserializer\n\n assert torch.eq(tensor_deserialized, tensor).all()\n\n\[email protected](\"compress\", [True, False])\ndef test_additive_sharing_tensor_serde(compress, workers):\n alice, bob, james = workers[\"alice\"], workers[\"bob\"], workers[\"james\"]\n\n x = torch.tensor([[3.1, 4.3]]).fix_prec().share(alice, bob, crypto_provider=james)\n\n additive_sharing_tensor = x.child.child.child\n data = syft.serde._simplify_additive_shared_tensor(additive_sharing_tensor)\n additive_sharing_tensor_reconstructed = syft.serde._detail_additive_shared_tensor(\n syft.hook.local_worker, data\n )\n\n assert additive_sharing_tensor_reconstructed.field == additive_sharing_tensor.field\n\n assert (\n additive_sharing_tensor_reconstructed.child.keys() == additive_sharing_tensor.child.keys()\n )\n", "import syft\nimport torch\nfrom syft.frameworks.torch.tensors.interpreters.abstract import AbstractTensor\nfrom syft.frameworks.torch.overload_torch import overloaded\n\n\nclass FixedPrecisionTensor(AbstractTensor):\n def __init__(\n self,\n owner=None,\n id=None,\n field: int = (2 ** 62) - 1,\n base: int = 10,\n precision_fractional: int = 3,\n kappa: int = 1,\n tags: set = None,\n description: str = None,\n ):\n \"\"\"Initializes a Fixed Precision tensor, which encodes all decimal point\n values using an underlying integer value.\n\n The FixedPrecision enables to manipulate floats over an interface which\n supports only integers, Such as _SPDZTensor.\n\n This is done by specifying a precision p and given a float x,\n multiply it with 10**p before rounding to an integer (hence you keep\n p decimals)\n\n Args:\n owner: An optional BaseWorker object to specify the worker on which\n the tensor is located.\n id: An optional string or integer id of the FixedPrecisionTensor.\n \"\"\"\n super().__init__(tags, description)\n\n self.owner = owner\n self.id = id\n self.child = None\n\n self.field = field\n self.base = base\n self.precision_fractional = precision_fractional\n self.kappa = kappa\n self.torch_max_value = torch.tensor([round(self.field / 2)])\n\n def get_class_attributes(self):\n \"\"\"\n Specify all the attributes need to build a wrapper correctly when returning a response,\n for example precision_fractional is important when wrapping the result of a method\n on a self which is a fixed precision tensor with a non default 
precision_fractional.\n \"\"\"\n return {\n \"field\": self.field,\n \"base\": self.base,\n \"precision_fractional\": self.precision_fractional,\n \"kappa\": self.kappa,\n }\n\n def fix_precision(self):\n \"\"\"This method encodes the .child object using fixed precision\"\"\"\n\n rational = self.child\n\n upscaled = (rational * self.base ** self.precision_fractional).long()\n field_element = upscaled % self.field\n field_element.owner = rational.owner\n\n # Handle neg values\n gate = field_element.gt(self.torch_max_value).long()\n neg_nums = (field_element - self.field) * gate\n pos_nums = field_element * (1 - gate)\n field_element = neg_nums + pos_nums\n\n self.child = field_element\n return self\n\n def float_precision(self):\n \"\"\"this method returns a new tensor which has the same values as this\n one, encoded with floating point precision\"\"\"\n\n value = self.child.long() % self.field\n\n gate = value.native_gt(self.torch_max_value).long()\n neg_nums = (value - self.field) * gate\n pos_nums = value * (1 - gate)\n result = (neg_nums + pos_nums).float() / (self.base ** self.precision_fractional)\n\n return result\n\n def truncate(self, precision_fractional):\n truncation = self.base ** precision_fractional\n self.child /= truncation\n return self\n\n @overloaded.method\n def add(self, _self, *args, **kwargs):\n \"\"\"Add two fixed precision tensors together.\n \"\"\"\n response = getattr(_self, \"add\")(*args, **kwargs)\n\n return response\n\n __add__ = add\n\n def __iadd__(self, other):\n \"\"\"Add two fixed precision tensors together.\n \"\"\"\n self.child = self.add(other).child\n\n return self\n\n @overloaded.method\n def t(self, _self, *args, **kwargs):\n \"\"\"Transpose a tensor. Hooked is handled by the decorator\"\"\"\n response = getattr(_self, \"t\")(*args, **kwargs)\n\n return response\n\n def mul(self, *args, **kwargs):\n \"\"\"\n Hook manually mul to add the truncation part which is inherent to multiplication\n in the fixed precision setting\n \"\"\"\n # Replace all syft tensor with their child attribute\n new_self, new_args, new_kwargs = syft.frameworks.torch.hook_args.hook_method_args(\n \"mul\", self, args, kwargs\n )\n\n # Send it to the appropriate class and get the response\n response = getattr(new_self, \"mul\")(*new_args, **new_kwargs)\n\n # Put back SyftTensor on the tensors found in the response\n response = syft.frameworks.torch.hook_args.hook_response(\n \"mul\", response, wrap_type=type(self), wrap_args=self.get_class_attributes()\n )\n\n other = args[0]\n\n assert (\n self.precision_fractional == other.precision_fractional\n ), \"In mul, all args should have the same precision_fractional\"\n\n return response.truncate(other.precision_fractional)\n\n __mul__ = mul\n\n def matmul(self, *args, **kwargs):\n \"\"\"\n Hook manually matmul to add the truncation part which is inherent to multiplication\n in the fixed precision setting\n \"\"\"\n # Replace all syft tensor with their child attribute\n new_self, new_args, new_kwargs = syft.frameworks.torch.hook_args.hook_method_args(\n \"matmul\", self, args, kwargs\n )\n\n # Send it to the appropriate class and get the response\n response = getattr(new_self, \"matmul\")(*new_args, **new_kwargs)\n\n # Put back SyftTensor on the tensors found in the response\n response = syft.frameworks.torch.hook_args.hook_response(\n \"matmul\", response, wrap_type=type(self), wrap_args=self.get_class_attributes()\n )\n\n other = args[0]\n\n assert (\n self.precision_fractional == other.precision_fractional\n ), \"In matmul, all 
args should have the same precision_fractional\"\n\n return response.truncate(other.precision_fractional)\n\n __matmul__ = matmul\n\n @overloaded.method\n def __gt__(self, _self, other):\n result = _self.__gt__(other)\n return result * self.base ** self.precision_fractional\n\n @overloaded.method\n def __ge__(self, _self, other):\n result = _self.__ge__(other)\n return result * self.base ** self.precision_fractional\n\n @overloaded.method\n def __lt__(self, _self, other):\n result = _self.__lt__(other)\n return result * self.base ** self.precision_fractional\n\n @overloaded.method\n def __le__(self, _self, other):\n result = _self.__le__(other)\n return result * self.base ** self.precision_fractional\n\n @overloaded.method\n def eq(self, _self, other):\n result = _self.eq(other)\n return result * self.base ** self.precision_fractional\n\n @staticmethod\n @overloaded.module\n def torch(module):\n def mul(self, other):\n return self.__mul__(other)\n\n module.mul = mul\n\n def addmm(bias, input_tensor, weight):\n matmul = input_tensor.matmul(weight)\n result = bias.add(matmul)\n return result\n\n module.addmm = addmm\n\n # You can also overload functions in submodules!\n @overloaded.module\n def nn(module):\n \"\"\"\n The syntax is the same, so @overloaded.module handles recursion\n Note that we don't need to add the @staticmethod decorator\n \"\"\"\n\n @overloaded.module\n def functional(module):\n def linear(*args):\n \"\"\"\n Un-hook the function to have its detailed behaviour\n \"\"\"\n return torch.nn.functional.native_linear(*args)\n\n module.linear = linear\n\n module.functional = functional\n\n # Modules should be registered just like functions\n module.nn = nn\n\n @classmethod\n def handle_func_command(cls, command):\n \"\"\"\n Receive an instruction for a function to be applied on a FixedPrecision Tensor,\n Perform some specific action (like logging) which depends of the\n instruction content, replace in the args all the FPTensors with\n their child attribute, forward the command instruction to the\n handle_function_command of the type of the child attributes, get the\n response and replace a FixedPrecision on top of all tensors found in\n the response.\n :param command: instruction of a function command: (command name,\n <no self>, arguments[, kwargs])\n :return: the response of the function command\n \"\"\"\n cmd, _, args, kwargs = command\n\n tensor = args[0] if not isinstance(args[0], tuple) else args[0][0]\n\n # Check that the function has not been overwritten\n try:\n # Try to get recursively the attributes in cmd = \"<attr1>.<attr2>.<attr3>...\"\n cmd = cls.rgetattr(cls, cmd)\n return cmd(*args, **kwargs)\n except AttributeError:\n pass\n\n # TODO: I can't manage the import issue, can you?\n # Replace all FixedPrecisionTensor with their child attribute\n new_args, new_kwargs, new_type = syft.frameworks.torch.hook_args.hook_function_args(\n cmd, args, kwargs\n )\n\n # build the new command\n new_command = (cmd, None, new_args, new_kwargs)\n\n # Send it to the appropriate class and get the response\n response = new_type.handle_func_command(new_command)\n\n # Put back FixedPrecisionTensor on the tensors found in the response\n response = syft.frameworks.torch.hook_args.hook_response(\n cmd, response, wrap_type=cls, wrap_args=tensor.get_class_attributes()\n )\n\n return response\n\n def get(self):\n \"\"\"Just a pass through. 
This is most commonly used when calling .get() on a\n FixedPrecisionTensor which has also been shared.\"\"\"\n class_attributes = self.get_class_attributes()\n return FixedPrecisionTensor(\n **class_attributes,\n owner=self.owner,\n tags=self.tags,\n description=self.description,\n id=self.id,\n ).on(self.child.get())\n\n def share(self, *owners, field=None, crypto_provider=None):\n self.child = self.child.share(*owners, field=field, crypto_provider=crypto_provider)\n return self\n" ]
[ [ "numpy.random.random", "numpy.array_equal", "torch.eq", "numpy.ones", "torch.tensor", "numpy.random.rand", "torch.device" ], [ "torch.nn.functional.native_linear" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
praveenpmin/Python
[ "513fcde7430b03a187e2c7e58302b88645388eed", "513fcde7430b03a187e2c7e58302b88645388eed", "513fcde7430b03a187e2c7e58302b88645388eed" ]
[ "numpy/broadcasting.py", "numpy/iterateoverarr.py", "numpy/arraycreateroutines.py" ]
[ "# The term broadcasting refers to the ability of NumPy to treat arrays of different shapes during \n# arithmetic operations. Arithmetic operations on arrays are usually done on corresponding elements. \n# If two arrays are of exactly the same shape, then these operations are smoothly performed.\n\nimport numpy as np \n\na = np.array([1,2,3,4]) \nb = np.array([10,20,30,40]) \nc = a * b \nprint (c)\n\nimport numpy as np \na = np.array([[0.0,0.0,0.0],[10.0,10.0,10.0],[20.0,20.0,20.0],[30.0,30.0,30.0]]) \nb = np.array([1.0,2.0,3.0]) \n \nprint ('First array:') \nprint (a) \nprint ('\\n') \n \nprint ('Second array:')\nprint (b)\nprint ('\\n') \n \nprint ('First Array + Second Array')\nprint (a + b)", "import numpy as np\na = np.arange(0,60,5)\na = a.reshape(3,4)\n\nprint ('Original array is:')\nprint (a)\nprint ('\\n')\n\nprint ('Modified array is:')\nfor x in np.nditer(a):\n print (x,)\n\n#print ('\\n') \n\n import numpy as np \na = np.arange(0,60,5) \na = a.reshape(3,4) \n \nprint ('Original array is:')\nprint (a) \nprint ('\\n') \n \nprint ('Transpose of the original array is:') \nb = a.T \nprint (b) \nprint ('\\n') \n \nprint ('Modified array is:') \nfor x in np.nditer(b): \n print (x,)\n\n# Iteration Order\nimport numpy as np\na = np.arange(0,60,5)\na = a.reshape(3,4)\nprint ('Original array is:')\nprint (a)\nprint ('\\n')\n\nprint ('Transpose of the original array is:')\nb = a.T\nprint (b)\nprint ('\\n')\n\nprint ('Sorted in C-style order:')\nc = b.copy(order = 'C')\nprint (c)\nfor x in np.nditer(c):\n print (x,)\n\nprint ('\\n')\n\nprint ('Sorted in F-style order:')\nc = b.copy(order = 'F')\nprint (c)\nfor x in np.nditer(c):\n print (x,)\n\nimport numpy as np \na = np.arange(0,60,5) \na = a.reshape(3,4) \n\nprint ('Original array is:') \nprint (a) \nprint ('\\n') \n\nprint ('Sorted in C-style order:') \nfor x in np.nditer(a, order = 'C'): \n print (x,) \nprint ('\\n') \n\nprint ('Sorted in F-style order:') \nfor x in np.nditer(a, order = 'F'): \n print (x,)\n\n# Modifying Array Values\nimport numpy as np\na = np.arange(0,60,5)\na = a.reshape(3,4)\nprint ('Original array is:')\nprint (a)\nprint ('\\n')\n\nfor x in np.nditer(a, op_flags = ['readwrite']):\n x[...] = 2*x\nprint ('Modified array is:')\nprint (a)\n\n# External Loop\nimport numpy as np \na = np.arange(0,60,5) \na = a.reshape(3,4) \n\nprint ('Original array is:') \nprint (a) \nprint ('\\n') \n\nprint ('Modified array is:')\nfor x in np.nditer(a, flags = ['external_loop'], order = 'F'):\n print (x,)\n\n# Broadcasting Iteration\nimport numpy as np \na = np.arange(0,60,5) \na = a.reshape(3,4) \n\nprint ('First array is:') \nprint (a)\nprint ('\\n') \n\nprint ('Second array is:') \nb = np.array([1, 2, 3, 4], dtype = int) \nprint (b)\nprint ('\\n')\n\nprint ('Modified array is:') \nfor x,y in np.nditer([a,b]): \n print (\"%d:%d\" % (x,y),)", "import numpy as np \nx = np.empty([3,2], dtype = int) \nprint (x)\n\n# array of five zeros. Default dtype is float \nimport numpy as np \nx = np.zeros(5) \nprint (x)\n\nimport numpy as np \nx = np.zeros((5,), dtype = np.int) \nprint (x)\n\n# custom type \nimport numpy as np \nx = np.zeros((2,2), dtype = [('x', 'i4'), ('y', 'i4')]) \nprint (x)\n\n# array of five ones. Default dtype is float \nimport numpy as np \nx = np.ones(5) \nprint (x)\n\nimport numpy as np \nx = np.ones([2,2], dtype = int) \nprint (x)" ]
[ [ "numpy.array" ], [ "numpy.arange", "numpy.array", "numpy.nditer" ], [ "numpy.zeros", "numpy.empty", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
chomd90/invnet
[ "0d359e57b66f2e738812b5d660563fb4b3ab8f4a" ]
[ "models/checkers.py" ]
[ "\"\"\"\nChecker functions\n\"\"\"\n\nimport numpy as np\nimport torch\n\nPI = 3.1415\nDIM = 64.0\nSCALE = 255.0\nFIXED_CIRCLE = False\n\n\nclass CentroidFunction(torch.nn.Module):\n def __init__(self, bs, ch, sx, sy):\n super(CentroidFunction, self).__init__()\n self.x_lin = torch.nn.Parameter(torch.linspace(0, sx, sx).expand(bs, ch, sx, sy)).requires_grad_(False).cuda()\n self.y_lin = torch.nn.Parameter(torch.linspace(0, sy, sy).expand(bs, ch, sy, sx).transpose(2, 3)\n ).requires_grad_(False).cuda()\n\n def forward(self, img_batch):\n img_batch = img_batch[:, 0:-1, ...] # Dropping the very last channel.\n m00_t = img_batch.sum(dim=(2, 3))\n m01_t = torch.mul(img_batch, self.x_lin)\n m10_t = torch.mul(img_batch, self.y_lin)\n cx_t = torch.sum(m01_t, dim=(2, 3)) / (m00_t + 0.01)\n cy_t = torch.sum(m10_t, dim=(2, 3)) / (m00_t + 0.01)\n return cx_t, cy_t\n\ndef p1_fn(x, torch=True):\n #print(x.size())\n if torch:\n if FIXED_CIRCLE:\n return x.mean(dim=(1,2,3)).unsqueeze(1)\n else:\n #return x.mean(dim=(2,3))\n return x[:, 0:-1, ...].mean(dim=(2,3)) # Dropping the very last channel.\n else:\n return x.mean(axis=(1,2,3))\n\ndef p2_fn(x, torch=True):\n pass\n" ]
[ [ "torch.linspace", "torch.mul", "torch.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lbcb-sci/tarantula
[ "a7805cbb1c2e9b3378abaaf18c29922b2b787593" ]
[ "misc/plotter.py" ]
[ "#!/usr/bin/env python\nimport argparse\nimport json\nimport numpy\nimport pandas\nimport seaborn\nfrom matplotlib import pyplot\n\nseaborn.set()\nseaborn.set_style(\"white\")\nseaborn.despine()\n\nscpb = seaborn.color_palette(\"Blues\")\nscpr = seaborn.color_palette(\"Reds\")\nscpg = seaborn.cubehelix_palette(rot=-.4)\n\nbase_resolution = 100000\n\nclass Plotter:\n    def __init__(self, mode, path, resolution):\n        self.mode = mode\n        self.path = path\n        self.resolution = resolution\n    # __init__\n\n    def ParseJson(self):\n        return json.load(open(self.path[0]))\n    # ParseJson\n\n    def DrawPile(self):\n        try:\n            data = self.ParseJson()\n        except Exception:\n            print(\"[tarantula::] error: wrong format\")\n            exit()\n\n        for id in data:\n            pile = data[id]\n\n            figure, ax = pyplot.subplots(1, 1, figsize = (25, 5))\n\n            ax.plot(range(len(pile[\"data\"])), pile[\"data\"], label = \"data\", color = scpb[2])\n            ax.set_title(pile[\"id\"])\n            figure.text(0.5, 0.05, \"base\", ha = \"center\")\n            figure.text(0.05, 0.5, \"coverage\", va = \"center\", rotation = \"vertical\")\n            pyplot.legend(loc = \"best\")\n            pyplot.savefig(str(pile[\"id\"]) + \".png\", format = 'png')\n            pyplot.close(figure)\n    # DrawPile\n\n    def ParsePaf(self):\n        paf1 = open(self.path[0], 'r')\n        paf2 = open(self.path[1], 'r')\n        references = {}\n\n        rp1 = {}\n        line = \"\"\n        while True:\n            line = paf1.readline()\n            if (len(line) == 0):\n                break\n            lhs_name, _, _, _, _, rhs_name, rhs_len, rhs_begin, *junk = line.split('\t')\n            rhs_len = int(rhs_len)\n            rhs_begin = int(rhs_begin)\n            if (rhs_name not in references and rhs_len > 10000):\n                references[rhs_name] = rhs_len\n            if (lhs_name not in rp1):\n                rp1[lhs_name] = (rhs_name, rhs_begin)\n\n        rp2 = {}\n        while True:\n            line = paf2.readline()\n            if (len(line) == 0):\n                break\n            lhs_name, _, _, _, _, rhs_name, rhs_len, rhs_begin, *junk = line.split('\t')\n            rhs_len = int(rhs_len)\n            rhs_begin = int(rhs_begin)\n            if (rhs_name not in references and rhs_len > 10000):\n                references[rhs_name] = rhs_len\n            if (lhs_name not in rp2):\n                rp2[lhs_name] = (rhs_name, rhs_begin)\n\n        references = dict(sorted(references.items(), key = lambda item: item[1], reverse = True))\n        matrix_size = 0\n        for reference_name, reference_len in references.items():\n            references[reference_name] = matrix_size\n            matrix_size += reference_len\n\n        matrix_size = matrix_size // base_resolution + 1\n        matrix = numpy.zeros(shape = (matrix_size, matrix_size))\n\n        for lhs_name, rhs in rp1.items():\n            if (lhs_name not in rp2 or \\\n                rhs[0] not in references or \\\n                rp2[lhs_name][0] not in references):\n                continue\n            x = (rhs[1] + references[rhs[0]]) // base_resolution\n            y = (rp2[lhs_name][1] + references[rp2[lhs_name][0]]) // base_resolution\n            matrix[x][y] += 1\n            matrix[y][x] += 1\n\n        numpy.save(\"heatmap\", matrix)\n\n        return matrix\n    # ParsePaf\n\n    def ParseSam(self):\n        sam = open(self.path[0], 'r')\n        references = {}\n        line = \"\"\n        while True: # parse header\n            line = sam.readline()\n            if (len(line) == 0 or line[0] != '@'):\n                break\n            if (line[1:3] == 'SQ'):\n                reference_name = line.split('SN:')[1].split('\t')[0]\n                reference_len = int(line.split('LN:')[1].split('\t')[0].rstrip())\n                if (reference_len > 10000):\n                    references[reference_name] = reference_len\n        if (len(references) == 0):\n            raise ValueError(\"no references longer than 10 kbp in header\")\n\n        references = dict(sorted(references.items(), key = lambda item: item[1], reverse = True))\n        matrix_size = 0\n        for reference_name, reference_len in references.items():\n            references[reference_name] = matrix_size\n            matrix_size += reference_len\n\n        matrix_size = matrix_size // base_resolution + 1\n        matrix = numpy.zeros(shape = (matrix_size, matrix_size))\n\n        while True: # parse alignments\n            _, flag, rhs_name, rhs_begin, quality, _, rhs_next_name, rhs_next_begin, *junk = line.split('\t')\n            if (rhs_next_name == '='):\n                rhs_next_name = rhs_name\n\n            flag = int(flag)\n            rhs_begin = int(rhs_begin)\n            rhs_next_begin = int(rhs_next_begin)\n\n            # 1 - multiple segments\n            # 4 - segment unmapped\n            # 8 - next segment unmapped\n            # 64 - first segment\n            # 256 - secondary alignment\n            if (    (flag & 1) and \\\n                not (flag & 4) and \\\n                not (flag & 8) and \\\n                    (flag & 64) and \\\n                not (flag & 256)):\n                if (rhs_name in references and \\\n                    rhs_next_name in references):\n                    x = (rhs_begin + references[rhs_name]) // base_resolution\n                    y = (rhs_next_begin + references[rhs_next_name]) // base_resolution\n                    matrix[x][y] += 1\n                    matrix[y][x] += 1\n\n            line = sam.readline()\n            if (len(line) == 0):\n                break\n\n        numpy.save(\"heatmap\", matrix)\n\n        return matrix\n    # ParseSam\n\n    def ParseNumpy(self):\n        return numpy.load(self.path[0])\n    # ParseNumpy\n\n    def DrawHeatmap(self):\n        if (self.resolution < base_resolution):\n            return\n\n        try:\n            matrix = self.ParseNumpy()\n        except Exception:\n            try:\n                matrix = self.ParseSam()\n            except Exception:\n                try:\n                    matrix = self.ParsePaf()\n                except Exception:\n                    print(\"[tarantula::] error: wrong format\")\n                    exit()\n\n        shrink_size = self.resolution // base_resolution\n        heatmap_size = matrix.shape[0] // shrink_size\n        deleted = [matrix.shape[0] - 1 - i for i in range(matrix.shape[0] % shrink_size)]\n\n        heatmap = numpy.delete(matrix, deleted, axis = 0)\n        heatmap = numpy.delete(heatmap, deleted, axis = 1)\n        heatmap = heatmap.reshape((heatmap_size, shrink_size, heatmap_size, shrink_size)).sum(axis = 1).sum(axis = 2)\n        heatmap = numpy.clip(heatmap, 0, 1000)\n\n        fig, ax = pyplot.subplots(figsize = (80, 60))\n\n        ax = seaborn.heatmap(heatmap,\n                             xticklabels = False,\n                             yticklabels = False,\n                             cmap = seaborn.cm.rocket_r,\n                             ax = ax)\n\n        pyplot.savefig(\"heatmap_\" + str(self.resolution) + \".png\", format = 'png')\n        pyplot.close()\n    # DrawHeatmap\n\n    def Run(self):\n        if (self.mode == \"heatmap\"):\n            self.DrawHeatmap()\n        elif (self.mode == \"pile\"):\n            self.DrawPile()\n        return\n    # Run\n# Plotter\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(\n        description = \"Plotter is a tool for drawing heatmaps and pile-o-grams\",\n        formatter_class = argparse.ArgumentDefaultsHelpFormatter)\n    parser.add_argument(\"mode\",\n        help = \"draw either the [heatmap] or [pile]-o-grams\")\n    parser.add_argument(\"path\",\n        help = \"heatmap: SAM | 2x PAF | npy - pile: JSON\",\n        nargs = \"*\")\n    parser.add_argument(\"--resolution\",\n        help = \"heatmap resolution in bp\",\n        type = int,\n        default = base_resolution)\n\n    args = parser.parse_args()\n    plotter = Plotter(args.mode, args.path, args.resolution)\n    plotter.Run()\n# __main__\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.clip", "matplotlib.pyplot.subplots", "numpy.save", "numpy.delete", "matplotlib.pyplot.close", "numpy.load", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
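The only non-obvious step in plotter.py above is how DrawHeatmap shrinks the contact matrix: trim rows and columns so the size divides evenly, then reshape into fixed-size blocks and sum them. A minimal, self-contained numpy sketch of that binning trick (the helper name `block_sum` is ours, not from the repo):

```python
import numpy as np

def block_sum(matrix: np.ndarray, factor: int) -> np.ndarray:
    """Downsample a square matrix by summing non-overlapping factor x factor blocks."""
    n = matrix.shape[0] // factor
    trimmed = matrix[:n * factor, :n * factor]  # drop the ragged edge, as DrawHeatmap does
    return trimmed.reshape(n, factor, n, factor).sum(axis=(1, 3))

m = np.arange(36, dtype=float).reshape(6, 6)
print(block_sum(m, 2))  # 3x3 matrix of 2x2 block sums
```

DrawHeatmap reaches the same result with numpy.delete plus two chained .sum() calls; slicing before a single reshape is the more common idiom.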
acgardner/plntter
[ "d8d5b8e1fb2d7c1c4e21ec9e66cbb3c7419a7825" ]
[ "tests/test_attitude.py" ]
[ "from plntter.utils.attitude import AttitudeSolver, AttitudeTransform\nfrom plntter.utils.vector import Vector\n\nimport numpy as np\n\n\ndef test_QMethod() -> None:\n r1,r2,r3 = Vector.random(), Vector.random(), Vector.random()\n b1,b2,b3 = Vector.random(), Vector.random(), Vector.random()\n R,B = np.vstack((r1,r2,r3)), np.vstack((b1,b2,b3))\n var = AttitudeTransform.arcsec_to_rad(10)\n q = AttitudeSolver.QMethod(B,R,var)\n print(q.val)\n" ]
[ [ "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
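This record ships only the test, not AttitudeSolver.QMethod itself (note the real method also takes a measurement variance). For orientation, Davenport's q-method, which a test named test_QMethod presumably exercises, assembles a symmetric 4x4 K matrix from weighted body/reference vector pairs and returns the eigenvector of its largest eigenvalue as the optimal quaternion. A hedged numpy sketch; the function name, argument layout, and quaternion convention are ours:

```python
import numpy as np

def q_method(body_vecs: np.ndarray, ref_vecs: np.ndarray, weights=None) -> np.ndarray:
    """Davenport's q-method: unit quaternion [q1, q2, q3, q0] best rotating
    the reference vectors (rows of ref_vecs) onto the body vectors."""
    w = np.ones(len(body_vecs)) if weights is None else np.asarray(weights)
    # attitude profile matrix B = sum_i w_i * b_i r_i^T
    B = sum(wi * np.outer(b, r) for wi, b, r in zip(w, body_vecs, ref_vecs))
    sigma, S = np.trace(B), B + B.T
    Z = np.array([B[1, 2] - B[2, 1], B[2, 0] - B[0, 2], B[0, 1] - B[1, 0]])
    K = np.zeros((4, 4))
    K[:3, :3] = S - sigma * np.eye(3)
    K[:3, 3] = Z
    K[3, :3] = Z
    K[3, 3] = sigma
    vals, vecs = np.linalg.eigh(K)  # K is symmetric, so eigh applies
    return vecs[:, np.argmax(vals)]

# sanity check: identical frames should recover the identity rotation
rng = np.random.default_rng(0)
r = rng.normal(size=(3, 3))
r /= np.linalg.norm(r, axis=1, keepdims=True)
assert abs(q_method(r, r)[3]) > 0.999
```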
eubr-bigsea/Compss-Python
[ "09ab7c474c8badc9932de3e1148f62ffba16b0b2" ]
[ "tests/benchmark/aggregation/test.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom ddf_library.ddf import DDF\nfrom ddf_library.utils import generate_info\n\nfrom pycompss.api.api import compss_barrier\nfrom pycompss.api.task import task\n\nimport pandas as pd\nimport numpy as np\nimport sys\nimport time\n\n\n@task(returns=2)\ndef generate_partition(size, col_feature, col_label, frag):\n    df = pd.DataFrame({col_feature: np.random.randint(0, 100000, size=size),\n                       col_label: np.random.randint(0, 100000, size=size)})\n\n    info = generate_info(df, frag)\n    return df, info\n\n\ndef generate_data(total_size, nfrag, col_feature, col_label):\n    dfs = [[] for _ in range(nfrag)]\n    info = [[] for _ in range(nfrag)]\n\n    size = total_size // nfrag\n    sizes = [size for _ in range(nfrag)]\n    sizes[-1] += (total_size - sum(sizes))\n\n    for f, s in enumerate(sizes):\n        dfs[f], info[f] = generate_partition(s, col_feature, col_label, f)\n\n    return dfs, info\n\n\nif __name__ == \"__main__\":\n\n    n_rows = int(sys.argv[1])\n    n_frag = int(sys.argv[2])\n    col1 = 'col_1'\n    col_label = 'group'\n\n    t1 = time.time()\n    df_list, info = generate_data(n_rows, n_frag, col1, col_label)\n    ddf1 = DDF().import_data(df_list, info)\n    compss_barrier()\n    t2 = time.time()\n    print(\"Time to generate and import data - t2-t1:\", t2 - t1)\n\n    ddf1 = ddf1.group_by([col1])\\\n        .first([col1])\\\n        .count([col1], alias=['Counter'])\\\n        .cache()\n    compss_barrier()\n    t3 = time.time()\n    print(\"Time to aggregate t3-t2:\", t3 - t2)\n    print(\"t_all:\", t3 - t1)\n    # ddf1.show()\n" ]
[ [ "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
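The benchmark above needs a running COMPSs installation, but its partition-sizing rule is plain arithmetic and easy to check in isolation: split total_size into nfrag equal chunks and fold the integer-division remainder into the last one. A standalone restatement (the helper name is ours):

```python
def partition_sizes(total_size: int, nfrag: int) -> list:
    """Near-equal chunk sizes; the last fragment absorbs the remainder."""
    size = total_size // nfrag
    sizes = [size] * nfrag
    sizes[-1] += total_size - sum(sizes)
    return sizes

assert partition_sizes(10, 3) == [3, 3, 4]
assert sum(partition_sizes(1_000_001, 8)) == 1_000_001
```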
hamling-ling/ShaRinGan
[ "dbf2a462a07e0473e0a7bb19fe8f3c864d25ce06" ]
[ "src/app/audio_streamer.py" ]
[ "import pyaudio\nimport time\nimport numpy as np\nimport audio_utility as au\n\nclass AudioStreamer():\n    def __init__(self, input_device_name, output_device_name):\n        self.input_device_name = input_device_name\n        self.output_device_name = output_device_name\n        self.channels = 1   # mono microphone\n        self.rate = 44100   # CD quality\n        self.format = pyaudio.paInt16\n        self.seq = 0\n\n    def get_inout_devices(self):\n        input_device = None\n        output_device = None\n        retry_counter = 0\n        while retry_counter < 10:\n            input_device = au.get_pyaudio_device(self.p, self.input_device_name)\n            output_device = au.get_pyaudio_device(self.p, self.output_device_name)\n            if(input_device is not None and output_device is not None):\n                break\n            if(input_device is None):\n                print(\"retrying to get audio input device\", self.input_device_name)\n            if(output_device is None):\n                print(\"retrying to get audio output device\", self.output_device_name)\n\n            # Re-create pyaudio and try again\n            self.p.terminate()\n            self.p = pyaudio.PyAudio()\n            time.sleep(1)\n            retry_counter = retry_counter + 1\n        return input_device, output_device\n\n    def open_device(self, callback_context, callback):\n        self.p = pyaudio.PyAudio()\n        input_device, output_device = self.get_inout_devices()\n        if(input_device is None):\n            msg = \"input device {0} not found\".format(self.input_device_name)\n            self.p.terminate()\n            raise ValueError(msg)\n        if(output_device is None):\n            msg = \"output device {0} not found\".format(self.output_device_name)\n            self.p.terminate()\n            raise ValueError(msg)\n\n        self.user_callback = callback\n        self.user_context = callback_context\n        self.stream = self.p.open(\n            input_device_index=input_device.get('index'),\n            output_device_index=output_device.get('index'),\n            format=self.format,\n            channels=self.channels,\n            rate=self.rate,\n            frames_per_buffer=1024,\n            output=True,\n            input=True,\n            stream_callback=self.data_arrived,\n            start=False )\n        print(self.input_device_name, \" opened for input\")\n        print(self.output_device_name, \" opened for output\")\n\n    def close_device(self):\n        self.user_callback = None\n        self.stream.close()\n        self.p.terminate()\n        self.stream = None\n        self.p = None\n\n    def start_streaming(self):\n        self.stream.start_stream()\n\n    def stop_streaming(self):\n        self.stream.stop_stream()\n\n    def is_streaming(self):\n        return self.stream.is_active()\n\n    def data_arrived(self, in_data, frame_count, time_info, status):\n        # convert binary array to int16, then normalize to float\n        in_floats = np.frombuffer(in_data, dtype=\"int16\") / np.float32(32768.0)\n\n        # callback and receive output data\n        start_time = time.time()\n        out_floats = self.user_callback(self.user_context, self.seq, in_floats)\n        milli_sec = ((time.time() - start_time) * 1000)\n        if(22.0 < milli_sec):\n            print(\"took \", milli_sec, \"ms might be dropping frame data\")\n        self.seq = self.seq + 1\n\n        # convert returned data from callback to pyaudio data\n        denorm = out_floats * 32768\n        out_data16 = denorm.astype(np.int16)\n        out_data = out_data16.tobytes()\n\n        return (out_data, pyaudio.paContinue)\n\n    def close(self):\n        self.p.terminate()\n" ]
[ [ "numpy.frombuffer", "numpy.float32" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
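data_arrived above is built around a fixed-point round trip: decode int16 PCM to floats in [-1, 1), hand them to the user callback, then denormalize back to int16. A minimal sketch of that round trip (the helper names are ours); note the clip step, which the original omits, so a callback returning exactly +1.0 would otherwise wrap around when cast to int16:

```python
import numpy as np

def pcm16_to_float(raw: bytes) -> np.ndarray:
    """int16 PCM bytes -> float samples in roughly [-1, 1)."""
    return np.frombuffer(raw, dtype="int16") / np.float32(32768.0)

def float_to_pcm16(samples: np.ndarray) -> bytes:
    """Denormalize and re-encode, clipping so +1.0 cannot overflow int16."""
    clipped = np.clip(samples, -1.0, 32767.0 / 32768.0)
    return (clipped * 32768.0).astype(np.int16).tobytes()

tone = np.sin(np.linspace(0.0, 2.0 * np.pi, 1024))
assert np.allclose(pcm16_to_float(float_to_pcm16(tone)), tone, atol=1e-3)
```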
shanenak/social-data
[ "89d4c6972158df353bc16e3a5403fa53cc255684", "89d4c6972158df353bc16e3a5403fa53cc255684" ]
[ "run.py", "queries.py" ]
[ "import os\nimport pandas as pd\nimport streamlit as st\n\nimport data_explorer\nimport eviction_analysis\nimport equity_explorer\nimport queries\nimport analysis\nimport utils\nfrom constants import STATES\n\n# Pandas options\npd.set_option('max_rows', 25)\npd.set_option('max_columns', 12)\npd.set_option('expand_frame_repr', True)\npd.set_option('large_repr', 'truncate')\npd.options.display.float_format = '{:.2f}'.format\n\nPAGES = [\n 'Data Explorer',\n 'Equity Explorer',\n 'Eviction Analysis'\n]\n\ndef print_summary(df: pd.DataFrame, output: str):\n print('*** Results ***')\n if 'Rank' in df.columns:\n print('* Shown in order by overall priority, higher values mean higher priority.')\n df.sort_values('Rank', ascending=False, inplace=True)\n print(df['Rank'])\n print('Normalized analysis data is located at {o}'.format(o=output[:-5]) + '_overall_vulnerability.xlsx')\n elif len(df) > 1:\n print('* Shown in order by relative risk, higher values mean higher relative risk.')\n df.sort_values('Relative Risk', ascending=False, inplace=True)\n print(df['Relative Risk'])\n print('Normalized analysis data is located at {o}'.format(o=output[:-5]) + '_overall_vulnerability.xlsx')\n else:\n print('Fetched single county data')\n\n print('Raw fetched data is located at {o}'.format(o=output))\n print('Done!')\n\n\ndef run_shell() -> pd.DataFrame:\n task = input(\n 'Analyze a single county (1), multiple counties (2), all the counties in a state (3), or a nation-wide analysis (4)? [default: 1]') \\\n .strip()\n if task == '1' or task == '':\n res = input('Enter the county and state to analyze (ie: Jefferson County, Colorado):')\n res = res.strip().split(',')\n cost_of_evictions = input(\n 'Run an analysis to estimate the cost to avoid evictions? (Y/n) ')\n cost_of_evictions.strip()\n county = res[0].strip().lower()\n state = res[1].strip().lower()\n df = queries.get_county_data(state, [county])\n\n if cost_of_evictions == 'y' or cost_of_evictions == '':\n df = analysis.calculate_cost_estimate(df, rent_type='fmr')\n\n utils.output_table(df, 'Output/' + county.capitalize() + '.xlsx')\n print_summary(df, 'Output/' + county.capitalize() + '.xlsx')\n return df\n elif task == '2':\n state = input(\"Which state are you looking for? (ie: California)\").strip()\n counties = input('Please specify one or more counties, separated by commas.').strip().split(',')\n df = queries.get_county_data(state, counties)\n cost_of_evictions = input(\n 'Run an analysis to estimate the cost to avoid evictions? (Y/n) ')\n if cost_of_evictions == 'y' or cost_of_evictions == '':\n df = analysis.calculate_cost_estimate(df, rent_type='fmr')\n\n utils.output_table(df, 'Output/' + state + '_selected_counties.xlsx')\n analysis_df = analysis.rank_counties(df, state + '_selected_counties')\n print_summary(analysis_df, 'Output/' + state + '_selected_counties.xlsx')\n return df\n elif task == '3':\n state = input(\"Which state are you looking for? (ie: California)\").strip()\n df = queries.get_county_data(state)\n cost_of_evictions = input(\n 'Run an analysis to estimate the cost to avoid evictions? 
(Y/n) ')\n if cost_of_evictions == 'y' or cost_of_evictions == '':\n df = analysis.calculate_cost_estimate(df, rent_type='fmr')\n\n utils.output_table(df, 'Output/' + state + '.xlsx')\n analysis_df = analysis.rank_counties(df, state)\n print_summary(analysis_df, 'Output/' + state + '.xlsx')\n temp = df.copy()\n temp.reset_index(inplace=True)\n counties = temp['County Name'].to_list()\n geom = queries.get_county_geoms(counties, state.lower())\n df = df.merge(geom, on='County Name', how='outer')\n return df\n elif task == '4':\n frames = []\n for state in STATES:\n df = queries.get_county_data(state)\n frames.append(df)\n natl_df = pd.concat(frames)\n cost_of_evictions = input(\n 'Run an analysis to estimate the cost to avoid evictions (Y/n) ')\n if cost_of_evictions == 'y' or cost_of_evictions == '':\n df = analysis.calculate_cost_estimate(natl_df, rent_type='fmr')\n\n utils.output_table(natl_df, 'Output/US_national.xlsx')\n analysis_df = analysis.rank_counties(natl_df, 'US_national')\n print_summary(analysis_df, 'Output/US_national.xlsx')\n return df\n else:\n raise Exception('INVALID INPUT! Enter a valid task number.')\n\n\ndef run_UI():\n st.set_page_config(\n page_title=\"Arup Social Data\",\n page_icon=\"🏠\",\n initial_sidebar_state=\"expanded\",\n menu_items={\n 'Report a bug': \"https://github.com/arup-group/social-data/issues/new/choose\",\n 'About': \"\"\" \n If you're seeing this, we would love your contribution! If you find bugs, please reach out or create an issue on our \n [GitHub](https://github.com/arup-group/social-data) repository. If you find that this interface doesn't do what you need it to, you can create an feature request \n at our repository or better yet, contribute a pull request of your own. You can reach out to the team on LinkedIn or \n Twitter if you have questions or feedback.\n \n More documentation and contribution details are at our [GitHub Repository](https://github.com/arup-group/social-data).\n \n This app is the result of hard work by our team:\n - [Jared Stock 🐦](https://twitter.com/jaredstock) \n - [Angela Wilson 🐦](https://twitter.com/AngelaWilson925) (alum)\n - Sam Lustado\n - Lingyi Chen\n - Kevin McGee (alum)\n - Jen Combs\n - Zoe Temco\n - Prashuk Jain (alum)\n - Sanket Shah (alum)\n\n\n Special thanks to Julieta Moradei and Kamini Ayer from New Story, Kristin Maun from the city of Tulsa, \n Emily Walport, Irene Gleeson, and Elizabeth Joyce with Arup's Community Engagment team, and everyone else who has given feedback \n and helped support this work. Also thanks to the team at Streamlit for their support of this work.\n\n The analysis and underlying data are provided as-is as an open source project under an [MIT license](https://github.com/arup-group/social-data/blob/master/LICENSE). \n\n Made by [Arup](https://www.arup.com/).\n \"\"\"\n }\n )\n st.sidebar.title('Arup Social Data')\n\n page=st.sidebar.radio('Navigation', PAGES, index=st.session_state.page)\n\n st.experimental_set_query_params(page=page)\n\n if page == 'Eviction Analysis':\n st.sidebar.write(\"\"\"\n ## About\n \n The Eviction Analysis tool is targeted at providing data and and context around evictions at the county level. It provides a _Relative Risk Index_, which represents the varying relative risk of eviction in the selected counties. You can also estimate the cost to avoid evictions per month based on the number of people at risk and the cost of rent in the counties selected. 
\n \"\"\")\n eviction_analysis.eviction_UI()\n\n elif page == 'Equity Explorer':\n st.sidebar.write(\"\"\"\n ## About\n\n The Equity Explorer is a set of Arup-designed analyses to identify vulnerable geographies at the census tract level. It also includes a transit analysis that shows relative access to transit and other equity indicators for the identified equity geographies. \n \"\"\")\n equity_explorer.census_equity_explorer()\n else:\n st.sidebar.write(\"\"\"\n ## About\n\n The Data Explorer is an interface to allow you to explore the data available in our database and do some initial analysis. In total we have over 2 million rows and over 400 unique features with coverage across the 50 US states and expanding to the District of Columbia and Puerto Rico. You can use this interface to combine multiple datasets and export raw data as an Excel file. \n \n Datasets vary between county and census tract resolution and data may not exist for all counties or tracts. Some features may not work for all states/territories. \n \"\"\")\n st.title(\"Data Explorer\")\n subcol_1, subcol_2 = st.columns(2)\n with subcol_1:\n st.session_state.data_type = st.radio(\"Data resolution:\", ('County Level', 'Census Tracts'), index=0)\n with subcol_2:\n # Todo: implement for census level too\n if st.session_state.data_type =='County Level':\n st.session_state.data_format = st.radio('Data format', ['Raw Values', 'Per Capita', 'Per Square Mile'], 0)\n\n if st.session_state.data_type == 'County Level':\n data_explorer.county_data_explorer()\n else:\n data_explorer.census_data_explorer()\n\n\nif __name__ == '__main__':\n\n if not os.path.exists('Output'):\n os.makedirs('Output')\n if st._is_running_with_streamlit:\n url_params = st.experimental_get_query_params()\n if 'loaded' not in st.session_state:\n print('init state')\n if len(url_params.keys()) == 0:\n st.experimental_set_query_params(page='Data Explorer')\n url_params = st.experimental_get_query_params()\n\n st.session_state.page = PAGES.index(url_params['page'][0])\n st.session_state['data_type'] = 'County Level'\n st.session_state['data_format'] = 'Raw Values'\n st.session_state['loaded'] = False\n\n\n run_UI()\n else:\n run_shell()\n", "import os\r\nimport sys\r\nimport psycopg2\r\nimport pandas as pd\r\nimport geopandas as gpd\r\nfrom sqlalchemy import create_engine\r\nfrom shapely import wkb\r\nimport streamlit as st\r\nfrom sklearn import preprocessing\r\n\r\nimport credentials\r\nfrom constants import STATES\r\n\r\nFRED_TABLES = [\r\n 'burdened_households',\r\n # 'homeownership_rate',\r\n 'income_inequality',\r\n 'population_below_poverty',\r\n # 'resident_population',\r\n 'single_parent_households',\r\n 'snap_benefits_recipients',\r\n 'unemployment_rate',\r\n]\r\n\r\nSTATIC_TABLES = [\r\n 'chmura_economic_vulnerability_index',\r\n 'fair_market_rents'\r\n 'median_rents',\r\n]\r\n\r\nSTATIC_COLUMNS = {\r\n 'chmura_economic_vulnerability_index': ['VulnerabilityIndex', 'Rank'],\r\n 'fair_market_rents': ['fmr_0', 'fmr_1', 'fmr_2', 'fmr_3', 'fmr_4'],\r\n 'median_rents': ['rent50_0', 'rent50_1', 'rent50_2', 'rent50_3', 'rent50_4']\r\n}\r\n\r\nTABLE_HEADERS = {\r\n 'burdened_households': 'Burdened Households',\r\n 'homeownership_rate': 'Home Ownership',\r\n 'income_inequality': 'Income Inequality',\r\n 'population_below_poverty': 'Population Below Poverty Line',\r\n 'single_parent_households': 'Single Parent Households',\r\n 'snap_benefits_recipients': 'SNAP Benefits Recipients',\r\n 'unemployment_rate': 'Unemployment Rate',\r\n 'resident_population': 
'Resident Population',\r\n}\r\n\r\nEQUITY_COUNTY_HEADERS = [\r\n 'Age 19 or Under', 'Age 65 or Over',\r\n 'Non-White Population (%)'\r\n]\r\n\r\nCENSUS_HEADERS = [\r\n 'People of Color (%)', '200% Below Poverty Level (%)',\r\n 'People with Disability (%)', 'Age 19 or Under (%)', 'Age 65 or Over (%)',\r\n 'Limited English Proficiency (%)', 'Single Parent Family (%)', 'Zero-Vehicle Household (%)'\r\n]\r\n\r\nEQUITY_CENSUS_POC_LOW_INCOME = [\r\n 'People of Color', \"200% Below Poverty Level\"\r\n]\r\n\r\nEQUITY_CENSUS_REMAINING_HEADERS = [\r\n 'People with Disability', 'Age 19 or Under', 'Age 65 or Over',\r\n 'Limited English Proficiency', 'Single Parent Family', 'Zero-Vehicle Household'\r\n]\r\n\r\nTRANSPORT_CENSUS_HEADERS = [\r\n 'Zero-Vehicle Households (%)',\r\n 'Vehicle Miles Traveled',\r\n 'No Computer Households (%)',\r\n 'Renter Occupied Units (%)',\r\n 'Drive Alone Commuters (%)',\r\n # 'Drive Alone (#)',\r\n 'Average Commute Time (min)',\r\n 'People of Color (%)', \"200% Below Poverty Level (%)\"\r\n]\r\n\r\nPOSITIVE_TRANSPORT_CENSUS_HEADERS = [\r\n 'Walkability Index',\r\n 'Public Transport Commuters (%)', 'Bicycle Commuters (%)'\r\n]\r\n\r\nTABLE_UNITS = {\r\n 'burdened_households': '%',\r\n 'homeownership_rate': '%',\r\n 'income_inequality': 'Ratio',\r\n 'population_below_poverty': '%',\r\n 'single_parent_households': '%',\r\n 'snap_benefits_recipients': 'Persons',\r\n 'unemployment_rate': '%',\r\n 'resident_population': 'Thousands of Persons',\r\n 'Zero-Vehicle Households (%)': '%',\r\n 'Vehicle Miles Traveled': ' miles',\r\n 'No Computer Households (%)': '%',\r\n 'Renter Occupied Units (%)': '%',\r\n 'Drive Alone Commuters (%)': '%',\r\n # 'Drive Alone (#)': '',\r\n 'Average Commute Time (min)': ' min',\r\n 'People of Color (%)': '%',\r\n \"200% Below Poverty Level (%)\": '%',\r\n 'Walkability Index': '',\r\n 'Public Transport Commuters (%)': '%',\r\n 'Bicycle Commuters (%)': '%'\r\n}\r\n\r\n# @st.cache(allow_output_mutation=True, hash_funcs={\"_thread.RLock\": lambda _: None})\r\nCENSUS_TABLES = ['population_below_poverty_double',\r\n 'commuting_characteristics',\r\n 'disability_status',\r\n 'educational_attainment',\r\n 'employment_status',\r\n 'english_proficiency',\r\n 'family_type',\r\n 'hispanic_or_latino_origin_by_race',\r\n 'household_job_availability',\r\n 'household_technology_availability',\r\n 'household_vehicle_availability',\r\n 'housing_units_in_structure',\r\n 'level_of_urbanicity',\r\n 'occupants_per_bedroom',\r\n 'poverty_status',\r\n 'resident_population_census_tract',\r\n 'sex_by_age',\r\n 'sex_of_workers_by_vehicles_available',\r\n 'trip_miles',\r\n 'walkability_index'\r\n ]\r\n\r\nEQUITY_CENSUS_TABLES = ['poverty_status',\r\n # 'resident_population_census_tract',\r\n 'population_below_poverty_double',\r\n 'sex_by_age',\r\n 'english_proficiency', 'household_vehicle_availability',\r\n 'hispanic_or_latino_origin_by_race', 'disability_status',\r\n 'family_type'\r\n ]\r\n\r\nTRANSPORT_CENSUS_TABLES = ['population_below_poverty_double',\r\n 'hispanic_or_latino_origin_by_race',\r\n 'household_vehicle_availability',\r\n 'level_of_urbanicity',\r\n 'trip_miles',\r\n 'walkability_index',\r\n 'housing_units_in_structure',\r\n 'median_household_income',\r\n 'household_technology_availability',\r\n 'commuting_characteristics']\r\n\r\n\r\ndef init_engine():\r\n engine = create_engine(\r\n f'postgresql://{credentials.DB_USER}:{credentials.DB_PASSWORD}@{credentials.DB_HOST}:{credentials.DB_PORT}/{credentials.DB_NAME}')\r\n return engine\r\n\r\n\r\ndef 
init_connection():\r\n if st.secrets:\r\n conn = psycopg2.connect(**st.secrets[\"postgres\"])\r\n else:\r\n conn = psycopg2.connect(\r\n user=credentials.DB_USER,\r\n password=credentials.DB_PASSWORD,\r\n host=credentials.DB_HOST,\r\n port=credentials.DB_PORT,\r\n dbname=credentials.DB_NAME\r\n )\r\n return conn\r\n\r\n\r\ndef write_table(df: pd.DataFrame, table: str):\r\n engine = init_engine()\r\n df.to_sql(table, engine, if_exists='replace', method='multi')\r\n\r\n\r\ndef all_counties_query(where: str = None) -> pd.DataFrame:\r\n conn = init_connection()\r\n cur = conn.cursor()\r\n query = f\"SELECT DISTINCT county_name, state_name, county_id FROM id_index\"\r\n if where:\r\n query += f\" WHERE {where}\"\r\n query += \";\"\r\n cur.execute(query)\r\n colnames = [desc[0] for desc in cur.description]\r\n results = cur.fetchall()\r\n conn.commit()\r\n df = pd.DataFrame(results, columns=colnames)\r\n return df\r\n\r\n\r\ndef table_names_query() -> list:\r\n conn = init_connection()\r\n cur = conn.cursor()\r\n cur.execute(\"\"\"SELECT table_name FROM information_schema.tables\r\n WHERE table_schema = 'public'\r\n \"\"\")\r\n results = cur.fetchall()\r\n conn.commit()\r\n\r\n res = [_[0] for _ in results]\r\n return res\r\n\r\n\r\ndef postgis_query() -> pd.DataFrame:\r\n conn = init_connection()\r\n shapes_query = f\"SELECT * FROM NTM_shapes\"\r\n stops_query = f\"SELECT * FROM NTM_stops\"\r\n shapes_df = gpd.GeoDataFrame.from_postgis(shapes_query, conn)\r\n stops_df = gpd.GeoDataFrame.from_postgis(stops_query, conn)\r\n return shapes_df, stops_df\r\n\r\n\r\[email protected]_memo(ttl=1200)\r\ndef read_table(table: str, columns: list = None, where: str = None, order_by: str = None,\r\n order: str = 'ASC', fred=False) -> pd.DataFrame:\r\n conn = init_connection()\r\n if not fred:\r\n if columns is not None:\r\n cols = ', '.join(columns)\r\n query = f\"SELECT {cols} FROM {table}\"\r\n else:\r\n query = f\"SELECT * FROM {table}\"\r\n if where is not None:\r\n query += f\" WHERE {where}\"\r\n if order_by is not None:\r\n query += f\"ORDER BY {order_by} {order}\"\r\n else:\r\n if fred:\r\n query = f\"\"\"SELECT {table}.* FROM {table},\r\n (SELECT county_id,max(date) as date\r\n FROM {table}\r\n GROUP BY county_id) max_county\r\n WHERE {table}.{where}\r\n AND {table}.county_id=max_county.county_id\r\n AND {table}.date=max_county.date\"\"\"\r\n query += ';'\r\n df = pd.read_sql(query, con=conn)\r\n return df\r\n\r\n\r\[email protected]_memo(ttl=1200)\r\ndef latest_data_census_tracts(state: str, counties: list, tables: list) -> pd.DataFrame:\r\n conn = init_connection()\r\n cur = conn.cursor()\r\n tracts_df = census_tracts_geom_query(counties, state)\r\n counties_str = str(tuple(counties)).replace(',)', ')')\r\n where_clause = f\"WHERE id_index.state_name ='{state}' AND id_index.county_name IN {counties_str}\"\r\n\r\n for table_name in tables:\r\n query = f\"\"\"SELECT {table_name}.*, id_index.county_name, id_index.county_id, id_index.state_name, id_index.tract_id,\r\n resident_population_census_tract.tot_population_census_2010\r\n FROM {table_name} \r\n INNER JOIN id_index ON {table_name}.tract_id = id_index.tract_id\r\n INNER JOIN resident_population_census_tract ON {table_name}.tract_id = resident_population_census_tract.tract_id\r\n {where_clause};\"\"\"\r\n cur.execute(query)\r\n results = cur.fetchall()\r\n conn.commit()\r\n\r\n colnames = [desc[0] for desc in cur.description]\r\n df = pd.DataFrame(results, columns=colnames)\r\n df = df.loc[:, ~df.columns.duplicated()]\r\n\r\n 
df.rename({'tract_id': 'Census Tract'}, axis=1, inplace=True)\r\n\r\n tracts_df = tracts_df.merge(df, on=\"Census Tract\", how=\"inner\", suffixes=('', '_y'))\r\n tracts_df.drop(tracts_df.filter(regex='_y$').columns.tolist(), axis=1, inplace=True)\r\n tracts_df = tracts_df.loc[:, ~tracts_df.columns.duplicated()]\r\n return tracts_df\r\n\r\n\r\ndef load_distributions() -> tuple:\r\n metro_areas = generic_select_query('housing_stock_distribution', [\r\n 'location',\r\n '0_br_pct',\r\n '1_br_pct',\r\n '2_br_pct',\r\n '3_br_pct',\r\n '4_br_pct'\r\n ])\r\n locations = list(metro_areas['location'])\r\n metro_areas.set_index('location', inplace=True)\r\n\r\n return metro_areas, locations\r\n\r\n\r\ndef policy_query() -> pd.DataFrame:\r\n conn = init_connection()\r\n cur = conn.cursor()\r\n cur.execute(\r\n 'SELECT county_id as county_id, policy_value as \"Policy Value\", countdown as \"Countdown\" '\r\n 'FROM policy'\r\n )\r\n colnames = [desc[0] for desc in cur.description]\r\n results = cur.fetchall()\r\n conn.commit()\r\n\r\n return pd.DataFrame(results, columns=colnames)\r\n\r\n\r\ndef latest_data_single_table(table_name: str, require_counties: bool = True) -> pd.DataFrame:\r\n conn = init_connection()\r\n cur = conn.cursor()\r\n cur.execute(\r\n 'SELECT DISTINCT ON (county_id) '\r\n 'county_id, date AS \"{} Date\", value AS \"{} ({})\" '\r\n 'FROM {} '\r\n 'ORDER BY county_id , \"date\" DESC'.format(TABLE_HEADERS[table_name], TABLE_HEADERS[table_name],\r\n TABLE_UNITS[table_name], table_name))\r\n results = cur.fetchall()\r\n conn.commit()\r\n\r\n colnames = [desc[0] for desc in cur.description]\r\n\r\n df = pd.DataFrame(results, columns=colnames)\r\n if require_counties:\r\n counties_df = all_counties_query()\r\n df = counties_df.merge(df)\r\n return df\r\n\r\n\r\[email protected]_memo(ttl=1200)\r\ndef fred_query(counties_str: str) -> pd.DataFrame:\r\n frames = []\r\n for table_name in FRED_TABLES:\r\n # Todo: update in database and remove new suffix\r\n f_df = read_table(f\"{table_name}_new\", where=f\"county_id in {counties_str}\", columns=[table_name, 'county_id'],\r\n fred=True)\r\n f_df.drop(['date', 'state_name', 'county_name'], axis=1, inplace=True)\r\n frames.append(f_df)\r\n fred_df = pd.concat(frames, axis=1)\r\n fred_df = fred_df.loc[:, ~fred_df.columns.duplicated()]\r\n fred_df = fred_df.astype(float)\r\n chmura_df = static_data_single_table('chmura_economic_vulnerability_index', ['VulnerabilityIndex'])\r\n fred_df = fred_df.merge(chmura_df, how='outer', on='county_id', suffixes=('', '_DROP')).filter(\r\n regex='^(?!.*_DROP)')\r\n return fred_df\r\n\r\n\r\[email protected]_memo(ttl=1200)\r\ndef get_all_county_data(state: str, counties: list) -> pd.DataFrame:\r\n if counties:\r\n counties_str = \"(\" + \",\".join([\"'\" + str(_) + \"'\" for _ in counties]) + \")\"\r\n demo_df = read_table('county_demographics', where=f\"county_id in {counties_str}\")\r\n fred_df = fred_query(counties_str)\r\n demo_df = demo_df.merge(fred_df, on='county_id', how='inner', suffixes=('', '_DROP')).filter(\r\n regex='^(?!.*_DROP)')\r\n\r\n else:\r\n demo_df = read_table('county_demographics', where=f\"state_name='{state}';\")\r\n counties = all_counties_query(f\"state_name='{state}'\")\r\n county_ids = counties['county_id'].to_list()\r\n counties_str = \"(\" + \",\".join([\"'\" + str(_) + \"'\" for _ in county_ids]) + \")\"\r\n fred_df = fred_query(counties_str=counties_str)\r\n demo_df = demo_df.merge(fred_df, on='county_id', how='inner', suffixes=('', '_DROP')).filter(\r\n 
regex='^(?!.*_DROP)')\r\n\r\n demo_df['Non-White Population'] = (demo_df['black'] + demo_df['ameri_es'] + demo_df['asian'] + demo_df[\r\n 'hawn_pi'] + demo_df['hispanic'] + demo_df['other'] + demo_df['mult_race'])\r\n demo_df['Age 19 or Under'] = (\r\n demo_df['age_under5'] + demo_df['age_5_9'] + demo_df['age_10_14'] + demo_df['age_15_19'])\r\n demo_df['Age 65 or Over'] = (demo_df['age_65_74'] + demo_df['age_75_84'] + demo_df['age_85_up'])\r\n demo_df['Non-White Population (%)'] = demo_df['Non-White Population'] / demo_df['population'] * 100\r\n demo_df['fips'] = demo_df['fips'].astype(int)\r\n\r\n demo_df.rename({\r\n 'state_name': 'State',\r\n 'county_name': 'County Name',\r\n 'hse_units': 'Housing Units',\r\n 'vacant': 'Vacant Units',\r\n 'renter_occ': 'Renter Occupied Units',\r\n 'med_age': 'Median Age',\r\n 'white': 'White Population',\r\n 'black': 'Black Population',\r\n 'ameri_es': 'Native American Population',\r\n 'asian': 'Asian Population',\r\n 'hawn_pi': 'Pacific Islander Population',\r\n 'hispanic': 'Hispanic Population',\r\n 'other': 'Other Population',\r\n 'mult_race': 'Multiple Race Population',\r\n 'males': 'Male Population',\r\n 'females': 'Female Population',\r\n 'population': 'Total Population',\r\n }, axis=1, inplace=True)\r\n demo_df.drop_duplicates(inplace=True)\r\n demo_df.fillna(0, inplace=True)\r\n return demo_df\r\n\r\n\r\ndef static_data_single_table(table_name: str, columns: list) -> pd.DataFrame:\r\n conn = init_connection()\r\n cur = conn.cursor()\r\n str_columns = ', '.join('\"{}\"'.format(c) for c in columns)\r\n query = 'SELECT county_id, {} FROM {} '.format(str_columns, table_name)\r\n cur.execute(query)\r\n results = cur.fetchall()\r\n conn.commit()\r\n\r\n colnames = [desc[0] for desc in cur.description]\r\n df = pd.DataFrame(results, columns=colnames)\r\n # counties_df = all_counties_query()\r\n # df = counties_df.merge(df, how='outer')\r\n return df\r\n\r\n\r\ndef generic_select_query(table_name: str, columns: list, where: str = None) -> pd.DataFrame:\r\n conn = init_connection()\r\n cur = conn.cursor()\r\n str_columns = ', '.join('\"{}\"'.format(c) for c in columns)\r\n query = 'SELECT {} FROM {} '.format(str_columns, table_name)\r\n if where is not None:\r\n query += f'WHERE {where}'\r\n cur.execute(query)\r\n results = cur.fetchall()\r\n conn.commit()\r\n\r\n colnames = [desc[0] for desc in cur.description]\r\n df = pd.DataFrame(results, columns=colnames)\r\n return df\r\n\r\n\r\[email protected]_memo(ttl=1200)\r\ndef get_county_geoms(counties_list: list, state: str) -> pd.DataFrame:\r\n conn = init_connection()\r\n counties_list = [_.replace(\"'\", \"''\") for _ in counties_list]\r\n counties = \"(\" + \",\".join([\"'\" + str(_) + \"'\" for _ in counties_list]) + \")\"\r\n cur = conn.cursor()\r\n query = f\"SELECT * FROM county_geoms WHERE state_name='{state}' AND county_name in {counties};\"\r\n cur.execute(query)\r\n results = cur.fetchall()\r\n conn.commit()\r\n\r\n colnames = [desc[0] for desc in cur.description]\r\n df = pd.DataFrame(results, columns=colnames)\r\n parcels = []\r\n for parcel in df['geom']:\r\n geom = wkb.loads(parcel, hex=True)\r\n parcels.append(geom.simplify(tolerance=0.001, preserve_topology=True))\r\n geom_df = pd.DataFrame()\r\n geom_df['county_id'] = df['county_id']\r\n geom_df['County Name'] = df['county_name']\r\n geom_df['State'] = df['state_name']\r\n geom_df['Area sqmi'] = df['sqmi']\r\n geom_df['geom'] = pd.Series(parcels)\r\n return geom_df\r\n\r\n\r\[email protected]_memo(ttl=1200)\r\ndef 
get_county_geoms_by_id(counties_list: list) -> pd.DataFrame:\r\n conn = init_connection()\r\n counties = \"(\" + \",\".join([\"'\" + str(_) + \"'\" for _ in counties_list]) + \")\"\r\n cur = conn.cursor()\r\n query = f\"SELECT * FROM county_geoms WHERE county_id in {counties};\"\r\n cur.execute(query)\r\n results = cur.fetchall()\r\n conn.commit()\r\n\r\n colnames = [desc[0] for desc in cur.description]\r\n df = pd.DataFrame(results, columns=colnames)\r\n parcels = []\r\n for parcel in df['geom']:\r\n geom = wkb.loads(parcel, hex=True)\r\n parcels.append(geom.simplify(tolerance=0.001, preserve_topology=True))\r\n geom_df = pd.DataFrame()\r\n geom_df['county_id'] = df['county_id']\r\n geom_df['County Name'] = df['county_name']\r\n geom_df['State'] = df['state_name']\r\n geom_df['Area sqmi'] = df['sqmi']\r\n geom_df['geom'] = pd.Series(parcels)\r\n return geom_df\r\n\r\n\r\[email protected]_memo(ttl=1200)\r\ndef census_tracts_geom_query(counties, state) -> pd.DataFrame:\r\n conn = init_connection()\r\n cur = conn.cursor()\r\n if len(counties) > 1:\r\n where_clause = 'WHERE id_index.state_name = ' + \"'\" + state + \"'\" + ' ' + 'AND id_index.county_name IN ' + str(\r\n tuple(counties))\r\n if len(counties) == 1:\r\n where_clause = 'WHERE id_index.state_name = ' + \"'\" + state + \"'\" + ' ' + 'AND id_index.county_name IN (' + \"'\" + \\\r\n counties[0] + \"'\" + ')'\r\n query = f\"\"\"\r\n SELECT id_index.county_name, id_index.state_name, census_tracts_geom.tract_id, census_tracts_geom.geom\r\n FROM id_index\r\n INNER JOIN census_tracts_geom ON census_tracts_geom.tract_id=id_index.tract_id\r\n {where_clause};\r\n \"\"\"\r\n cur.execute(query)\r\n colnames = [desc[0] for desc in cur.description]\r\n results = cur.fetchall()\r\n conn.commit()\r\n\r\n df = pd.DataFrame(results, columns=colnames)\r\n parcels = []\r\n for parcel in df['geom']:\r\n geom = wkb.loads(parcel, hex=True)\r\n parcels.append(geom.simplify(tolerance=0.00055, preserve_topology=False))\r\n geom_df = pd.DataFrame()\r\n geom_df['Census Tract'] = df['tract_id']\r\n geom_df['geom'] = pd.Series(parcels)\r\n return geom_df\r\n\r\n\r\[email protected]_memo(ttl=1200)\r\ndef get_transit_stops_geoms(columns: list = [], where: str = None) -> pd.DataFrame:\r\n conn = init_connection()\r\n if len(columns) > 0:\r\n cols = ', '.join(columns)\r\n query = f\"SELECT {cols} FROM ntm_stops\"\r\n else:\r\n query = f\"\"\"SELECT * FROM ntm_stops\"\"\"\r\n if where is not None:\r\n query += f\" WHERE {where}\"\r\n query += ';'\r\n df = gpd.read_postgis(query, conn)\r\n return df\r\n\r\n\r\[email protected]_memo(ttl=1200)\r\ndef get_transit_shapes_geoms(columns: list = [], where: str = None) -> pd.DataFrame:\r\n conn = init_connection()\r\n if len(columns) > 0:\r\n cols = ', '.join(columns)\r\n query = f\"SELECT {cols} FROM ntm_shapes\"\r\n else:\r\n query = f\"\"\"SELECT * FROM ntm_shapes\"\"\"\r\n if where is not None:\r\n query += f\" WHERE {where}\"\r\n query += ';'\r\n df = gpd.read_postgis(query, conn)\r\n return df\r\n\r\n\r\[email protected]_memo(ttl=1200)\r\ndef static_data_all_table() -> pd.DataFrame:\r\n counties_df = all_counties_query()\r\n for table_name in STATIC_TABLES:\r\n table_output = static_data_single_table(table_name, STATIC_COLUMNS[table_name])\r\n counties_df = counties_df.merge(table_output)\r\n return counties_df\r\n\r\n\r\ndef output_data(df: pd.DataFrame, table_name: str = 'fred_tables', ext: str = 'xlsx') -> str:\r\n path = f'Output/{table_name}.{ext}'\r\n if ext == 'pk':\r\n df.to_pickle(path)\r\n elif ext == 
'xlsx':\r\n df.to_excel(path)\r\n elif ext == 'csv':\r\n df.to_csv(path)\r\n else:\r\n print('Only .pk, .csv, and .xlsx outputs are currently supported.')\r\n sys.exit()\r\n return path\r\n\r\n\r\ndef fmr_data():\r\n conn = init_connection()\r\n cur = conn.cursor()\r\n cur.execute('SELECT state_full as \"State\", countyname as \"County Name\" FROM fair_market_rents;')\r\n colnames = [desc[0] for desc in cur.description]\r\n results = cur.fetchall()\r\n conn.commit()\r\n\r\n return pd.DataFrame(results, columns=colnames)\r\n\r\n\r\ndef filter_state(data: pd.DataFrame, state: str) -> pd.DataFrame:\r\n return data[data['State'].str.lower() == state.lower()]\r\n\r\n\r\ndef filter_counties(data: pd.DataFrame, counties: list) -> pd.DataFrame:\r\n counties = [_.lower() for _ in counties]\r\n return data[data['County Name'].str.lower().isin(counties)]\r\n\r\n\r\[email protected]_memo(ttl=1200)\r\ndef load_all_data() -> pd.DataFrame:\r\n if os.path.exists(\"Output/all_tables.xlsx\"):\r\n try:\r\n res = input('Previous data found. Use data from local `all_tables.xlsx`? [y/N]')\r\n if res.lower() == 'y' or res.lower() == 'yes':\r\n df = pd.read_excel('Output/all_tables.xlsx')\r\n else:\r\n df = get_all_county_data()\r\n except:\r\n print('Something went wrong with the Excel file. Falling back to database query.')\r\n df = get_all_county_data()\r\n else:\r\n df = get_all_county_data()\r\n\r\n return df\r\n\r\n\r\ndef clean_data(df: pd.DataFrame) -> pd.DataFrame:\r\n df.set_index(['State', 'County Name'], drop=True, inplace=True)\r\n\r\n df.rename({'Vulnerability Index': 'COVID Vulnerability Index'}, axis=1, inplace=True)\r\n\r\n df = df.loc[:, ~df.columns.str.contains('^Unnamed')]\r\n df = df.loc[:, ~df.columns.duplicated()]\r\n\r\n return df\r\n\r\n\r\ndef clean_equity_data(data: pd.DataFrame) -> pd.DataFrame:\r\n data['Age 19 or Under'] = (\r\n data['female_under_5'] + data['female_5_to_9'] + data['female_10_to_14'] +\r\n data['female_15_to_17'] + data['female_18_and_19'] +\r\n data['male_under_5'] + data['male_5_to_9'] + data['male_10_to_14'] +\r\n data['male_15_to_17'] + data['male_18_and_19']\r\n )\r\n data['Age 65 or Over'] = (\r\n data['female_65_and_66'] + data['female_67_to_69'] + data['female_70_to_74'] +\r\n data['female_75_to_79'] + data['female_80_to_84'] + data['female_85_and_over'] +\r\n data['male_65_and_66'] + data['male_67_to_69'] + data['male_70_to_74'] +\r\n data['male_75_to_79'] + data['male_80_to_84'] + data['male_85_and_over']\r\n )\r\n\r\n data.rename({'below_pov_level': 'Below Poverty Level', '200_below_pov_level': '200% Below Poverty Level'}, axis=1,\r\n inplace=True)\r\n\r\n data['total_w_a_disability'] = (data['male_under_5_w_a_disability'] + data['male_5_to_17_w_a_disability'] + data[\r\n 'male_18_to_34_w_a_disability'] +\r\n data['male_35_to_64_w_a_disability'] + data['male_65_to_74_w_a_disability'] + data[\r\n 'male_75_and_over_w_a_disability'] +\r\n data['female_under_5_w_a_disability'] + data['female_5_to_17_w_a_disability'] +\r\n data['female_18_to_34_w_a_disability'] +\r\n data['female_35_to_64_w_a_disability'] + data['female_65_to_74_w_a_disability'] +\r\n data['female_75_and_over_w_a_disability']\r\n )\r\n\r\n data['speak_eng_not_well'] = (\r\n data['foreign_speak_spanish_speak_eng_not_well'] + data['foreign_speak_spanish_speak_eng_not_at_all'] +\r\n data['foreign_speak_other_indo-euro_speak_eng_not_well'] + data[\r\n 'foreign_speak_other_indo-euro_speak_eng_not_at_all'] +\r\n data['foreign_speak_asian_or_pac_isl_lang_speak_eng_not_well'] + data[\r\n 
'foreign_speak_asian_or_pac_isl_lang_speak_eng_not_at_all'] +\r\n data['foreign_speak_other_speak_eng_not_well'] + data['foreign_speak_other_speak_eng_not_at_all']\r\n )\r\n\r\n data['single_parent'] = data['other_male_householder_no_spouse_w_kids'] + data[\r\n 'other_female_householder_no_spouse_w_kids']\r\n\r\n data['non-white'] = data['total_population'] - data['not_hisp_or_latino_white']\r\n\r\n data['People with Disability (%)'] = data['total_w_a_disability'] / (data['male'] + data['female'])\r\n data['200% Below Poverty Level (%)'] = data['200% Below Poverty Level'] / data[\r\n 'population_for_whom_poverty_status_is_determined']\r\n data['Age 19 or Under (%)'] = data['Age 19 or Under'] / data['total_population']\r\n data['Age 65 or Over (%)'] = data['Age 65 or Over'] / data['total_population']\r\n data['Limited English Proficiency (%)'] = data['speak_eng_not_well'] / (data['native'] + data['foreign_born'])\r\n data['Single Parent Family (%)'] = data['single_parent'] / data['total_families']\r\n data['Zero-Vehicle Household (%)'] = data['percent_hh_0_veh']\r\n data['People of Color (%)'] = data['non-white'] / data['total_population']\r\n\r\n for header in (EQUITY_CENSUS_POC_LOW_INCOME + EQUITY_CENSUS_REMAINING_HEADERS):\r\n data[header + ' (%)'] = round(data[header + ' (%)'] * 100)\r\n\r\n data['criteria_A'] = 0\r\n data['criteria_B'] = 0\r\n\r\n data['Criteria A'] = False\r\n data['Criteria B'] = False\r\n\r\n return data\r\n\r\n\r\ndef clean_transport_data(data: pd.DataFrame, epc: pd.DataFrame) -> pd.DataFrame:\r\n data['walkability_index'] = round(data['walkability_index'])\r\n data['number_drive_alone'] = data['percent_drive_alone'] * data['total_workers_commute']\r\n data.drop(['total_workers_commute'], axis=1, inplace=True)\r\n\r\n data['non-white'] = data['total_population'] - data['not_hisp_or_latino_white']\r\n data['People of Color (%)'] = 100 * (data['non-white'] / data['total_population'])\r\n data['No Computer Households (%)'] = 100 * (data['household_no_computing_device'] / (\r\n data['household_no_computing_device'] + data['household_computer'] + data[\r\n 'household_smartphone_no_computer'] + data['household_no_internet'] + data['household_broadband']))\r\n data['200% Below Poverty Level (%)'] = 100 * (\r\n data['200_below_pov_level'] / data['population_for_whom_poverty_status_is_determined'])\r\n data['Renter Occupied Units (%)'] = 100 * (data['renter-occ_units'] / data['occupied_housing_units'])\r\n\r\n data.rename({\r\n 'percent_hh_0_veh': 'Zero-Vehicle Households (%)',\r\n 'vehicle_miles_traveled': 'Vehicle Miles Traveled',\r\n # 'household_no_computing_device': 'No Computer Households',\r\n # 'household_no_internet': 'No Internet Households',\r\n 'percent_drive_alone': 'Drive Alone Commuters (%)',\r\n # 'number_drive_alone': 'Drive Alone (#)',\r\n 'mean_travel_time': \"Average Commute Time (min)\",\r\n 'walkability_index': \"Walkability Index\",\r\n 'percent_public_transport': 'Public Transport Commuters (%)',\r\n 'percent_bicycle': 'Bicycle Commuters (%)'\r\n },\r\n axis=1, inplace=True)\r\n\r\n averages = {}\r\n epc_averages = {}\r\n\r\n for x in TRANSPORT_CENSUS_HEADERS:\r\n averages[x] = data[x].mean()\r\n epc_averages[x] = data.loc[data['Census Tract'].isin(epc['Census Tract'])][x].mean()\r\n transport_epc = data.loc[data['Census Tract'].isin(epc['Census Tract'])]\r\n\r\n normalized_data = data.copy()\r\n normalized_data[TRANSPORT_CENSUS_HEADERS] = preprocessing.MinMaxScaler().fit_transform(\r\n normalized_data[TRANSPORT_CENSUS_HEADERS])\r\n\r\n return 
transport_epc, data, normalized_data, averages, epc_averages\r\n\r\n\r\ndef get_equity_geographies(epc: pd.DataFrame, coeff: float) -> pd.DataFrame:\r\n concentration_thresholds = dict()\r\n averages = dict()\r\n\r\n for header in (EQUITY_CENSUS_POC_LOW_INCOME + EQUITY_CENSUS_REMAINING_HEADERS):\r\n averages[header] = epc[header + ' (%)'].mean()\r\n concentration_thresholds[header] = averages[header] + coeff * epc[header + ' (%)'].std()\r\n epc[header + '_check'] = epc[header + ' (%)'].apply(lambda x: x > concentration_thresholds[header])\r\n epc[header + '_check'] = epc[header + '_check'].astype(int)\r\n\r\n epc['criteria_A'] = epc[[x + '_check' for x in EQUITY_CENSUS_POC_LOW_INCOME]].sum(axis=1, numeric_only=True)\r\n epc['Criteria A'] = epc['criteria_A'].apply(lambda x: bool(x == 2))\r\n\r\n epc['criteria_B'] = epc[[x + '_check' for x in EQUITY_CENSUS_REMAINING_HEADERS]].sum(axis=1, numeric_only=True)\r\n temp = epc['200% Below Poverty Level (%)'].apply(lambda x: x > concentration_thresholds['200% Below Poverty Level'])\r\n epc['Criteria B'] = (epc['criteria_B'].apply(lambda x: bool(x >= 3)) + temp.astype(int)) == 2\r\n\r\n df = epc\r\n\r\n epc['Criteria'] = epc[['Criteria A', 'Criteria B']].apply(\r\n lambda x: 'Equity Geography (Meets Both Criteria)' if (x['Criteria A'] & x['Criteria B']) else\r\n ('Equity Geography (Meets Criteria A)' if x['Criteria A'] else\r\n ('Equity Geography (Meets Criteria B)' if x['Criteria B'] else 'Not selected as an Equity Geography')),\r\n axis=1)\r\n # epc['Criteria'] = epc.apply(lambda x: 'Both' if (x['Criteria A'] | x['Criteria B']) else 'Other')\r\n epc = epc.loc[(epc['Criteria A'] | epc['Criteria B'])]\r\n df['Category'] = (df['Criteria A'].apply(lambda x: bool(x)) | df['Criteria B'].apply(lambda x: bool(x)))\r\n df['Category'] = df['Category'].apply(lambda x: 'Equity Geography' if x is True else 'Other')\r\n\r\n epc_averages = {}\r\n for header in (EQUITY_CENSUS_POC_LOW_INCOME + EQUITY_CENSUS_REMAINING_HEADERS):\r\n epc_averages[header] = epc[header + ' (%)'].mean()\r\n\r\n return epc, df, concentration_thresholds, averages, epc_averages\r\n\r\n\r\ndef get_existing_policies(df: pd.DataFrame) -> pd.DataFrame:\r\n policy_df = policy_query()\r\n temp_df = df.merge(policy_df, on='county_id')\r\n if not temp_df.empty and len(df) == len(temp_df):\r\n if st._is_running_with_streamlit:\r\n if st.checkbox('Use existing policy data?'):\r\n return temp_df\r\n else:\r\n res = input('Policy data found in database. Use this data? [Y/n]').strip()\r\n if res.lower() == 'y' or res.lower() == 'yes' or res == '':\r\n return temp_df\r\n\r\n else:\r\n policy_df = pd.read_excel('Policy Workbook.xlsx', sheet_name='Analysis Data')\r\n temp_df = df.merge(policy_df, on='County Name')\r\n if not temp_df.empty and len(df) == len(temp_df):\r\n return temp_df\r\n else:\r\n print(\r\n \"INFO: Policy data not found. 
Check that you've properly filled in the Analysis Data page in `Policy Workbook.xlsx` with the counties you're analyzing.\")\r\n\r\n return df\r\n\r\n\r\[email protected]_memo(ttl=1200)\r\ndef get_county_data(state: str, county_ids: list = None, policy: bool = False):\r\n df = get_all_county_data(state, county_ids)\r\n\r\n df = clean_data(df)\r\n return df\r\n\r\n\r\[email protected]_memo(ttl=3600)\r\ndef get_national_county_data() -> pd.DataFrame:\r\n frames = []\r\n for s in STATES:\r\n tmp_df = get_county_data(s)\r\n frames.append(tmp_df)\r\n df = pd.concat(frames)\r\n return df\r\n\r\n\r\[email protected]_memo(ttl=3600)\r\ndef get_national_county_geom_data(counties: list) -> pd.DataFrame:\r\n frames = []\r\n for c in counties:\r\n tmp_df = get_county_geoms()\r\n frames.append(tmp_df)\r\n df = pd.concat(frames)\r\n return df\r\n\r\n\r\ndef test_new_counties():\r\n conn = init_connection()\r\n cur = conn.cursor()\r\n query = f\"SELECT * FROM esri_counties;\"\r\n cur.execute(query)\r\n results = cur.fetchall()\r\n conn.commit()\r\n colnames = [desc[0] for desc in cur.description]\r\n esri_df = pd.DataFrame(results, columns=colnames)\r\n\r\n query = f\"SELECT * FROM id_index;\"\r\n cur.execute(query)\r\n results = cur.fetchall()\r\n conn.commit()\r\n colnames = [desc[0] for desc in cur.description]\r\n idx_df = pd.DataFrame(results, columns=colnames)\r\n idx_df.drop(['index', 'tract_id', 'state_id', 'state_name'], axis=1, inplace=True)\r\n\r\n new_df = esri_df.copy()\r\n # new_df = new_df[['state_name', 'name', 'state_fips', 'fips', 'wkb_geometry', 'sqmi']]\r\n new_df.rename({\"state_fips\": \"state_id\"}, axis=1, inplace=True)\r\n new_df['county_id'] = new_df['fips'].astype(int)\r\n new_df.drop(['wkb_geometry', 'shape_area', 'shape_length', 'name'], inplace=True, axis=1)\r\n print(new_df.shape)\r\n print(new_df.head(n=200))\r\n\r\n # new_df.to_csv('Output/new_county_geoms.csv')\r\n # new_df.drop(['county_name'], axis=1, inplace=True)\r\n\r\n merge_df = pd.merge(new_df, idx_df, on='county_id', how='left', validate='one_to_many')\r\n merge_df.drop_duplicates(inplace=True)\r\n # merge_df.drop(['state_name_y','name'], axis=1, inplace=True)\r\n print(merge_df.shape)\r\n # col5, col6 = st.columns(2)\r\n # with col5:\r\n print(merge_df.head(n=150))\r\n merge_df.to_csv('Output/demographics.csv')\r\n # with col6:\r\n # st.write(merge_df.tail(n=150))\r\n\r\n\r\nif __name__ == '__main__':\r\n # latest_data_census_tracts('California', ['Contra Costa County'],\r\n # ['household_technology_availability', 'disability_status'])\r\n # args = {k: v for k, v in [i.split('=') for i in sys.argv[1:] if '=' in i]}\r\n # table = args.get('--table', None)\r\n # output_format = args.get('--output', None)\r\n #\r\n # if table:\r\n # df = latest_data_single_table(table)\r\n # else:\r\n # df = latest_data_all_tables()\r\n #\r\n # if output_format:\r\n # if table:\r\n # path = output_data(df, table_name=table, ext=output_format)\r\n # else:\r\n # path = output_data(df, ext=output_format)\r\n # else:\r\n # if table:\r\n # path = output_data(df, table_name=table)\r\n # else:\r\n # path = output_data(df)\r\n #\r\n # print('Successful query returned. Output at {}.'.format(path))\r\n test_new_counties()\r\n # df=pd.read_csv('Output/clean_counties.csv')\r\n # print('writing....')\r\n # write_table(df,'county_geoms')\r\n # df=pd.read_csv('Output/demographics.csv')\r\n # print('writing....')\r\n # write_table(df,'county_demographics')\r\n" ]
[ [ "pandas.set_option", "pandas.concat" ], [ "pandas.concat", "pandas.merge", "pandas.read_excel", "pandas.Series", "pandas.DataFrame", "sklearn.preprocessing.MinMaxScaler", "pandas.read_sql" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
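queries.py above repeats one fetch idiom in nearly every function: execute a query, recover column names from cursor.description, and wrap the rows in a DataFrame. Factored out as a sketch (the helper name is ours; conn is any DB-API connection, such as one from psycopg2.connect; the commit after a read mirrors the original's pattern):

```python
import pandas as pd

def query_to_dataframe(conn, query: str) -> pd.DataFrame:
    """Execute a read-only query and return the rows as a labeled DataFrame."""
    cur = conn.cursor()
    cur.execute(query)
    colnames = [desc[0] for desc in cur.description]  # column labels from the driver
    results = cur.fetchall()
    conn.commit()
    return pd.DataFrame(results, columns=colnames)
```

pandas.read_sql(query, con=conn) collapses the same steps into one call, which is what read_table in this file already does; the manual variant only earns its keep when you need the cursor anyway.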
raalesir/sim
[ "9bd994b1dedd05ca88ab9f25cbca3bc28cadc04b" ]
[ "sim/overlaps.py" ]
[ "\"\"\"\n    calculates number of configurations for a ring grid phantom polymer and\n    overlap distribution for the chain.\n\"\"\"\n\nimport math\nfrom math import comb\nfrom collections import Counter\nimport json\nimport os\nimport matplotlib.pyplot as plt\n\n\nclass Overlap:\n    \"\"\"\n    calculating number of overlaps for a ring grid polymer of given N\n    \"\"\"\n\n    def __init__(self, N):\n        \"\"\"\n        N -- number of monomers\n        \"\"\"\n        self.n = N\n        self.n_conformations = self.n_conform()\n        self.overlaps_hist = None\n        self.indexes = self.calculate_steps()\n        self.dict = self.make_steps()\n        self.encoded_conformations = None\n\n    def __str__(self):\n        return \"overlaps for %i beads has %i conformations\" % (self.n, self.n_conformations)\n\n    def n_conform(self):\n        \"\"\"\n        total number of ring conformations on the cubic lattice\n        (sum over partitions of the n steps into +/- x, y, z moves)\n        \"\"\"\n        r = 0\n        for i in range(self.n // 2 + 1):\n            for j in range(self.n // 2 - i + 1):\n                r = r + math.factorial(self.n) / math.factorial(i)**2 / math.factorial(j)**2 / math.factorial(\n                    self.n // 2 - i - j)**2\n        return r\n\n    def fun(self, d, res):\n        if sum(d.values()) == 0:\n            Overlap.keep_result(res)\n            return\n        else:\n            for k in [item for item in d.keys() if d[item] > 0]:\n                r = res\n                r += k\n\n                tmp = d.copy()\n                tmp[k] -= 1\n\n                self.fun(tmp, r)\n\n    def calculate_steps(self):\n        \"\"\"\n        given number of monomers, n, produce the indexes (i,j,k)\n        as the number of steps to make in positive and negative direction\n        \"\"\"\n        res = []\n        for i in range(self.n // 2 + 1):\n            for j in range(self.n // 2 - i + 1):\n                res.append((i, j, self.n // 2 - i - j))\n        return res\n\n    def make_step(self, tup):\n        \"\"\"\n        encodes a single index as a step-budget dict\n        \"\"\"\n        res = []\n        d = {}\n        d['i+'] = tup[0]\n        d['i-'] = tup[0]\n        d['j+'] = tup[1]\n        d['j-'] = tup[1]\n        d['k+'] = tup[2]\n        d['k-'] = tup[2]\n\n        res.append(d)\n\n        return res\n\n    def make_steps(self):\n        \"\"\"\n        encodes indexes in dicts of remaining steps per direction\n        \"\"\"\n        res = []\n        for tup in self.indexes:\n            d = {}\n            d['i+'] = tup[0]\n            d['i-'] = tup[0]\n            d['j+'] = tup[1]\n            d['j-'] = tup[1]\n            d['k+'] = tup[2]\n            d['k-'] = tup[2]\n\n            res.append(d)\n        return res\n\n    @staticmethod\n    def keep_result(data):\n        \"\"\"\n        appends a finished conformation string to the shared result list\n        \"\"\"\n        Overlap.keep_result.all.append(data)\n\n    def calculate_all_conformations(self):\n        Overlap.keep_result.all = []\n\n        for entry in self.dict:\n            self.fun(entry, '')\n\n    def encode_single_conformation(self, conformation):\n        \"\"\"\n        converts a string of moves into a list of lattice coordinates\n        \"\"\"\n        conf_encoded = []\n        start = [0, 0, 0]\n        for symbol in [conformation[i:i + 2] for i in range(0, len(conformation), 2)]:\n            if symbol == 'k+':\n                start[2] += 1\n            elif symbol == 'k-':\n                start[2] -= 1\n            elif symbol == 'i+':\n                start[0] += 1\n            elif symbol == 'i-':\n                start[0] -= 1\n            elif symbol == 'j+':\n                start[1] += 1\n            elif symbol == 'j-':\n                start[1] -= 1\n            conf_encoded.append(tuple(start.copy()))\n\n        return conf_encoded\n\n    def encode_to_coords(self):\n        \"\"\"\n        encodes all stored conformations to coordinates\n        \"\"\"\n        res = []\n        for conformation in Overlap.keep_result.all:\n            res.append(self.encode_single_conformation(conformation))\n\n        self.encoded_conformations = res\n\n    def get_overlaps(self):\n        \"\"\"\n        counts coincident monomer pairs for every conformation\n        \"\"\"\n        overlaps = []\n        for conf in self.encoded_conformations:\n            overlaps.append(sum([comb(lst, 2) for lst in Counter(conf).values()]))\n\n        counts = Counter(overlaps)\n\n        self.overlaps_hist = dict(counts)\n\n    def get_overlaps_histogram(self):\n\n        fname = \"counts_%i.json\" % (self.n)\n        if not os.path.isfile(fname):\n\n            self.calculate_all_conformations()\n            self.encode_to_coords()\n            self.get_overlaps()\n\n        else:\n            dct = open(fname, 'r').read()\n            dct = json.loads(dct)\n            self.overlaps_hist = dict(zip([int(el) for el in dct.keys()], dct.values()))\n\n        return self.overlaps_hist\n\n    def save_overlaps_histogram(self):\n        fname = \"counts_%i.json\" % (self.n)\n        if not os.path.isfile(fname):\n            json.dump(self.overlaps_hist, open(fname, 'w'))\n\n    def plot_overlaps_histogram(self):\n\n        self.overlaps_hist = dict(zip(self.overlaps_hist.keys(), [v / sum(self.overlaps_hist.values()) for v in self.overlaps_hist.values()]))\n        plt.bar(self.overlaps_hist.keys(), self.overlaps_hist.values())\n        plt.yscale('log')\n        plt.xlabel('number of overlaps')\n        plt.ylabel('number of conformations')\n\n\nif __name__ == \"__main__\":\n\n    overlaps = Overlap(8)\n    print(overlaps)\n\n    print(overlaps.get_overlaps_histogram())\n\n    overlaps.save_overlaps_histogram()\n\n    overlaps.plot_overlaps_histogram()\n" ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.yscale", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
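The counting step buried in get_overlaps is worth pulling out: a conformation's overlap count is the number of coincident monomer pairs, i.e. the sum of C(k, 2) over the occupancy k of each lattice site. A tiny runnable check (the helper name is ours):

```python
from collections import Counter
from math import comb  # Python 3.8+

def count_overlaps(conformation) -> int:
    """Pairs of monomers sharing a lattice site: sum of C(k, 2) over occupancies."""
    return sum(comb(k, 2) for k in Counter(conformation).values())

# three beads on one site -> C(3,2)=3 pairs, two on another -> 1 pair
assert count_overlaps([(0, 0, 0), (0, 0, 0), (0, 0, 0), (1, 0, 0), (1, 0, 0)]) == 4
```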
dpressel/ComerNet
[ "db7c93e936f33c814c6dc6bd7b765ab660f59f85" ]
[ "make_emb.py" ]
[ "import torch\nfrom convert_mw import bert,tokenizer,bert_type\nfrom pytorch_pretrained_bert import BertModel\ntorch.cuda.set_device(0)\ntorch.cuda.manual_seed(1234)\ntorch.manual_seed(1234)\nbmodel = BertModel.from_pretrained(bert_type)\nbmodel.eval()\nbmodel.to('cuda')\n\ntgtD=torch.load('data/save_data.tgt.dict')\nemb=[]\nitl={i:v for (v,i) in tgtD.items()}\nfor i in range(len(tgtD)):\n label = itl[i]\n x1=tokenizer.convert_tokens_to_ids(label.split())\n if i > len(tgtD)-5:\n print(label)\n print(x1)\n encoded_layers, _ =bmodel(torch.LongTensor(x1).cuda().unsqueeze(0),token_type_ids=None, attention_mask=None) \n x=torch.stack(encoded_layers,-1).mean(-1).mean(-2)\n emb.append(x.detach().cpu())\nx=torch.cat(emb,0)\ntorch.save(x,'emb_tgt_mw.pt')\nprint(x.shape)\nprint(x.numpy())\n" ]
[ [ "torch.LongTensor", "torch.cuda.manual_seed", "torch.cat", "torch.cuda.set_device", "torch.manual_seed", "torch.load", "torch.stack", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
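make_emb.py pools each label's BERT representation by stacking the encoder layers along a new trailing axis and averaging twice: once over layers, once over tokens. The shape bookkeeping is the easy thing to get wrong, so here is the same pooling on random tensors (no model download needed; the sizes are illustrative):

```python
import torch

# stand-ins for the 12 encoder outputs of shape [batch=1, seq_len, hidden]
seq_len, hidden, n_layers = 5, 768, 12
encoded_layers = [torch.randn(1, seq_len, hidden) for _ in range(n_layers)]

x = torch.stack(encoded_layers, -1)  # [1, seq_len, hidden, n_layers]
x = x.mean(-1)                       # average over layers -> [1, seq_len, hidden]
x = x.mean(-2)                       # average over tokens -> [1, hidden]
assert x.shape == (1, hidden)
```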
christinazavou/O-CNN
[ "88cda0aea9bf07e14686fff1fe476e8080296dcf", "88cda0aea9bf07e14686fff1fe476e8080296dcf" ]
[ "tensorflow/mycode/src/tf_layer_utils.py", "tensorflow/script/network_ae.py" ]
[ "import tensorflow as tf\n\n\ndef make_weights(shape, name='weights'):\n    return tf.Variable(tf.truncated_normal(shape=shape, stddev=0.05), name=name)\n\n\ndef make_biases(shape, name='biases'):\n    return tf.Variable(tf.constant(0.05, shape=shape), name=name)\n\n\ndef convolution_layer(prev_layer, f_size, inp_c, out_c, stride_s):\n    _weights = make_weights([f_size, f_size, inp_c, out_c])\n    _bias = make_biases([out_c])\n    return tf.add(tf.nn.conv2d(prev_layer, _weights, [1, stride_s, stride_s, 1], padding='SAME'), _bias)\n\n\ndef pool_layer(prev_layer, size, stride_s):\n    kernel = [1, size, size, 1]\n    stride = [1, stride_s, stride_s, 1]\n    return tf.nn.max_pool(prev_layer, kernel, stride, padding='SAME')\n\n\ndef activation_layer(prev_layer, type):\n    if type == 'relu':\n        return tf.nn.relu(prev_layer)\n    else:\n        raise NotImplementedError('unsupported activation type')\n\n\ndef flat_layer(inp):\n    input_size = inp.get_shape().as_list()\n    if len(input_size) != 4:\n        raise NotImplementedError('flat layer unsupported for input with dim != 4')\n    output_size = input_size[-1] * input_size[-2] * input_size[-3]\n    return tf.reshape(inp, [-1, output_size]), output_size\n\n\ndef fc_layer(prev_layer, h_in, h_out):\n    _weights = make_weights([h_in, h_out])\n    _bias = make_biases([h_out])\n    return tf.add(tf.matmul(prev_layer, _weights), _bias)\n\n\ndef dropout_layer(prev_layer, prob):\n    return tf.nn.dropout(prev_layer, prob)\n", "import tensorflow as tf\nfrom ocnn import *\n\n\nclass AutoEncoderOcnn:\n  def __init__(self, flags):\n    self.flags = flags\n\n  def octree_encoder(self, octree, training, reuse=False):\n    debug_checks = {}\n    flags = self.flags\n    depth, nout = flags.depth, flags.nout\n    channel = [4, nout, 128, 64, 32, 16, 8]\n    with tf.variable_scope('ocnn_encoder', reuse=reuse):\n      with tf.variable_scope('signal_gt'):\n        data = octree_property(octree, property_name=\"feature\", dtype=tf.float32, depth=depth, channel=flags.channel)\n        debug_checks['ocnn_encoder/signal_gt/fd'] = data\n        data = tf.reshape(data, [1, flags.channel, -1, 1])\n        debug_checks['ocnn_encoder/signal_gt/fdreshaped'] = data\n\n      for d in range(depth, 1, -1):\n        with tf.variable_scope('depth_%d' % d):\n          data = octree_conv_bn_relu(data, octree, d, channel[d], training)\n          debug_checks['ocnn_encoder/depth_%d/convolved_data' % d] = data\n          data, _ = octree_max_pool(data, octree, d)\n          debug_checks['ocnn_encoder/depth_%d/convolved_data_pooled' % d] = data\n\n      with tf.variable_scope('depth_1'):\n        data = downsample(data, channel[1], training)\n        debug_checks['ocnn_encoder/depth_1/downsampled_data'] = data\n\n      with tf.variable_scope('code'):\n        code = conv2d_bn(data, channel[1], kernel_size=1, stride=1, training=training)\n        code = tf.nn.tanh(code)\n        debug_checks['ocnn_encoder/code'] = code\n    return code, debug_checks\n\n  def octree_decoder(self, code, octree, training, reuse=False):\n    debug_checks = {}\n    flags = self.flags\n    depth = flags.depth\n    channel = [512, 256, 128, 64, 32, 16, 8]\n    with tf.variable_scope('ocnn_decoder', reuse=reuse):\n      label_gt = [None] * 10\n      with tf.variable_scope('label_gt'):\n        for d in range(2, depth + 1):\n          label = octree_property(octree, property_name=\"split\", dtype=tf.float32, depth=d, channel=1)\n          debug_checks['ocnn_decoder/label_gt/split'] = label\n          label_gt[d] = tf.reshape(tf.cast(label, dtype=tf.int32), [-1])\n          debug_checks['ocnn_decoder/label_gt/split_reshaped'] = label_gt\n\n      with tf.variable_scope('signal_gt'):\n        signal_gt = octree_property(octree, property_name=\"feature\", dtype=tf.float32, depth=depth, channel=flags.channel)\n        debug_checks['ocnn_decoder/signal_gt/feature'] = signal_gt\n        signal_gt = tf.reshape(signal_gt, [1, flags.channel, -1, 1])\n        debug_checks['ocnn_decoder/signal_gt/feature_reshaped'] = signal_gt\n\n      data = code\n      with tf.variable_scope('depth_1'):\n        data = upsample(data, channel[1], training)\n        debug_checks['ocnn_decoder/depth_1'] = data\n\n      loss, accu = [], []\n      for d in range(2, depth + 1):\n        with tf.variable_scope('depth_%d' % d):\n          data = octree_upsample(data, octree, d - 1, channel[d], training)\n          debug_checks['ocnn_decoder/depth_%d/upsampled' % d] = data\n          data = octree_conv_bn_relu(data, octree, d, channel[d], training)\n          debug_checks['ocnn_decoder/depth_%d/upsampled_convolved' % d] = data\n\n        with tf.variable_scope('predict_%d' % d):\n          logit, label = predict_label(data, 2, 32, training)\n          debug_checks['ocnn_decoder/predict_%d/logit' % d] = logit\n          debug_checks['ocnn_decoder/predict_%d/label' % d] = label\n\n        with tf.variable_scope('loss_%d' % d):\n          logit = tf.transpose(tf.squeeze(logit, [0, 3]))  # (1, C, H, 1) -> (H, C)\n          debug_checks['ocnn_decoder/loss_%d/logit' % d] = logit\n          loss.append(softmax_loss(logit, label_gt[d], num_class=2))\n          accu.append(label_accuracy(label, label_gt[d]))\n\n        if d == depth:\n          with tf.variable_scope('regress_%d' % d):\n            signal = predict_signal(data, flags.channel, 32, training)\n            debug_checks['ocnn_decoder/regress_%d/signal' % d] = signal\n            loss.append(regress_loss(signal, signal_gt))\n\n    return loss, accu, debug_checks\n\n  def octree_decode_shape(self, code, training, reuse=False):\n    debug_checks = {}\n    flags = self.flags\n    depth = flags.depth\n    channel = [512, 256, 128, 64, 32, 16, 8]\n    with tf.variable_scope('ocnn_decoder', reuse=reuse):\n      with tf.variable_scope('octree_0'):\n        displace = False if flags.channel < 4 else True\n        octree = octree_new(batch_size=1, channel=flags.channel, has_displace=displace)\n        debug_checks[\"ocnn_decoder/octree_0\"] = octree\n      with tf.variable_scope('octree_1'):\n        octree = octree_grow(octree, target_depth=1, full_octree=True)\n        debug_checks[\"ocnn_decoder/octree_1\"] = octree\n      with tf.variable_scope('octree_2'):\n        octree = octree_grow(octree, target_depth=2, full_octree=True)\n        debug_checks[\"ocnn_decoder/octree_2\"] = octree\n\n      data = code\n      with tf.variable_scope('depth_1'):\n        data = upsample(data, channel[1], training)\n        debug_checks[\"ocnn_decoder/depth_1/upsampled_data\"] = data\n\n      for d in range(2, depth + 1):\n        with tf.variable_scope('depth_%d' % d):\n          data = octree_upsample(data, octree, d - 1, channel[d], training)\n          debug_checks[\"ocnn_decoder/depth_%d/upsampled_data\" % d] = data\n          data = octree_conv_bn_relu(data, octree, d, channel[d], training)\n          debug_checks[\"ocnn_decoder/depth_%d/convolved_data\" % d] = data\n\n        with tf.variable_scope('predict_%d' % d):\n          _, label = predict_label(data, 2, 32, training)\n          debug_checks[\"ocnn_decoder/predict_%d/label\" % d] = label\n\n        with tf.variable_scope('octree_%d' % d, reuse=True):\n          octree = octree_update(octree, label, depth=d, mask=1)\n          debug_checks[\"ocnn_decoder/octree_%d/updated_octree\" % d] = octree\n        # octree = octree_update(octree, label_gt[d], depth=d, mask=1)\n        if d < depth:\n          with tf.variable_scope('octree_%d' % (d + 1)):\n            octree = octree_grow(octree, target_depth=d + 1, full_octree=False)\n            debug_checks[\"ocnn_decoder/octree_%d/grown_octree\" % d] = octree\n        else:\n          with tf.variable_scope('regress_%d' % d):\n            signal = predict_signal(data, flags.channel, 32, training)\n            signal = normalize_signal(signal)\n            debug_checks[\"ocnn_decoder/regress_%d/normalized_signal\" % d] = signal\n            signal = octree_mask(signal, label, mask=0)\n            debug_checks[\"ocnn_decoder/regress_%d/normalized_signal_masked\" % d] = signal\n          with tf.variable_scope('octree_%d' % d, reuse=True):\n            octree = octree_set_property(octree, signal, property_name=\"feature\", depth=depth)\n            debug_checks[\"ocnn_decoder/octree_%d/feature_set_octree\" % d] = octree\n    return octree, debug_checks\n\n\nclass AutoEncoderResnet:\n  def __init__(self, flags):\n    self.flags = flags\n\n  def octree_encoder(self, octree, training, reuse=False):\n    debug_checks = {}\n    flags = self.flags\n    depth, nout = flags.depth, flags.nout\n    channels = [4, nout, 256, 256, 128, 64, 32, 16]\n    with tf.variable_scope('ocnn_encoder', reuse=reuse):\n      with tf.variable_scope('signal_gt'):\n        data = octree_property(octree, property_name=\"feature\", dtype=tf.float32, depth=depth, channel=flags.channel)\n        debug_checks['ocnn_encoder/signal_gt/fd'] = data\n        data = tf.reshape(data, [1, flags.channel, -1, 1])\n        debug_checks['ocnn_encoder/signal_gt/fdreshaped'] = data\n\n      with tf.variable_scope(\"front\"):\n        data = octree_conv_bn_relu(data, octree, depth, channels[depth], training)\n        debug_checks['ocnn_encoder/front/convolved_data'] = data\n\n      for d in range(depth, 2, -1):\n        for i in range(0, flags.resblock_num):\n          with tf.variable_scope('resblock_%d_%d' % (d, i)):\n            data = octree_resblock(data, octree, d, channels[d], 1, training)\n            debug_checks['ocnn_encoder/resblock_%d_%d/octree' % (d, i)] = data\n        with tf.variable_scope('down_%d' % d):\n          data = octree_conv_bn_relu(data, octree, d, channels[d - 1], training, stride=2, kernel_size=[2])\n          debug_checks['ocnn_encoder/down_%d/convolved_data' % d] = data\n\n      with tf.variable_scope('code'):\n        # code = conv2d_bn(data, channels[1], kernel_size=1, stride=1, training=training)\n        debug_checks['ocnn_encoder/code/beforeconv'] = data\n        code = octree_conv1x1_bn(data, flags.nout, training=training)\n        debug_checks['ocnn_encoder/code/afterconv'] = code\n        code = tf.nn.tanh(code)\n    return code, debug_checks\n\n  def octree_decoder(self, code, octree, training, reuse=False):\n    debug_checks = {}\n    flags = self.flags\n    depth = flags.depth\n    channels = [4, 64, 256, 256, 128, 64, 32, 16]\n    with tf.variable_scope('ocnn_decoder', reuse=reuse):\n      data = code\n      loss, accu = [], []\n      for d in range(2, depth + 1):\n        for i in range(0, flags.resblock_num):\n          with tf.variable_scope('resblock_%d_%d' % (d, i)):\n            data = octree_resblock(data, octree, d, channels[d], 1, training)\n            debug_checks[\"ocnn_decoder/resblock_%d_%d/octree\" % (d, i)] = data\n\n        with tf.variable_scope('predict_%d' % d):\n          logit, label = predict_label(data, 2, 32, training)\n          debug_checks[\"ocnn_decoder/predict_%d/label\" % d] = label\n          logit = tf.transpose(tf.squeeze(logit, [0, 3]))  # (1, C, H, 1) -> (H, C)\n          debug_checks[\"ocnn_decoder/predict_%d/logit\" % d] = logit\n\n        with tf.variable_scope('loss_%d' % d):\n          with tf.variable_scope('label_gt'):\n            label_gt = octree_property(octree, property_name=\"split\", dtype=tf.float32, depth=d, channel=1)\n            debug_checks[\"ocnn_decoder/loss_%d/label_gt/split\" % d] = label_gt\n            label_gt = tf.reshape(tf.cast(label_gt, dtype=tf.int32), [-1])\n            debug_checks[\"ocnn_decoder/loss_%d/label_gt/split_reshaped\" % d] = label_gt\n          loss.append(softmax_loss(logit, label_gt, num_class=2))\n          accu.append(label_accuracy(label, label_gt))\n\n        if d == depth:\n          with tf.variable_scope('regress_%d' % d):\n            signal = predict_signal(data, flags.channel, 32, training)\n            debug_checks[\"ocnn_decoder/regress_%d/predict_signal\" % d] = signal\n\n          with tf.variable_scope('loss_regress'):\n            with tf.variable_scope('signal_gt'):\n              signal_gt = octree_property(octree, property_name=\"feature\", dtype=tf.float32, depth=depth, channel=flags.channel)\n              debug_checks[\"ocnn_decoder/loss_regress/signal_gt/featureprop\"] = signal_gt\n              signal_gt = tf.reshape(signal_gt, [1, flags.channel, -1, 1])\n              debug_checks[\"ocnn_decoder/loss_regress/signal_gt/featureprop_reshaped\"] = signal_gt\n            loss.append(regress_loss(signal, signal_gt))\n\n        if d < depth:\n          with tf.variable_scope('up_%d' % d):\n            data = octree_deconv_bn_relu(data, octree, d, channels[d - 1], training, stride=2, kernel_size=[2])\n            debug_checks[\"ocnn_decoder/up_%d/convolved_data\" % d] = data\n    return loss, accu, debug_checks\n\n  def octree_decode_shape(self, code, training, reuse=False):\n    debug_checks = {}\n    flags = self.flags\n    depth = flags.depth\n    channels = [4, 64, 256, 256, 128, 64, 32, 16]\n    with tf.variable_scope('ocnn_decoder', reuse=reuse):\n      with tf.variable_scope('octree_0'):\n        displace = False if flags.channel < 4 else True\n        octree = octree_new(batch_size=1, channel=flags.channel, has_displace=displace)\n        debug_checks[\"ocnn_decoder/octree_0\"] = octree\n      with tf.variable_scope('octree_1'):\n        octree = octree_grow(octree, target_depth=1, full_octree=True)\n        debug_checks[\"ocnn_decoder/octree_1\"] = octree\n      with tf.variable_scope('octree_2'):\n        octree = octree_grow(octree, target_depth=2, full_octree=True)\n        debug_checks[\"ocnn_decoder/octree_2\"] = octree\n\n      data = code\n      for d in range(2, depth + 1):\n        for i in range(0, flags.resblock_num):\n          with tf.variable_scope('resblock_%d_%d' % (d, i)):\n            data = octree_resblock(data, octree, d, channels[d], 1, training)\n            debug_checks[\"ocnn_decoder/resblock_%d_%d/data\" % (d, i)] = data\n\n        with tf.variable_scope('predict_%d' % d):\n          _, label = predict_label(data, 2, 32, training)\n          debug_checks[\"ocnn_decoder/predict_%d/label\" % d] = label\n\n        with tf.variable_scope('octree_%d' % d, reuse=True):\n          octree = octree_update(octree, label, depth=d, mask=1)\n          debug_checks[\"ocnn_decoder/octree_%d/updated_octree\" % d] = octree\n        if d < depth:\n          with tf.variable_scope('octree_%d' % (d + 1)):\n            octree = octree_grow(octree, target_depth=d + 1, full_octree=False)\n            debug_checks[\"ocnn_decoder/octree_%d/grown_octree\" % d] = octree\n        else:\n          with tf.variable_scope('regress_%d' % d):\n            signal = predict_signal(data, flags.channel, 32, training)\n            signal = normalize_signal(signal)\n            debug_checks[\"ocnn_decoder/regress_%d/normalized_signal\" % d] = signal\n            signal = octree_mask(signal, label, mask=0)\n            debug_checks[\"ocnn_decoder/regress_%d/normalized_signal_masked\" % d] = signal\n          with tf.variable_scope('octree_%d' % d, reuse=True):\n            octree = octree_set_property(octree, signal, property_name=\"feature\", depth=depth)\n            debug_checks[\"ocnn_decoder/octree_%d/feature_set_octree\" % d] = octree\n\n        if d < depth:\n          with tf.variable_scope('up_%d' % d):\n            data = octree_deconv_bn_relu(data, octree, d, channels[d - 1], training, stride=2, kernel_size=[2])\n            debug_checks[\"ocnn_decoder/up_%d\" % d] = data\n    return octree, debug_checks\n\n\ndef make_autoencoder(flags):\n  if flags.name == 'ocnn':\n    return AutoEncoderOcnn(flags)\n  elif flags.name == 'resnet':\n    return AutoEncoderResnet(flags)\n  else:\n    raise ValueError('unknown autoencoder name: %s' % flags.name)\n" ]
[ [ "tensorflow.nn.relu", "tensorflow.matmul", "tensorflow.constant", "tensorflow.truncated_normal", "tensorflow.nn.max_pool", "tensorflow.reshape", "tensorflow.nn.dropout", "tensorflow.nn.conv2d" ], [ "tensorflow.reshape", "tensorflow.cast", "tensorflow.squeeze", "tensorflow.nn.tanh", "tensorflow.variable_scope" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
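make_autoencoder at the end of network_ae.py dispatches on flags.name. Below is a framework-free sketch of the same factory pattern using a registry instead of if/elif; the class and registry names here are illustrative stand-ins, not part of the record above.

from types import SimpleNamespace

class OcnnAE:
    def __init__(self, flags):
        self.flags = flags

class ResnetAE:
    def __init__(self, flags):
        self.flags = flags

# Registry variant of the dispatch: unknown names fail loudly instead of
# silently returning None.
AUTOENCODERS = {'ocnn': OcnnAE, 'resnet': ResnetAE}

def make_autoencoder(flags):
    try:
        return AUTOENCODERS[flags.name](flags)
    except KeyError:
        raise ValueError('unknown autoencoder name: %r' % flags.name)

print(type(make_autoencoder(SimpleNamespace(name='resnet'))).__name__)  # ResnetAE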
blahster/tf-models
[ "eaa4a000ef8e5f094764c42a590bb1c49b7b6f7c", "eaa4a000ef8e5f094764c42a590bb1c49b7b6f7c", "eaa4a000ef8e5f094764c42a590bb1c49b7b6f7c", "eaa4a000ef8e5f094764c42a590bb1c49b7b6f7c", "eaa4a000ef8e5f094764c42a590bb1c49b7b6f7c", "eaa4a000ef8e5f094764c42a590bb1c49b7b6f7c" ]
[ "syntaxnet/dragnn/python/sentence_io_test.py", "tutorials/image/cifar10/cifar10.py", "slim/nets/vgg.py", "slim/download_and_convert_data.py", "syntaxnet/dragnn/python/spec_builder.py", "tutorials/image/cifar10/cifar10_multi_gpu_train.py" ]
[ "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport os\nimport tensorflow as tf\n\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.platform import googletest\n\nfrom dragnn.python import sentence_io\nfrom syntaxnet import sentence_pb2\n\nimport syntaxnet.load_parser_ops\n\nFLAGS = tf.app.flags.FLAGS\nif not hasattr(FLAGS, 'test_srcdir'):\n FLAGS.test_srcdir = ''\nif not hasattr(FLAGS, 'test_tmpdir'):\n FLAGS.test_tmpdir = tf.test.get_temp_dir()\n\n\nclass ConllSentenceReaderTest(test_util.TensorFlowTestCase):\n\n def setUp(self):\n # This dataset contains 54 sentences.\n self.filepath = os.path.join(\n FLAGS.test_srcdir,\n 'syntaxnet/testdata/mini-training-set')\n self.batch_size = 20\n\n def assertParseable(self, reader, expected_num, expected_last):\n sentences, last = reader.read()\n self.assertEqual(expected_num, len(sentences))\n self.assertEqual(expected_last, last)\n for s in sentences:\n pb = sentence_pb2.Sentence()\n pb.ParseFromString(s)\n self.assertGreater(len(pb.token), 0)\n\n def testReadFirstSentence(self):\n reader = sentence_io.ConllSentenceReader(self.filepath, 1)\n sentences, last = reader.read()\n self.assertEqual(1, len(sentences))\n pb = sentence_pb2.Sentence()\n pb.ParseFromString(sentences[0])\n self.assertFalse(last)\n self.assertEqual(\n u'I knew I could do it properly if given the right kind of support .',\n pb.text)\n\n def testReadFromTextFile(self):\n reader = sentence_io.ConllSentenceReader(self.filepath, self.batch_size)\n self.assertParseable(reader, self.batch_size, False)\n self.assertParseable(reader, self.batch_size, False)\n self.assertParseable(reader, 14, True)\n self.assertParseable(reader, 0, True)\n self.assertParseable(reader, 0, True)\n\n def testReadAndProjectivize(self):\n reader = sentence_io.ConllSentenceReader(\n self.filepath, self.batch_size, projectivize=True)\n self.assertParseable(reader, self.batch_size, False)\n self.assertParseable(reader, self.batch_size, False)\n self.assertParseable(reader, 14, True)\n self.assertParseable(reader, 0, True)\n self.assertParseable(reader, 0, True)\n\n\nif __name__ == '__main__':\n googletest.main()\n", "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Builds the CIFAR-10 network.\n\nSummary of available functions:\n\n # Compute input images and labels for training. If you would like to run\n # evaluations, use inputs() instead.\n inputs, labels = distorted_inputs()\n\n # Compute inference on the model inputs to make a prediction.\n predictions = inference(inputs)\n\n # Compute the total loss of the prediction with respect to the labels.\n loss = loss(predictions, labels)\n\n # Create a graph to run one step of training with respect to the loss.\n train_op = train(loss, global_step)\n\"\"\"\n# pylint: disable=missing-docstring\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport re\nimport sys\nimport tarfile\n\nfrom six.moves import urllib\nimport tensorflow as tf\n\nimport cifar10_input\n\nFLAGS = tf.app.flags.FLAGS\n\n# Basic model parameters.\ntf.app.flags.DEFINE_integer('batch_size', 128,\n \"\"\"Number of images to process in a batch.\"\"\")\ntf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data',\n \"\"\"Path to the CIFAR-10 data directory.\"\"\")\ntf.app.flags.DEFINE_boolean('use_fp16', False,\n \"\"\"Train the model using fp16.\"\"\")\n\n# Global constants describing the CIFAR-10 data set.\nIMAGE_SIZE = cifar10_input.IMAGE_SIZE\nNUM_CLASSES = cifar10_input.NUM_CLASSES\nNUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN\nNUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL\n\n\n# Constants describing the training process.\nMOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.\nNUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.\nLEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.\nINITIAL_LEARNING_RATE = 0.1 # Initial learning rate.\n\n# If a model is trained with multiple GPUs, prefix all Op names with tower_name\n# to differentiate the operations. Note that this prefix is removed from the\n# names of the summaries when visualizing a model.\nTOWER_NAME = 'tower'\n\nDATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'\n\n\ndef _activation_summary(x):\n \"\"\"Helper to create summaries for activations.\n\n Creates a summary that provides a histogram of activations.\n Creates a summary that measures the sparsity of activations.\n\n Args:\n x: Tensor\n Returns:\n nothing\n \"\"\"\n # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training\n # session. 
This helps the clarity of presentation on tensorboard.\n tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)\n tf.summary.histogram(tensor_name + '/activations', x)\n tf.summary.scalar(tensor_name + '/sparsity',\n tf.nn.zero_fraction(x))\n\n\ndef _variable_on_cpu(name, shape, initializer):\n \"\"\"Helper to create a Variable stored on CPU memory.\n\n Args:\n name: name of the variable\n shape: list of ints\n initializer: initializer for Variable\n\n Returns:\n Variable Tensor\n \"\"\"\n with tf.device('/cpu:0'):\n dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)\n return var\n\n\ndef _variable_with_weight_decay(name, shape, stddev, wd):\n \"\"\"Helper to create an initialized Variable with weight decay.\n\n Note that the Variable is initialized with a truncated normal distribution.\n A weight decay is added only if one is specified.\n\n Args:\n name: name of the variable\n shape: list of ints\n stddev: standard deviation of a truncated Gaussian\n wd: add L2Loss weight decay multiplied by this float. If None, weight\n decay is not added for this Variable.\n\n Returns:\n Variable Tensor\n \"\"\"\n dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\n var = _variable_on_cpu(\n name,\n shape,\n tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var\n\n\ndef distorted_inputs():\n \"\"\"Construct distorted input for CIFAR training using the Reader ops.\n\n Returns:\n images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.\n labels: Labels. 1D tensor of [batch_size] size.\n\n Raises:\n ValueError: If no data_dir\n \"\"\"\n if not FLAGS.data_dir:\n raise ValueError('Please supply a data_dir')\n data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')\n images, labels = cifar10_input.distorted_inputs(data_dir=data_dir,\n batch_size=FLAGS.batch_size)\n if FLAGS.use_fp16:\n images = tf.cast(images, tf.float16)\n labels = tf.cast(labels, tf.float16)\n return images, labels\n\n\ndef inputs(eval_data):\n \"\"\"Construct input for CIFAR evaluation using the Reader ops.\n\n Args:\n eval_data: bool, indicating if one should use the train or eval data set.\n\n Returns:\n images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.\n labels: Labels. 
1D tensor of [batch_size] size.\n\n Raises:\n ValueError: If no data_dir\n \"\"\"\n if not FLAGS.data_dir:\n raise ValueError('Please supply a data_dir')\n data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')\n images, labels = cifar10_input.inputs(eval_data=eval_data,\n data_dir=data_dir,\n batch_size=FLAGS.batch_size)\n if FLAGS.use_fp16:\n images = tf.cast(images, tf.float16)\n labels = tf.cast(labels, tf.float16)\n return images, labels\n\n\ndef inference(images):\n \"\"\"Build the CIFAR-10 model.\n\n Args:\n images: Images returned from distorted_inputs() or inputs().\n\n Returns:\n Logits.\n \"\"\"\n # We instantiate all variables using tf.get_variable() instead of\n # tf.Variable() in order to share variables across multiple GPU training runs.\n # If we only ran this model on a single GPU, we could simplify this function\n # by replacing all instances of tf.get_variable() with tf.Variable().\n #\n # conv1\n with tf.variable_scope('conv1') as scope:\n kernel = _variable_with_weight_decay('weights',\n shape=[5, 5, 3, 64],\n stddev=5e-2,\n wd=0.0)\n conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')\n biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))\n pre_activation = tf.nn.bias_add(conv, biases)\n conv1 = tf.nn.relu(pre_activation, name=scope.name)\n _activation_summary(conv1)\n\n # pool1\n pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],\n padding='SAME', name='pool1')\n # norm1\n norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,\n name='norm1')\n\n # conv2\n with tf.variable_scope('conv2') as scope:\n kernel = _variable_with_weight_decay('weights',\n shape=[5, 5, 64, 64],\n stddev=5e-2,\n wd=0.0)\n conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')\n biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))\n pre_activation = tf.nn.bias_add(conv, biases)\n conv2 = tf.nn.relu(pre_activation, name=scope.name)\n _activation_summary(conv2)\n\n # norm2\n norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,\n name='norm2')\n # pool2\n pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],\n strides=[1, 2, 2, 1], padding='SAME', name='pool2')\n\n # local3\n with tf.variable_scope('local3') as scope:\n # Move everything into depth so we can perform a single matrix multiply.\n reshape = tf.reshape(pool2, [FLAGS.batch_size, -1])\n dim = reshape.get_shape()[1].value\n weights = _variable_with_weight_decay('weights', shape=[dim, 384],\n stddev=0.04, wd=0.004)\n biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))\n local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)\n _activation_summary(local3)\n\n # local4\n with tf.variable_scope('local4') as scope:\n weights = _variable_with_weight_decay('weights', shape=[384, 192],\n stddev=0.04, wd=0.004)\n biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))\n local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name)\n _activation_summary(local4)\n\n # linear layer(WX + b),\n # We don't apply softmax here because\n # tf.nn.sparse_softmax_cross_entropy_with_logits accepts the unscaled logits\n # and performs the softmax internally for efficiency.\n with tf.variable_scope('softmax_linear') as scope:\n weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],\n stddev=1/192.0, wd=0.0)\n biases = _variable_on_cpu('biases', [NUM_CLASSES],\n tf.constant_initializer(0.0))\n softmax_linear = tf.add(tf.matmul(local4, weights), biases, 
name=scope.name)\n _activation_summary(softmax_linear)\n\n return softmax_linear\n\n\ndef loss(logits, labels):\n \"\"\"Add L2Loss to all the trainable variables.\n\n Add summary for \"Loss\" and \"Loss/avg\".\n Args:\n logits: Logits from inference().\n labels: Labels from distorted_inputs or inputs(). 1-D tensor\n of shape [batch_size]\n\n Returns:\n Loss tensor of type float.\n \"\"\"\n # Calculate the average cross entropy loss across the batch.\n labels = tf.cast(labels, tf.int64)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=labels, logits=logits, name='cross_entropy_per_example')\n cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')\n tf.add_to_collection('losses', cross_entropy_mean)\n\n # The total loss is defined as the cross entropy loss plus all of the weight\n # decay terms (L2 loss).\n return tf.add_n(tf.get_collection('losses'), name='total_loss')\n\n\ndef _add_loss_summaries(total_loss):\n \"\"\"Add summaries for losses in CIFAR-10 model.\n\n Generates moving average for all losses and associated summaries for\n visualizing the performance of the network.\n\n Args:\n total_loss: Total loss from loss().\n Returns:\n loss_averages_op: op for generating moving averages of losses.\n \"\"\"\n # Compute the moving average of all individual losses and the total loss.\n loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')\n losses = tf.get_collection('losses')\n loss_averages_op = loss_averages.apply(losses + [total_loss])\n\n # Attach a scalar summary to all individual losses and the total loss; do the\n # same for the averaged version of the losses.\n for l in losses + [total_loss]:\n # Name each loss as '(raw)' and name the moving average version of the loss\n # as the original loss name.\n tf.summary.scalar(l.op.name + ' (raw)', l)\n tf.summary.scalar(l.op.name, loss_averages.average(l))\n\n return loss_averages_op\n\n\ndef train(total_loss, global_step):\n \"\"\"Train CIFAR-10 model.\n\n Create an optimizer and apply to all trainable variables. 
Add moving\n average for all trainable variables.\n\n Args:\n total_loss: Total loss from loss().\n global_step: Integer Variable counting the number of training steps\n processed.\n Returns:\n train_op: op for training.\n \"\"\"\n # Variables that affect learning rate.\n num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size\n decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)\n\n # Decay the learning rate exponentially based on the number of steps.\n lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,\n global_step,\n decay_steps,\n LEARNING_RATE_DECAY_FACTOR,\n staircase=True)\n tf.summary.scalar('learning_rate', lr)\n\n # Generate moving averages of all losses and associated summaries.\n loss_averages_op = _add_loss_summaries(total_loss)\n\n # Compute gradients.\n with tf.control_dependencies([loss_averages_op]):\n opt = tf.train.GradientDescentOptimizer(lr)\n grads = opt.compute_gradients(total_loss)\n\n # Apply gradients.\n apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)\n\n # Add histograms for trainable variables.\n for var in tf.trainable_variables():\n tf.summary.histogram(var.op.name, var)\n\n # Add histograms for gradients.\n for grad, var in grads:\n if grad is not None:\n tf.summary.histogram(var.op.name + '/gradients', grad)\n\n # Track the moving averages of all trainable variables.\n variable_averages = tf.train.ExponentialMovingAverage(\n MOVING_AVERAGE_DECAY, global_step)\n variables_averages_op = variable_averages.apply(tf.trainable_variables())\n\n with tf.control_dependencies([apply_gradient_op, variables_averages_op]):\n train_op = tf.no_op(name='train')\n\n return train_op\n\n\ndef maybe_download_and_extract():\n \"\"\"Download and extract the tarball from Alex's website.\"\"\"\n dest_directory = FLAGS.data_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (filename,\n float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')\n if not os.path.exists(extracted_dir_path):\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)\n", "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains model definitions for versions of the Oxford VGG network.\n\nThese model definitions were introduced in the following technical report:\n\n Very Deep Convolutional Networks For Large-Scale Image Recognition\n Karen Simonyan and Andrew Zisserman\n arXiv technical report, 2015\n PDF: http://arxiv.org/pdf/1409.1556.pdf\n ILSVRC 2014 Slides: http://www.robots.ox.ac.uk/~karen/pdf/ILSVRC_2014.pdf\n CC-BY-4.0\n\nMore information can be obtained from the VGG website:\nwww.robots.ox.ac.uk/~vgg/research/very_deep/\n\nUsage:\n with slim.arg_scope(vgg.vgg_arg_scope()):\n outputs, end_points = vgg.vgg_a(inputs)\n\n with slim.arg_scope(vgg.vgg_arg_scope()):\n outputs, end_points = vgg.vgg_16(inputs)\n\n@@vgg_a\n@@vgg_16\n@@vgg_19\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nslim = tf.contrib.slim\n\n\ndef vgg_arg_scope(weight_decay=0.0005):\n \"\"\"Defines the VGG arg scope.\n\n Args:\n weight_decay: The l2 regularization coefficient.\n\n Returns:\n An arg_scope.\n \"\"\"\n with slim.arg_scope([slim.conv2d, slim.fully_connected],\n activation_fn=tf.nn.relu,\n weights_regularizer=slim.l2_regularizer(weight_decay),\n biases_initializer=tf.zeros_initializer()):\n with slim.arg_scope([slim.conv2d], padding='SAME') as arg_sc:\n return arg_sc\n\n\ndef vgg_a(inputs,\n num_classes=1000,\n is_training=True,\n dropout_keep_prob=0.5,\n spatial_squeeze=True,\n scope='vgg_a',\n fc_conv_padding='VALID'):\n \"\"\"Oxford Net VGG 11-Layers version A Example.\n\n Note: All the fully_connected layers have been transformed to conv2d layers.\n To use in classification mode, resize input to 224x224.\n\n Args:\n inputs: a tensor of size [batch_size, height, width, channels].\n num_classes: number of predicted classes.\n is_training: whether or not the model is being trained.\n dropout_keep_prob: the probability that activations are kept in the dropout\n layers during training.\n spatial_squeeze: whether or not should squeeze the spatial dimensions of the\n outputs. Useful to remove unnecessary dimensions for classification.\n scope: Optional scope for the variables.\n fc_conv_padding: the type of padding to use for the fully connected layer\n that is implemented as a convolutional layer. Use 'SAME' padding if you\n are applying the network in a fully convolutional manner and want to\n get a prediction map downsampled by a factor of 32 as an output. 
Otherwise,\n the output prediction map will be (input / 32) - 6 in case of 'VALID' padding.\n\n Returns:\n the last op containing the log predictions and end_points dict.\n \"\"\"\n with tf.variable_scope(scope, 'vgg_a', [inputs]) as sc:\n end_points_collection = sc.name + '_end_points'\n # Collect outputs for conv2d, fully_connected and max_pool2d.\n with slim.arg_scope([slim.conv2d, slim.max_pool2d],\n outputs_collections=end_points_collection):\n net = slim.repeat(inputs, 1, slim.conv2d, 64, [3, 3], scope='conv1')\n net = slim.max_pool2d(net, [2, 2], scope='pool1')\n net = slim.repeat(net, 1, slim.conv2d, 128, [3, 3], scope='conv2')\n net = slim.max_pool2d(net, [2, 2], scope='pool2')\n net = slim.repeat(net, 2, slim.conv2d, 256, [3, 3], scope='conv3')\n net = slim.max_pool2d(net, [2, 2], scope='pool3')\n net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv4')\n net = slim.max_pool2d(net, [2, 2], scope='pool4')\n net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv5')\n net = slim.max_pool2d(net, [2, 2], scope='pool5')\n # Use conv2d instead of fully_connected layers.\n net = slim.conv2d(net, 4096, [7, 7], padding=fc_conv_padding, scope='fc6')\n net = slim.dropout(net, dropout_keep_prob, is_training=is_training,\n scope='dropout6')\n net = slim.conv2d(net, 4096, [1, 1], scope='fc7')\n net = slim.dropout(net, dropout_keep_prob, is_training=is_training,\n scope='dropout7')\n net = slim.conv2d(net, num_classes, [1, 1],\n activation_fn=None,\n normalizer_fn=None,\n scope='fc8')\n # Convert end_points_collection into a end_point dict.\n end_points = slim.utils.convert_collection_to_dict(end_points_collection)\n if spatial_squeeze:\n net = tf.squeeze(net, [1, 2], name='fc8/squeezed')\n end_points[sc.name + '/fc8'] = net\n return net, end_points\nvgg_a.default_image_size = 224\n\n\ndef vgg_16(inputs,\n num_classes=1000,\n is_training=True,\n dropout_keep_prob=0.5,\n spatial_squeeze=True,\n scope='vgg_16',\n fc_conv_padding='VALID'):\n \"\"\"Oxford Net VGG 16-Layers version D Example.\n\n Note: All the fully_connected layers have been transformed to conv2d layers.\n To use in classification mode, resize input to 224x224.\n\n Args:\n inputs: a tensor of size [batch_size, height, width, channels].\n num_classes: number of predicted classes.\n is_training: whether or not the model is being trained.\n dropout_keep_prob: the probability that activations are kept in the dropout\n layers during training.\n spatial_squeeze: whether or not should squeeze the spatial dimensions of the\n outputs. Useful to remove unnecessary dimensions for classification.\n scope: Optional scope for the variables.\n fc_conv_padding: the type of padding to use for the fully connected layer\n that is implemented as a convolutional layer. Use 'SAME' padding if you\n are applying the network in a fully convolutional manner and want to\n get a prediction map downsampled by a factor of 32 as an output. 
Otherwise,\n the output prediction map will be (input / 32) - 6 in case of 'VALID' padding.\n\n Returns:\n the last op containing the log predictions and end_points dict.\n \"\"\"\n with tf.variable_scope(scope, 'vgg_16', [inputs]) as sc:\n end_points_collection = sc.name + '_end_points'\n # Collect outputs for conv2d, fully_connected and max_pool2d.\n with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],\n outputs_collections=end_points_collection):\n net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')\n net = slim.max_pool2d(net, [2, 2], scope='pool1')\n net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')\n net = slim.max_pool2d(net, [2, 2], scope='pool2')\n net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')\n net = slim.max_pool2d(net, [2, 2], scope='pool3')\n net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')\n net = slim.max_pool2d(net, [2, 2], scope='pool4')\n net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')\n net = slim.max_pool2d(net, [2, 2], scope='pool5')\n # Use conv2d instead of fully_connected layers.\n net = slim.conv2d(net, 4096, [7, 7], padding=fc_conv_padding, scope='fc6')\n net = slim.dropout(net, dropout_keep_prob, is_training=is_training,\n scope='dropout6')\n net = slim.conv2d(net, 4096, [1, 1], scope='fc7')\n net = slim.dropout(net, dropout_keep_prob, is_training=is_training,\n scope='dropout7')\n net = slim.conv2d(net, num_classes, [1, 1],\n activation_fn=None,\n normalizer_fn=None,\n scope='fc8')\n # Convert end_points_collection into a end_point dict.\n end_points = slim.utils.convert_collection_to_dict(end_points_collection)\n if spatial_squeeze:\n net = tf.squeeze(net, [1, 2], name='fc8/squeezed')\n end_points[sc.name + '/fc8'] = net\n return net, end_points\nvgg_16.default_image_size = 224\n\n\ndef vgg_19(inputs,\n num_classes=1000,\n is_training=True,\n dropout_keep_prob=0.5,\n spatial_squeeze=True,\n scope='vgg_19',\n fc_conv_padding='VALID'):\n \"\"\"Oxford Net VGG 19-Layers version E Example.\n\n Note: All the fully_connected layers have been transformed to conv2d layers.\n To use in classification mode, resize input to 224x224.\n\n Args:\n inputs: a tensor of size [batch_size, height, width, channels].\n num_classes: number of predicted classes.\n is_training: whether or not the model is being trained.\n dropout_keep_prob: the probability that activations are kept in the dropout\n layers during training.\n spatial_squeeze: whether or not should squeeze the spatial dimensions of the\n outputs. Useful to remove unnecessary dimensions for classification.\n scope: Optional scope for the variables.\n fc_conv_padding: the type of padding to use for the fully connected layer\n that is implemented as a convolutional layer. Use 'SAME' padding if you\n are applying the network in a fully convolutional manner and want to\n get a prediction map downsampled by a factor of 32 as an output. 
Otherwise,\n the output prediction map will be (input / 32) - 6 in case of 'VALID' padding.\n\n Returns:\n the last op containing the log predictions and end_points dict.\n \"\"\"\n with tf.variable_scope(scope, 'vgg_19', [inputs]) as sc:\n end_points_collection = sc.name + '_end_points'\n # Collect outputs for conv2d, fully_connected and max_pool2d.\n with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],\n outputs_collections=end_points_collection):\n net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')\n net = slim.max_pool2d(net, [2, 2], scope='pool1')\n net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')\n net = slim.max_pool2d(net, [2, 2], scope='pool2')\n net = slim.repeat(net, 4, slim.conv2d, 256, [3, 3], scope='conv3')\n net = slim.max_pool2d(net, [2, 2], scope='pool3')\n net = slim.repeat(net, 4, slim.conv2d, 512, [3, 3], scope='conv4')\n net = slim.max_pool2d(net, [2, 2], scope='pool4')\n net = slim.repeat(net, 4, slim.conv2d, 512, [3, 3], scope='conv5')\n net = slim.max_pool2d(net, [2, 2], scope='pool5')\n # Use conv2d instead of fully_connected layers.\n net = slim.conv2d(net, 4096, [7, 7], padding=fc_conv_padding, scope='fc6')\n net = slim.dropout(net, dropout_keep_prob, is_training=is_training,\n scope='dropout6')\n net = slim.conv2d(net, 4096, [1, 1], scope='fc7')\n net = slim.dropout(net, dropout_keep_prob, is_training=is_training,\n scope='dropout7')\n net = slim.conv2d(net, num_classes, [1, 1],\n activation_fn=None,\n normalizer_fn=None,\n scope='fc8')\n # Convert end_points_collection into a end_point dict.\n end_points = slim.utils.convert_collection_to_dict(end_points_collection)\n if spatial_squeeze:\n net = tf.squeeze(net, [1, 2], name='fc8/squeezed')\n end_points[sc.name + '/fc8'] = net\n return net, end_points\nvgg_19.default_image_size = 224\n\n# Alias\nvgg_d = vgg_16\nvgg_e = vgg_19\n", "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nr\"\"\"Downloads and converts a particular dataset.\n\nUsage:\n```shell\n\n$ python download_and_convert_data.py \\\n --dataset_name=mnist \\\n --dataset_dir=/tmp/mnist\n\n$ python download_and_convert_data.py \\\n --dataset_name=cifar10 \\\n --dataset_dir=/tmp/cifar10\n\n$ python download_and_convert_data.py \\\n --dataset_name=flowers \\\n --dataset_dir=/tmp/flowers\n\n$ python download_and_convert_data.py \\\n --dataset_name=wheatrust \\\n --dataset_dir=/tmp/wheatrust\n```\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom datasets import download_and_convert_cifar10\nfrom datasets import download_and_convert_flowers\nfrom datasets import download_and_convert_mnist\nfrom datasets import convert_wheatrust\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_string(\n 'dataset_name',\n None,\n 'The name of the dataset to convert, one of \"cifar10\", \"flowers\", \"mnist\", \"wheatrust\".')\n\ntf.app.flags.DEFINE_string(\n 'dataset_dir',\n None,\n 'The directory where the output TFRecords and temporary files are saved.')\n\n\ndef main(_):\n if not FLAGS.dataset_name:\n raise ValueError('You must supply the dataset name with --dataset_name')\n if not FLAGS.dataset_dir:\n raise ValueError('You must supply the dataset directory with --dataset_dir')\n\n if FLAGS.dataset_name == 'cifar10':\n download_and_convert_cifar10.run(FLAGS.dataset_dir)\n elif FLAGS.dataset_name == 'flowers':\n download_and_convert_flowers.run(FLAGS.dataset_dir)\n elif FLAGS.dataset_name == 'mnist':\n download_and_convert_mnist.run(FLAGS.dataset_dir)\n elif FLAGS.dataset_name == 'wheatrust':\n convert_wheatrust.run(FLAGS.dataset_dir)\n else:\n raise ValueError(\n 'dataset_name [%s] was not recognized.' % FLAGS.dataset_dir)\n\nif __name__ == '__main__':\n tf.app.run()\n\n", "# Copyright 2017 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Utils for building DRAGNN specs.\"\"\"\n\n\nimport tensorflow as tf\n\nfrom dragnn.protos import spec_pb2\nfrom dragnn.python import lexicon\nfrom syntaxnet.ops import gen_parser_ops\nfrom syntaxnet.util import check\n\n\nclass ComponentSpecBuilder(object):\n \"\"\"Wrapper to help construct SyntaxNetComponent specifications.\n\n This class will help make sure that ComponentSpec's are consistent with the\n expectations of the SyntaxNet Component backend. It contains defaults used to\n create LinkFeatureChannel specifications according to the network_unit and\n transition_system of the source compnent. It also encapsulates common recipes\n for hooking up FML and translators.\n\n Attributes:\n spec: The dragnn.ComponentSpec proto.\n \"\"\"\n\n def __init__(self,\n name,\n builder='DynamicComponentBuilder',\n backend='SyntaxNetComponent'):\n \"\"\"Initializes the ComponentSpec with some defaults for SyntaxNet.\n\n Args:\n name: The name of this Component in the pipeline.\n builder: The component builder type.\n backend: The component backend type.\n \"\"\"\n self.spec = spec_pb2.ComponentSpec(\n name=name,\n backend=self.make_module(backend),\n component_builder=self.make_module(builder))\n\n def make_module(self, name, **kwargs):\n \"\"\"Forwards kwargs to easily created a RegisteredModuleSpec.\n\n Note: all kwargs should be string-valued.\n\n Args:\n name: The registered name of the module.\n **kwargs: Proto fields to be specified in the module.\n\n Returns:\n Newly created RegisteredModuleSpec.\n \"\"\"\n return spec_pb2.RegisteredModuleSpec(\n registered_name=name, parameters=kwargs)\n\n def default_source_layer(self):\n \"\"\"Returns the default source_layer setting for this ComponentSpec.\n\n Usually links are intended for a specific layer in the network unit.\n For common network units, this returns the hidden layer intended\n to be read by recurrent and cross-component connections.\n\n Returns:\n String name of default network layer.\n\n Raises:\n ValueError: if no default is known for the given setup.\n \"\"\"\n for network, default_layer in [('FeedForwardNetwork', 'layer_0'),\n ('LayerNormBasicLSTMNetwork', 'state_h_0'),\n ('LSTMNetwork', 'layer_0'),\n ('IdentityNetwork', 'input_embeddings')]:\n if self.spec.network_unit.registered_name.endswith(network):\n return default_layer\n\n raise ValueError('No default source for network unit: %s' %\n self.spec.network_unit)\n\n def default_token_translator(self):\n \"\"\"Returns the default source_translator setting for token representations.\n\n Most links are token-based: given a target token index, retrieve a learned\n representation for that token from this component. This depends on the\n transition system; e.g. 
we should make sure that left-to-right sequence\n models reverse the incoming token index when looking up representations from\n a right-to-left model.\n\n Returns:\n String name of default translator for this transition system.\n\n Raises:\n ValueError: if no default is known for the given setup.\n \"\"\"\n transition_spec = self.spec.transition_system\n if transition_spec.registered_name == 'arc-standard':\n return 'shift-reduce-step'\n\n if transition_spec.registered_name in ('shift-only', 'tagger'):\n if 'left_to_right' in transition_spec.parameters:\n if transition_spec.parameters['left_to_right'] == 'false':\n return 'reverse-token'\n return 'identity'\n\n raise ValueError('Invalid transition spec: %s' % str(transition_spec))\n\n def add_token_link(self, source=None, source_layer=None, **kwargs):\n \"\"\"Adds a link to source's token representations using default settings.\n\n Constructs a LinkedFeatureChannel proto and adds it to the spec, using\n defaults to assign the name, component, translator, and layer of the\n channel. The user must provide fml and embedding_dim.\n\n Args:\n source: SyntaxComponentBuilder object to pull representations from.\n source_layer: Optional override for a source layer instead of the default.\n **kwargs: Forwarded arguments to the LinkedFeatureChannel proto.\n \"\"\"\n if source_layer is None:\n source_layer = source.default_source_layer()\n\n self.spec.linked_feature.add(\n name=source.spec.name,\n source_component=source.spec.name,\n source_layer=source_layer,\n source_translator=source.default_token_translator(),\n **kwargs)\n\n def add_rnn_link(self, source_layer=None, **kwargs):\n \"\"\"Adds a recurrent link to this component using default settings.\n\n This adds the connection to the previous time step only to the network. It\n constructs a LinkedFeatureChannel proto and adds it to the spec, using\n defaults to assign the name, component, translator, and layer of the\n channel. 
The user must provide the embedding_dim only.\n\n Args:\n source_layer: Optional override for a source layer instead of the default.\n **kwargs: Forwarded arguments to the LinkedFeatureChannel proto.\n \"\"\"\n if source_layer is None:\n source_layer = self.default_source_layer()\n\n self.spec.linked_feature.add(\n name='rnn',\n source_layer=source_layer,\n source_component=self.spec.name,\n source_translator='history',\n fml='constant',\n **kwargs)\n\n def set_transition_system(self, *args, **kwargs):\n \"\"\"Shorthand to set transition_system using kwargs.\"\"\"\n self.spec.transition_system.CopyFrom(self.make_module(*args, **kwargs))\n\n def set_network_unit(self, *args, **kwargs):\n \"\"\"Shorthand to set network_unit using kwargs.\"\"\"\n self.spec.network_unit.CopyFrom(self.make_module(*args, **kwargs))\n\n def add_fixed_feature(self, **kwargs):\n \"\"\"Shorthand to add a fixed_feature using kwargs.\"\"\"\n self.spec.fixed_feature.add(**kwargs)\n\n def add_link(self, source, source_layer=None, source_translator='identity',\n name=None, **kwargs):\n \"\"\"Add a link using default naming and layers only.\"\"\"\n if source_layer is None:\n source_layer = source.default_source_layer()\n if name is None:\n name = source.spec.name\n self.spec.linked_feature.add(\n source_component=source.spec.name, source_layer=source_layer,\n name=name, source_translator=source_translator,\n **kwargs)\n\n def fill_from_resources(self, resource_path, tf_master=''):\n \"\"\"Fills in feature sizes and vocabularies using SyntaxNet lexicon.\n\n Must be called before the spec is ready to be used to build TensorFlow\n graphs. Requires a SyntaxNet lexicon built at the resource_path. Using the\n lexicon, this will call the SyntaxNet custom ops to return the number of\n features and vocabulary sizes based on the FML specifications and the\n lexicons. 
It will also compute the number of actions of the transition\n system.\n\n This will often CHECK-fail if the spec doesn't correspond to a valid\n transition system or feature setup.\n\n Args:\n resource_path: Path to the lexicon.\n tf_master: TensorFlow master executor (string, defaults to '' to use the\n local instance).\n \"\"\"\n check.IsTrue(\n self.spec.transition_system.registered_name,\n 'Set a transition system before calling fill_from_resources().')\n\n context = lexicon.create_lexicon_context(resource_path)\n for key, value in self.spec.transition_system.parameters.iteritems():\n context.parameter.add(name=key, value=value)\n\n context.parameter.add(\n name='brain_parser_embedding_dims',\n value=';'.join(\n [str(x.embedding_dim) for x in self.spec.fixed_feature]))\n context.parameter.add(\n name='brain_parser_features',\n value=';'.join([x.fml for x in self.spec.fixed_feature]))\n context.parameter.add(\n name='brain_parser_predicate_maps',\n value=';'.join(['' for x in self.spec.fixed_feature]))\n context.parameter.add(\n name='brain_parser_embedding_names',\n value=';'.join([x.name for x in self.spec.fixed_feature]))\n context.parameter.add(\n name='brain_parser_transition_system',\n value=self.spec.transition_system.registered_name)\n\n # Propagate information from SyntaxNet C++ backends into the DRAGNN\n # self.spec.\n with tf.Session(tf_master) as sess:\n feature_sizes, domain_sizes, _, num_actions = sess.run(\n gen_parser_ops.feature_size(task_context_str=str(context)))\n self.spec.num_actions = int(num_actions)\n for i in xrange(len(feature_sizes)):\n self.spec.fixed_feature[i].size = int(feature_sizes[i])\n self.spec.fixed_feature[i].vocabulary_size = int(domain_sizes[i])\n\n for i in xrange(len(self.spec.linked_feature)):\n self.spec.linked_feature[i].size = len(\n self.spec.linked_feature[i].fml.split(' '))\n\n for resource in context.input:\n self.spec.resource.add(name=resource.name).part.add(\n file_pattern=resource.part[0].file_pattern)\n\n\ndef complete_master_spec(master_spec, lexicon_corpus, output_path,\n tf_master=''):\n \"\"\"Finishes a MasterSpec that defines the network config.\n\n Given a MasterSpec that defines the DRAGNN architecture, completes the spec so\n that it can be used to build a DRAGNN graph and run training/inference.\n\n Args:\n master_spec: MasterSpec.\n lexicon_corpus: the corpus to be used with the LexiconBuilder.\n output_path: directory to save resources to.\n tf_master: TensorFlow master executor (string, defaults to '' to use the\n local instance).\n\n Returns:\n None, since the spec is changed in-place.\n \"\"\"\n if lexicon_corpus:\n lexicon.build_lexicon(output_path, lexicon_corpus)\n\n # Use Syntaxnet builder to fill out specs.\n for i, spec in enumerate(master_spec.component):\n builder = ComponentSpecBuilder(spec.name)\n builder.spec = spec\n builder.fill_from_resources(output_path, tf_master=tf_master)\n master_spec.component[i].CopyFrom(builder.spec)\n\n\ndef default_targets_from_spec(spec):\n \"\"\"Constructs a default set of TrainTarget protos from a DRAGNN spec.\n\n For each component in the DRAGNN spec, it adds a training target for that\n component's oracle. It also stops unrolling the graph with that component. It\n skips any 'shift-only' transition systems which have no oracle. E.g.: if there\n are three components, a 'shift-only', a 'tagger', and an 'arc-standard', it\n will construct two training targets, one for the tagger and one for the\n arc-standard parser.\n\n Arguments:\n spec: DRAGNN spec.\n\n Returns:\n List of TrainTarget protos.\n \"\"\"\n component_targets = [\n spec_pb2.TrainTarget(\n name=component.name,\n max_index=idx + 1,\n unroll_using_oracle=[False] * idx + [True])\n for idx, component in enumerate(spec.component)\n if not component.transition_system.registered_name.endswith('shift-only')\n ]\n return component_targets\n", "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"A binary to train CIFAR-10 using multiple GPUs with synchronous updates.\n\nAccuracy:\ncifar10_multi_gpu_train.py achieves ~86% accuracy after 100K steps (256\nepochs of data) as judged by cifar10_eval.py.\n\nSpeed: With batch_size 128.\n\nSystem | Step Time (sec/batch) | Accuracy\n--------------------------------------------------------------------\n1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)\n1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)\n2 Tesla K20m | 0.13-0.20 | ~84% at 30K steps (2.5 hours)\n3 Tesla K20m | 0.13-0.18 | ~84% at 30K steps\n4 Tesla K20m | ~0.10 | ~84% at 30K steps\n\nUsage:\nPlease see the tutorial and website for how to download the CIFAR-10\ndata set, compile the program and train the model.\n\nhttp://tensorflow.org/tutorials/deep_cnn/\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom datetime import datetime\nimport os.path\nimport re\nimport time\n\nimport numpy as np\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\nimport cifar10\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',\n \"\"\"Directory where to write event logs \"\"\"\n \"\"\"and checkpoint.\"\"\")\ntf.app.flags.DEFINE_integer('max_steps', 1000000,\n \"\"\"Number of batches to run.\"\"\")\ntf.app.flags.DEFINE_integer('num_gpus', 1,\n \"\"\"How many GPUs to use.\"\"\")\ntf.app.flags.DEFINE_boolean('log_device_placement', False,\n \"\"\"Whether to log device placement.\"\"\")\n\n\ndef tower_loss(scope):\n \"\"\"Calculate the total loss on a single tower running the CIFAR model.\n\n Args:\n scope: unique prefix string identifying the CIFAR tower, e.g. 'tower_0'\n\n Returns:\n Tensor of shape [] containing the total loss for a batch of data\n \"\"\"\n # Get images and labels for CIFAR-10.\n images, labels = cifar10.distorted_inputs()\n\n # Build inference Graph.\n logits = cifar10.inference(images)\n\n # Build the portion of the Graph calculating the losses. Note that we will\n # assemble the total_loss using a custom function below.\n _ = cifar10.loss(logits, labels)\n\n # Assemble all of the losses for the current tower only.\n losses = tf.get_collection('losses', scope)\n\n # Calculate the total loss for the current tower.\n total_loss = tf.add_n(losses, name='total_loss')\n\n # Attach a scalar summary to all individual losses and the total loss; do the\n # same for the averaged version of the losses.\n for l in losses + [total_loss]:\n # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training\n # session. This helps the clarity of presentation on tensorboard.\n loss_name = re.sub('%s_[0-9]*/' % cifar10.TOWER_NAME, '', l.op.name)\n tf.summary.scalar(loss_name, l)\n\n return total_loss\n\n\ndef average_gradients(tower_grads):\n \"\"\"Calculate the average gradient for each shared variable across all towers.\n\n Note that this function provides a synchronization point across all towers.\n\n Args:\n tower_grads: List of lists of (gradient, variable) tuples. The outer list\n is over individual gradients. The inner list is over the gradient\n calculation for each tower.\n Returns:\n List of pairs of (gradient, variable) where the gradient has been averaged\n across all towers.\n \"\"\"\n average_grads = []\n for grad_and_vars in zip(*tower_grads):\n # Note that each grad_and_vars looks like the following:\n # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))\n grads = []\n for g, _ in grad_and_vars:\n # Add 0 dimension to the gradients to represent the tower.\n expanded_g = tf.expand_dims(g, 0)\n\n # Append on a 'tower' dimension which we will average over below.\n grads.append(expanded_g)\n\n # Average over the 'tower' dimension.\n grad = tf.concat(axis=0, values=grads)\n grad = tf.reduce_mean(grad, 0)\n\n # Keep in mind that the Variables are redundant because they are shared\n # across towers. So .. we will just return the first tower's pointer to\n # the Variable.\n v = grad_and_vars[0][1]\n grad_and_var = (grad, v)\n average_grads.append(grad_and_var)\n return average_grads\n\n\ndef train():\n \"\"\"Train CIFAR-10 for a number of steps.\"\"\"\n with tf.Graph().as_default(), tf.device('/cpu:0'):\n # Create a variable to count the number of train() calls. This equals the\n # number of batches processed * FLAGS.num_gpus.\n global_step = tf.get_variable(\n 'global_step', [],\n initializer=tf.constant_initializer(0), trainable=False)\n\n # Calculate the learning rate schedule.\n num_batches_per_epoch = (cifar10.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN /\n FLAGS.batch_size)\n decay_steps = int(num_batches_per_epoch * cifar10.NUM_EPOCHS_PER_DECAY)\n\n # Decay the learning rate exponentially based on the number of steps.\n lr = tf.train.exponential_decay(cifar10.INITIAL_LEARNING_RATE,\n global_step,\n decay_steps,\n cifar10.LEARNING_RATE_DECAY_FACTOR,\n staircase=True)\n\n # Create an optimizer that performs gradient descent.\n opt = tf.train.GradientDescentOptimizer(lr)\n\n # Calculate the gradients for each model tower.\n tower_grads = []\n with tf.variable_scope(tf.get_variable_scope()):\n for i in xrange(FLAGS.num_gpus):\n with tf.device('/gpu:%d' % i):\n with tf.name_scope('%s_%d' % (cifar10.TOWER_NAME, i)) as scope:\n # Calculate the loss for one tower of the CIFAR model. This function\n # constructs the entire CIFAR model but shares the variables across\n # all towers.\n loss = tower_loss(scope)\n\n # Reuse variables for the next tower.\n tf.get_variable_scope().reuse_variables()\n\n # Retain the summaries from the final tower.\n summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)\n\n # Calculate the gradients for the batch of data on this CIFAR tower.\n grads = opt.compute_gradients(loss)\n\n # Keep track of the gradients across all towers.\n tower_grads.append(grads)\n\n # We must calculate the mean of each gradient. Note that this is the\n # synchronization point across all towers.\n grads = average_gradients(tower_grads)\n\n # Add a summary to track the learning rate.\n summaries.append(tf.summary.scalar('learning_rate', lr))\n\n # Add histograms for gradients.\n for grad, var in grads:\n if grad is not None:\n summaries.append(tf.summary.histogram(var.op.name + '/gradients', grad))\n\n # Apply the gradients to adjust the shared variables.\n apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)\n\n # Add histograms for trainable variables.\n for var in tf.trainable_variables():\n summaries.append(tf.summary.histogram(var.op.name, var))\n\n # Track the moving averages of all trainable variables.\n variable_averages = tf.train.ExponentialMovingAverage(\n cifar10.MOVING_AVERAGE_DECAY, global_step)\n variables_averages_op = variable_averages.apply(tf.trainable_variables())\n\n # Group all updates into a single train op.\n train_op = tf.group(apply_gradient_op, variables_averages_op)\n\n # Create a saver.\n saver = tf.train.Saver(tf.global_variables())\n\n # Build the summary operation from the last tower summaries.\n summary_op = tf.summary.merge(summaries)\n\n # Build an initialization operation to run below.\n init = tf.global_variables_initializer()\n\n # Start running operations on the Graph. allow_soft_placement must be set to\n # True to build towers on GPU, as some of the ops do not have GPU\n # implementations.\n sess = tf.Session(config=tf.ConfigProto(\n allow_soft_placement=True,\n log_device_placement=FLAGS.log_device_placement))\n sess.run(init)\n\n # Start the queue runners.\n tf.train.start_queue_runners(sess=sess)\n\n summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)\n\n for step in xrange(FLAGS.max_steps):\n start_time = time.time()\n _, loss_value = sess.run([train_op, loss])\n duration = time.time() - start_time\n\n assert not np.isnan(loss_value), 'Model diverged with loss = NaN'\n\n if step % 10 == 0:\n num_examples_per_step = FLAGS.batch_size * FLAGS.num_gpus\n examples_per_sec = num_examples_per_step / duration\n sec_per_batch = duration / FLAGS.num_gpus\n\n format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '\n 'sec/batch)')\n print (format_str % (datetime.now(), step, loss_value,\n examples_per_sec, sec_per_batch))\n\n if step % 100 == 0:\n summary_str = sess.run(summary_op)\n summary_writer.add_summary(summary_str, step)\n\n # Save the model checkpoint periodically.\n if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:\n checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')\n saver.save(sess, checkpoint_path, global_step=step)\n\n\ndef main(argv=None): # pylint: disable=unused-argument\n cifar10.maybe_download_and_extract()\n if tf.gfile.Exists(FLAGS.train_dir):\n tf.gfile.DeleteRecursively(FLAGS.train_dir)\n tf.gfile.MakeDirs(FLAGS.train_dir)\n train()\n\n\nif __name__ == '__main__':\n tf.app.run()\n" ]
[ [ "tensorflow.python.platform.googletest.main", "tensorflow.test.get_temp_dir" ], [ "tensorflow.device", "tensorflow.get_variable", "tensorflow.control_dependencies", "tensorflow.nn.max_pool", "tensorflow.cast", "tensorflow.train.ExponentialMovingAverage", "tensorflow.nn.l2_loss", "tensorflow.app.flags.DEFINE_string", "tensorflow.app.flags.DEFINE_boolean", "tensorflow.summary.scalar", "tensorflow.nn.conv2d", "tensorflow.get_collection", "tensorflow.app.flags.DEFINE_integer", "tensorflow.truncated_normal_initializer", "tensorflow.train.exponential_decay", "tensorflow.trainable_variables", "tensorflow.matmul", "tensorflow.train.GradientDescentOptimizer", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.no_op", "tensorflow.add_to_collection", "tensorflow.summary.histogram", "tensorflow.nn.bias_add", "tensorflow.nn.relu", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.nn.zero_fraction", "tensorflow.constant_initializer", "tensorflow.nn.lrn", "tensorflow.variable_scope" ], [ "tensorflow.variable_scope", "tensorflow.zeros_initializer", "tensorflow.squeeze" ], [ "tensorflow.app.flags.DEFINE_string", "tensorflow.app.run" ], [ "tensorflow.Session" ], [ "tensorflow.device", "tensorflow.concat", "tensorflow.gfile.DeleteRecursively", "tensorflow.gfile.Exists", "tensorflow.global_variables", "tensorflow.train.ExponentialMovingAverage", "tensorflow.gfile.MakeDirs", "tensorflow.app.flags.DEFINE_string", "tensorflow.app.flags.DEFINE_boolean", "tensorflow.group", "tensorflow.summary.scalar", "tensorflow.add_n", "tensorflow.Graph", "tensorflow.get_collection", "tensorflow.app.flags.DEFINE_integer", "tensorflow.train.exponential_decay", "tensorflow.ConfigProto", "tensorflow.name_scope", "tensorflow.trainable_variables", "tensorflow.app.run", "numpy.isnan", "tensorflow.global_variables_initializer", "tensorflow.train.GradientDescentOptimizer", "tensorflow.summary.merge", "tensorflow.summary.histogram", "tensorflow.summary.FileWriter", "tensorflow.reduce_mean", "tensorflow.train.start_queue_runners", "tensorflow.expand_dims", "tensorflow.constant_initializer", "tensorflow.get_variable_scope" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
scrasmussen/euler-enigma
[ "3faa7acee5cf48d21d081bfb8e1ecf58ef66814a" ]
[ "wse/checkmatches.py" ]
[ "#\n# checkmatches.py\n# A non-spoiler top prowrestling match finder\n# list from http://www.profightdb.com/top-rated-matches.html\n# For copyright see LICENSE.md\n# Author: Soren Rasmussen github: scrasmussen\n#\n\nINTERWEBZ=False\n\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nfrom random import randint\nfrom datetime import datetime as dt\nfrom dateutil.parser import parse\nimport pandas as pd\n\n\ndef sortTopMatches(df):\n df.to_csv(\"topMatches.csv\", index=False, header=False)\n\ndef sortByDate(df):\n df[\"DATE\"] =pd.to_datetime(df.DATE)\n df.sort_values('DATE',inplace=True)\n # print(df)\n df.to_csv(\"sortByDate.csv\", index=False, header=False)\n\n\nprint(\"Start\")\nif (INTERWEBZ):\n SEARCHURL=\"http://www.profightdb.com/top-rated-matches.html\"\n req = urlopen(SEARCHURL);\n soup = BeautifulSoup(req,\"lxml\")\nelse:\n soup = BeautifulSoup(open(\"./top-rated-matches.html\"),\"lxml\")\nmatches = soup.findAll(class_=\"chequered\")\ntable = soup.find('table')\n\nRATING=[]\nDATE=[]\nSHOW=[]\nMATCH=[]\n# print(table)\nfor row in table.find_all(\"tr\"):\n cell = row.find_all(\"td\")\n if ((len(cell) == 6)):\n RATING.append(cell[0].text.strip())\n DATE.append(parse(cell[1].text.strip()).strftime(\"%d, %b %Y\"))\n\n SHOW.append(cell[2].text.lstrip().replace('\\n',':'))\n if (randint(0,1)):\n match = cell[3].text.lstrip() + \"vs \" + cell[4].text.lstrip()\n else:\n match = cell[4].text.lstrip() + \"vs \" + cell[3].text.lstrip()\n MATCH.append(match)\n\ndf = pd.DataFrame(RATING)\ndf[\"DATE\"]=DATE\ndf[\"SHOW\"]=SHOW\ndf[\"MATCH\"]=MATCH\ndf.insert(0,\"CHECK\",'[ ]')\n\n# Save the sorted by ranking\n# sortTopMatches(df)\n# Save the sorted by date list\n# sortByDate(df)\nprint(\"Fin\")\n" ]
[ [ "pandas.to_datetime", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
eshanking/fears
[ "8d69af08c5aba9fefdbf962ab568c2ca58276c0d" ]
[ "figure_code/seascape_v_landscape_fig.py" ]
[ "from fears.utils import plotter, results_manager\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# data_folder = 'results_10272021_0000'\n# exp_info_file = 'experiment_info_10272021_0000.p'\ndata_folder = 'results_11112021_0000'\nexp_info_file = 'experiment_info_11112021_0000.p'\nexp_folder,exp_info = results_manager.get_experiment_results(data_folder,\n exp_info_file)\n# fitness axes\nfig,ax = plt.subplots(nrows=2,ncols=2,figsize=(7,5))\nlinewidth = 2\n\nlabelsize=8\n\nf,ax[0,0] = plotter.plot_fitness_curves(exp_info.p_landscape,\n ax=ax[0,0],\n show_legend=False,\n show_axes_labels=False,\n labelsize=labelsize,\n linewidth=linewidth)\n\nax[0,0].set_xticks([10**-3,10**-1,10**1,10**3,10**5])\nax[0,0].xaxis.tick_top()\n\nf,ax[0,1] = plotter.plot_fitness_curves(exp_info.p_seascape,\n ax=ax[0,1], \n show_legend=False,\n show_axes_labels=False,\n labelsize=labelsize,\n linewidth=linewidth)\n\nax[0,1].set_xticks([10**-3,10**-1,10**1,10**3,10**5])\nax[0,1].xaxis.tick_top()\n\n# timecourse axes\nlandscape_exp = exp_folder[0]\ndata = results_manager.get_data(landscape_exp)\ncounts = data[:,0:4]\ndc = exp_info.p_landscape.drug_curve\ndrug_kwargs = {'color':'black',\n 'alpha':0.5,\n 'linestyle':'--'}\n\nax[1,0],drug_ax = plotter.plot_timecourse_to_axes(exp_info.p_landscape,\n counts,\n ax[1,0],\n labelsize=labelsize,\n linewidth=linewidth,\n drug_curve=dc,\n # drug_curve_linestyle='--',\n drug_curve_label=False,\n drug_kwargs=drug_kwargs)\n\ndrug_ax.set_ylim([10**-5,10**7])\ndrug_ax.set_yticks([10**-3,10**1,10**5])\n\nseascape_exp = exp_folder[1]\ndata = results_manager.get_data(seascape_exp)\ncounts = data[:,0:4]\nax[1,1],drug_ax = plotter.plot_timecourse_to_axes(exp_info.p_landscape,\n counts,\n ax[1,1],\n labelsize=labelsize,\n linewidth=linewidth,\n # drug_curve_linestyle='--',\n drug_curve=dc,\n drug_kwargs=drug_kwargs)\n\ndrug_ax.set_ylim([10**-5,10**7])\ndrug_ax.set_yticks([10**-3,10**1,10**5])\n\n# landscape axes\n\nnull_ax = ax[0,0]\nconc = [exp_info.first_dose,exp_info.second_dose,exp_info.third_dose]\ncmap = 'Blues'\nedgecolor='black'\ntextcolor='goldenrod'\npad = -0.35\n\nyl = null_ax.get_ylim()\nydata = np.arange(yl[0],yl[1],0.1)\n\nfor c in conc:\n plotter.add_landscape_to_fitness_curve(c,null_ax,exp_info.p_landscape,\n textcolor=textcolor,\n cmap=cmap,\n edgecolor=edgecolor,\n linewidths=0.5,\n textsize=9,\n position='bottom',\n vert_lines_ydata=ydata,\n pad=pad)\n \nsea_ax = ax[0,1]\n\nfor i in range(len(conc)-1):\n c = conc[i]\n plotter.add_landscape_to_fitness_curve(c,sea_ax,exp_info.p_seascape,\n textcolor=textcolor,\n cmap=cmap,\n edgecolor=edgecolor,\n linewidths=0.5,\n textsize=9,\n position='bottom',\n vert_lines_ydata=ydata,\n pad=pad)\n\nc = conc[-1]\n# cbax = fig.add_subplot()\nl1 = plotter.add_landscape_to_fitness_curve(c,sea_ax,exp_info.p_seascape,\n textcolor=textcolor,\n cmap=cmap,\n edgecolor=edgecolor,\n linewidths=0.5,\n textsize=9,\n position='bottom',\n vert_lines_ydata=ydata,\n pad=pad,\n colorbar=True)\n\n# reposition axes\n# w = 0.3\n# h = 0.27\nw = 0.26\nh = 0.22\n\n# wspace = (1-2*w)/3\nwspace = (1-2*w)/2.7\nhspace = (1-2*h)/2.7\n\nbottom = np.array([[1-hspace-h,1-hspace-h],[hspace,hspace]])\nleft = np.array([[wspace,1-wspace-w],[wspace,1-wspace-w]])\n\nfor a in ax[0,:]:\n # a.set_ylabel('Growth rate',fontsize=labelsize)\n a.set_xlabel('Drug concentration ($\\u03BC$M)',fontsize=labelsize)\n a.xaxis.set_label_position('top') \n \nfor a in ax[1,:]:\n # a.set_ylabel('Cell count',labelpad=0,fontsize=labelsize)\n a.set_xlabel('Days',fontsize=labelsize)\n 
\nax[1,0].set_ylabel('Cell count',labelpad=0,fontsize=labelsize)\nax[0,0].set_ylabel('Growth rate',fontsize=labelsize)\n \nax[1,1].legend(frameon=False,fontsize=7,\n bbox_to_anchor=(-0.75, -0.45, 1., .102), loc='lower left',\n ncol=4, mode=\"expand\", borderaxespad=0.)\n \nfor row in range(2):\n for col in range(2):\n a = ax[row,col]\n pos = [left[row,col],bottom[row,col],w,h]\n a.set_position(pos)\n \nax[0,0].annotate('a.', xy=(-0.15,1.05), xycoords='axes fraction')\nax[1,0].annotate('c.', xy=(-0.15,1.05), xycoords='axes fraction')\nax[0,1].annotate('b.', xy=(-0.15,1.05), xycoords='axes fraction')\nax[1,1].annotate('d.', xy=(-0.15,1.05), xycoords='axes fraction')\n \nresults_manager.save_fig(fig,'seascape_v_landscape.pdf',bbox_inches='tight')" ]
[ [ "numpy.arange", "numpy.array", "matplotlib.pyplot.subplots" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Stonepia/pytorch
[ "82006ba46074226a071c25dd2e03dc4828941544", "82006ba46074226a071c25dd2e03dc4828941544", "82006ba46074226a071c25dd2e03dc4828941544" ]
[ "torch/autograd/__init__.py", "torch/multiprocessing/__init__.py", "test/distributed/test_distributed_fork.py" ]
[ "\"\"\"\n``torch.autograd`` provides classes and functions implementing automatic\ndifferentiation of arbitrary scalar valued functions. It requires minimal\nchanges to the existing code - you only need to declare :class:`Tensor` s\nfor which gradients should be computed with the ``requires_grad=True`` keyword.\nAs of now, we only support autograd for floating point :class:`Tensor` types (\nhalf, float, double and bfloat16) and complex :class:`Tensor` types (cfloat, cdouble).\n\"\"\"\nimport torch\nimport warnings\n\nfrom torch.types import _TensorOrTensors\nfrom typing import Any, Callable, List, Optional, Sequence, Tuple, Union\n\nfrom .variable import Variable\nfrom .function import Function, NestedIOFunction\nfrom .gradcheck import gradcheck, gradgradcheck\nfrom .grad_mode import no_grad, enable_grad, set_grad_enabled\nfrom .anomaly_mode import detect_anomaly, set_detect_anomaly\nfrom ..overrides import has_torch_function, handle_torch_function\nfrom . import functional\nfrom . import forward_ad\n\n__all__ = ['Variable', 'Function', 'backward', 'grad_mode']\n\n_OptionalTensor = Optional[torch.Tensor]\n\ndef _make_grads(outputs: Sequence[torch.Tensor], grads: Sequence[_OptionalTensor]) -> Tuple[_OptionalTensor, ...]:\n new_grads: List[_OptionalTensor] = []\n for out, grad in zip(outputs, grads):\n if isinstance(grad, torch.Tensor):\n if not out.shape == grad.shape:\n raise RuntimeError(\"Mismatch in shape: grad_output[\"\n + str(grads.index(grad)) + \"] has a shape of \"\n + str(grad.shape) + \" and output[\"\n + str(outputs.index(out)) + \"] has a shape of \"\n + str(out.shape) + \".\")\n if out.dtype.is_complex != grad.dtype.is_complex:\n raise RuntimeError(\"For complex Tensors, both grad_output and output\"\n \" are required to have the same dtype.\"\n \" Mismatch in dtype: grad_output[\"\n + str(grads.index(grad)) + \"] has a dtype of \"\n + str(grad.dtype) + \" and output[\"\n + str(outputs.index(out)) + \"] has a dtype of \"\n + str(out.dtype) + \".\")\n new_grads.append(grad)\n elif grad is None:\n if out.requires_grad:\n if out.numel() != 1:\n raise RuntimeError(\"grad can be implicitly created only for scalar outputs\")\n new_grads.append(torch.ones_like(out, memory_format=torch.preserve_format))\n else:\n new_grads.append(None)\n else:\n raise TypeError(\"gradients can be either Tensors or None, but got \" +\n type(grad).__name__)\n return tuple(new_grads)\n\n\ndef _tensor_or_tensors_to_tuple(tensors: Optional[_TensorOrTensors], length: int) -> Tuple[_OptionalTensor, ...]:\n if tensors is None:\n return (None, ) * length\n if isinstance(tensors, torch.Tensor):\n return (tensors, )\n return tuple(tensors)\n\n\ndef backward(\n tensors: _TensorOrTensors,\n grad_tensors: Optional[_TensorOrTensors] = None,\n retain_graph: Optional[bool] = None,\n create_graph: bool = False,\n grad_variables: Optional[_TensorOrTensors] = None,\n inputs: Optional[_TensorOrTensors] = None,\n) -> None:\n r\"\"\"Computes the sum of gradients of given tensors w.r.t. graph leaves.\n\n The graph is differentiated using the chain rule. If any of ``tensors``\n are non-scalar (i.e. their data has more than one element) and require\n gradient, then the Jacobian-vector product would be computed, in this\n case the function additionally requires specifying ``grad_tensors``.\n It should be a sequence of matching length, that contains the \"vector\"\n in the Jacobian-vector product, usually the gradient of the differentiated\n function w.r.t. 
corresponding tensors (``None`` is an acceptable value for\n all tensors that don't need gradient tensors).\n\n This function accumulates gradients in the leaves - you might need to zero\n ``.grad`` attributes or set them to ``None`` before calling it.\n See :ref:`Default gradient layouts<default-grad-layouts>`\n for details on the memory layout of accumulated gradients.\n\n .. note::\n Using this method with ``create_graph=True`` will create a reference cycle\n between the parameter and its gradient which can cause a memory leak.\n We recommend using ``autograd.grad`` when creating the graph to avoid this.\n If you have to use this function, make sure to reset the ``.grad`` fields of your\n parameters to ``None`` after use to break the cycle and avoid the leak.\n\n .. note::\n\n If you run any forward ops, create ``grad_tensors``, and/or call ``backward``\n in a user-specified CUDA stream context, see\n :ref:`Stream semantics of backward passes<bwd-cuda-stream-semantics>`.\n\n Args:\n tensors (Sequence[Tensor] or Tensor): Tensors of which the derivative will be\n computed.\n grad_tensors (Sequence[Tensor or None] or Tensor, optional): The \"vector\" in\n the Jacobian-vector product, usually gradients w.r.t. each element of\n corresponding tensors. None values can be specified for scalar Tensors or\n ones that don't require grad. If a None value would be acceptable for all\n grad_tensors, then this argument is optional.\n retain_graph (bool, optional): If ``False``, the graph used to compute the grad\n will be freed. Note that in nearly all cases setting this option to ``True``\n is not needed and often can be worked around in a much more efficient\n way. Defaults to the value of ``create_graph``.\n create_graph (bool, optional): If ``True``, graph of the derivative will\n be constructed, allowing to compute higher order derivative products.\n Defaults to ``False``.\n inputs (Sequence[Tensor] or Tensor, optional): Inputs w.r.t. which the gradient\n be will accumulated into ``.grad``. All other Tensors will be ignored. If\n not provided, the gradient is accumulated into all the leaf Tensors that\n were used to compute the attr::tensors. All the provided inputs must be leaf\n Tensors.\n \"\"\"\n if grad_variables is not None:\n warnings.warn(\"'grad_variables' is deprecated. Use 'grad_tensors' instead.\")\n if grad_tensors is None:\n grad_tensors = grad_variables\n else:\n raise RuntimeError(\"'grad_tensors' and 'grad_variables' (deprecated) \"\n \"arguments both passed to backward(). 
Please only \"\n \"use 'grad_tensors'.\")\n if inputs is not None and len(inputs) == 0:\n raise RuntimeError(\"'inputs' argument to backward() cannot be empty.\")\n\n tensors = (tensors,) if isinstance(tensors, torch.Tensor) else tuple(tensors)\n inputs = (inputs,) if isinstance(inputs, torch.Tensor) else \\\n tuple(inputs) if inputs is not None else tuple()\n\n grad_tensors_ = _tensor_or_tensors_to_tuple(grad_tensors, len(tensors))\n grad_tensors_ = _make_grads(tensors, grad_tensors_)\n if retain_graph is None:\n retain_graph = create_graph\n\n Variable._execution_engine.run_backward(\n tensors, grad_tensors_, retain_graph, create_graph, inputs,\n allow_unreachable=True, accumulate_grad=True) # allow_unreachable flag\n\n\ndef grad(\n outputs: _TensorOrTensors,\n inputs: _TensorOrTensors,\n grad_outputs: Optional[_TensorOrTensors] = None,\n retain_graph: Optional[bool] = None,\n create_graph: bool = False,\n only_inputs: bool = True,\n allow_unused: bool = False\n) -> Tuple[torch.Tensor, ...]:\n r\"\"\"Computes and returns the sum of gradients of outputs w.r.t. the inputs.\n\n ``grad_outputs`` should be a sequence of length matching ``output``\n containing the \"vector\" in Jacobian-vector product, usually the pre-computed\n gradients w.r.t. each of the outputs. If an output doesn't require_grad,\n then the gradient can be ``None``).\n\n If ``only_inputs`` is ``True``, the function will only return a list of gradients\n w.r.t the specified inputs. If it's ``False``, then gradient w.r.t. all remaining\n leaves will still be computed, and will be accumulated into their ``.grad``\n attribute.\n\n .. note::\n\n If you run any forward ops, create ``grad_outputs``, and/or call ``grad``\n in a user-specified CUDA stream context, see\n :ref:`Stream semantics of backward passes<bwd-cuda-stream-semantics>`.\n\n Args:\n outputs (sequence of Tensor): outputs of the differentiated function.\n inputs (sequence of Tensor): Inputs w.r.t. which the gradient will be\n returned (and not accumulated into ``.grad``).\n grad_outputs (sequence of Tensor): The \"vector\" in the Jacobian-vector product.\n Usually gradients w.r.t. each output. None values can be specified for scalar\n Tensors or ones that don't require grad. If a None value would be acceptable\n for all grad_tensors, then this argument is optional. Default: None.\n retain_graph (bool, optional): If ``False``, the graph used to compute the grad\n will be freed. Note that in nearly all cases setting this option to ``True``\n is not needed and often can be worked around in a much more efficient\n way. Defaults to the value of ``create_graph``.\n create_graph (bool, optional): If ``True``, graph of the derivative will\n be constructed, allowing to compute higher order derivative products.\n Default: ``False``.\n allow_unused (bool, optional): If ``False``, specifying inputs that were not\n used when computing outputs (and therefore their grad is always zero)\n is an error. 
Defaults to ``False``.\n \"\"\"\n outputs = (outputs,) if isinstance(outputs, torch.Tensor) else tuple(outputs)\n inputs = (inputs,) if isinstance(inputs, torch.Tensor) else tuple(inputs)\n overridable_args = outputs + inputs\n if has_torch_function(overridable_args):\n return handle_torch_function(\n grad,\n overridable_args,\n outputs,\n inputs,\n grad_outputs=grad_outputs,\n retain_graph=retain_graph,\n create_graph=create_graph,\n only_inputs=only_inputs,\n allow_unused=allow_unused,\n )\n\n if not only_inputs:\n warnings.warn(\"only_inputs argument is deprecated and is ignored now \"\n \"(defaults to True). To accumulate gradient for other \"\n \"parts of the graph, please use torch.autograd.backward.\")\n\n grad_outputs_ = _tensor_or_tensors_to_tuple(grad_outputs, len(outputs))\n grad_outputs_ = _make_grads(outputs, grad_outputs_)\n\n if retain_graph is None:\n retain_graph = create_graph\n\n return Variable._execution_engine.run_backward(\n outputs, grad_outputs_, retain_graph, create_graph,\n inputs, allow_unused, accumulate_grad=False)\n\n\n# This function applies in case of gradient checkpointing for memory\n# optimization. Currently, gradient checkpointing is supported only if the\n# execution engine is invoked through torch.autograd.backward() and its\n# inputs argument is not passed. It is not supported for torch.autograd.grad().\n# This is because if inputs are specified, the gradient won't be calculated for\n# anything else e.g. model parameters like weights, bias etc.\n#\n# This function returns whether the checkpointing is valid i.e. torch.autograd.backward\n# or not i.e. torch.autograd.grad. The implementation works by maintaining a thread\n# local variable in torch/csrc/autograd/engine.cpp which looks at the NodeTask\n# in the stack and before a NodeTask is executed in evaluate_function, it\n# checks for whether reentrant backwards is imperative or not.\n# See https://github.com/pytorch/pytorch/pull/4594 for more discussion/context\ndef _is_checkpoint_valid():\n return Variable._execution_engine.is_checkpoint_valid()\n\n\ndef variable(*args, **kwargs):\n warnings.warn(\"torch.autograd.variable(...) is deprecated, use torch.tensor(...) instead\")\n return torch.tensor(*args, **kwargs)\n\nif not torch._C._autograd_init():\n raise RuntimeError(\"autograd initialization failed\")\n\n# Import all native method/classes\nfrom torch._C._autograd import (DeviceType, ProfilerActivity, ProfilerState, ProfilerConfig, ProfilerEvent,\n _enable_profiler_legacy, _disable_profiler_legacy, _profiler_enabled,\n _enable_record_function, _set_empty_test_observer, kineto_available)\n\nif kineto_available():\n from torch._C._autograd import (ProfilerResult, KinetoEvent,\n _prepare_profiler, _enable_profiler, _disable_profiler)\n\nfrom . import profiler\n", "\"\"\"\ntorch.multiprocessing is a wrapper around the native :mod:`multiprocessing`\nmodule. It registers custom reducers, that use shared memory to provide shared\nviews on the same data in different processes. 
Once the tensor/storage is moved\nto shared_memory (see :func:`~torch.Tensor.share_memory_`), it will be possible\nto send it to other processes without making any copies.\n\nThe API is 100% compatible with the original module - it's enough to change\n``import multiprocessing`` to ``import torch.multiprocessing`` to have all the\ntensors sent through the queues or shared via other mechanisms, moved to shared\nmemory.\n\nBecause of the similarity of APIs we do not document most of this package\ncontents, and we recommend referring to very good docs of the original module.\n\"\"\"\nimport torch\nimport sys\nfrom .reductions import init_reductions\nimport multiprocessing\n\n__all__ = ['set_sharing_strategy', 'get_sharing_strategy',\n 'get_all_sharing_strategies']\n\n\nfrom multiprocessing import *\n\n\n__all__ += multiprocessing.__all__ # type: ignore[attr-defined]\n\n\n# This call adds a Linux specific prctl(2) wrapper function to this module.\n# See https://github.com/pytorch/pytorch/pull/14391 for more information.\ntorch._C._multiprocessing_init()\n\n\n\"\"\"Add helper function to spawn N processes and wait for completion of any of\nthem. This depends `mp.get_context` which was added in Python 3.4.\"\"\"\nfrom .spawn import spawn, SpawnContext, start_processes, ProcessContext, \\\n ProcessRaisedException, ProcessExitedException\n\n\nif sys.platform == 'darwin' or sys.platform == 'win32':\n _sharing_strategy = 'file_system'\n _all_sharing_strategies = {'file_system'}\nelse:\n _sharing_strategy = 'file_descriptor'\n _all_sharing_strategies = {'file_descriptor', 'file_system'}\n\n\ndef set_sharing_strategy(new_strategy):\n \"\"\"Sets the strategy for sharing CPU tensors.\n\n Args:\n new_strategy (str): Name of the selected strategy. Should be one of\n the values returned by :func:`get_all_sharing_strategies()`.\n \"\"\"\n global _sharing_strategy\n assert new_strategy in _all_sharing_strategies\n _sharing_strategy = new_strategy\n\n\ndef get_sharing_strategy():\n \"\"\"Returns the current strategy for sharing CPU tensors.\"\"\"\n return _sharing_strategy\n\n\ndef get_all_sharing_strategies():\n \"\"\"Returns a set of sharing strategies supported on a current system.\"\"\"\n return _all_sharing_strategies\n\n\ninit_reductions()\n", "import os\nimport sys\nimport tempfile\nfrom functools import wraps\nimport torch\nimport torch.cuda\nimport torch.distributed as dist\n\nif not dist.is_available():\n print(\"Distributed not available, skipping tests\", file=sys.stderr)\n sys.exit(0)\n\nfrom torch.testing._internal.common_utils import TestCase, find_free_port, run_tests\nfrom torch.distributed.distributed_c10d import _get_default_group\nfrom torch.testing._internal.distributed.distributed_test import (\n DistributedTest, TestDistBackend\n)\n\ntorch.backends.cuda.matmul.allow_tf32 = False\n\nCPP_EXTENSIONS_WARNING = \"\"\"\nNinja (https://ninja-build.org) must be available to run C++ extensions tests,\nbut it could not be found. 
Install ninja with `pip install ninja`\nor `conda install ninja`.\n\"\"\"\n\nBACKEND = os.environ[\"BACKEND\"]\nINIT_METHOD = os.getenv(\"INIT_METHOD\", \"env://\")\n\n\ndef skip_if_no_ninja(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n try:\n import torch.utils.cpp_extension\n torch.utils.cpp_extension.verify_ninja_availability()\n except RuntimeError:\n print(CPP_EXTENSIONS_WARNING)\n return 0\n\n return func(*args, **kwargs)\n\n return wrapper\n\n\nif BACKEND == \"gloo\" or BACKEND == \"nccl\":\n\n class TestDistBackendWithFork(TestDistBackend, DistributedTest._DistTestBase):\n\n def setUp(self):\n super().setUp()\n self._fork_processes()\n torch.backends.cudnn.flags(allow_tf32=False).__enter__()\n\n\nelif BACKEND == \"mpi\":\n WORLD_SIZE = os.environ[\"WORLD_SIZE\"]\n dist.init_process_group(init_method=INIT_METHOD, backend=\"mpi\")\n\n class TestMPIWithFork(TestCase, DistributedTest._DistTestBase):\n pass\n\nelif BACKEND == \"test\":\n class TestBackendDynamicLoad(TestCase):\n def setUp(self):\n super(TestBackendDynamicLoad, self).setUp()\n\n def _load_test_backend(self):\n temp_dir = tempfile.mkdtemp()\n src = \"{}/../cpp_extensions/cpp_c10d_extension.cpp\".format(os.path.abspath(os.path.dirname(__file__)))\n extension = torch.utils.cpp_extension.load(\n name=\"torch_test\",\n sources=[src],\n build_directory=temp_dir\n )\n\n @skip_if_no_ninja\n def test_backend_apis(self):\n self._load_test_backend()\n\n os.environ['WORLD_SIZE'] = '1'\n os.environ['MASTER_ADDR'] = '127.0.0.1'\n os.environ['MASTER_PORT'] = str(find_free_port())\n os.environ['RANK'] = '0'\n\n dist.init_process_group(backend='test', init_method='env://', world_size=1, rank=0)\n self.assertEqual(dist.get_rank(), 0)\n self.assertEqual(dist.get_world_size(), 1)\n\n process_group = _get_default_group()\n work = process_group.allreduce([torch.rand(1), torch.rand(1)])\n self.assertTrue(work.wait())\n self.assertTrue(work.is_completed())\n self.assertTrue(work.is_success())\n\n work = process_group.broadcast([torch.rand(1)])\n self.assertTrue(work.wait())\n self.assertTrue(work.is_completed())\n self.assertTrue(work.is_success())\n\n dist.destroy_process_group()\n\nif __name__ == \"__main__\":\n assert (\n not torch.cuda._initialized\n ), \"test_distributed must not have initialized CUDA context on main process\"\n\n run_tests()\n" ]
[ [ "torch._C._autograd_init", "torch._C._autograd.kineto_available", "torch.ones_like", "torch.tensor" ], [ "torch._C._multiprocessing_init" ], [ "torch.utils.cpp_extension.verify_ninja_availability", "torch.utils.cpp_extension.load", "torch.distributed.init_process_group", "torch.testing._internal.common_utils.find_free_port", "torch.distributed.get_rank", "torch.distributed.distributed_c10d._get_default_group", "torch.distributed.is_available", "torch.rand", "torch.distributed.destroy_process_group", "torch.backends.cudnn.flags", "torch.distributed.get_world_size", "torch.testing._internal.common_utils.run_tests" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
harshitjindal/BrainNetworksInPython
[ "7b35d0693b5ea05f51a9b7b3e711f82e12c70a24" ]
[ "WRAPPERS/network_analysis_from_corrmat.py" ]
[ "#!/usr/bin/env python\n\n#=============================================================================\n# Created by Kirstie Whitaker\n# at Neurohackweek 2016 in Seattle, September 2016\n# Contact: [email protected]\n#=============================================================================\n\n#=============================================================================\n# IMPORTS\n#=============================================================================\nimport os\nimport sys\nimport argparse\nimport textwrap\n\nimport numpy as np\nimport pandas as pd\n\nsys.path.append(os.path.join(os.path.dirname(__file__), '../SCRIPTS/'))\nimport make_graphs as mkg\n\n#=============================================================================\n# FUNCTIONS\n#=============================================================================\ndef setup_argparser():\n '''\n Code to read in arguments from the command line\n Also allows you to change some settings\n '''\n # Build a basic parser.\n help_text = (('Generate a graph as a fixed cost from a non-thresholded matrix\\n')+\n ('and return global and nodal measures.'))\n\n sign_off = 'Author: Kirstie Whitaker <[email protected]>'\n\n parser = argparse.ArgumentParser(description=help_text,\n epilog=sign_off,\n formatter_class=argparse.RawTextHelpFormatter)\n\n # Now add the arguments\n parser.add_argument(dest='corr_mat_file',\n type=str,\n metavar='corr_mat_file',\n help=textwrap.dedent(('Text file (tab or space delimited) that contains the unthresholded\\n')+\n ('matrix with no column or row labels.')))\n\n parser.add_argument(dest='names_file',\n type=str,\n metavar='names_file',\n help=textwrap.dedent(('Text file that contains the names of each region, in the same\\n')+\n ('order as the correlation matrix. One region name on each line.')))\n\n parser.add_argument(dest='centroids_file',\n type=str,\n metavar='centroids_file',\n help=textwrap.dedent(('Text file that contains the x, y, z coordinates of each region,\\n')+\n ('in the same order as the correlation matrix. 
One set of three\\n')+\n ('coordinates, tab or space delimited, on each line.')))\n\n parser.add_argument(dest='output_dir',\n type=str,\n metavar='output_dir',\n help=textwrap.dedent(('Location in which to save global and nodal measures.')))\n\n parser.add_argument('-c', '--cost',\n type=float,\n metavar='cost',\n help=textwrap.dedent(('Cost at which to threshold the matrix.\\n')+\n (' Default: 10.0')),\n default=10.0)\n\n parser.add_argument('-n', '--n_rand',\n type=int,\n metavar='n_rand',\n help=textwrap.dedent(('Number of random graphs to generate to compare with real network.\\n')+\n (' Default: 1000')),\n default=1000)\n\n parser.add_argument('--names_308_style',\n action='store_true',\n help=textwrap.dedent(('Include this flag if your names are in the NSPN 308\\n')+\n ('parcellation style (which means you have 41 subcortical regions)\\n')+\n ('that are still in the names and centroids files and that\\n')+\n ('the names are in <hemi>_<DK-region>_<part> format.\\n')+\n (' Default: False')),\n default=False)\n\n arguments = parser.parse_args()\n\n return arguments, parser\n\n\ndef read_in_data(corr_mat_file, names_file, centroids_file, names_308_style):\n '''\n Read in the data from the three input files:\n * corr_mat_file\n * names_file\n * centroids_file\n '''\n # Load the input files\n M = np.loadtxt(corr_mat_file)\n names = [ line.strip() for line in open(names_file) ]\n centroids = np.loadtxt(centroids_file)\n\n # If you have your names in names_308_style you need to strip the\n # first 41 items\n if names_308_style:\n names = names[41:]\n centroids = centroids[41:,:]\n\n return M, names, centroids\n\n\ndef write_out_nodal_measures(nodal_dict, centroids, output_dir, corr_mat_file, cost):\n '''\n Write the nodal dictionary into a pandas data frame and then\n save this data frame into a csv file where columns are the nodal measures\n and the rows are each region.\n '''\n \n # Put the nodal dict into a pandas dataframe\n df = pd.DataFrame(nodal_dict)# IS: This works fine since the replacement of \"np.array\" with list\" (see make_graphs.py)\n\n # Add in the centroids\n df['x'] = centroids[:, 0]\n df['y'] = centroids[:, 1]\n df['z'] = centroids[:, 2]\n\n # Make the output directory if it doesn't exist already\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir)\n\n # Figure out the output file name\n basename_corr_mat_file = os.path.basename(corr_mat_file).strip('.txt')\n output_f_name = os.path.join(output_dir,\n 'NodalMeasures_{}_COST{:03.0f}.csv'.format(basename_corr_mat_file,\n cost))\n\n # Write the data frame out (with the name column first)\n new_col_list = ['name'] + [ col_name for col_name in df.columns if not col_name == 'name' ]\n df.to_csv(output_f_name, columns=new_col_list)\n\n\ndef write_out_global_measures(global_dict, output_dir, corr_mat_file, cost):\n '''\n Write the global dictionary into a pandas data frame and then\n save this data frame into a csv file where columns are the global measures\n and the rows are each of the random networks. 
Note that this means the\n non-randomised graph measures are the same in every row.\n\n (If there's a better way to write this out then I'm totally down!)\n '''\n # Put the global dict into a pandas dataframe\n df = pd.DataFrame(global_dict)\n\n # Make the output directory if it doesn't exist already\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir)\n\n # Figure out the output file name\n basename_corr_mat_file = os.path.basename(corr_mat_file).strip('.txt')\n output_f_name = os.path.join(output_dir,\n 'GlobalMeasures_{}_COST{:03.0f}.csv'.format(basename_corr_mat_file,\n cost))\n\n # Write the data frame out (with the name column first)\n df.to_csv(output_f_name)\n\ndef write_out_rich_club(deg, rc, rc_rand, output_dir, corr_mat_file, cost):\n '''\n Write the rich club array into a pandas data frame and then\n save this data frame into a csv file where columns are the graphs, and the rows are their kth rich club coefficients.\n '''\n \n # Put the rich club arrays into a pandas dataframe\n df = pd.DataFrame(rc_rand)\n df.rename(columns=lambda x: 'random graph '+str(x), inplace=True)\n df['degree'], df['real graph'] = deg, rc\n \n # Make the output directory if it doesn't exist already\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir)\n\n # Figure out the output file name\n basename_corr_mat_file = os.path.basename(corr_mat_file).strip('.txt')\n output_f_name = os.path.join(output_dir,\n 'RICH_CLUB_{}_COST{:03.0f}.csv'.format(basename_corr_mat_file,\n cost))\n\n # Write the data frame out (with the degree column first)\n new_col_list = ['degree']+ ['real graph'] + [ col_name for col_name in df.columns if col_name != 'degree' and col_name != 'real graph']\n df.to_csv(output_f_name, columns=new_col_list)\n\n\ndef network_analysis_from_corrmat(corr_mat_file,\n names_file,\n centroids_file,\n output_dir,\n cost=10,\n n_rand=1000,\n names_308_style=False):\n '''\n This is the big function!\n It reads in the correlation matrix, thresholds it at the given cost\n (incorporating a minimum spanning tree), creates a networkx graph,\n calculates global and nodal measures (including random comparisons\n for the global measures) and writes them out to csv files.\n '''\n # Read in the data\n M, names, centroids = read_in_data(corr_mat_file,\n names_file,\n centroids_file,\n names_308_style)\n\n # Make your graph at cost\n G = mkg.graph_at_cost(M, cost)\n\n # Calculate the modules\n nodal_partition = mkg.calc_nodal_partition(G)\n\n # Get the nodal measures\n # (note that this takes a bit of time because the participation coefficient\n # takes a while)\n G, nodal_dict = mkg.calculate_nodal_measures(G,\n centroids,\n names,\n nodal_partition=nodal_partition,\n names_308_style=names_308_style)\n\n # Save your nodal measures\n write_out_nodal_measures(nodal_dict, centroids, output_dir, corr_mat_file, cost)\n\n # Get the global measures\n # (note that this takes a bit of time because you're generating random\n # graphs)\n R_list, R_nodal_partition_list = mkg.make_random_list(G, n_rand=n_rand)\n\n global_dict = mkg.calculate_global_measures(G,\n R_list=R_list,\n nodal_partition=nodal_partition,\n R_nodal_partition_list=R_nodal_partition_list)\n\n # Write out the global measures\n write_out_global_measures(global_dict, output_dir, corr_mat_file, cost)\n \n # Get the rich club coefficients\n deg, rc, rc_rand = mkg.rich_club(G, R_list=R_list, n=n_rand)\n \n # Write out the rich club coefficients\n write_out_rich_club(deg, rc, rc_rand, output_dir, corr_mat_file, cost)\n\n\nif __name__ == 
\"__main__\":\n\n # Read in the command line arguments\n arg, parser = setup_argparser()\n\n # Now run the main function :)\n network_analysis_from_corrmat(arg.corr_mat_file,\n arg.names_file,\n arg.centroids_file,\n arg.output_dir,\n cost=arg.cost,\n n_rand=arg.n_rand,\n names_308_style=arg.names_308_style)\n\n#=============================================================================\n# Wooo! All done :)\n#=============================================================================\n" ]
[ [ "numpy.loadtxt", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
ricklentz/open_model_zoo
[ "8738a4e3056a5e65c52836a14531b01c18f1ba3e" ]
[ "tools/accuracy_checker/openvino/tools/accuracy_checker/postprocessor/crop_segmentation_mask.py" ]
[ "\"\"\"\nCopyright (c) 2018-2022 Intel Corporation\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom functools import singledispatch\nimport numpy as np\n\nfrom .postprocessor import PostprocessorWithSpecificTargets, Postprocessor\nfrom ..representation import (\n BrainTumorSegmentationAnnotation, BrainTumorSegmentationPrediction, SegmentationAnnotation, SegmentationPrediction\n)\nfrom ..config import NumberField\nfrom ..preprocessor import Crop3D, CropOrPad, Crop\nfrom ..utils import get_size_3d_from_config, get_size_from_config\n\n\nclass CropSegmentationMask(PostprocessorWithSpecificTargets):\n __provider__ = 'crop_segmentation_mask'\n\n annotation_types = (BrainTumorSegmentationAnnotation, SegmentationAnnotation, )\n prediction_types = (BrainTumorSegmentationPrediction, SegmentationPrediction, )\n\n @classmethod\n def parameters(cls):\n parameters = super().parameters()\n parameters.update({\n 'dst_width': NumberField(\n value_type=int, optional=True, min_value=1, description=\"Destination width for mask cropping\"\n ),\n 'dst_height': NumberField(\n value_type=int, optional=True, min_value=1, description=\"Destination height for mask cropping.\"\n ),\n 'dst_volume': NumberField(\n value_type=int, optional=True, min_value=1, description=\"Destination volume for mask cropping.\"\n ),\n 'size': NumberField(\n value_type=int, optional=True, min_value=1,\n description=\"Destination size for mask cropping for both dimensions.\"\n )\n })\n return parameters\n\n def configure(self):\n self.dst_height, self.dst_width, self.dst_volume = get_size_3d_from_config(self.config)\n\n def process_image(self, annotation, prediction):\n\n @singledispatch\n def crop_segmentation_mask(entry, height, width, volume):\n return entry\n\n @crop_segmentation_mask.register(SegmentationAnnotation)\n @crop_segmentation_mask.register(SegmentationPrediction)\n def _(entry, height, width, volume):\n shape = len(entry.mask.shape)\n if shape == 2:\n entry.mask = Crop.process_data(entry.mask, height, width, None, False, False, True, {})\n elif shape == 3:\n entry_mask = []\n for class_mask in entry.mask:\n mask_channel = Crop.process_data(class_mask, height, width, None, False, False, True, {})\n entry_mask.append(mask_channel)\n entry.mask = np.array(entry_mask)\n else:\n raise ValueError(\"'arr' does not have a suitable array shape for any mode.\")\n return entry\n\n @crop_segmentation_mask.register(BrainTumorSegmentationAnnotation)\n @crop_segmentation_mask.register(BrainTumorSegmentationPrediction)\n def _(entry, height, width, volume):\n entry.mask = Crop3D.crop_center(entry.mask, height, width, volume)\n return entry\n\n for target in annotation:\n crop_segmentation_mask(target, self.dst_height, self.dst_width, self.dst_volume)\n\n for target in prediction:\n crop_segmentation_mask(target, self.dst_height, self.dst_width, self.dst_volume)\n\n return annotation, prediction\n\n\nclass CropOrPadSegmentationMask(Postprocessor):\n __provider__ = 'crop_or_pad'\n\n annotation_types = (SegmentationAnnotation, )\n 
prediction_types = (SegmentationPrediction, )\n\n @classmethod\n def parameters(cls):\n params = super().parameters()\n params.update({\n 'dst_width': NumberField(\n value_type=int, optional=True, min_value=1, description=\"Destination width for mask cropping\"\n ),\n 'dst_height': NumberField(\n value_type=int, optional=True, min_value=1, description=\"Destination height for mask cropping.\"\n ),\n 'size': NumberField(\n value_type=int, optional=True, min_value=1,\n description=\"Destination size for mask cropping for both dimensions.\"\n )\n })\n return params\n\n def configure(self):\n self.dst_height, self.dst_width = get_size_from_config(self.config)\n\n def process_image(self, annotation, prediction):\n if not self.deprocessing_mode:\n for ann in annotation:\n if ann is None:\n continue\n ann.mask = self.process_mask(ann.mask)\n return annotation, prediction\n\n def process_mask(self, mask):\n if len(mask.shape) == 2:\n height, width = mask.shape\n else:\n height, width, _ = mask.shape\n\n width_diff = self.dst_width - width\n offset_crop_width = max(-width_diff // 2, 0)\n offset_pad_width = max(width_diff // 2, 0)\n\n height_diff = self.dst_height - height\n offset_crop_height = max(-height_diff // 2, 0)\n offset_pad_height = max(height_diff // 2, 0)\n cropped, _ = CropOrPad.crop_to_bounding_box(\n mask, offset_crop_height, offset_crop_width, min(self.dst_height, height), min(self.dst_width, width))\n resized, _ = CropOrPad.pad_to_bounding_box(\n cropped, offset_pad_height, offset_pad_width, self.dst_height, self.dst_width\n )\n return resized\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
iamaneek/Artificial-Intelligence
[ "7014f1200d0dc3af7a85da1db611bc0816b89d5b" ]
[ "apriori.py" ]
[ "import numpy as np \nimport matplotlib.pyplot as plt \nimport pandas as pd \nfrom apyori import apriori \nimport time \nimport statistics\nimport argparse\n\ndef csvToList(csvFile):\n\t'''This function reads the csv object and converts to List\n\targs: CSV file object\n\treturn:List'''\n\ttempRecord = [] \n\t\n\tfor i in range(0, 1000):\n\t#\tprint(i) \n\t\ttempRecord.append([str(csvFile.values[i,j]) for j in range(0, 20)])\n\n\treturn tempRecord\n\ndef applyApriori(dataList,support,timeList):\n\t'''this function aplies the apriori algorithm to the lis\n\targs:List containing the data related to transactions;supportList: The minimum support for the apriori \n\treturn:List (the association result as a list)'''\n \n\tstartTime = time.time()\n\tassociation_rules = apriori(dataList, min_support=support, min_confidence=0.2, min_lift=3, min_length=2) \n\ttimeList.append(time.time() - startTime)\n\tassociation_results = list(association_rules)\n\n\treturn association_results\n\n\ndef getVariousMetrics(association_results):\n\t'''The function decodes the association result list returns the mean confidence\n\targs:List(association_results)\n\treturn: float (mean confidence value)''' \n\t\n\ttempList = []\n\tfor item in association_results:\n \t\tpair = item[0] \n \t\titems = [x for x in pair]\n \t\ttempList.append( float(str(item[2][0][2])))\n\n\tif len(tempList) != 0:\n\t\treturn \tstatistics.mean(tempList)\n\telse:\n\t\treturn 0 \t\n\ndef argsParser():\n\t'''The function is responsible for parsing the arguments\n\targs:None\n\treturn:args a dictionary'''\n\t\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--filePath', type = str, default = './store_data.csv', help='The absolute path of csv file' )\n\targs = parser.parse_args()\n\treturn args\n\n\ndef main():\n\n\t'''All the steps are performed in this function\n\targs:None\n\treturn:None'''\n\targs = argsParser() \n\tstore_data = pd.read_csv(args.filePath)\n\tsupportList = [0.0045,0.0055,0.0065,0.0075,0.0085,0.0095,0.0105]\n\ttimeList = []\n\truleLength = []\n\tconfidenceList = []\n\tfinalConfidenceList = []\n\trecords = csvToList(store_data)\n\tassociationResults = []\n\tfor support in supportList:\n\t\tassociationResults = applyApriori(records,support,timeList)\n\t\truleLength.append(len(associationResults))\n\t\tconfidenceList.append(getVariousMetrics(associationResults))\t\n\t\t\n\tprint('support list:{}'.format(supportList))\n\tprint('confidenceList:{}'.format(confidenceList))\n\tprint('timeList:{}'.format(timeList))\n\tprint('ruleLength:{}'.format(ruleLength))\n\nif __name__ == '__main__':\n\tmain()\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
forcedotcom/distributions
[ "8d4d8eebbcec14fa9f4c314425f127e1316d9951" ]
[ "distributions/tests/test_util.py" ]
[ "# Copyright (c) 2014, Salesforce.com, Inc. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# - Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# - Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# - Neither the name of Salesforce.com nor the names of its contributors\n# may be used to endorse or promote products derived from this\n# software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS\n# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR\n# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE\n# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport numpy\nfrom nose.tools import (\n assert_less,\n assert_less_equal,\n assert_greater,\n assert_list_equal,\n)\nfrom distributions.util import (\n scores_to_probs,\n bin_samples,\n multinomial_goodness_of_fit,\n)\nfrom distributions.tests.util import seed_all\n\n\ndef test_scores_to_probs():\n scores = [-10000, 10000, 10001, 9999, 0, 5, 6, 6, 7]\n probs = scores_to_probs(scores)\n assert_less(abs(sum(probs) - 1), 1e-6)\n for prob in probs:\n assert_less_equal(0, prob)\n assert_less_equal(prob, 1)\n\n\ndef test_multinomial_goodness_of_fit():\n for dim in range(2, 20):\n yield _test_multinomial_goodness_of_fit, dim\n\n\ndef _test_multinomial_goodness_of_fit(dim):\n seed_all(0)\n thresh = 1e-3\n sample_count = int(1e5)\n probs = numpy.random.dirichlet([1] * dim)\n\n counts = numpy.random.multinomial(sample_count, probs)\n p_good = multinomial_goodness_of_fit(probs, counts, sample_count)\n assert_greater(p_good, thresh)\n\n unif_counts = numpy.random.multinomial(sample_count, [1. / dim] * dim)\n p_bad = multinomial_goodness_of_fit(probs, unif_counts, sample_count)\n assert_less(p_bad, thresh)\n\n\ndef test_bin_samples():\n samples = range(6)\n numpy.random.shuffle(samples)\n counts, bounds = bin_samples(samples, 2)\n assert_list_equal(list(counts), [3, 3])\n assert_list_equal(list(bounds[0]), [0, 3])\n assert_list_equal(list(bounds[1]), [3, 5])\n" ]
[ [ "numpy.random.multinomial", "numpy.random.dirichlet", "numpy.random.shuffle" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pleiszenburg/bulk_lambert
[ "4ce1596970dfe0b1446709d320d3878639dac519" ]
[ "bulk_lambert/util.py" ]
[ "\nimport numpy as np\nfrom numpy import cos, sin\n\nfrom astropy.time import Time\n\nfrom ._jit import jit\n\n\n@jit\ndef rotation_matrix(angle, axis):\n c = cos(angle)\n s = sin(angle)\n if axis == 0:\n return np.array([[1.0, 0.0, 0.0], [0.0, c, -s], [0.0, s, c]])\n elif axis == 1:\n return np.array([[c, 0.0, s], [0.0, 1.0, 0.0], [s, 0.0, c]])\n elif axis == 2:\n return np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])\n else:\n raise ValueError(\"Invalid axis: must be one of 'x', 'y' or 'z'\")\n\n\ndef time_range(start, *, periods=50, spacing=None, end=None, format=None, scale=None):\n \"\"\"Generates range of astronomical times.\n .. versionadded:: 0.8.0\n Parameters\n ----------\n periods : int, optional\n Number of periods, default to 50.\n spacing : Time or Quantity, optional\n Spacing between periods, optional.\n end : Time or equivalent, optional\n End date.\n Returns\n -------\n Time\n Array of time values.\n \"\"\"\n start = Time(start, format=format, scale=scale)\n\n if spacing is not None and end is None:\n result = start + spacing * np.arange(0, periods)\n\n elif end is not None and spacing is None:\n end = Time(end, format=format, scale=scale)\n result = start + (end - start) * np.linspace(0, 1, periods)\n\n else:\n raise ValueError(\"Either 'end' or 'spacing' must be specified\")\n\n return result\n" ]
[ [ "numpy.linspace", "numpy.arange", "numpy.cos", "numpy.sin", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
phoopies/desdeo-tools
[ "d3cb48c16b35114762386ee8368214b4b432eee0" ]
[ "desdeo_tools/utilities/lattice_generators.py" ]
[ "\"\"\"A file to contain different kinds of lattice generation algorithms.\n\n\"\"\"\n\nimport numba\nimport numpy as np\n\n\[email protected]()\ndef fibonacci_sphere(samples: int = 1000) -> np.ndarray:\n \"\"\"Generate a very even lattice of points on a 3d sphere using the fibonacci sphere\n or fibonacci spiral algorithm.\n\n Args:\n samples (int, optional): Number of points to be generated. Defaults to 1000.\n\n Returns:\n np.ndarray: The lattice of points as a 2-D (samples, 3) numpy array.\n \"\"\"\n points = np.zeros((samples, 3), dtype=np.float_)\n phi = np.pi * (3 - np.sqrt(5)) # golden angle in radians\n\n for i in range(samples):\n points[i, 1] = 1 - (i / (samples - 1)) * 2 # y goes from 1 to -1\n radius = np.sqrt(1 - points[i, 1] ** 2) # radius at y\n\n theta = phi * i # golden angle increment\n\n points[i, 0] = np.cos(theta) * radius\n points[i, 2] = np.sin(theta) * radius\n return points\n" ]
[ [ "numpy.cos", "numpy.zeros", "numpy.sqrt", "numpy.sin" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jvmncs/grandma
[ "36b852cdcab6fa5d60b48c6219bb1f3a599671b8" ]
[ "pygrandma/tests/test_one_d_viz.py" ]
[ "\n\nimport pygrandma\n\nimport numpy as np\nfrom one_d_viz import show1D\n\n\ndata = np.array([[0.499], [0.48], [-0.49], [0.0]],dtype=np.float32)\n\ntree = pygrandma.PyGrandma()\ntree.set_scale_base(2)\ntree.set_cutoff(0)\ntree.fit(data)\nshow1D(tree,data)" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
LinghengMeng/lstm_td3
[ "2306a4f0e84417e92b9d77627abc8a7f1925f2b1" ]
[ "spinup/algos/pytorch/stochastic_td3/core.py" ]
[ "import numpy as np\nimport scipy.signal\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.distributions.normal import Normal\n\n\ndef combined_shape(length, shape=None):\n if shape is None:\n return (length,)\n return (length, shape) if np.isscalar(shape) else (length, *shape)\n\ndef mlp(sizes, activation, output_activation=nn.Identity):\n layers = []\n for j in range(len(sizes)-1):\n act = activation if j < len(sizes)-2 else output_activation\n layers += [nn.Linear(sizes[j], sizes[j+1]), act()]\n return nn.Sequential(*layers)\n\ndef count_vars(module):\n return sum([np.prod(p.shape) for p in module.parameters()])\n\n\nLOG_STD_MAX = 2\nLOG_STD_MIN = -20\n\nclass SquashedGaussianMLPActor(nn.Module):\n\n def __init__(self, obs_dim, act_dim, hidden_sizes, activation, act_limit):\n super().__init__()\n self.net = mlp([obs_dim] + list(hidden_sizes), activation, activation)\n self.mu_layer = nn.Linear(hidden_sizes[-1], act_dim)\n self.log_std_layer = nn.Linear(hidden_sizes[-1], act_dim)\n self.act_limit = act_limit\n\n def forward(self, obs, deterministic=False, with_logprob=True):\n net_out = self.net(obs)\n mu = self.mu_layer(net_out)\n log_std = self.log_std_layer(net_out)\n log_std = torch.clamp(log_std, LOG_STD_MIN, LOG_STD_MAX)\n std = torch.exp(log_std)\n\n # Pre-squash distribution and sample\n pi_distribution = Normal(mu, std)\n if deterministic:\n # Only used for evaluating policy at test time.\n pi_action = mu\n else:\n pi_action = pi_distribution.rsample()\n\n if with_logprob:\n # Compute logprob from Gaussian, and then apply correction for Tanh squashing.\n # NOTE: The correction formula is a little bit magic. To get an understanding \n # of where it comes from, check out the original SAC paper (arXiv 1801.01290) \n # and look in appendix C. This is a more numerically-stable equivalent to Eq 21.\n # Try deriving it yourself as a (very difficult) exercise. :)\n logp_pi = pi_distribution.log_prob(pi_action).sum(axis=-1)\n logp_pi -= (2*(np.log(2) - pi_action - F.softplus(-2*pi_action))).sum(axis=1)\n else:\n logp_pi = None\n\n pi_action = torch.tanh(pi_action)\n pi_action = self.act_limit * pi_action\n\n return pi_action, logp_pi, log_std, pi_distribution\n\n\nclass MLPQFunction(nn.Module):\n\n def __init__(self, obs_dim, act_dim, hidden_sizes, activation):\n super().__init__()\n self.q = mlp([obs_dim + act_dim] + list(hidden_sizes) + [1], activation)\n\n def forward(self, obs, act):\n q = self.q(torch.cat([obs, act], dim=-1))\n return torch.squeeze(q, -1) # Critical to ensure q has right shape.\n\nclass MLPActorCritic(nn.Module):\n\n def __init__(self, observation_space, action_space, hidden_sizes=(256,256),\n activation=nn.ReLU):\n super().__init__()\n\n obs_dim = observation_space.shape[0]\n act_dim = action_space.shape[0]\n act_limit = action_space.high[0]\n\n # build policy and value functions\n self.pi = SquashedGaussianMLPActor(obs_dim, act_dim, hidden_sizes, activation, act_limit)\n self.q1 = MLPQFunction(obs_dim, act_dim, hidden_sizes, activation)\n self.q2 = MLPQFunction(obs_dim, act_dim, hidden_sizes, activation)\n\n def act(self, obs, deterministic=False):\n with torch.no_grad():\n a, _, _, _ = self.pi(obs, deterministic, False)\n return a.cpu().detach().numpy()\n" ]
[ [ "torch.nn.Sequential", "numpy.log", "torch.cat", "torch.exp", "torch.nn.Linear", "torch.tanh", "torch.no_grad", "numpy.isscalar", "numpy.prod", "torch.distributions.normal.Normal", "torch.clamp", "torch.nn.functional.softplus", "torch.squeeze" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
libertyh/mne-python
[ "bf03e17f323341a877dea62963c86cf140757896" ]
[ "mne/dipole.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Single-dipole functions and classes.\"\"\"\n\n# Authors: Alexandre Gramfort <[email protected]>\n# Eric Larson <[email protected]>\n#\n# License: Simplified BSD\n\nfrom copy import deepcopy\nfrom functools import partial\nimport re\n\nimport numpy as np\nfrom scipy import linalg\n\nfrom .cov import read_cov, compute_whitener\nfrom .io.constants import FIFF\nfrom .io.pick import pick_types, channel_type\nfrom .io.proj import make_projector, _needs_eeg_average_ref_proj\nfrom .bem import _fit_sphere\nfrom .evoked import _read_evoked, _aspect_rev, _write_evokeds\nfrom .transforms import _print_coord_trans, _coord_frame_name, apply_trans\nfrom .viz.evoked import _plot_evoked\nfrom .forward._make_forward import (_get_trans, _setup_bem,\n _prep_meg_channels, _prep_eeg_channels)\nfrom .forward._compute_forward import (_compute_forwards_meeg,\n _prep_field_computation)\n\nfrom .surface import transform_surface_to, _compute_nearest\nfrom .bem import _bem_find_surface, _surf_name\nfrom .source_space import (_make_volume_source_space, SourceSpaces,\n _points_outside_surface)\nfrom .parallel import parallel_func\nfrom .utils import (logger, verbose, _time_mask, warn, _check_fname,\n check_fname, _pl, fill_doc, _check_option,\n _svd_lwork, _repeated_svd, ddot, dgemv, dgemm)\n\n\n@fill_doc\nclass Dipole(object):\n u\"\"\"Dipole class for sequential dipole fits.\n\n .. note:: This class should usually not be instantiated directly,\n instead :func:`mne.read_dipole` should be used.\n\n Used to store positions, orientations, amplitudes, times, goodness of fit\n of dipoles, typically obtained with Neuromag/xfit, mne_dipole_fit\n or certain inverse solvers. Note that dipole position vectors are given in\n the head coordinate frame.\n\n Parameters\n ----------\n times : array, shape (n_dipoles,)\n The time instants at which each dipole was fitted (sec).\n pos : array, shape (n_dipoles, 3)\n The dipoles positions (m) in head coordinates.\n amplitude : array, shape (n_dipoles,)\n The amplitude of the dipoles (Am).\n ori : array, shape (n_dipoles, 3)\n The dipole orientations (normalized to unit length).\n gof : array, shape (n_dipoles,)\n The goodness of fit.\n name : str | None\n Name of the dipole.\n conf : dict\n Confidence limits in dipole orientation for \"vol\" in m^3 (volume),\n \"depth\" in m (along the depth axis), \"long\" in m (longitudinal axis),\n \"trans\" in m (transverse axis), \"qlong\" in Am, and \"qtrans\" in Am\n (currents). The current confidence limit in the depth direction is\n assumed to be zero (although it can be non-zero when a BEM is used).\n\n .. versionadded:: 0.15\n khi2 : array, shape (n_dipoles,)\n The χ^2 values for the fits.\n\n .. versionadded:: 0.15\n nfree : array, shape (n_dipoles,)\n The number of free parameters for each fit.\n\n .. versionadded:: 0.15\n %(verbose)s\n\n See Also\n --------\n fit_dipole\n DipoleFixed\n read_dipole\n\n Notes\n -----\n This class is for sequential dipole fits, where the position\n changes as a function of time. 
For fixed dipole fits, where the\n position is fixed as a function of time, use :class:`mne.DipoleFixed`.\n \"\"\"\n\n @verbose\n def __init__(self, times, pos, amplitude, ori, gof,\n name=None, conf=None, khi2=None, nfree=None,\n verbose=None): # noqa: D102\n self.times = np.array(times)\n self.pos = np.array(pos)\n self.amplitude = np.array(amplitude)\n self.ori = np.array(ori)\n self.gof = np.array(gof)\n self.name = name\n self.conf = deepcopy(conf) if conf is not None else dict()\n self.khi2 = np.array(khi2) if khi2 is not None else None\n self.nfree = np.array(nfree) if nfree is not None else None\n self.verbose = verbose\n\n def __repr__(self): # noqa: D105\n s = \"n_times : %s\" % len(self.times)\n s += \", tmin : %0.3f\" % np.min(self.times)\n s += \", tmax : %0.3f\" % np.max(self.times)\n return \"<Dipole | %s>\" % s\n\n def save(self, fname):\n \"\"\"Save dipole in a .dip file.\n\n Parameters\n ----------\n fname : str\n The name of the .dip file.\n \"\"\"\n # obligatory fields\n fmt = ' %7.1f %7.1f %8.2f %8.2f %8.2f %8.3f %8.3f %8.3f %8.3f %6.2f'\n header = ('# begin end X (mm) Y (mm) Z (mm)'\n ' Q(nAm) Qx(nAm) Qy(nAm) Qz(nAm) g/%')\n t = self.times[:, np.newaxis] * 1000.\n gof = self.gof[:, np.newaxis]\n amp = 1e9 * self.amplitude[:, np.newaxis]\n out = (t, t, self.pos / 1e-3, amp, self.ori * amp, gof)\n\n # optional fields\n fmts = dict(khi2=(' khi^2', ' %8.1f', 1.),\n nfree=(' free', ' %5d', 1),\n vol=(' vol/mm^3', ' %9.3f', 1e9),\n depth=(' depth/mm', ' %9.3f', 1e3),\n long=(' long/mm', ' %8.3f', 1e3),\n trans=(' trans/mm', ' %9.3f', 1e3),\n qlong=(' Qlong/nAm', ' %10.3f', 1e9),\n qtrans=(' Qtrans/nAm', ' %11.3f', 1e9),\n )\n for key in ('khi2', 'nfree'):\n data = getattr(self, key)\n if data is not None:\n header += fmts[key][0]\n fmt += fmts[key][1]\n out += (data[:, np.newaxis] * fmts[key][2],)\n for key in ('vol', 'depth', 'long', 'trans', 'qlong', 'qtrans'):\n data = self.conf.get(key)\n if data is not None:\n header += fmts[key][0]\n fmt += fmts[key][1]\n out += (data[:, np.newaxis] * fmts[key][2],)\n out = np.concatenate(out, axis=-1)\n\n # NB CoordinateSystem is hard-coded as Head here\n with open(fname, 'wb') as fid:\n fid.write('# CoordinateSystem \"Head\"\\n'.encode('utf-8'))\n fid.write((header + '\\n').encode('utf-8'))\n np.savetxt(fid, out, fmt=fmt)\n if self.name is not None:\n fid.write(('## Name \"%s dipoles\" Style \"Dipoles\"'\n % self.name).encode('utf-8'))\n\n @fill_doc\n def crop(self, tmin=None, tmax=None, include_tmax=True):\n \"\"\"Crop data to a given time interval.\n\n Parameters\n ----------\n tmin : float | None\n Start time of selection in seconds.\n tmax : float | None\n End time of selection in seconds.\n %(include_tmax)s\n\n Returns\n -------\n self : instance of Dipole\n The cropped instance.\n \"\"\"\n sfreq = None\n if len(self.times) > 1:\n sfreq = 1. 
/ np.median(np.diff(self.times))\n mask = _time_mask(self.times, tmin, tmax, sfreq=sfreq,\n include_tmax=include_tmax)\n for attr in ('times', 'pos', 'gof', 'amplitude', 'ori',\n 'khi2', 'nfree'):\n if getattr(self, attr) is not None:\n setattr(self, attr, getattr(self, attr)[mask])\n for key in self.conf.keys():\n self.conf[key] = self.conf[key][mask]\n return self\n\n def copy(self):\n \"\"\"Copy the Dipoles object.\n\n Returns\n -------\n dip : instance of Dipole\n The copied dipole instance.\n \"\"\"\n return deepcopy(self)\n\n @verbose\n def plot_locations(self, trans, subject, subjects_dir=None,\n mode='orthoview', coord_frame='mri', idx='gof',\n show_all=True, ax=None, block=False, show=True,\n scale=5e-3, color=(1.0, 0.0, 0.0), fig=None,\n verbose=None):\n \"\"\"Plot dipole locations in 3d.\n\n Parameters\n ----------\n trans : dict\n The mri to head trans.\n subject : str\n The subject name corresponding to FreeSurfer environment\n variable SUBJECT.\n subjects_dir : None | str\n The path to the freesurfer subjects reconstructions.\n It corresponds to Freesurfer environment variable SUBJECTS_DIR.\n The default is None.\n mode : str\n Can be ``'arrow'``, ``'sphere'`` or ``'orthoview'``.\n\n .. versionadded:: 0.14.0\n coord_frame : str\n Coordinate frame to use, 'head' or 'mri'. Defaults to 'mri'.\n\n .. versionadded:: 0.14.0\n idx : int | 'gof' | 'amplitude'\n Index of the initially plotted dipole. Can also be 'gof' to plot\n the dipole with highest goodness of fit value or 'amplitude' to\n plot the dipole with the highest amplitude. The dipoles can also be\n browsed through using up/down arrow keys or mouse scroll. Defaults\n to 'gof'. Only used if mode equals 'orthoview'.\n\n .. versionadded:: 0.14.0\n show_all : bool\n Whether to always plot all the dipoles. If True (default), the\n active dipole is plotted as a red dot and it's location determines\n the shown MRI slices. The the non-active dipoles are plotted as\n small blue dots. If False, only the active dipole is plotted.\n Only used if mode equals 'orthoview'.\n\n .. versionadded:: 0.14.0\n ax : instance of matplotlib Axes3D | None\n Axes to plot into. If None (default), axes will be created.\n Only used if mode equals 'orthoview'.\n\n .. versionadded:: 0.14.0\n block : bool\n Whether to halt program execution until the figure is closed.\n Defaults to False. Only used if mode equals 'orthoview'.\n\n .. versionadded:: 0.14.0\n show : bool\n Show figure if True. Defaults to True.\n Only used if mode equals 'orthoview'.\n\n scale: float\n The scale of the dipoles if ``mode`` is 'arrow' or 'sphere'.\n color : tuple\n The color of the dipoles if ``mode`` is 'arrow' or 'sphere'.\n fig : mayavi.mlab.Figure | None\n Mayavi Scene in which to plot the alignment.\n If ``None``, creates a new 600x600 pixel figure with black\n background.\n\n .. versionadded:: 0.14.0\n %(verbose_meth)s\n\n Returns\n -------\n fig : instance of mayavi.mlab.Figure or matplotlib.figure.Figure\n The mayavi figure or matplotlib Figure.\n\n Notes\n -----\n .. 
versionadded:: 0.9.0\n \"\"\"\n _check_option('mode', mode, [None, 'arrow', 'sphere', 'orthoview'])\n\n from .viz import plot_dipole_locations\n return plot_dipole_locations(\n self, trans, subject, subjects_dir, mode, coord_frame, idx,\n show_all, ax, block, show, scale=scale, color=color, fig=fig)\n\n def plot_amplitudes(self, color='k', show=True):\n \"\"\"Plot the dipole amplitudes as a function of time.\n\n Parameters\n ----------\n color: matplotlib Color\n Color to use for the trace.\n show : bool\n Show figure if True.\n\n Returns\n -------\n fig : matplotlib.figure.Figure\n The figure object containing the plot.\n \"\"\"\n from .viz import plot_dipole_amplitudes\n return plot_dipole_amplitudes([self], [color], show)\n\n def __getitem__(self, item):\n \"\"\"Get a time slice.\n\n Parameters\n ----------\n item : array-like or slice\n The slice of time points to use.\n\n Returns\n -------\n dip : instance of Dipole\n The sliced dipole.\n \"\"\"\n if isinstance(item, int): # make sure attributes stay 2d\n item = [item]\n\n selected_times = self.times[item].copy()\n selected_pos = self.pos[item, :].copy()\n selected_amplitude = self.amplitude[item].copy()\n selected_ori = self.ori[item, :].copy()\n selected_gof = self.gof[item].copy()\n selected_name = self.name\n selected_conf = dict()\n for key in self.conf.keys():\n selected_conf[key] = self.conf[key][item]\n selected_khi2 = self.khi2[item] if self.khi2 is not None else None\n selected_nfree = self.nfree[item] if self.nfree is not None else None\n return Dipole(\n selected_times, selected_pos, selected_amplitude, selected_ori,\n selected_gof, selected_name, selected_conf, selected_khi2,\n selected_nfree)\n\n def __len__(self):\n \"\"\"Return the number of dipoles.\n\n Returns\n -------\n len : int\n The number of dipoles.\n\n Examples\n --------\n This can be used as::\n\n >>> len(dipoles) # doctest: +SKIP\n 10\n\n \"\"\"\n return self.pos.shape[0]\n\n\ndef _read_dipole_fixed(fname):\n \"\"\"Read a fixed dipole FIF file.\"\"\"\n logger.info('Reading %s ...' % fname)\n info, nave, aspect_kind, first, last, comment, times, data = \\\n _read_evoked(fname)\n return DipoleFixed(info, data, times, nave, aspect_kind, first, last,\n comment)\n\n\n@fill_doc\nclass DipoleFixed(object):\n \"\"\"Dipole class for fixed-position dipole fits.\n\n .. note:: This class should usually not be instantiated directly,\n instead :func:`mne.read_dipole` should be used.\n\n Parameters\n ----------\n info : instance of Info\n The measurement info.\n data : array, shape (n_channels, n_times)\n The dipole data.\n times : array, shape (n_times,)\n The time points.\n nave : int\n Number of averages.\n aspect_kind : int\n The kind of data.\n first : int\n First sample.\n last : int\n Last sample.\n comment : str\n The dipole comment.\n %(verbose)s\n\n See Also\n --------\n read_dipole\n Dipole\n fit_dipole\n\n Notes\n -----\n This class is for fixed-position dipole fits, where the position\n (and maybe orientation) is static over time. For sequential dipole fits,\n where the position can change a function of time, use :class:`mne.Dipole`.\n\n .. 
versionadded:: 0.12\n \"\"\"\n\n @verbose\n def __init__(self, info, data, times, nave, aspect_kind, first, last,\n comment, verbose=None): # noqa: D102\n self.info = info\n self.nave = nave\n self._aspect_kind = aspect_kind\n self.kind = _aspect_rev.get(aspect_kind, 'unknown')\n self.first = first\n self.last = last\n self.comment = comment\n self.times = times\n self.data = data\n self.verbose = verbose\n\n def __repr__(self): # noqa: D105\n s = \"n_times : %s\" % len(self.times)\n s += \", tmin : %s\" % np.min(self.times)\n s += \", tmax : %s\" % np.max(self.times)\n return \"<DipoleFixed | %s>\" % s\n\n def copy(self):\n \"\"\"Copy the DipoleFixed object.\n\n Returns\n -------\n inst : instance of DipoleFixed\n The copy.\n\n Notes\n -----\n .. versionadded:: 0.16\n \"\"\"\n return deepcopy(self)\n\n @property\n def ch_names(self):\n \"\"\"Channel names.\"\"\"\n return self.info['ch_names']\n\n @verbose\n def save(self, fname, verbose=None):\n \"\"\"Save dipole in a .fif file.\n\n Parameters\n ----------\n fname : str\n The name of the .fif file. Must end with ``'.fif'`` or\n ``'.fif.gz'`` to make it explicit that the file contains\n dipole information in FIF format.\n %(verbose_meth)s\n \"\"\"\n check_fname(fname, 'DipoleFixed', ('-dip.fif', '-dip.fif.gz',\n '_dip.fif', '_dip.fif.gz',),\n ('.fif', '.fif.gz'))\n _write_evokeds(fname, self, check=False)\n\n def plot(self, show=True, time_unit='s'):\n \"\"\"Plot dipole data.\n\n Parameters\n ----------\n show : bool\n Call pyplot.show() at the end or not.\n time_unit : str\n The units for the time axis, can be \"ms\" or \"s\" (default).\n\n .. versionadded:: 0.16\n\n Returns\n -------\n fig : instance of matplotlib.figure.Figure\n The figure containing the time courses.\n \"\"\"\n return _plot_evoked(self, picks=None, exclude=(), unit=True, show=show,\n ylim=None, xlim='tight', proj=False, hline=None,\n units=None, scalings=None, titles=None, axes=None,\n gfp=False, window_title=None, spatial_colors=False,\n plot_type=\"butterfly\", selectable=False,\n time_unit=time_unit)\n\n\n# #############################################################################\n# IO\n@verbose\ndef read_dipole(fname, verbose=None):\n \"\"\"Read .dip file from Neuromag/xfit or MNE.\n\n Parameters\n ----------\n fname : str\n The name of the .dip or .fif file.\n %(verbose)s\n\n Returns\n -------\n dipole : instance of Dipole or DipoleFixed\n The dipole.\n\n See Also\n --------\n Dipole\n DipoleFixed\n fit_dipole\n \"\"\"\n _check_fname(fname, overwrite='read', must_exist=True)\n if fname.endswith('.fif') or fname.endswith('.fif.gz'):\n return _read_dipole_fixed(fname)\n else:\n return _read_dipole_text(fname)\n\n\ndef _read_dipole_text(fname):\n \"\"\"Read a dipole text file.\"\"\"\n # Figure out the special fields\n need_header = True\n def_line = name = None\n # There is a bug in older np.loadtxt regarding skipping fields,\n # so just read the data ourselves (need to get name and header anyway)\n data = list()\n with open(fname, 'r') as fid:\n for line in fid:\n if not (line.startswith('%') or line.startswith('#')):\n need_header = False\n data.append(line.strip().split())\n else:\n if need_header:\n def_line = line\n if line.startswith('##') or line.startswith('%%'):\n m = re.search('Name \"(.*) dipoles\"', line)\n if m:\n name = m.group(1)\n del line\n data = np.atleast_2d(np.array(data, float))\n if def_line is None:\n raise IOError('Dipole text file is missing field definition '\n 'comment, cannot parse %s' % (fname,))\n # actually parse the fields\n 
def_line = def_line.lstrip('%').lstrip('#').strip()\n # MNE writes it out differently than Elekta, let's standardize them...\n fields = re.sub(r'([X|Y|Z] )\\(mm\\)', # \"X (mm)\", etc.\n lambda match: match.group(1).strip() + '/mm', def_line)\n fields = re.sub(r'\\((.*?)\\)', # \"Q(nAm)\", etc.\n lambda match: '/' + match.group(1), fields)\n fields = re.sub('(begin|end) ', # \"begin\" and \"end\" with no units\n lambda match: match.group(1) + '/ms', fields)\n fields = fields.lower().split()\n required_fields = ('begin/ms',\n 'x/mm', 'y/mm', 'z/mm',\n 'q/nam', 'qx/nam', 'qy/nam', 'qz/nam',\n 'g/%')\n optional_fields = ('khi^2', 'free', # standard ones\n # now the confidence fields (up to 5!)\n 'vol/mm^3', 'depth/mm', 'long/mm', 'trans/mm',\n 'qlong/nam', 'qtrans/nam')\n conf_scales = [1e-9, 1e-3, 1e-3, 1e-3, 1e-9, 1e-9]\n missing_fields = sorted(set(required_fields) - set(fields))\n if len(missing_fields) > 0:\n raise RuntimeError('Could not find necessary fields in header: %s'\n % (missing_fields,))\n handled_fields = set(required_fields) | set(optional_fields)\n assert len(handled_fields) == len(required_fields) + len(optional_fields)\n ignored_fields = sorted(set(fields) -\n set(handled_fields) -\n {'end/ms'})\n if len(ignored_fields) > 0:\n warn('Ignoring extra fields in dipole file: %s' % (ignored_fields,))\n if len(fields) != data.shape[1]:\n raise IOError('More data fields (%s) found than data columns (%s): %s'\n % (len(fields), data.shape[1], fields))\n\n logger.info(\"%d dipole(s) found\" % len(data))\n\n if 'end/ms' in fields:\n if np.diff(data[:, [fields.index('begin/ms'),\n fields.index('end/ms')]], 1, -1).any():\n warn('begin and end fields differed, but only begin will be used '\n 'to store time values')\n\n # Find the correct column in our data array, then scale to proper units\n idx = [fields.index(field) for field in required_fields]\n assert len(idx) >= 9\n times = data[:, idx[0]] / 1000.\n pos = 1e-3 * data[:, idx[1:4]] # put data in meters\n amplitude = data[:, idx[4]]\n norm = amplitude.copy()\n amplitude /= 1e9\n norm[norm == 0] = 1\n ori = data[:, idx[5:8]] / norm[:, np.newaxis]\n gof = data[:, idx[8]]\n # Deal with optional fields\n optional = [None] * 2\n for fi, field in enumerate(optional_fields[:2]):\n if field in fields:\n optional[fi] = data[:, fields.index(field)]\n khi2, nfree = optional\n conf = dict()\n for field, scale in zip(optional_fields[2:], conf_scales): # confidence\n if field in fields:\n conf[field.split('/')[0]] = scale * data[:, fields.index(field)]\n return Dipole(times, pos, amplitude, ori, gof, name, conf, khi2, nfree)\n\n\n# #############################################################################\n# Fitting\n\ndef _dipole_forwards(fwd_data, whitener, rr, n_jobs=1):\n \"\"\"Compute the forward solution and do other nice stuff.\"\"\"\n B = _compute_forwards_meeg(rr, fwd_data, n_jobs, silent=True)\n B = np.concatenate(B, axis=1)\n assert np.isfinite(B).all()\n B_orig = B.copy()\n\n # Apply projection and whiten (cov has projections already)\n B = dgemm(1., B, whitener.T)\n\n # column normalization doesn't affect our fitting, so skip for now\n # S = np.sum(B * B, axis=1) # across channels\n # scales = np.repeat(3. 
/ np.sqrt(np.sum(np.reshape(S, (len(rr), 3)),\n # axis=1)), 3)\n # B *= scales[:, np.newaxis]\n scales = np.ones(3)\n return B, B_orig, scales\n\n\ndef _make_guesses(surf, grid, exclude, mindist, n_jobs):\n \"\"\"Make a guess space inside a sphere or BEM surface.\"\"\"\n if 'rr' in surf:\n logger.info('Guess surface (%s) is in %s coordinates'\n % (_surf_name[surf['id']],\n _coord_frame_name(surf['coord_frame'])))\n else:\n logger.info('Making a spherical guess space with radius %7.1f mm...'\n % (1000 * surf['R']))\n logger.info('Filtering (grid = %6.f mm)...' % (1000 * grid))\n src = _make_volume_source_space(surf, grid, exclude, 1000 * mindist,\n do_neighbors=False, n_jobs=n_jobs)\n assert 'vertno' in src\n # simplify the result to make things easier later\n src = dict(rr=src['rr'][src['vertno']], nn=src['nn'][src['vertno']],\n nuse=src['nuse'], coord_frame=src['coord_frame'],\n vertno=np.arange(src['nuse']))\n return SourceSpaces([src])\n\n\ndef _fit_eval(rd, B, B2, fwd_svd=None, fwd_data=None, whitener=None,\n lwork=None):\n \"\"\"Calculate the residual sum of squares.\"\"\"\n if fwd_svd is None:\n fwd = _dipole_forwards(fwd_data, whitener, rd[np.newaxis, :])[0]\n uu, sing, vv = _repeated_svd(fwd, lwork, overwrite_a=True)\n else:\n uu, sing, vv = fwd_svd\n gof = _dipole_gof(uu, sing, vv, B, B2)[0]\n # mne-c uses fitness=B2-Bm2, but ours (1-gof) is just a normalized version\n return 1. - gof\n\n\ndef _dipole_gof(uu, sing, vv, B, B2):\n \"\"\"Calculate the goodness of fit from the forward SVD.\"\"\"\n ncomp = 3 if sing[2] / (sing[0] if sing[0] > 0 else 1.) > 0.2 else 2\n one = dgemv(1., vv[:ncomp], B) # np.dot(vv[:ncomp], B)\n Bm2 = ddot(one, one) # np.sum(one * one)\n gof = Bm2 / B2\n return gof, one\n\n\ndef _fit_Q(fwd_data, whitener, B, B2, B_orig, rd, ori=None):\n \"\"\"Fit the dipole moment once the location is known.\"\"\"\n if 'fwd' in fwd_data:\n # should be a single precomputed \"guess\" (i.e., fixed position)\n assert rd is None\n fwd = fwd_data['fwd']\n assert fwd.shape[0] == 3\n fwd_orig = fwd_data['fwd_orig']\n assert fwd_orig.shape[0] == 3\n scales = fwd_data['scales']\n assert scales.shape == (3,)\n fwd_svd = fwd_data['fwd_svd'][0]\n else:\n fwd, fwd_orig, scales = _dipole_forwards(fwd_data, whitener,\n rd[np.newaxis, :])\n fwd_svd = None\n if ori is None:\n if fwd_svd is None:\n fwd_svd = linalg.svd(fwd, full_matrices=False)\n uu, sing, vv = fwd_svd\n gof, one = _dipole_gof(uu, sing, vv, B, B2)\n ncomp = len(one)\n # Counteract the effect of column normalization\n Q = scales[0] * np.sum(uu.T[:ncomp] *\n (one / sing[:ncomp])[:, np.newaxis], axis=0)\n else:\n fwd = np.dot(ori[np.newaxis], fwd)\n sing = np.linalg.norm(fwd)\n one = np.dot(fwd / sing, B)\n gof = (one * one)[0] / B2\n Q = ori * (scales[0] * np.sum(one / sing))\n ncomp = 3\n B_residual_noproj = B_orig - np.dot(fwd_orig.T, Q)\n return Q, gof, B_residual_noproj, ncomp\n\n\ndef _fit_dipoles(fun, min_dist_to_inner_skull, data, times, guess_rrs,\n guess_data, fwd_data, whitener, ori, n_jobs, rank):\n \"\"\"Fit a single dipole to the given whitened, projected data.\"\"\"\n from scipy.optimize import fmin_cobyla\n parallel, p_fun, _ = parallel_func(fun, n_jobs)\n # parallel over time points\n res = parallel(p_fun(min_dist_to_inner_skull, B, t, guess_rrs,\n guess_data, fwd_data, whitener,\n fmin_cobyla, ori, rank)\n for B, t in zip(data.T, times))\n pos = np.array([r[0] for r in res])\n amp = np.array([r[1] for r in res])\n ori = np.array([r[2] for r in res])\n gof = np.array([r[3] for r in res]) * 100 # convert to 
percentage\n conf = None\n if res[0][4] is not None:\n conf = np.array([r[4] for r in res])\n keys = ['vol', 'depth', 'long', 'trans', 'qlong', 'qtrans']\n conf = {key: conf[:, ki] for ki, key in enumerate(keys)}\n khi2 = np.array([r[5] for r in res])\n nfree = np.array([r[6] for r in res])\n residual_noproj = np.array([r[7] for r in res]).T\n\n return pos, amp, ori, gof, conf, khi2, nfree, residual_noproj\n\n\n'''Simplex code in case we ever want/need it for testing\n\ndef _make_tetra_simplex():\n \"\"\"Make the initial tetrahedron\"\"\"\n #\n # For this definition of a regular tetrahedron, see\n #\n # http://mathworld.wolfram.com/Tetrahedron.html\n #\n x = np.sqrt(3.0) / 3.0\n r = np.sqrt(6.0) / 12.0\n R = 3 * r\n d = x / 2.0\n simplex = 1e-2 * np.array([[x, 0.0, -r],\n [-d, 0.5, -r],\n [-d, -0.5, -r],\n [0., 0., R]])\n return simplex\n\n\ndef try_(p, y, psum, ndim, fun, ihi, neval, fac):\n \"\"\"Helper to try a value\"\"\"\n ptry = np.empty(ndim)\n fac1 = (1.0 - fac) / ndim\n fac2 = fac1 - fac\n ptry = psum * fac1 - p[ihi] * fac2\n ytry = fun(ptry)\n neval += 1\n if ytry < y[ihi]:\n y[ihi] = ytry\n psum[:] += ptry - p[ihi]\n p[ihi] = ptry\n return ytry, neval\n\n\ndef _simplex_minimize(p, ftol, stol, fun, max_eval=1000):\n \"\"\"Minimization with the simplex algorithm\n\n Modified from Numerical recipes\"\"\"\n y = np.array([fun(s) for s in p])\n ndim = p.shape[1]\n assert p.shape[0] == ndim + 1\n mpts = ndim + 1\n neval = 0\n psum = p.sum(axis=0)\n\n loop = 1\n while(True):\n ilo = 1\n if y[1] > y[2]:\n ihi = 1\n inhi = 2\n else:\n ihi = 2\n inhi = 1\n for i in range(mpts):\n if y[i] < y[ilo]:\n ilo = i\n if y[i] > y[ihi]:\n inhi = ihi\n ihi = i\n elif y[i] > y[inhi]:\n if i != ihi:\n inhi = i\n\n rtol = 2 * np.abs(y[ihi] - y[ilo]) / (np.abs(y[ihi]) + np.abs(y[ilo]))\n if rtol < ftol:\n break\n if neval >= max_eval:\n raise RuntimeError('Maximum number of evaluations exceeded.')\n if stol > 0: # Has the simplex collapsed?\n dsum = np.sqrt(np.sum((p[ilo] - p[ihi]) ** 2))\n if loop > 5 and dsum < stol:\n break\n\n ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, -1.)\n if ytry <= y[ilo]:\n ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, 2.)\n elif ytry >= y[inhi]:\n ysave = y[ihi]\n ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, 0.5)\n if ytry >= ysave:\n for i in range(mpts):\n if i != ilo:\n psum[:] = 0.5 * (p[i] + p[ilo])\n p[i] = psum\n y[i] = fun(psum)\n neval += ndim\n psum = p.sum(axis=0)\n loop += 1\n'''\n\n\ndef _fit_confidence(rd, Q, ori, whitener, fwd_data):\n # As describedd in the Xfit manual, confidence intervals can be calculated\n # by examining a linearization of model at the best-fitting location,\n # i.e. 
taking the Jacobian and using the whitener:\n #\n # J = [∂b/∂x ∂b/∂y ∂b/∂z ∂b/∂Qx ∂b/∂Qy ∂b/∂Qz]\n # C = (J.T C^-1 J)^-1\n #\n # And then the confidence interval is the diagonal of C, scaled by 1.96\n # (for 95% confidence).\n direction = np.empty((3, 3))\n # The coordinate system has the x axis aligned with the dipole orientation,\n direction[0] = ori\n # the z axis through the origin of the sphere model\n rvec = rd - fwd_data['inner_skull']['r0']\n direction[2] = rvec - ori * np.dot(ori, rvec) # orthogonalize\n direction[2] /= np.linalg.norm(direction[2])\n # and the y axis perpendical with these forming a right-handed system.\n direction[1] = np.cross(direction[2], direction[0])\n assert np.allclose(np.dot(direction, direction.T), np.eye(3))\n # Get spatial deltas in dipole coordinate directions\n deltas = (-1e-4, 1e-4)\n J = np.empty((whitener.shape[0], 6))\n for ii in range(3):\n fwds = []\n for delta in deltas:\n this_r = rd[np.newaxis] + delta * direction[ii]\n fwds.append(\n np.dot(Q, _dipole_forwards(fwd_data, whitener, this_r)[0]))\n J[:, ii] = np.diff(fwds, axis=0)[0] / np.diff(deltas)[0]\n # Get current (Q) deltas in the dipole directions\n deltas = np.array([-0.01, 0.01]) * np.linalg.norm(Q)\n this_fwd = _dipole_forwards(fwd_data, whitener, rd[np.newaxis])[0]\n for ii in range(3):\n fwds = []\n for delta in deltas:\n fwds.append(np.dot(Q + delta * direction[ii], this_fwd))\n J[:, ii + 3] = np.diff(fwds, axis=0)[0] / np.diff(deltas)[0]\n # J is already whitened, so we don't need to do np.dot(whitener, J).\n # However, the units in the Jacobian are potentially quite different,\n # so we need to do some normalization during inversion, then revert.\n direction_norm = np.linalg.norm(J[:, :3])\n Q_norm = np.linalg.norm(J[:, 3:5]) # omit possible zero Z\n norm = np.array([direction_norm] * 3 + [Q_norm] * 3)\n J /= norm\n J = np.dot(J.T, J)\n C = linalg.pinvh(J, rcond=1e-14)\n C /= norm\n C /= norm[:, np.newaxis]\n conf = 1.96 * np.sqrt(np.diag(C))\n # The confidence volume of the dipole location is obtained from by\n # taking the eigenvalues of the upper left submatrix and computing\n # v = 4π/3 √(c^3 λ1 λ2 λ3) with c = 7.81, or:\n vol_conf = 4 * np.pi / 3. * np.sqrt(\n 476.379541 * np.prod(linalg.eigh(C[:3, :3], eigvals_only=True)))\n conf = np.concatenate([conf, [vol_conf]])\n # Now we reorder and subselect the proper columns:\n # vol, depth, long, trans, Qlong, Qtrans (discard Qdepth, assumed zero)\n conf = conf[[6, 2, 0, 1, 3, 4]]\n return conf\n\n\ndef _surface_constraint(rd, surf, min_dist_to_inner_skull):\n \"\"\"Surface fitting constraint.\"\"\"\n dist = _compute_nearest(surf['rr'], rd[np.newaxis, :],\n return_dists=True)[1][0]\n if _points_outside_surface(rd[np.newaxis, :], surf, 1)[0]:\n dist *= -1.\n # Once we know the dipole is below the inner skull,\n # let's check if its distance to the inner skull is at least\n # min_dist_to_inner_skull. 
This can be enforced by adding a\n # constrain proportional to its distance.\n dist -= min_dist_to_inner_skull\n return dist\n\n\ndef _sphere_constraint(rd, r0, R_adj):\n \"\"\"Sphere fitting constraint.\"\"\"\n return R_adj - np.sqrt(np.sum((rd - r0) ** 2))\n\n\ndef _fit_dipole(min_dist_to_inner_skull, B_orig, t, guess_rrs,\n guess_data, fwd_data, whitener, fmin_cobyla, ori, rank):\n \"\"\"Fit a single bit of data.\"\"\"\n B = np.dot(whitener, B_orig)\n\n # make constraint function to keep the solver within the inner skull\n if 'rr' in fwd_data['inner_skull']: # bem\n surf = fwd_data['inner_skull']\n constraint = partial(_surface_constraint, surf=surf,\n min_dist_to_inner_skull=min_dist_to_inner_skull)\n else: # sphere\n surf = None\n constraint = partial(\n _sphere_constraint, r0=fwd_data['inner_skull']['r0'],\n R_adj=fwd_data['inner_skull']['R'] - min_dist_to_inner_skull)\n\n # Find a good starting point (find_best_guess in C)\n B2 = np.dot(B, B)\n if B2 == 0:\n warn('Zero field found for time %s' % t)\n return np.zeros(3), 0, np.zeros(3), 0, B\n\n idx = np.argmin([_fit_eval(guess_rrs[[fi], :], B, B2, fwd_svd)\n for fi, fwd_svd in enumerate(guess_data['fwd_svd'])])\n x0 = guess_rrs[idx]\n lwork = _svd_lwork((3, B.shape[0]))\n fun = partial(_fit_eval, B=B, B2=B2, fwd_data=fwd_data, whitener=whitener,\n lwork=lwork)\n\n # Tested minimizers:\n # Simplex, BFGS, CG, COBYLA, L-BFGS-B, Powell, SLSQP, TNC\n # Several were similar, but COBYLA won for having a handy constraint\n # function we can use to ensure we stay inside the inner skull /\n # smallest sphere\n rd_final = fmin_cobyla(fun, x0, (constraint,), consargs=(),\n rhobeg=5e-2, rhoend=5e-5, disp=False)\n\n # simplex = _make_tetra_simplex() + x0\n # _simplex_minimize(simplex, 1e-4, 2e-4, fun)\n # rd_final = simplex[0]\n\n # Compute the dipole moment at the final point\n Q, gof, residual_noproj, n_comp = _fit_Q(\n fwd_data, whitener, B, B2, B_orig, rd_final, ori=ori)\n khi2 = (1 - gof) * B2\n nfree = rank - n_comp\n amp = np.sqrt(np.dot(Q, Q))\n norm = 1. if amp == 0. else amp\n ori = Q / norm\n\n conf = _fit_confidence(rd_final, Q, ori, whitener, fwd_data)\n\n msg = '---- Fitted : %7.1f ms' % (1000. * t)\n if surf is not None:\n dist_to_inner_skull = _compute_nearest(\n surf['rr'], rd_final[np.newaxis, :], return_dists=True)[1][0]\n msg += (\", distance to inner skull : %2.4f mm\"\n % (dist_to_inner_skull * 1000.))\n\n logger.info(msg)\n return rd_final, amp, ori, gof, conf, khi2, nfree, residual_noproj\n\n\ndef _fit_dipole_fixed(min_dist_to_inner_skull, B_orig, t, guess_rrs,\n guess_data, fwd_data, whitener,\n fmin_cobyla, ori, rank):\n \"\"\"Fit a data using a fixed position.\"\"\"\n B = np.dot(whitener, B_orig)\n B2 = np.dot(B, B)\n if B2 == 0:\n warn('Zero field found for time %s' % t)\n return np.zeros(3), 0, np.zeros(3), 0, np.zeros(6)\n # Compute the dipole moment\n Q, gof, residual_noproj = _fit_Q(guess_data, whitener, B, B2, B_orig,\n rd=None, ori=ori)[:3]\n if ori is None:\n amp = np.sqrt(np.dot(Q, Q))\n norm = 1. if amp == 0. 
else amp\n ori = Q / norm\n else:\n amp = np.dot(Q, ori)\n rd_final = guess_rrs[0]\n # This will be slow, and we don't use it anyway, so omit it for now:\n # conf = _fit_confidence(rd_final, Q, ori, whitener, fwd_data)\n conf = khi2 = nfree = None\n # No corresponding 'logger' message here because it should go *very* fast\n return rd_final, amp, ori, gof, conf, khi2, nfree, residual_noproj\n\n\n@verbose\ndef fit_dipole(evoked, cov, bem, trans=None, min_dist=5., n_jobs=1,\n pos=None, ori=None, verbose=None):\n \"\"\"Fit a dipole.\n\n Parameters\n ----------\n evoked : instance of Evoked\n The dataset to fit.\n cov : str | instance of Covariance\n The noise covariance.\n bem : str | instance of ConductorModel\n The BEM filename (str) or conductor model.\n trans : str | None\n The head<->MRI transform filename. Must be provided unless BEM\n is a sphere model.\n min_dist : float\n Minimum distance (in millimeters) from the dipole to the inner skull.\n Must be positive. Note that because this is a constraint passed to\n a solver it is not strict but close, i.e. for a ``min_dist=5.`` the\n fits could be 4.9 mm from the inner skull.\n %(n_jobs)s\n It is used in field computation and fitting.\n pos : ndarray, shape (3,) | None\n Position of the dipole to use. If None (default), sequential\n fitting (different position and orientation for each time instance)\n is performed. If a position (in head coords) is given as an array,\n the position is fixed during fitting.\n\n .. versionadded:: 0.12\n\n ori : ndarray, shape (3,) | None\n Orientation of the dipole to use. If None (default), the\n orientation is free to change as a function of time. If an\n orientation (in head coordinates) is given as an array, ``pos``\n must also be provided, and the routine computes the amplitude and\n goodness of fit of the dipole at the given position and orientation\n for each time instant.\n\n .. versionadded:: 0.12\n\n %(verbose)s\n\n Returns\n -------\n dip : instance of Dipole or DipoleFixed\n The dipole fits. A :class:`mne.DipoleFixed` is returned if\n ``pos`` and ``ori`` are both not None, otherwise a\n :class:`mne.Dipole` is returned.\n residual : instance of Evoked\n The M-EEG data channels with the fitted dipolar activity removed.\n\n See Also\n --------\n mne.beamformer.rap_music\n Dipole\n DipoleFixed\n read_dipole\n\n Notes\n -----\n .. versionadded:: 0.9.0\n \"\"\"\n # This could eventually be adapted to work with other inputs, these\n # are what is needed:\n\n evoked = evoked.copy()\n\n # Determine if a list of projectors has an average EEG ref\n if _needs_eeg_average_ref_proj(evoked.info):\n raise ValueError('EEG average reference is mandatory for dipole '\n 'fitting.')\n if min_dist < 0:\n raise ValueError('min_dist should be positive. 
Got %s' % min_dist)\n if ori is not None and pos is None:\n raise ValueError('pos must be provided if ori is not None')\n\n data = evoked.data\n if not np.isfinite(data).all():\n raise ValueError('Evoked data must be finite')\n info = evoked.info\n times = evoked.times.copy()\n comment = evoked.comment\n\n # Convert the min_dist to meters\n min_dist_to_inner_skull = min_dist / 1000.\n del min_dist\n\n # Figure out our inputs\n neeg = len(pick_types(info, meg=False, eeg=True, ref_meg=False,\n exclude=[]))\n if isinstance(bem, str):\n bem_extra = bem\n else:\n bem_extra = repr(bem)\n logger.info('BEM : %s' % bem_extra)\n mri_head_t, trans = _get_trans(trans)\n logger.info('MRI transform : %s' % trans)\n bem = _setup_bem(bem, bem_extra, neeg, mri_head_t, verbose=False)\n if not bem['is_sphere']:\n # Find the best-fitting sphere\n inner_skull = _bem_find_surface(bem, 'inner_skull')\n inner_skull = inner_skull.copy()\n R, r0 = _fit_sphere(inner_skull['rr'], disp=False)\n # r0 back to head frame for logging\n r0 = apply_trans(mri_head_t['trans'], r0[np.newaxis, :])[0]\n inner_skull['r0'] = r0\n logger.info('Head origin : '\n '%6.1f %6.1f %6.1f mm rad = %6.1f mm.'\n % (1000 * r0[0], 1000 * r0[1], 1000 * r0[2], 1000 * R))\n del R, r0\n else:\n r0 = bem['r0']\n if len(bem.get('layers', [])) > 0:\n R = bem['layers'][0]['rad']\n kind = 'rad'\n else: # MEG-only\n # Use the minimum distance to the MEG sensors as the radius then\n R = np.dot(linalg.inv(info['dev_head_t']['trans']),\n np.hstack([r0, [1.]]))[:3] # r0 -> device\n R = R - [info['chs'][pick]['loc'][:3]\n for pick in pick_types(info, meg=True, exclude=[])]\n if len(R) == 0:\n raise RuntimeError('No MEG channels found, but MEG-only '\n 'sphere model used')\n R = np.min(np.sqrt(np.sum(R * R, axis=1))) # use dist to sensors\n kind = 'max_rad'\n logger.info('Sphere model : origin at (% 7.2f % 7.2f % 7.2f) mm, '\n '%s = %6.1f mm'\n % (1000 * r0[0], 1000 * r0[1], 1000 * r0[2], kind, R))\n inner_skull = dict(R=R, r0=r0) # NB sphere model defined in head frame\n del R, r0\n accurate = False # can be an option later (shouldn't make big diff)\n\n # Deal with DipoleFixed cases here\n if pos is not None:\n fixed_position = True\n pos = np.array(pos, float)\n if pos.shape != (3,):\n raise ValueError('pos must be None or a 3-element array-like,'\n ' got %s' % (pos,))\n logger.info('Fixed position : %6.1f %6.1f %6.1f mm'\n % tuple(1000 * pos))\n if ori is not None:\n ori = np.array(ori, float)\n if ori.shape != (3,):\n raise ValueError('oris must be None or a 3-element array-like,'\n ' got %s' % (ori,))\n norm = np.sqrt(np.sum(ori * ori))\n if not np.isclose(norm, 1):\n raise ValueError('ori must be a unit vector, got length %s'\n % (norm,))\n logger.info('Fixed orientation : %6.4f %6.4f %6.4f mm'\n % tuple(ori))\n else:\n logger.info('Free orientation : <time-varying>')\n fit_n_jobs = 1 # only use 1 job to do the guess fitting\n else:\n fixed_position = False\n # Eventually these could be parameters, but they are just used for\n # the initial grid anyway\n guess_grid = 0.02 # MNE-C uses 0.01, but this is faster w/similar perf\n guess_mindist = max(0.005, min_dist_to_inner_skull)\n guess_exclude = 0.02\n\n logger.info('Guess grid : %6.1f mm' % (1000 * guess_grid,))\n if guess_mindist > 0.0:\n logger.info('Guess mindist : %6.1f mm'\n % (1000 * guess_mindist,))\n if guess_exclude > 0:\n logger.info('Guess exclude : %6.1f mm'\n % (1000 * guess_exclude,))\n logger.info('Using %s MEG coil definitions.'\n % (\"accurate\" if accurate else \"standard\"))\n 
fit_n_jobs = n_jobs\n if isinstance(cov, str):\n logger.info('Noise covariance : %s' % (cov,))\n cov = read_cov(cov, verbose=False)\n logger.info('')\n\n _print_coord_trans(mri_head_t)\n _print_coord_trans(info['dev_head_t'])\n logger.info('%d bad channels total' % len(info['bads']))\n\n # Forward model setup (setup_forward_model from setup.c)\n ch_types = [channel_type(info, idx) for idx in range(info['nchan'])]\n\n megcoils, compcoils, megnames, meg_info = [], [], [], None\n eegels, eegnames = [], []\n if 'grad' in ch_types or 'mag' in ch_types:\n megcoils, compcoils, megnames, meg_info = \\\n _prep_meg_channels(info, exclude='bads',\n accurate=accurate, verbose=verbose)\n if 'eeg' in ch_types:\n eegels, eegnames = _prep_eeg_channels(info, exclude='bads',\n verbose=verbose)\n\n # Ensure that MEG and/or EEG channels are present\n if len(megcoils + eegels) == 0:\n raise RuntimeError('No MEG or EEG channels found.')\n\n # Whitener for the data\n logger.info('Decomposing the sensor noise covariance matrix...')\n picks = pick_types(info, meg=True, eeg=True, ref_meg=False)\n\n # In case we want to more closely match MNE-C for debugging:\n # from .io.pick import pick_info\n # from .cov import prepare_noise_cov\n # info_nb = pick_info(info, picks)\n # cov = prepare_noise_cov(cov, info_nb, info_nb['ch_names'], verbose=False)\n # nzero = (cov['eig'] > 0)\n # n_chan = len(info_nb['ch_names'])\n # whitener = np.zeros((n_chan, n_chan), dtype=np.float)\n # whitener[nzero, nzero] = 1.0 / np.sqrt(cov['eig'][nzero])\n # whitener = np.dot(whitener, cov['eigvec'])\n\n whitener, _, rank = compute_whitener(cov, info, picks=picks,\n return_rank=True)\n\n # Proceed to computing the fits (make_guess_data)\n if fixed_position:\n guess_src = dict(nuse=1, rr=pos[np.newaxis], inuse=np.array([True]))\n logger.info('Compute forward for dipole location...')\n else:\n logger.info('\\n---- Computing the forward solution for the guesses...')\n guess_src = _make_guesses(inner_skull, guess_grid, guess_exclude,\n guess_mindist, n_jobs=n_jobs)[0]\n # grid coordinates go from mri to head frame\n transform_surface_to(guess_src, 'head', mri_head_t)\n logger.info('Go through all guess source locations...')\n\n # inner_skull goes from mri to head frame\n if 'rr' in inner_skull:\n transform_surface_to(inner_skull, 'head', mri_head_t)\n if fixed_position:\n if 'rr' in inner_skull:\n check = _surface_constraint(pos, inner_skull,\n min_dist_to_inner_skull)\n else:\n check = _sphere_constraint(\n pos, inner_skull['r0'],\n R_adj=inner_skull['R'] - min_dist_to_inner_skull)\n if check <= 0:\n raise ValueError('fixed position is %0.1fmm outside the inner '\n 'skull boundary' % (-1000 * check,))\n\n # C code computes guesses w/sphere model for speed, don't bother here\n fwd_data = dict(coils_list=[megcoils, eegels], infos=[meg_info, None],\n ccoils_list=[compcoils, None], coil_types=['meg', 'eeg'],\n inner_skull=inner_skull)\n # fwd_data['inner_skull'] in head frame, bem in mri, confusing...\n _prep_field_computation(guess_src['rr'], bem, fwd_data, n_jobs,\n verbose=False)\n guess_fwd, guess_fwd_orig, guess_fwd_scales = _dipole_forwards(\n fwd_data, whitener, guess_src['rr'], n_jobs=fit_n_jobs)\n # decompose ahead of time\n guess_fwd_svd = [linalg.svd(fwd, overwrite_a=False, full_matrices=False)\n for fwd in np.array_split(guess_fwd,\n len(guess_src['rr']))]\n guess_data = dict(fwd=guess_fwd, fwd_svd=guess_fwd_svd,\n fwd_orig=guess_fwd_orig, scales=guess_fwd_scales)\n del guess_fwd, guess_fwd_svd, guess_fwd_orig, guess_fwd_scales # 
destroyed\n logger.info('[done %d source%s]' % (guess_src['nuse'],\n _pl(guess_src['nuse'])))\n\n # Do actual fits\n data = data[picks]\n ch_names = [info['ch_names'][p] for p in picks]\n proj_op = make_projector(info['projs'], ch_names, info['bads'])[0]\n fun = _fit_dipole_fixed if fixed_position else _fit_dipole\n out = _fit_dipoles(\n fun, min_dist_to_inner_skull, data, times, guess_src['rr'],\n guess_data, fwd_data, whitener, ori, n_jobs, rank)\n assert len(out) == 8\n if fixed_position and ori is not None:\n # DipoleFixed\n data = np.array([out[1], out[3]])\n out_info = deepcopy(info)\n loc = np.concatenate([pos, ori, np.zeros(6)])\n out_info['chs'] = [\n dict(ch_name='dip 01', loc=loc, kind=FIFF.FIFFV_DIPOLE_WAVE,\n coord_frame=FIFF.FIFFV_COORD_UNKNOWN, unit=FIFF.FIFF_UNIT_AM,\n coil_type=FIFF.FIFFV_COIL_DIPOLE,\n unit_mul=0, range=1, cal=1., scanno=1, logno=1),\n dict(ch_name='goodness', loc=np.full(12, np.nan),\n kind=FIFF.FIFFV_GOODNESS_FIT, unit=FIFF.FIFF_UNIT_AM,\n coord_frame=FIFF.FIFFV_COORD_UNKNOWN,\n coil_type=FIFF.FIFFV_COIL_NONE,\n unit_mul=0, range=1., cal=1., scanno=2, logno=100)]\n for key in ['hpi_meas', 'hpi_results', 'projs']:\n out_info[key] = list()\n for key in ['acq_pars', 'acq_stim', 'description', 'dig',\n 'experimenter', 'hpi_subsystem', 'proj_id', 'proj_name',\n 'subject_info']:\n out_info[key] = None\n out_info['bads'] = []\n out_info._update_redundant()\n out_info._check_consistency()\n dipoles = DipoleFixed(out_info, data, times, evoked.nave,\n evoked._aspect_kind, evoked.first, evoked.last,\n comment)\n else:\n dipoles = Dipole(times, out[0], out[1], out[2], out[3], comment,\n out[4], out[5], out[6])\n residual = evoked.copy().apply_proj() # set the projs active\n residual.data[picks] = np.dot(proj_op, out[-1])\n logger.info('%d time points fitted' % len(dipoles.times))\n return dipoles, residual\n\n\ndef get_phantom_dipoles(kind='vectorview'):\n \"\"\"Get standard phantom dipole locations and orientations.\n\n Parameters\n ----------\n kind : str\n Get the information for the given system:\n\n ``vectorview`` (default)\n The Neuromag VectorView phantom.\n ``otaniemi``\n The older Neuromag phantom used at Otaniemi.\n\n Returns\n -------\n pos : ndarray, shape (n_dipoles, 3)\n The dipole positions.\n ori : ndarray, shape (n_dipoles, 3)\n The dipole orientations.\n\n Notes\n -----\n The Elekta phantoms have a radius of 79.5mm, and HPI coil locations\n in the XY-plane at the axis extrema (e.g., (79.5, 0), (0, -79.5), ...).\n \"\"\"\n _check_option('kind', kind, ['vectorview', 'otaniemi'])\n if kind == 'vectorview':\n # these values were pulled from a scanned image provided by\n # Elekta folks\n a = np.array([59.7, 48.6, 35.8, 24.8, 37.2, 27.5, 15.8, 7.9])\n b = np.array([46.1, 41.9, 38.3, 31.5, 13.9, 16.2, 20.0, 19.3])\n x = np.concatenate((a, [0] * 8, -b, [0] * 8))\n y = np.concatenate(([0] * 8, -a, [0] * 8, b))\n c = [22.9, 23.5, 25.5, 23.1, 52.0, 46.4, 41.0, 33.0]\n d = [44.4, 34.0, 21.6, 12.7, 62.4, 51.5, 39.1, 27.9]\n z = np.concatenate((c, c, d, d))\n signs = ([1, -1] * 4 + [-1, 1] * 4) * 2\n elif kind == 'otaniemi':\n # these values were pulled from an Neuromag manual\n # (NM20456A, 13.7.1999, p.65)\n a = np.array([56.3, 47.6, 39.0, 30.3])\n b = np.array([32.5, 27.5, 22.5, 17.5])\n c = np.zeros(4)\n x = np.concatenate((a, b, c, c, -a, -b, c, c))\n y = np.concatenate((c, c, -a, -b, c, c, b, a))\n z = np.concatenate((b, a, b, a, b, a, a, b))\n signs = [-1] * 8 + [1] * 16 + [-1] * 8\n pos = np.vstack((x, y, z)).T / 1000.\n # Locs are always in XZ or YZ, 
and so are the oris. The oris are\n # also in the same plane and tangential, so it's easy to determine\n # the orientation.\n ori = list()\n for pi, this_pos in enumerate(pos):\n this_ori = np.zeros(3)\n idx = np.where(this_pos == 0)[0]\n # assert len(idx) == 1\n idx = np.setdiff1d(np.arange(3), idx[0])\n this_ori[idx] = (this_pos[idx][::-1] /\n np.linalg.norm(this_pos[idx])) * [1, -1]\n this_ori *= signs[pi]\n # Now we have this quality, which we could uncomment to\n # double-check:\n # np.testing.assert_allclose(np.dot(this_ori, this_pos) /\n # np.linalg.norm(this_pos), 0,\n # atol=1e-15)\n ori.append(this_ori)\n ori = np.array(ori)\n return pos, ori\n\n\ndef _concatenate_dipoles(dipoles):\n \"\"\"Concatenate a list of dipoles.\"\"\"\n times, pos, amplitude, ori, gof = [], [], [], [], []\n for dipole in dipoles:\n times.append(dipole.times)\n pos.append(dipole.pos)\n amplitude.append(dipole.amplitude)\n ori.append(dipole.ori)\n gof.append(dipole.gof)\n\n return Dipole(np.concatenate(times), np.concatenate(pos),\n np.concatenate(amplitude), np.concatenate(ori),\n np.concatenate(gof), name=None)\n" ]
[ [ "numpy.diag", "numpy.dot", "scipy.linalg.svd", "numpy.vstack", "numpy.concatenate", "numpy.max", "numpy.cross", "numpy.where", "numpy.hstack", "numpy.arange", "numpy.eye", "numpy.full", "scipy.linalg.eigh", "numpy.diff", "scipy.linalg.pinvh", "scipy.linalg.inv", "numpy.zeros", "numpy.isclose", "numpy.min", "scipy.optimize.fmin_cobyla", "numpy.savetxt", "numpy.array", "numpy.sum", "numpy.isfinite", "numpy.linalg.norm", "numpy.ones", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.12", "0.14", "0.15" ], "tensorflow": [] } ]
newbe36524/Newbe.Demo
[ "ba59394e78306bd94f8a1526d1d4a0234dcee4e0" ]
[ "src/BlogDemos/Newbe.TextOcr/ocr/mymain.py" ]
[ "import os\nimport cv2\nimport imutils\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef pre_process_image(img, save_in_file=None, morph_size=(8, 8)):\n # get rid of the color\n pre = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # Otsu threshold\n pre = cv2.threshold(pre, 250, 255, cv2.THRESH_TOZERO | cv2.THRESH_OTSU)[1]\n # pre = cv2.threshold(pre, 250, 255, cv2.THRESH_TRUNC | cv2.THRESH_OTSU)[1]\n # pre = cv2.threshold(pre, 250, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n pre = ~pre\n\n if save_in_file is not None:\n cv2.imwrite(save_in_file, pre)\n return pre\n\n\ndef pre_process_image_crop(img, save_in_file=None, morph_size=(8, 8)):\n # get rid of the color\n pre = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # Otsu threshold\n pre = cv2.threshold(pre, 100, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n # # dilate the text to make it solid spot\n # cpy = pre.copy()\n # struct = cv2.getStructuringElement(cv2.MORPH_RECT, morph_size)\n # cpy = cv2.dilate(~cpy, struct, anchor=(-1, -1), iterations=1)\n pre = ~pre\n\n if save_in_file is not None:\n cv2.imwrite(save_in_file, pre)\n return pre\n\n\ndef crop_range(source_img, source_file_name, pre_processed_img, mark_in_file, crop_dir):\n low_threshold = 50\n high_threshold = 150\n edges = cv2.Canny(pre_processed_img, low_threshold, high_threshold)\n result = np.copy(source_img)\n contours, hierarchy = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n crop_index = 0\n copped_images = []\n hull_list = []\n for cnt in range(len(contours)):\n\n # 轮廓逼近\n epsilon = 0.01 * cv2.arcLength(contours[cnt], True)\n approx = cv2.approxPolyDP(contours[cnt], epsilon, True)\n corners = len(approx)\n if corners == 4:\n area = cv2.contourArea(contours[cnt])\n if area > 10000:\n # 提取与绘制轮廓\n # cv2.drawContours(result, contours, cnt, (0, 0, 255), 10)\n # plt.imshow(result, cmap='gray')\n # plt.show()\n hull = cv2.convexHull(contours[cnt])\n hull_list.append(hull)\n cv2.drawContours(result, hull_list, len(hull_list) - 1, (0, 0, 0), 10)\n\n x, y, w, h = cv2.boundingRect(hull)\n cropped = result[y:y + h, x:x + w]\n cv2.imwrite(os.path.join(crop_dir, f\"{source_file_name}_{crop_index}.png\"), cropped)\n plt.imshow(cropped, cmap='gray')\n plt.show()\n copped_images.append(np.copy(cropped))\n crop_index += 1\n\n for cnt in range(len(hull_list)):\n cv2.drawContours(result, hull_list, cnt, (0, 0, 255), 10)\n plt.imshow(result, cmap='gray')\n plt.show()\n\n cv2.imwrite(mark_in_file, result)\n return copped_images\n\n\ndef get_vh(source_img):\n ver_kernel_len = np.array(source_img).shape[0] // 30\n hor_kernel_len = np.array(source_img).shape[1] // 100\n # Defining a vertical kernel to detect all vertical lines of image\n ver_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, ver_kernel_len))\n # Defining a horizontal kernel to detect all horizontal lines of image\n hor_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (hor_kernel_len, 1))\n # A kernel of 2x2\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))\n\n # Use vertical kernel to detect and save the vertical lines in a jpg\n image_1 = cv2.erode(source_img, ver_kernel, iterations=3)\n vertical_lines = cv2.dilate(image_1, ver_kernel, iterations=3)\n\n # Use horizontal kernel to detect and save the horizontal lines in a jpg\n image_2 = cv2.erode(source_img, hor_kernel, iterations=3)\n horizontal_lines = cv2.dilate(image_2, hor_kernel, iterations=3)\n\n # Combine horizontal and vertical lines in a new third image, with both having same weight.\n img_vh = 
cv2.addWeighted(vertical_lines, 0.5, horizontal_lines, 0.5, 0.0)\n # Eroding and thesholding the image\n img_vh = cv2.erode(~img_vh, kernel, iterations=2)\n thresh, img_vh = cv2.threshold(img_vh, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n return img_vh\n\n\nif __name__ == \"__main__\":\n source_dir = os.path.join(\"data\", \"source\")\n dir_list = os.listdir(source_dir)\n for cur_file in dir_list:\n source_file = os.path.join(source_dir, cur_file)\n in_file = source_file\n\n pre_file = os.path.join(\"data\", \"pre\", cur_file)\n out_file = os.path.join(\"data\", \"out\", cur_file)\n cropped_dir = os.path.join(\"data\", \"cropped_region\")\n cropped_sub_dir = os.path.join(\"data\", \"cropped_sub\")\n\n img = cv2.imread(os.path.join(in_file))\n\n pre_processed = pre_process_image(img, pre_file)\n copped_images = crop_range(img, cur_file, pre_processed, out_file, cropped_dir)\n\n cropped_index = 0\n for cropped in copped_images:\n cropped_pre_file = os.path.join(\"data\", \"cropped_pre\", f\"{cur_file}_{cropped_index}.png\")\n pre_processed_cropped = pre_process_image_crop(cropped, cropped_pre_file)\n plt.imshow(pre_processed_cropped, cmap='gray')\n plt.show()\n cropped_index += 1\n\n img_vh = get_vh(pre_processed_cropped)\n cv2.imwrite(\"./img_vh.jpg\", img_vh)\n plt.imshow(~img_vh, cmap='gray')\n plt.show()\n\n kernel = np.ones((1, 5), np.uint8)\n erosion = cv2.erode(img_vh, kernel, iterations=10)\n dilate = cv2.dilate(erosion, kernel, iterations=10)\n cv2.imwrite(\"./img_vh_dilate.jpg\", dilate)\n plt.imshow(dilate, cmap='gray')\n plt.show()\n\n low_threshold = 100\n high_threshold = 255\n result = np.copy(cropped)\n edges = cv2.Canny(~dilate, low_threshold, high_threshold)\n\n contours, hierarchy = cv2.findContours(edges, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)\n index = 0\n for cnt in range(len(contours)):\n # 轮廓逼近\n epsilon = 0.01 * cv2.arcLength(contours[cnt], True)\n approx = cv2.approxPolyDP(contours[cnt], epsilon, True)\n corners = len(approx)\n # if corners == 4 or corners == 5 or corners == 6 or corners == 7 or corners == 8:\n ar = cv2.contourArea(contours[cnt])\n if ar > (pre_processed_cropped.size // 300):\n cv2.drawContours(result, contours, cnt, (0, 0, 255), 5)\n hull = cv2.convexHull(contours[cnt])\n\n x, y, w, h = cv2.boundingRect(hull)\n c = cropped[y:y + h, x:x + w]\n cv2.imwrite(os.path.join(cropped_sub_dir, f\"{cur_file}_{index}.png\"), c)\n index += 1\n cv2.imwrite(\"./img_vh_result.jpg\", result)\n plt.imshow(result)\n plt.show()\n" ]
[ [ "matplotlib.pyplot.imshow", "numpy.ones", "numpy.copy", "numpy.array", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jwcalder/MachineLearningAnthro
[ "d43d8f7602ffb6c230eb204d9f05bb71375cc65f" ]
[ "accuracy_percentage_plots.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nplt.rcParams.update({\n \"text.usetex\": True,\n \"font.family\": \"serif\",\n \"font.sans-serif\": [\"Helvetica\"],\n \"font.size\": 12})\nstyles = ['^-', 'o-', 'd-', 's-', 'p-', 'x-', '*-']\n\n#data = pd.read_csv('results/gbl_ml_accuracies_angular.csv')\n#x = np.array([data.iloc[:,0],data.iloc[:,501]])\n#plt.plot(x[0]*100,x[1], marker = 'o', label = 'Angular Weight Matrix')\n\ndata = pd.read_csv('results/gbl_regular_info.csv')\nx = np.array([data.iloc[:,0],data.iloc[:,1]])\nplt.plot(x[0],x[1], marker = 'D', label = 'Graph-based Learning')\n\ndata = pd.read_csv('results/rf_info.csv')\nx = np.array([data.iloc[:,0], data.iloc[:,1]])\nplt.plot(x[0],x[1],marker = 's', label = 'Random Forest')\n\ndata = pd.read_csv('results/gbl_spectral_info.csv')\nx = np.array([data.iloc[:,0], data.iloc[:,1]])\nplt.plot(x[0],x[1],marker = 's', label = 'Graph-based Learning, with SE')\n\ndata = pd.read_csv('results/gbl_vae_info.csv')\nx = np.array([data.iloc[:,0], data.iloc[:,1]])\nplt.plot(x[0],x[1],marker = 's', label = 'Graph-based Learning, with VAE')\n\nplt.xlabel('Percentage Training Data')\nplt.ylabel('Average Accuracy (Percentage)')\nplt.title('Average accuracy VS. percentage training data')\nplt.ylim(60,100)\nplt.legend()\nplt.savefig('figures/gbl_accuracy_vs_training_percentage')\n\n" ]
[ [ "matplotlib.pyplot.legend", "pandas.read_csv", "matplotlib.pyplot.title", "matplotlib.pyplot.ylim", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.rcParams.update", "numpy.array", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
AtoosaParsa/CS387-Assignments
[ "57dfd68dded486a61df247299d93ca0c804a6b98" ]
[ "HW03/q1.py" ]
[ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import r2_score \nimport time\nfrom preprocessing import preprocessing\n\ndef predict(x, y, theta):\n y_predict = np.matmul(x, theta).flatten()\n loss = ((y_predict-y) ** 2).mean()\n return y_predict, loss\n\n# data from https://www.kaggle.com/shivachandel/kc-house-data\n# loading the data\ndata = pd.read_csv('kc_house_data.csv')\n# preprocessing\nX, Y = preprocessing(data)\n\nx_data = X.to_numpy()\ny_data = Y.to_numpy()\n\nfeatures = x_data.shape[1]\ndataSize = x_data.shape[0]\n\n# normalize the data to avoid big numbers and exploding gradients!\nfor i in range(0, features):\n x_data[:,i] = x_data[:,i]/x_data[:,i].mean()\n\ny_data[:] = y_data[:] / y_data.mean()\n\n# divide the data into train and test\ntrainingSize = int(0.8 * dataSize)\n\nx_train = x_data[0:trainingSize, :]\ny_train = y_data[0:trainingSize]\n\nx_test = x_data[trainingSize:, :]\ny_test = y_data[trainingSize:]\n\n# initial point for parameters \ntheta = np.zeros([features,1])\n\n# parameter of gradient descent\ngamma = 1e-5\nepochs = 5000\nbatchSize = 100\n\ntrainLoss = []\n\nt0 = time.time()\n\nfor e in range(epochs):\n for i in range(0, trainingSize, batchSize):\n \n # get the batches\n x_batch = x_train[i:i + batchSize, :]\n y_batch = y_train[i:i + batchSize]\n\n y_predict = np.matmul(x_batch, theta).flatten()\n error = y_batch - y_predict\n\n gradient = -2 * np.matmul(x_batch.T, np.expand_dims(error,-1))\n \n theta = theta - gamma * gradient\n \n # calculate the training loss\n loss = (error ** 2).sum()\n trainLoss.append(loss)\n print(\"epoch \"+str(e)+\": \"+str(loss))\n \nt1 = time.time()\ndelta = t1 - t0\nprint(\"training done\")\nprint(\"time passed: \"+str(delta))\n\n# now get the prediction and calculate loss and r2 score on test data\ny_pred, loss = predict(x_test, y_test, theta)\nscore = r2_score(y_test, y_pred)\nprint(\"r2-score is: \"+str(score))\nprint(\"loss is: \"+str(loss))\n\n# plotting\nplt.figure()\nplt.grid(color='silver', linestyle='-', linewidth=0.2)\nplt.plot(list(range(1, epochs+1)), trainLoss, color='blue')\nplt.xlabel(\"Epochs\")\nplt.ylabel(\"Training Loss\")\n#plt.title(\"\", fontsize='small')\nplt.grid(color='skyblue', linestyle=':', linewidth=0.5)\nplt.tight_layout()\n#plt.legend(['two robot', 'three robots'], loc='upper left')\n#plt.savefig(\"compare.pdf\")\nplt.show()" ]
[ [ "pandas.read_csv", "sklearn.metrics.r2_score", "matplotlib.pyplot.tight_layout", "numpy.expand_dims", "numpy.matmul", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "numpy.zeros", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
knagrecha/hydra
[ "bf0cf55e0b71acd1966e6e9766ac2022b4a39605" ]
[ "examples/customLayers/BertEmbedding.py" ]
[ "import torch.nn as nn\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import Linear, Dropout, LayerNorm, TransformerEncoder\n\nclass PositionalEncoding(nn.Module):\n def __init__(self, d_model, max_len=5000):\n super(PositionalEncoding, self).__init__()\n self.pos_embedding = nn.Embedding(max_len, d_model)\n\n def forward(self, x):\n #print(x.shape)\n S, N = x.size()\n pos = torch.arange(S,\n dtype=torch.long,\n device=x.device).unsqueeze(0).expand((N, S)).t()\n return self.pos_embedding(pos)\n\nclass TokenTypeEncoding(nn.Module):\n def __init__(self, type_token_num, d_model):\n super(TokenTypeEncoding, self).__init__()\n self.token_type_embeddings = nn.Embedding(type_token_num, d_model)\n\n def forward(self, seq_input, token_type_input):\n S, N = seq_input.size()\n if token_type_input is None:\n token_type_input = torch.zeros((S, N),\n dtype=torch.long,\n device=seq_input.device)\n return self.token_type_embeddings(token_type_input)\n\n\nclass BertEmbedding(nn.Module):\n def __init__(self, ntoken, ninp, dropout=0.5, transpose=True):\n super().__init__()\n self.ninp = ninp\n self.ntoken = ntoken\n self.pos_embed = PositionalEncoding(ninp)\n self.embed = nn.Embedding(ntoken, ninp)\n self.tok_type_embed = TokenTypeEncoding(2, ninp) # Two sentence type\n self.norm = nn.LayerNorm(ninp)\n self.dropout = nn.Dropout(dropout)\n self.transpose = transpose\n\n def forward(self, src, token_type_input=None):\n\n embed = self.embed(src)\n pos_embed = self.pos_embed(src)\n tok_type_embed = self.tok_type_embed(src, token_type_input)\n if self.transpose:\n src = embed + pos_embed + tok_type_embed.transpose(0, 1)\n else:\n src = embed + pos_embed + tok_type_embed\n src = self.dropout(self.norm(src))\n return src\n \n \nclass BertEmbeddingNoTOK(nn.Module):\n def __init__(self, ntoken, ninp, dropout=0.5, transpose=True):\n super().__init__()\n self.ninp = ninp\n self.ntoken = ntoken\n self.pos_embed = PositionalEncoding(ninp)\n self.embed = nn.Embedding(ntoken, ninp)\n self.tok_type_embed = TokenTypeEncoding(2, ninp) # Two sentence type\n self.norm = nn.LayerNorm(ninp)\n self.dropout = nn.Dropout(dropout)\n self.transpose = transpose\n\n def forward(self, src):\n\n embed = self.embed(src)\n pos_embed = self.pos_embed(src)\n token_type_input = None\n tok_type_embed = self.tok_type_embed(src, token_type_input)\n if self.transpose:\n src = embed + pos_embed + tok_type_embed.transpose(0, 1)\n else:\n src = embed + pos_embed + tok_type_embed\n src = self.dropout(self.norm(src))\n return src\n\n" ]
[ [ "torch.nn.Dropout", "torch.zeros", "torch.nn.Embedding", "torch.nn.LayerNorm", "torch.arange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
benchenas/BenchENAS
[ "776cd1dd035d73c4af369d0106d010b932f64782", "776cd1dd035d73c4af369d0106d010b932f64782", "776cd1dd035d73c4af369d0106d010b932f64782", "776cd1dd035d73c4af369d0106d010b932f64782" ]
[ "BenchENAS_python_package/test/test_dataloader.py", "BenchENAS_python_package/algs/nsga_net/genetic/crossover_and_mutation.py", "BenchENAS_linux_platform/algs/nsga_net/utils/utils.py", "BenchENAS_linux_platform/algs/evocnn/genetic/crossover_and_mutation.py" ]
[ "import numpy as np\nimport torch\n\nfrom comm.registry import Registry\nfrom compute import Config_ini\nfrom tools import StatusUpdateTool\nfrom train.dataset.dataloader import BaseDataloader\n\n\ndef test_cifar_loader():\n Config_ini.batch_size = 64\n Config_ini.total_epoch = 50\n datasets = ['CIFAR10', 'CIFAR100']\n for dataset in datasets:\n dataloader_cls = Registry.DataLoaderRegistry.query(dataset)\n dataloader_cls_ins = dataloader_cls()\n dataloader_cls_ins.amend_valid_size(val=0.2)\n train_dataloader = dataloader_cls_ins.get_train_dataloader()\n valid_loader = dataloader_cls_ins.get_val_dataloader()\n assert len(train_dataloader) == np.ceil((1 - dataloader_cls_ins.valid_size) * 50000 / 64)\n assert len(valid_loader) == np.ceil(dataloader_cls_ins.valid_size * 50000 / 64)\n\n for dataset in datasets:\n dataloader_cls = Registry.DataLoaderRegistry.query(dataset)\n dataloader_cls_ins = dataloader_cls()\n dataloader_cls_ins.amend_valid_size(val=None)\n train_dataloader = dataloader_cls_ins.get_train_dataloader()\n valid_loader = dataloader_cls_ins.get_val_dataloader()\n assert len(train_dataloader) == np.ceil(50000 / 64)\n assert len(valid_loader) == np.ceil(10000 / 64)\n\n\ndef test_mnist_loader():\n Config_ini.batch_size = 50\n Config_ini.total_epoch = 50\n dataset = 'MNIST'\n\n dataloader_cls = Registry.DataLoaderRegistry.query(dataset)\n dataloader_cls_ins = dataloader_cls()\n dataloader_cls_ins.amend_valid_size(val=0.2)\n train_dataloader = dataloader_cls_ins.get_train_dataloader()\n valid_loader = dataloader_cls_ins.get_val_dataloader()\n assert len(train_dataloader) == np.ceil((1 - dataloader_cls_ins.valid_size) * 60000 / 50)\n assert len(valid_loader) == np.ceil(dataloader_cls_ins.valid_size * 60000 / 50)\n\n dataloader_cls = Registry.DataLoaderRegistry.query(dataset)\n dataloader_cls_ins = dataloader_cls()\n dataloader_cls_ins.amend_valid_size(val=None)\n train_dataloader = dataloader_cls_ins.get_train_dataloader()\n valid_loader = dataloader_cls_ins.get_val_dataloader()\n assert len(train_dataloader) == np.ceil(60000 / 50)\n assert len(valid_loader) == np.ceil(10000 / 50)\n\n\ndef test_end_evolution():\n algs = ['aecnn', 'cgp_cnn', 'cnn_ga', 'evocnn', 'genetic_CNN', 'hierarchical_representations',\n 'large_scale', 'regularized_evolution', 'nsga_net']\n section = 'evolution_status'\n key = 'IS_RUNNING'\n for alg in algs:\n tool = StatusUpdateTool(alg)\n tool.end_evolution()\n val = tool.read_ini_file(section, key)\n assert int(val) == 0\n", "import random\nimport numpy as np\nimport copy\nimport math\n\nfrom algs.nsga_net.genetic.population import Population\nfrom algs.nsga_net.utils.statusupdatetool import StatusUpdateTool\nfrom algs.nsga_net.utils.utils import Utils\n\n\ndef crossover_mask(X, M):\n # convert input to output by flatting along the first axis\n _X = np.copy(X)\n for i in range(len(_X)):\n print(X[i][1].genome[M[i]])\n _X[i][0].genome[M[i]] = X[i][1].genome[M[i]]\n _X[i][1].genome[M[i]] = X[i][0].genome[M[i]]\n\n return _X\n\n\nclass Crossover(object):\n def __init__(self, pop, parent):\n self.pop = pop\n self.parent = parent\n\n def do(self, prob=0.5, n_points=2):\n do_crossover = np.random.random(len(self.parent)) < prob\n\n n_matings = len(self.parent)\n n_var = len(self.parent[0][0].genome)\n # print(\"n_matings:\"+str(n_matings))\n # print(\"n_var:\"+str(n_var))\n # for i in range(n_matings):\n # print(len(self.parent[i]))\n\n # start point of crossover\n r = np.row_stack([np.random.permutation(n_var - 1) + 1 for _ in range(n_matings)])[:, :n_points]\n 
r.sort(axis=1)\n r = np.column_stack([r, np.full(n_matings, n_var)])\n\n # the mask used to do the crossover\n M = np.full((n_matings, n_var), False)\n\n # create for each individual the crossover range\n for i in range(n_matings):\n\n j = 0\n while j < r.shape[1] - 1:\n a, b = r[i, j], r[i, j + 1]\n M[i, a:b] = True\n j += 2\n\n _parent = crossover_mask(self.parent, M)\n for i, f in enumerate(do_crossover):\n if f:\n self.parent[i] = _parent[i]\n\n off = []\n\n for i in range(0, len(self.parent)):\n off.append(self.parent[i][0])\n off.append(self.parent[i][1])\n return off\n\n\nclass CrossoverAndMutation(object):\n def __init__(self, indis, parents, params, gen_no):\n self.individuals = indis\n self.params = params\n self.parents = parents\n self.gen_no = gen_no\n\n def process(self):\n pop = self.individuals\n off = Crossover(pop, self.parents).do()\n next_gen_pops = Population(self.gen_no, self.params)\n next_gen_pops.create_from_offspring(off)\n for indi in next_gen_pops.individuals:\n indi.reset()\n Utils.save_population_after_crossover(str(next_gen_pops), self.gen_no)\n off = Mutation(off, self.params).do_mutation()\n return off\n\n\nclass Mutation(object):\n def __init__(self, individuals, params, eta=3):\n self.individuals = individuals\n self.eta = float(eta)\n self.params = params\n\n def do_mutation(self):\n n = len(self.individuals)\n n_var = len(self.individuals[0].genome)\n X = np.zeros((n, n_var))\n for i in range(n):\n X[i] = self.individuals[i].genome\n\n X = X.astype(float)\n Y = np.full((n, n_var), np.inf)\n\n prob = 1.0 / n_var\n\n do_mutation = np.random.random((n, n_var)) < prob\n\n Y[:, :] = X\n\n # per-gene lower and upper bounds for the polynomial mutation\n xl = np.repeat(self.params['lb'][None, :], n, axis=0)[do_mutation]\n xu = np.repeat(self.params['ub'][None, :], n, axis=0)[do_mutation]\n\n X = X[do_mutation]\n\n delta1 = (X - xl) / (xu - xl)\n delta2 = (xu - X) / (xu - xl)\n\n mut_pow = 1.0 / (self.eta + 1.0)\n\n rand = np.random.random(X.shape)\n mask = rand <= 0.5\n mask_not = np.logical_not(mask)\n\n deltaq = np.zeros(X.shape)\n\n xy = 1.0 - delta1\n val = 2.0 * rand + (1.0 - 2.0 * rand) * (np.power(xy, (self.eta + 1.0)))\n d = np.power(val, mut_pow) - 1.0\n deltaq[mask] = d[mask]\n\n xy = 1.0 - delta2\n val = 2.0 * (1.0 - rand) + 2.0 * (rand - 0.5) * (np.power(xy, (self.eta + 1.0)))\n d = 1.0 - (np.power(val, mut_pow))\n deltaq[mask_not] = d[mask_not]\n\n # mutated values\n _Y = X + deltaq * (xu - xl)\n\n # back in bounds if necessary (floating point issues)\n _Y[_Y < xl] = xl[_Y < xl]\n _Y[_Y > xu] = xu[_Y > xu]\n\n # set the values for output\n indis = copy.deepcopy(self.individuals)\n Y[do_mutation] = _Y\n for i in range(Y.shape[0]):\n indis[i].genome = Y[i].astype(int)\n return indis\n", "import configparser\nimport os\nimport platform\nimport multiprocessing\nfrom compute.file import get_algo_local_dir\nimport time\nimport numpy as np\nfrom algs.nsga_net.utils.statusupdatetool import StatusUpdateTool\nfrom algs.nsga_net.genetic.population import Population, Individual\n\n\nclass Utils(object):\n _lock = multiprocessing.Lock()\n\n @classmethod\n def get_lock_for_write_fitness(cls):\n return cls._lock\n\n @classmethod\n def path_replace(cls, input_str):\n # input a str and replace '\\\\' with '/', because os.path on Windows joins paths with '\\\\'\n # please use it after creating a string with both os.path and the string '/'\n if (platform.system() == 'Windows'):\n new_str = input_str.replace('\\\\', '/')\n else: # Linux or Mac\n new_str = input_str\n return new_str\n\n @classmethod\n def 
load_cache_data(cls):\n file_name = '%s/cache.txt' % (os.path.join(get_algo_local_dir(), 'populations'))\n file_name = cls.path_replace(file_name)\n # cache lines have the form '<key>;<acc>;<flop>;<uuid-str>', so keep one map per metric\n _map1 = {}\n _map2 = {}\n if os.path.exists(file_name):\n f = open(file_name, 'r')\n for each_line in f:\n rs_ = each_line.strip().split(';')\n _map1[rs_[0]] = '%.5f' % (float(rs_[1]))\n _map2[rs_[0]] = '%.5f' % (float(rs_[2]))\n f.close()\n return _map1, _map2\n\n @classmethod\n def save_fitness_to_cache(cls, individuals):\n _map1, _map2 = cls.load_cache_data()\n for indi in individuals:\n _key, _str = indi.uuid()\n _acc = indi.acc\n _flop = indi.flop\n if _key not in _map1:\n file_name = '%s/cache.txt' % (os.path.join(get_algo_local_dir(), 'populations'))\n file_name = cls.path_replace(file_name)\n f = open(file_name, 'a+')\n _str = '%s;%.5f;%.5f;%s\\n' % (_key, _acc, _flop, _str)\n f.write(_str)\n f.close()\n _map1[_key] = _acc\n _map2[_key] = _flop\n\n @classmethod\n def save_population_at_begin(cls, _str, gen_no):\n file_name = '%s/begin_%05d.txt' % (os.path.join(get_algo_local_dir(), 'populations'), gen_no)\n # solve the path differences caused by different platforms\n file_name = cls.path_replace(file_name)\n with open(file_name, 'w') as f:\n f.write(_str)\n\n @classmethod\n def save_population_after_mutation(cls, _str, gen_no):\n file_name = '%s/mutation_%05d.txt' % (os.path.join(get_algo_local_dir(), 'populations'), gen_no)\n file_name = cls.path_replace(file_name)\n with open(file_name, 'w') as f:\n f.write(_str)\n\n @classmethod\n def get_newest_file_based_on_prefix(cls, prefix):\n id_list = []\n for _, _, file_names in os.walk(os.path.join(get_algo_local_dir(), 'populations')):\n for file_name in file_names:\n if file_name.startswith(prefix):\n number_index = len(prefix) + 1 # the first number index\n id_list.append(int(file_name[number_index:number_index + 5]))\n if len(id_list) == 0:\n return None\n else:\n return np.max(id_list)\n\n @classmethod\n def load_population(cls, prefix, gen_no):\n file_name = '%s/%s_%05d.txt' % (os.path.join(get_algo_local_dir(), 'populations'), prefix, np.min(gen_no))\n file_name = cls.path_replace(file_name)\n params = StatusUpdateTool.get_init_params()\n pop = Population(gen_no, params)\n f = open(file_name)\n indi_start_line = f.readline().strip()\n while indi_start_line.startswith('indi'):\n indi_no = indi_start_line[5:]\n indi = Individual(indi_no, params, params['n_var'])\n genome = []\n for line in f:\n line = line.strip()\n if line.startswith('--'):\n indi_start_line = f.readline().strip()\n break\n else:\n if line.startswith('Acc'):\n indi.acc = float(line[4:])\n elif line.startswith('flop'):\n indi.flop = float(line[5:])\n elif line.startswith('genome'):\n print(line)\n l = list(line[8:])\n while ' ' in l:\n l.remove(' ')\n while ',' in l:\n l.remove(',')\n while ']' in l:\n l.remove(']')\n for i in l:\n genome.append(int(i))\n elif line.startswith('0') or line.startswith('1'):\n print(line)\n l = list(line)\n while ' ' in l:\n l.remove(' ')\n while ',' in l:\n l.remove(',')\n while ']' in l:\n l.remove(']')\n for i in l:\n genome.append(int(i))\n else:\n print('Unknown key for load unit type, line content:%s' % (line))\n indi.genome = np.array(genome)\n pop.individuals.append(indi)\n f.close()\n return pop\n\n @classmethod\n def read_template(cls, search_space):\n _path = os.path.join(os.path.dirname(__file__), 'template', search_space + '_models.py')\n part1 = []\n part2 = []\n f = open(_path)\n f.readline() # skip this comment\n line = f.readline().rstrip()\n while line.strip() != \"#generate_init\":\n part1.append(line)\n line = 
f.readline().rstrip()\n\n line = f.readline().rstrip() # skip the comment '#generate_forward'\n while line.strip() != '\"\"\"':\n part2.append(line)\n line = f.readline().rstrip()\n return part1, part2\n\n @classmethod\n def generate_micro_pytorch_file(cls, indi, params):\n search_space = \"micro\"\n part1, part2 = cls.read_template(search_space)\n line1 = \"genome = convert(%s)\" % (str(list(indi.genome)))\n line2 = \"genotype = decode(genome)\"\n line3 = \"self.net = Network(%d, %d, %d, %d, False, genotype)\" % \\\n (StatusUpdateTool.get_input_channel(), params['init_channels'], params['classes'], params['layers'])\n _str = []\n current_time = time.strftime(\"%Y-%m-%d %H:%M:%S\")\n _str.append('\"\"\"')\n _str.append(current_time)\n _str.append('\"\"\"')\n _str.extend(part1)\n _str.append(' %s' % (line1))\n _str.append(' %s' % (line2))\n _str.append(' %s' % (line3))\n _str.extend(part2)\n file_name = '%s/%s.py' % (os.path.join(get_algo_local_dir(), 'scripts'), indi.id)\n file_name = cls.path_replace(file_name)\n if not os.path.exists(os.path.join(get_algo_local_dir(), 'scripts')):\n os.makedirs(os.path.join(get_algo_local_dir(), 'scripts'))\n script_file_handler = open(file_name, 'w')\n script_file_handler.write('\\n'.join(_str))\n script_file_handler.flush()\n script_file_handler.close()\n\n @classmethod\n def generate_macro_pytorch_file(cls, indi, channels, params):\n search_space = \"macro\"\n part1, part2 = cls.read_template(search_space)\n line1 = \"genome = convert(np.array(%s))\" % (str(list(indi.genome)))\n line2 = \"genotype = decode(genome)\"\n line3 = \"channels = %s\" % (str(channels))\n line4 = \"self.net = EvoNetwork(genotype, channels, %d, (32, 32), decoder='residual')\" % \\\n (params['classes'])\n _str = []\n current_time = time.strftime(\"%Y-%m-%d %H:%M:%S\")\n _str.append('\"\"\"')\n _str.append(current_time)\n _str.append('\"\"\"')\n _str.extend(part1)\n _str.append(' %s' % (line1))\n _str.append(' %s' % (line2))\n _str.append(' %s' % (line3))\n _str.append(' %s' % (line4))\n _str.extend(part2)\n file_name = '%s/%s.py' % (os.path.join(get_algo_local_dir(), 'scripts'), indi.id)\n file_name = cls.path_replace(file_name)\n if not os.path.exists(os.path.join(get_algo_local_dir(), 'scripts')):\n os.makedirs(os.path.join(get_algo_local_dir(), 'scripts'))\n script_file_handler = open(file_name, 'w')\n script_file_handler.write('\\n'.join(_str))\n script_file_handler.flush()\n script_file_handler.close()\n\n @classmethod\n def write_to_file(cls, _str, _file):\n f = open(_file, 'w')\n f.write(_str)\n f.flush()\n f.close()\n", "import random\nimport numpy as np\nimport copy\nfrom algs.evocnn.genetic.population import Individual\nfrom algs.evocnn.utils import Utils\nfrom algs.evocnn.genetic.statusupdatetool import StatusUpdateTool\n\n\nclass CrossoverAndMutation(object):\n def __init__(self, prob_crossover, prob_mutation, _log, individuals, gen_no, _params):\n self.prob_crossover = prob_crossover\n self.prob_mutation = prob_mutation\n self.individuals = individuals\n self.gen_no = gen_no\n self.crossover_eta = _params['crossover_eta']\n self.mutation_eta = _params['mutation_eta']\n self.acc_mean_threshold = _params['acc_mean_threshold']\n self.complexity_threshold = _params['complexity_threshold']\n self.log = _log\n self.offspring = []\n\n def process(self):\n crossover = Crossover(self.individuals, self.prob_crossover, self.crossover_eta, self.acc_mean_threshold,\n self.complexity_threshold, self.log)\n offspring = crossover.do_crossover()\n self.offspring = offspring\n 
Utils.save_population_after_crossover(self.individuals_to_string(), self.gen_no)\n\n mutation = Mutation(self.offspring, self.prob_mutation, self.mutation_eta, self.log)\n mutation.do_mutation()\n\n for i, indi in enumerate(self.offspring):\n indi_no = 'indi%05d_%05d' % (self.gen_no, i)\n indi.id = indi_no\n\n Utils.save_population_after_mutation(self.individuals_to_string(), self.gen_no)\n return self.offspring\n\n def individuals_to_string(self):\n _str = []\n for indi in self.offspring:\n _str.append(str(indi))\n _str.append('-' * 100)\n return '\\n'.join(_str)\n\n\nclass Crossover(object):\n def __init__(self, individuals, prob_, eta, acc_mean_threshold, complexity_threshold, _log):\n self.individuals = individuals\n self.prob = prob_\n self.eta = eta\n self.acc_mean_threshold = acc_mean_threshold\n self.complexity_threshold = complexity_threshold\n self.log = _log\n\n def _choose_one_parent(self):\n count_ = len(self.individuals)\n idx1 = np.random.randint(0, count_)\n idx2 = np.random.randint(0, count_)\n ind1 = self.individuals[idx1]\n ind2 = self.individuals[idx2]\n\n if ind1.acc_mean > ind2.acc_mean:\n if ind1.acc_mean - ind2.acc_mean > self.acc_mean_threshold:\n winner = ind1\n else:\n if ind2.complexity < (ind1.complexity - self.complexity_threshold):\n winner = ind2\n else:\n winner = ind1\n else:\n if ind2.acc_mean - ind1.acc_mean > self.acc_mean_threshold:\n winner = ind2\n else:\n if ind1.complexity < (ind2.complexity - self.complexity_threshold):\n winner = ind1\n else:\n winner = ind2\n return winner\n\n \"\"\"\n binary tournament selection\n \"\"\"\n\n def _choose_two_parents(self):\n # this might choose two same parents\n ind1 = self._choose_one_parent()\n ind2 = self._choose_one_parent()\n return ind1, ind2\n\n def do_crossover(self):\n new_offspring_list = []\n for _ in range(len(self.individuals) // 2):\n ind1, ind2 = self._choose_two_parents()\n\n self.log.info('Do crossover on indi:%s and indi:%s' % (ind1.id, ind2.id))\n p1, p2 = copy.deepcopy(ind1), copy.deepcopy(ind2)\n # for different unit, we define two list, one to save their index and the other one save unit\n p1_conv_index_list = []\n p1_conv_layer_list = []\n p1_pool_index_list = []\n p1_pool_layer_list = []\n p1_full_index_list = []\n p1_full_layer_list = []\n\n p2_conv_index_list = []\n p2_conv_layer_list = []\n p2_pool_index_list = []\n p2_pool_layer_list = []\n p2_full_index_list = []\n p2_full_layer_list = []\n\n for i in range(len(p1.units)):\n unit = p1.units[i]\n if unit.type == 1:\n p1_conv_index_list.append(i)\n p1_conv_layer_list.append(unit)\n elif unit.type == 2:\n p1_pool_index_list.append(i)\n p1_pool_layer_list.append(unit)\n else:\n p1_full_index_list.append(i)\n p1_full_layer_list.append(unit)\n\n for i in range(len(p2.units)):\n unit = p2.units[i]\n if unit.type == 1:\n p2_conv_index_list.append(i)\n p2_conv_layer_list.append(unit)\n elif unit.type == 2:\n p2_pool_index_list.append(i)\n p2_pool_layer_list.append(unit)\n else:\n p2_full_index_list.append(i)\n p2_full_layer_list.append(unit)\n\n # begin crossover on conv layer\n l = min(len(p1_conv_layer_list), len(p2_conv_layer_list))\n for i in range(l):\n unit_p1 = p1_conv_layer_list[i]\n unit_p2 = p2_conv_layer_list[i]\n _p = np.random.random()\n if _p < self.prob:\n # filter size\n filter_size_range = StatusUpdateTool.get_conv_filter_size_limit()\n w1 = unit_p1.filter_size[0]\n w2 = unit_p2.filter_size[0]\n n_w1, n_w2 = self.sbx(w1, w2, filter_size_range[0], filter_size_range[1], self.eta)\n unit_p1.filter_size = int(n_w1), int(n_w1)\n 
unit_p2.filter_size = int(n_w2), int(n_w2)\n # out channel size\n out_channel_size_range = StatusUpdateTool.get_channel_limit()\n s1 = unit_p1.out_channel\n s2 = unit_p2.out_channel\n n_s1, n_s2 = self.sbx(s1, s2, out_channel_size_range[0], out_channel_size_range[1], self.eta)\n unit_p1.out_channel = int(n_s1)\n unit_p2.out_channel = int(n_s2)\n # mean\n mean_range = StatusUpdateTool.get_mean_limit()\n m1 = unit_p1.mean\n m2 = unit_p2.mean\n n_m1, n_m2 = self.sbx(m1, m2, mean_range[0], mean_range[1], self.eta)\n unit_p1.mean = n_m1\n unit_p2.mean = n_m2\n # std\n std_range = StatusUpdateTool.get_std_limit()\n std1 = unit_p1.std\n std2 = unit_p2.std\n n_std1, n_std2 = self.sbx(std1, std2, std_range[0], std_range[1], self.eta)\n unit_p1.std = n_std1\n unit_p2.std = n_std2\n\n p1_conv_layer_list[i] = unit_p1\n p2_conv_layer_list[i] = unit_p2\n\n # begin crossover on pool layer\n l = min(len(p1_pool_layer_list), len(p2_pool_layer_list))\n for i in range(l):\n unit_p1 = p1_pool_layer_list[i]\n unit_p2 = p2_pool_layer_list[i]\n _p = np.random.random()\n if _p < self.prob:\n # kernel size\n pool_kernel_size_range = StatusUpdateTool.get_pool_kernel_size_list()\n k1 = np.log2(unit_p1.kernel_size[0])\n k2 = np.log2(unit_p2.kernel_size[0])\n n_k1, n_k2 = self.sbx(k1, k2, pool_kernel_size_range[0], pool_kernel_size_range[-1], self.eta)\n n_k1 = int(np.power(2, n_k1))\n n_k2 = int(np.power(2, n_k2))\n unit_p1.kernel_size = n_k1, n_k1\n unit_p2.kernel_size = n_k2, n_k2\n # pool type\n t1 = unit_p1.max_or_avg\n t2 = unit_p2.max_or_avg\n n_t1, n_t2 = self.sbx(t1, t2, 0, 1, self.eta)\n unit_p1.max_or_avg = n_t1\n unit_p2.max_or_avg = n_t2\n\n p1_pool_layer_list[i] = unit_p1\n p2_pool_layer_list[i] = unit_p2\n\n # begin crossover on fc layer\n l = min(len(p1_full_layer_list), len(p2_full_layer_list))\n for i in range(l - 1):\n unit_p1 = p1_full_layer_list[i]\n unit_p2 = p2_full_layer_list[i]\n _p = np.random.random()\n if _p < self.prob:\n # output hidden neurons number\n hidden_neurons_range = StatusUpdateTool.get_hidden_neurons_limit()\n n1 = unit_p1.output_neurons_number\n n2 = unit_p2.output_neurons_number\n n_n1, n_n2 = self.sbx(n1, n2, hidden_neurons_range[0], hidden_neurons_range[1], self.eta)\n unit_p1.output_neurons_number = int(n_n1)\n unit_p2.output_neurons_number = int(n_n2)\n # mean\n mean_range = StatusUpdateTool.get_mean_limit()\n m1 = unit_p1.mean\n m2 = unit_p2.mean\n n_m1, n_m2 = self.sbx(m1, m2, mean_range[0], mean_range[1], self.eta)\n unit_p1.mean = n_m1\n unit_p2.mean = n_m2\n # std\n std_range = StatusUpdateTool.get_std_limit()\n std1 = unit_p1.std\n std2 = unit_p2.std\n n_std1, n_std2 = self.sbx(std1, std2, std_range[0], std_range[1], self.eta)\n unit_p1.std = n_std1\n unit_p2.std = n_std2\n\n p1_full_layer_list[i] = unit_p1\n p2_full_layer_list[i] = unit_p2\n\n # for the last full layer, only mean and std\n unit_p1 = p1_full_layer_list[-1]\n unit_p2 = p2_full_layer_list[-1]\n _p = np.random.random()\n if _p < self.prob:\n # mean\n mean_range = StatusUpdateTool.get_mean_limit()\n m1 = unit_p1.mean\n m2 = unit_p2.mean\n n_m1, n_m2 = self.sbx(m1, m2, mean_range[0], mean_range[1], self.eta)\n unit_p1.mean = n_m1\n unit_p2.mean = n_m2\n # std\n std_range = StatusUpdateTool.get_std_limit()\n std1 = unit_p1.std\n std2 = unit_p2.std\n n_std1, n_std2 = self.sbx(std1, std2, std_range[0], std_range[-1], self.eta)\n unit_p1.std = n_std1\n unit_p2.std = n_std2\n p1_full_layer_list[-1] = unit_p1\n p2_full_layer_list[-1] = unit_p2\n\n # assign these crossovered values to the unit_list1 and 
unit_list2\n unit_list1 = p1.units\n for i in range(len(p1_conv_index_list)):\n unit_list1[p1_conv_index_list[i]] = p1_conv_layer_list[i]\n for i in range(len(p1_pool_index_list)):\n unit_list1[p1_pool_index_list[i]] = p1_pool_layer_list[i]\n for i in range(len(p1_full_index_list)):\n unit_list1[p1_full_index_list[i]] = p1_full_layer_list[i]\n\n unit_list2 = p2.units\n for i in range(len(p2_conv_index_list)):\n unit_list2[p2_conv_index_list[i]] = p2_conv_layer_list[i]\n for i in range(len(p2_pool_index_list)):\n unit_list2[p2_pool_index_list[i]] = p2_pool_layer_list[i]\n for i in range(len(p2_full_index_list)):\n unit_list2[p2_full_index_list[i]] = p2_full_layer_list[i]\n\n # re-adjust the in_channel of the above two list\n unit_list1 = Individual.update_all_channels(unit_list1, 0, self.log)\n unit_list2 = Individual.update_all_channels(unit_list2, 0, self.log)\n\n p1.units = unit_list1\n p2.units = unit_list2\n offspring1, offspring2 = p1, p2\n offspring1.reset_acc()\n offspring2.reset_acc()\n offspring1.complexity = Individual.calculate_complexity(unit_list1)\n offspring2.complexity = Individual.calculate_complexity(unit_list2)\n new_offspring_list.append(offspring1)\n new_offspring_list.append(offspring2)\n\n self.log.info('CROSSOVER-%d offspring are generated.' % (len(new_offspring_list)))\n return new_offspring_list\n\n def sbx(self, p1, p2, xl, xu, eta):\n '''\n :param p1: parent1\n :param p2: parent2\n :param xl: minimal\n :param xu: maximal\n :param eta: the parameter of sbx\n :return: two offsprings after crossover\n '''\n # par1 is the greater parent\n if p1 > p2:\n par1 = p1\n par2 = p2\n else:\n par1 = p2\n par2 = p1\n yl = xl\n yu = xu\n rand = np.random.random()\n if rand <= 0.5:\n betaq = (2 * rand) ** (1 / (eta + 1))\n else:\n betaq = (1 / (2 - 2 * rand)) ** (1 / (eta + 1))\n child1 = 0.5 * ((par1 + par2) - betaq * (par1 - par2))\n child2 = 0.5 * ((par1 + par2) + betaq * (par1 - par2))\n if child1 < yl:\n child1 = yl\n if child1 > yu:\n child1 = yu\n if child2 < yl:\n child2 = yl\n if child2 > yu:\n child2 = yu\n return child1, child2\n\n\nclass Mutation(object):\n def __init__(self, individuals, prob_, eta, _log):\n self.individuals = individuals\n self.prob = prob_\n self.eta = eta\n self.log = _log\n\n def do_mutation(self):\n _stat_param = {'offspring_new': 0, 'offspring_from_parent': 0, 'ADD': 0, 'REMOVE': 0, 'ALTER': 0}\n\n mutation_list = StatusUpdateTool.get_mutation_probs_for_each()\n for indi in self.individuals:\n p_ = random.random()\n if p_ < self.prob:\n units_list = []\n is_new = False\n for i in range(len(indi.units) - 1):\n cur_unit = indi.units[i]\n p_ = np.random.random()\n if p_ < 0.5:\n is_new = True\n max_length = 6\n mutation_type = self.select_mutation_type(mutation_list)\n if mutation_type == 0:\n current_conv_and_pool_length = indi.get_conv_number() + indi.get_pool_number()\n if current_conv_and_pool_length < max_length:\n _stat_param['ADD'] += 1\n units_list.append(self.generate_a_new_layer(indi, cur_unit.type, len(indi.units)))\n units_list.append(cur_unit)\n else:\n _stat_param['ALTER'] += 1\n updated_unit = self.alter_a_unit(indi, cur_unit, self.eta)\n units_list.append(updated_unit)\n elif mutation_type == 1:\n _stat_param['ALTER'] += 1\n updated_unit = self.alter_a_unit(indi, cur_unit, self.eta)\n units_list.append(updated_unit)\n elif mutation_type == 2:\n _stat_param['REMOVE'] += 1\n # do nothing with units_list\n else:\n raise TypeError('Error mutation type :%d, validate range:0-2' % (mutation_type))\n else:\n 
units_list.append(cur_unit)\n\n # avoid the case where all units have been removed: add a conv and a pool layer\n if len(units_list) == 0:\n units_list.append(Individual.init_a_conv(indi))\n units_list.append(Individual.init_a_pool(indi))\n units_list.append(indi.units[-1])\n # judge the first unit and the second unit\n if units_list[0].type != 1:\n units_list.insert(0, Individual.init_a_conv(indi))\n\n if is_new:\n _stat_param['offspring_new'] += 1\n units_list = Individual.update_all_channels(units_list, 1, self.log)\n indi.units = units_list\n indi.complexity = Individual.calculate_complexity(units_list)\n else:\n _stat_param['offspring_from_parent'] += 1\n else:\n _stat_param['offspring_from_parent'] += 1\n self.log.info('MUTATION-mutated individuals:%d[ADD:%d,REMOVE:%d,ALTER:%d, no_changes:%d]' % (\n _stat_param['offspring_new'], _stat_param['ADD'], _stat_param['REMOVE'], _stat_param['ALTER'],\n _stat_param['offspring_from_parent']))\n\n def generate_a_new_layer(self, indi, current_unit_type, unit_length):\n if current_unit_type == 3:\n # judge if current length = 1, add conv or pool\n if unit_length == 1:\n if random.random() < 0.5:\n return Individual.init_a_conv(indi)\n else:\n return Individual.init_a_pool(indi)\n else:\n return Individual.init_a_fc(indi)\n else:\n r = random.random()\n if r < 0.5:\n return Individual.init_a_conv(indi)\n else:\n return Individual.init_a_pool(indi)\n\n def alter_a_unit(self, indi, unit, eta):\n if unit.type == 1:\n # mutate a conv layer\n return self.alter_conv_unit(indi, unit, eta)\n elif unit.type == 2:\n # mutate a pool layer\n return self.alter_pool_unit(indi, unit, eta)\n else:\n # mutate a full layer\n return self.alter_full_layer(indi, unit, eta)\n\n def alter_conv_unit(self, indi, unit, eta):\n # feature map size, feature map number, mean std\n fms = unit.filter_size[0]\n fmn = unit.out_channel\n mean = unit.mean\n std = unit.std\n\n new_fms = int(self.pm(indi.min_conv_filter_size, indi.max_conv_filter_size, fms, eta))\n new_fmn = int(self.pm(indi.min_channel, indi.max_channel, fmn, eta))\n new_mean = self.pm(indi.min_mean, indi.max_mean, mean, eta)\n new_std = self.pm(indi.min_std, indi.max_std, std, eta)\n conv_unit = Individual.init_a_conv(indi, _filter_height=new_fms, _filter_width=new_fms, _out_channel=new_fmn,\n _mean=new_mean, _std=new_std)\n return conv_unit\n\n def alter_pool_unit(self, indi, unit, eta):\n # kernel size, pool_type\n ksize = np.log2(unit.kernel_size[0])\n pool_type = unit.max_or_avg\n\n new_ksize = self.pm(indi.pool_kernel_size_list[0], indi.pool_kernel_size_list[-1], ksize, eta)\n new_ksize = int(np.power(2, new_ksize))\n new_pool_type = self.pm(0, 1, pool_type, eta)\n pool_unit = Individual.init_a_pool(indi, _kernel_width=new_ksize, _kernel_height=new_ksize,\n _max_or_avg=new_pool_type)\n return pool_unit\n\n def alter_full_layer(self, indi, unit, eta):\n # num of hidden neurons, mean, std\n n_hidden = unit.output_neurons_number\n mean = unit.mean\n std = unit.std\n\n new_n_hidden = int(self.pm(indi.min_hidden_neurons, indi.max_hidden_neurons, n_hidden, eta))\n new_mean = self.pm(indi.min_mean, indi.max_mean, mean, eta)\n new_std = self.pm(indi.min_std, indi.max_std, std, eta)\n fc_unit = Individual.init_a_fc(indi, _output_neurons_number=new_n_hidden, _mean=new_mean, _std=new_std)\n return fc_unit\n\n def select_mutation_type(self, _a):\n a = np.asarray(_a)\n sum_a = np.sum(a).astype(float)\n rand = np.random.random() * sum_a\n _sum = 0\n mutation_type = -1\n for i in range(len(a)):\n _sum += a[i]\n if _sum > rand:\n mutation_type = i\n 
break\n assert mutation_type != -1\n return mutation_type\n\n def pm(self, xl, xu, x, eta):\n delta_1 = (x - xl) / (xu - xl)\n delta_2 = (xu - x) / (xu - xl)\n rand = np.random.random()\n mut_pow = 1.0 / (eta + 1.)\n if rand < 0.5:\n xy = 1.0 - delta_1\n val = 2.0 * rand + (1.0 - 2.0 * rand) * xy ** (eta + 1)\n delta_q = val ** mut_pow - 1.0\n else:\n xy = 1.0 - delta_2\n val = 2.0 * (1.0 - rand) + 2.0 * (rand - 0.5) * xy ** (eta + 1)\n delta_q = 1.0 - val ** mut_pow\n x = x + delta_q * (xu - xl)\n x = min(max(x, xl), xu)\n return x\n\n\nif __name__ == '__main__':\n cm = CrossoverAndMutation(StatusUpdateTool.get_genetic_probability()[0],\n StatusUpdateTool.get_genetic_probability()[1], )\n" ]
[ [ "numpy.ceil" ], [ "numpy.logical_not", "numpy.random.random", "numpy.power", "numpy.full", "numpy.copy", "numpy.random.permutation", "numpy.repeat", "numpy.zeros" ], [ "numpy.max", "numpy.array", "numpy.min" ], [ "numpy.log2", "numpy.random.random", "numpy.power", "numpy.asarray", "numpy.sum", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Harsh-Vavaiya/Stock-prediction-game
[ "fbff899f498f90988082a1ce59978cd02fb6f498" ]
[ "app.py" ]
[ "import requests\nfrom flask import Flask, request, jsonify, send_from_directory\napp = Flask(__name__)\nimport pandas as pd\nimport quandl\nimport math\nimport random\nimport os\nimport numpy as np\nfrom sklearn import preprocessing, cross_validation, svm\nfrom sklearn.linear_model import LinearRegression\n\nif 'ON_HEROKU' in os.environ:\n @app.route('/')\n def index():\n return send_from_directory('client/build','index.html')\n @app.route('/index.html')\n def index2():\n return send_from_directory('client/build','index.html')\n @app.route('/static/css/<filename>')\n def index_css(filename):\n return send_from_directory('client/build/static/css',filename)\n @app.route('/static/js/<filename>')\n def index_js(filename):\n return send_from_directory('client/build/static/js',filename)\n @app.route('/service-worker.js')\n def index_service_worker():\n return send_from_directory('client/build', 'service-worker.js')\n @app.route('/manifest.json')\n def index_manifest():\n return send_from_directory('client/build', 'manifest.json')\n @app.route('/favicon-16x16.png')\n def index_favicon16():\n return send_from_directory('client/build', 'favicon-16x16.png')\n @app.route('/favicon-32x32.png')\n def index_favicon32():\n return send_from_directory('client/build', 'favicon-32x32.png')\n @app.route('/favicon-96x96.png')\n def index_favicon96():\n return send_from_directory('client/build', 'favicon-96x96.png')\n\[email protected]('/getstockdata/')\ndef getStockData():\n stock = request.args.get('stock', default=None, type=None)\n quandl.ApiConfig.api_key = \"qWcicxSctVxrP9PhyneG\"\n allData = quandl.get('WIKI/'+stock)\n dataLength = 251\n allDataLength = len(allData)\n firstDataElem = math.floor(random.random()*(allDataLength-dataLength))\n mlData = allData[0:firstDataElem+dataLength]\n\n def FormatForModel(dataArray):\n dataArray = dataArray[['Adj. Open', 'Adj. High', 'Adj. Low', 'Adj. Close', 'Adj. Volume']]\n dataArray['HL_PCT'] = (dataArray['Adj. High'] - dataArray['Adj. Close']) / dataArray['Adj. Close'] * 100.0\n dataArray['PCT_change'] = (dataArray['Adj. Close'] - dataArray['Adj. Open']) / dataArray['Adj. Open'] * 100.0\n dataArray = dataArray[['Adj. Close', 'HL_PCT', 'PCT_change','Adj. Volume']]\n dataArray.fillna(-99999, inplace=True)\n return dataArray\n\n mlData = FormatForModel(mlData)\n\n forecast_col = 'Adj. Close'\n forecast_out = int(math.ceil(0.12*dataLength))\n\n mlData['label'] = mlData[forecast_col].shift(-forecast_out)\n mlData.dropna(inplace=True)\n\n X = np.array(mlData.drop(['label'],1))\n X = preprocessing.scale(X)\n X_data = X[-dataLength:]\n X = X[:-dataLength]\n data = mlData[-dataLength:]\n mlData = mlData[:-dataLength]\n y = np.array(mlData['label'])\n\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.001)\n\n clf = LinearRegression()\n clf.fit(X_train, y_train)\n accuracy = clf.score(X_test, y_test)\n\n prediction = clf.predict(X_data)\n data = data[['Adj. Close']]\n data = data.rename(columns={'Adj. Close':'EOD'})\n data['prediction'] = prediction[:]\n data = data.to_json(orient='table')\n return jsonify(data)\n" ]
[ [ "sklearn.cross_validation.train_test_split", "numpy.array", "sklearn.preprocessing.scale", "sklearn.linear_model.LinearRegression" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bashar94/simpletransformers
[ "4a36dde6bd4d049dc1d0e0ecd83dccf5bff1f5b0" ]
[ "simpletransformers/retrieval/retrieval_model.py" ]
[ "import json\nimport logging\nimport math\nimport os\nimport random\nimport warnings\nimport string\nfrom dataclasses import asdict\nfrom multiprocessing import Pool, cpu_count\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport torch\nimport transformers\nfrom tensorboardX import SummaryWriter\nfrom torch.nn.utils.rnn import pad_sequence\nfrom torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom tqdm.auto import tqdm, trange\nfrom transformers.optimization import (\n get_constant_schedule,\n get_constant_schedule_with_warmup,\n get_linear_schedule_with_warmup,\n get_cosine_schedule_with_warmup,\n get_cosine_with_hard_restarts_schedule_with_warmup,\n get_polynomial_decay_schedule_with_warmup,\n)\nfrom transformers.optimization import AdamW, Adafactor\nfrom transformers.models.dpr import (\n DPRConfig,\n DPRContextEncoder,\n DPRQuestionEncoder,\n DPRContextEncoderTokenizerFast,\n DPRQuestionEncoderTokenizerFast,\n)\nfrom transformers.models.auto import (\n AutoConfig,\n AutoModel,\n AutoTokenizer,\n)\nimport datasets\nfrom datasets import load_from_disk\n\nfrom simpletransformers.config.global_args import global_args\nfrom simpletransformers.config.model_args import RetrievalArgs\nfrom simpletransformers.config.utils import sweep_config_to_sweep_values\nfrom simpletransformers.retrieval.retrieval_utils import (\n get_prediction_passage_dataset,\n load_hf_dataset,\n get_evaluation_passage_dataset,\n mean_reciprocal_rank_at_k,\n)\n\nimport torch.multiprocessing\n\ntorch.multiprocessing.set_sharing_strategy('file_system')\n\ntry:\n import wandb\n\n wandb_available = True\nexcept ImportError:\n wandb_available = False\n\nlogger = logging.getLogger(__name__)\n\nMODEL_CLASSES = {\n \"dpr\": (\n DPRConfig,\n DPRContextEncoder,\n DPRQuestionEncoder,\n DPRContextEncoderTokenizerFast,\n DPRQuestionEncoderTokenizerFast,\n ),\n \"custom\": (\n AutoConfig,\n AutoModel,\n AutoModel,\n AutoTokenizer,\n AutoTokenizer,\n ),\n}\n\n\nclass RetrievalModel:\n def __init__(\n self,\n model_type=None,\n model_name=None,\n context_encoder_name=None,\n query_encoder_name=None,\n context_encoder_tokenizer=None,\n query_encoder_tokenizer=None,\n prediction_passages=None,\n args=None,\n use_cuda=True,\n cuda_device=-1,\n **kwargs,\n ):\n \"\"\"\n Initializes a RetrievalModel model.\n\n Args:\n model_type (str, optional): The type of model architecture. Defaults to None.\n model_name (str, optional): The exact architecture and trained weights to use for the full model. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files. Defaults to None.\n context_encoder_name (str, optional): The exact architecture and trained weights to use for the context encoder model. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files. Defaults to None.\n query_encoder_name (str, optional): The exact architecture and trained weights to use for the query encoder model. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files. Defaults to None.\n context_encoder_tokenizer (str, optional): The tokenizer to use for the context encoder. 
This may be a Hugging Face Transformers compatible pre-trained tokenizer, a community tokenizer, or the path to a directory containing tokenizer files. Defaults to None.\n query_encoder_tokenizer (str, optional): The tokenizer to use for the query encoder. This may be a Hugging Face Transformers compatible pre-trained tokenizer, a community tokenizer, or the path to a directory containing tokenizer files. Defaults to None.\n prediction_passages (str, optional): The passages to be used as the corpus for retrieval when making predictions. Provide this only when using the model for predictions. Defaults to None.\n args (dict or RetrievalArgs, optional): Default args will be used if this parameter is not provided. If provided, it should be a dict containing the args that should be changed in the default args or an instance of RetrievalArgs.\n use_cuda (bool, optional): Use GPU if available. Setting to False will force model to use CPU only.. Defaults to True.\n cuda_device (int, optional): Specific GPU that should be used. Will use the first available GPU by default. Defaults to -1.\n **kwargs (optional): For providing proxies, force_download, resume_download, cache_dir and other options specific to the 'from_pretrained' implementation where this will be supplied.\n\n Raises:\n ValueError: [description]\n \"\"\" # noqa: ignore flake8\"\n\n self.args = self._load_model_args(model_name)\n\n if isinstance(args, dict):\n self.args.update_from_dict(args)\n elif isinstance(args, RetrievalArgs):\n self.args = args\n\n if \"sweep_config\" in kwargs:\n self.is_sweeping = True\n sweep_config = kwargs.pop(\"sweep_config\")\n sweep_values = sweep_config_to_sweep_values(sweep_config)\n self.args.update_from_dict(sweep_values)\n else:\n self.is_sweeping = False\n\n if self.args.manual_seed:\n random.seed(self.args.manual_seed)\n np.random.seed(self.args.manual_seed)\n torch.manual_seed(self.args.manual_seed)\n if self.args.n_gpu > 0:\n torch.cuda.manual_seed_all(self.args.manual_seed)\n\n if use_cuda:\n if torch.cuda.is_available():\n if cuda_device == -1:\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(f\"cuda:{cuda_device}\")\n else:\n raise ValueError(\n \"'use_cuda' set to True when cuda is unavailable.\"\n \"Make sure CUDA is available or set `use_cuda=False`.\"\n )\n else:\n self.device = \"cpu\"\n\n self.results = {}\n\n if not use_cuda:\n self.args.fp16 = False\n\n try:\n (\n config_class,\n context_encoder,\n query_encoder,\n context_tokenizer,\n query_tokenizer,\n ) = MODEL_CLASSES[model_type]\n except KeyError:\n raise ValueError(\n \"Model type {} not found. 
Available options are {}\".format(\n model_type, list(MODEL_CLASSES.keys())\n )\n )\n\n if context_encoder_name:\n self.context_config = config_class.from_pretrained(\n context_encoder_name, **self.args.context_config\n )\n if self.args.context_config.get(\"projection_dim\") is not None:\n context_encoder._keys_to_ignore_on_load_missing.append(\"encode_proj\")\n self.context_encoder = context_encoder.from_pretrained(\n context_encoder_name, config=self.context_config\n )\n self.context_tokenizer = context_tokenizer.from_pretrained(\n context_encoder_name\n )\n elif model_name:\n self.context_config = config_class.from_pretrained(\n os.path.join(model_name, \"context_encoder\"), **self.args.context_config\n )\n self.context_encoder = context_encoder.from_pretrained(\n os.path.join(model_name, \"context_encoder\"), config=self.context_config\n )\n self.context_tokenizer = context_tokenizer.from_pretrained(\n os.path.join(model_name, \"context_encoder\")\n )\n else:\n self.context_config = config_class(**self.args.context_config)\n self.context_encoder = context_encoder(config=self.context_config)\n self.context_tokenizer = context_tokenizer.from_pretrained(\n context_encoder_tokenizer\n )\n\n if query_encoder_name:\n self.query_config = config_class.from_pretrained(\n query_encoder_name, **self.args.query_config\n )\n if self.args.query_config.get(\"projection_dim\") is not None:\n query_encoder._keys_to_ignore_on_load_missing.append(\"encode_proj\")\n self.query_encoder = query_encoder.from_pretrained(\n query_encoder_name, config=self.query_config\n )\n self.query_tokenizer = query_tokenizer.from_pretrained(query_encoder_name)\n elif model_name:\n self.query_config = config_class.from_pretrained(\n os.path.join(model_name, \"query_encoder\"), **self.args.query_config\n )\n self.query_encoder = query_encoder.from_pretrained(\n os.path.join(model_name, \"query_encoder\"), config=self.query_config\n )\n self.query_tokenizer = query_tokenizer.from_pretrained(\n os.path.join(model_name, \"query_encoder\")\n )\n else:\n self.query_config = config_class(**self.args.query_config)\n self.query_encoder = query_encoder(config=self.query_config)\n self.query_tokenizer = query_tokenizer.from_pretrained(\n query_encoder_tokenizer\n )\n\n # TODO: Add support for adding special tokens to the tokenizers\n\n self.args.model_type = model_type\n self.args.model_name = model_name\n\n if prediction_passages is not None:\n self.prediction_passages = self.get_updated_prediction_passages(\n prediction_passages\n )\n else:\n self.prediction_passages = None\n\n def train_model(\n self,\n train_data,\n output_dir=None,\n show_running_loss=True,\n args=None,\n eval_data=None,\n additional_eval_passages=None,\n verbose=True,\n **kwargs,\n ):\n \"\"\"\n Trains the model using 'train_data'\n\n Args:\n train_data: Pandas DataFrame containing the 3 columns - `query_text`, `gold_passage`, and `title`. (Title is optional)\n - `query_text`: The Query text sequence\n - `gold_passage`: The gold passage text sequence\n - `title`: The title of the gold passage\n If `use_hf_datasets` is True, then this may also be the path to a TSV file with the same columns.\n output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.\n show_running_loss (optional): Set to False to prevent running loss from being printed to console. Defaults to True.\n args (optional): Optional changes to the args dict of the model. 
Any changes made will persist for the model.\n additional_eval_passages: Additional passages to be used during evaluation.\n This may be a list of passages, a pandas DataFrame with the column `passages`, or a TSV file with the column `passages`.\n eval_data (optional): A DataFrame against which evaluation will be performed when evaluate_during_training is enabled. Is required if evaluate_during_training is enabled.\n **kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).\n A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs\n will be lists of strings. Note that this will slow down training significantly as the predicted sequences need to be generated.\n\n Returns:\n global_step: Number of global steps trained\n training_details: Average training loss if evaluate_during_training is False or full training progress scores if evaluate_during_training is True\n \"\"\" # noqa: ignore flake8\"\n\n if args:\n self.args.update_from_dict(args)\n\n # if self.args.silent:\n # show_running_loss = False\n\n if self.args.evaluate_during_training and eval_data is None:\n raise ValueError(\n \"evaluate_during_training is enabled but eval_data is not specified.\"\n \" Pass eval_data to model.train_model() if using evaluate_during_training.\"\n )\n\n if not output_dir:\n output_dir = self.args.output_dir\n\n if (\n os.path.exists(output_dir)\n and os.listdir(output_dir)\n and not self.args.overwrite_output_dir\n ):\n raise ValueError(\n \"Output directory ({}) already exists and is not empty.\"\n \" Set args.overwrite_output_dir = True to overcome.\".format(output_dir)\n )\n\n if self.args.ddp_training:\n self.context_encoder = self.context_encoder.to(kwargs[\"rank\"])\n self.query_encoder = self.query_encoder.to(kwargs[\"rank\"])\n self.context_encoder = DDP(\n self.context_encoder, device_ids=[kwargs[\"rank\"]]\n )\n self.query_encoder = DDP(self.query_encoder, device_ids=[kwargs[\"rank\"]])\n self.device = kwargs[\"rank\"]\n else:\n self._move_model_to_device()\n\n train_dataset = self.load_and_cache_examples(train_data, verbose=verbose)\n\n os.makedirs(output_dir, exist_ok=True)\n\n global_step, training_details = self.train(\n train_dataset,\n output_dir,\n show_running_loss=show_running_loss,\n eval_data=eval_data,\n additional_eval_passages=additional_eval_passages,\n verbose=verbose,\n **kwargs,\n )\n\n self.save_model(\n self.args.output_dir,\n context_model=self.context_encoder,\n query_model=self.query_encoder,\n )\n\n if verbose:\n logger.info(\n \" Training of {} model complete. Saved to {}.\".format(\n self.args.model_name, output_dir\n )\n )\n\n return global_step, training_details\n\n def train(\n self,\n train_dataset,\n output_dir,\n show_running_loss=True,\n eval_data=None,\n additional_eval_passages=None,\n verbose=True,\n **kwargs,\n ):\n \"\"\"\n Trains the model on train_dataset.\n\n Utility function to be used by the train_model() method. 
Not intended to be used directly.\n \"\"\"\n\n context_model = self.context_encoder\n query_model = self.query_encoder\n args = self.args\n\n tb_writer = SummaryWriter(logdir=args.tensorboard_dir)\n train_sampler = RandomSampler(train_dataset)\n train_dataloader = DataLoader(\n train_dataset,\n sampler=train_sampler,\n batch_size=args.train_batch_size,\n num_workers=self.args.dataloader_num_workers,\n )\n\n if args.max_steps > 0:\n t_total = args.max_steps\n args.num_train_epochs = (\n args.max_steps\n // (len(train_dataloader) // args.gradient_accumulation_steps)\n + 1\n )\n else:\n t_total = (\n len(train_dataloader)\n // args.gradient_accumulation_steps\n * args.num_train_epochs\n )\n\n optimizer_grouped_parameters = self.get_optimizer_parameters(\n context_model, query_model, args\n )\n\n warmup_steps = math.ceil(t_total * args.warmup_ratio)\n args.warmup_steps = (\n warmup_steps if args.warmup_steps == 0 else args.warmup_steps\n )\n\n if args.optimizer == \"AdamW\":\n optimizer = AdamW(\n optimizer_grouped_parameters,\n lr=args.learning_rate,\n eps=args.adam_epsilon,\n )\n elif args.optimizer == \"Adafactor\":\n optimizer = Adafactor(\n optimizer_grouped_parameters,\n lr=args.learning_rate,\n eps=args.adafactor_eps,\n clip_threshold=args.adafactor_clip_threshold,\n decay_rate=args.adafactor_decay_rate,\n beta1=args.adafactor_beta1,\n weight_decay=args.weight_decay,\n scale_parameter=args.adafactor_scale_parameter,\n relative_step=args.adafactor_relative_step,\n warmup_init=args.adafactor_warmup_init,\n )\n else:\n raise ValueError(\n \"{} is not a valid optimizer class. Please use one of ('AdamW', 'Adafactor') instead.\".format(\n args.optimizer\n )\n )\n\n scheduler = self.get_scheduler(optimizer, args, t_total)\n\n criterion = torch.nn.NLLLoss(reduction=\"mean\")\n\n if (\n args.model_name\n and os.path.isfile(os.path.join(args.model_name, \"optimizer.pt\"))\n and os.path.isfile(os.path.join(args.model_name, \"scheduler.pt\"))\n ):\n # Load in optimizer and scheduler states\n optimizer.load_state_dict(\n torch.load(os.path.join(args.model_name, \"optimizer.pt\"))\n )\n scheduler.load_state_dict(\n torch.load(os.path.join(args.model_name, \"scheduler.pt\"))\n )\n\n if args.n_gpu > 1:\n context_model = torch.nn.DataParallel(context_model)\n query_model = torch.nn.DataParallel(query_model)\n\n logger.info(\" Training started\")\n\n global_step = 0\n training_progress_scores = None\n tr_loss, logging_loss = 0.0, 0.0\n context_model.zero_grad()\n query_model.zero_grad()\n train_iterator = trange(\n int(args.num_train_epochs), desc=\"Epoch\", disable=args.silent, mininterval=0\n )\n epoch_number = 0\n best_eval_metric = None\n early_stopping_counter = 0\n steps_trained_in_current_epoch = 0\n epochs_trained = 0\n\n if args.model_name and os.path.exists(args.model_name):\n try:\n # set global_step to gobal_step of last saved checkpoint from model path\n checkpoint_suffix = args.model_name.split(\"/\")[-1].split(\"-\")\n if len(checkpoint_suffix) > 2:\n checkpoint_suffix = checkpoint_suffix[1]\n else:\n checkpoint_suffix = checkpoint_suffix[-1]\n global_step = int(checkpoint_suffix)\n epochs_trained = global_step // (\n len(train_dataloader) // args.gradient_accumulation_steps\n )\n steps_trained_in_current_epoch = global_step % (\n len(train_dataloader) // args.gradient_accumulation_steps\n )\n\n logger.info(\n \" Continuing training from checkpoint, will skip to saved global_step\"\n )\n logger.info(\" Continuing training from epoch %d\", epochs_trained)\n logger.info(\" Continuing 
training from global step %d\", global_step)\n logger.info(\n \" Will skip the first %d steps in the current epoch\",\n steps_trained_in_current_epoch,\n )\n except ValueError:\n logger.info(\" Starting fine-tuning.\")\n\n if args.evaluate_during_training:\n training_progress_scores = self._create_training_progress_scores(**kwargs)\n\n if args.wandb_project:\n wandb.init(\n project=args.wandb_project,\n config={**asdict(args)},\n **args.wandb_kwargs,\n )\n wandb.run._label(repo=\"simpletransformers\")\n wandb.watch(context_model)\n wandb.watch(query_model)\n\n if args.fp16:\n from torch.cuda import amp\n\n scaler = amp.GradScaler()\n\n for current_epoch in train_iterator:\n if args.train_context_encoder:\n context_model.train()\n else:\n context_model.eval()\n if args.train_query_encoder:\n query_model.train()\n else:\n query_model.eval()\n if epochs_trained > 0:\n epochs_trained -= 1\n continue\n train_iterator.set_description(\n f\"Epoch {epoch_number + 1} of {args.num_train_epochs}\"\n )\n batch_iterator = tqdm(\n train_dataloader,\n desc=f\"Running Epoch {epoch_number} of {args.num_train_epochs}\",\n disable=args.silent,\n mininterval=0,\n )\n for step, batch in enumerate(batch_iterator):\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n # batch = tuple(t.to(device) for t in batch)\n\n context_inputs, query_inputs, labels = self._get_inputs_dict(batch)\n if args.fp16:\n with amp.autocast():\n loss, *_, correct_count = self._calculate_loss(\n context_model,\n query_model,\n context_inputs,\n query_inputs,\n labels,\n criterion,\n )\n else:\n loss, *_, correct_count = self._calculate_loss(\n context_model,\n query_model,\n context_inputs,\n query_inputs,\n labels,\n criterion,\n )\n\n if args.n_gpu > 1:\n loss = loss.mean()\n\n current_loss = loss.item()\n\n if show_running_loss:\n batch_iterator.set_description(\n f\"Epochs {epoch_number}/{args.num_train_epochs}. 
Running Loss: {current_loss:9.4f} Correct count: {correct_count}\"\n )\n\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n scaler.scale(loss).backward()\n else:\n loss.backward()\n\n tr_loss += loss.item()\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if args.fp16:\n scaler.unscale_(optimizer)\n if args.optimizer == \"AdamW\":\n torch.nn.utils.clip_grad_norm_(\n context_model.parameters(), args.max_grad_norm\n )\n torch.nn.utils.clip_grad_norm_(\n query_model.parameters(), args.max_grad_norm\n )\n\n if args.fp16:\n scaler.step(optimizer)\n scaler.update()\n else:\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n context_model.zero_grad()\n query_model.zero_grad()\n global_step += 1\n\n if args.logging_steps > 0 and global_step % args.logging_steps == 0:\n # Log metrics\n tb_writer.add_scalar(\n \"lr\", scheduler.get_last_lr()[0], global_step\n )\n tb_writer.add_scalar(\n \"loss\",\n (tr_loss - logging_loss) / args.logging_steps,\n global_step,\n )\n logging_loss = tr_loss\n if args.wandb_project or self.is_sweeping:\n wandb.log(\n {\n \"Training loss\": current_loss,\n \"lr\": scheduler.get_last_lr()[0],\n \"global_step\": global_step,\n }\n )\n\n if args.save_steps > 0 and global_step % args.save_steps == 0:\n # Save model checkpoint\n output_dir_current = os.path.join(\n output_dir, \"checkpoint-{}\".format(global_step)\n )\n\n self.save_model(\n output_dir_current,\n optimizer,\n scheduler,\n context_model=context_model,\n query_model=query_model,\n )\n\n if args.evaluate_during_training and (\n args.evaluate_during_training_steps > 0\n and global_step % args.evaluate_during_training_steps == 0\n ):\n # Only evaluate when single GPU otherwise metrics may not average well\n results, *_ = self.eval_model(\n eval_data,\n additional_passages=additional_eval_passages,\n verbose=verbose and args.evaluate_during_training_verbose,\n silent=args.evaluate_during_training_silent,\n **kwargs,\n )\n for key, value in results.items():\n try:\n tb_writer.add_scalar(\n \"eval_{}\".format(key), value, global_step\n )\n except (NotImplementedError, AssertionError):\n pass\n\n output_dir_current = os.path.join(\n output_dir, \"checkpoint-{}\".format(global_step)\n )\n\n if args.save_eval_checkpoints:\n self.save_model(\n output_dir_current,\n optimizer,\n scheduler,\n context_model=context_model,\n query_model=query_model,\n results=results,\n )\n\n training_progress_scores[\"global_step\"].append(global_step)\n training_progress_scores[\"train_loss\"].append(current_loss)\n for key in results:\n training_progress_scores[key].append(results[key])\n report = pd.DataFrame(training_progress_scores)\n report.to_csv(\n os.path.join(\n args.output_dir, \"training_progress_scores.csv\"\n ),\n index=False,\n )\n\n if args.wandb_project or self.is_sweeping:\n wandb.log(self._get_last_metrics(training_progress_scores))\n\n if not best_eval_metric:\n best_eval_metric = results[args.early_stopping_metric]\n if args.save_best_model:\n self.save_model(\n args.best_model_dir,\n optimizer,\n scheduler,\n context_model=context_model,\n query_model=query_model,\n results=results,\n )\n if best_eval_metric and args.early_stopping_metric_minimize:\n if (\n results[args.early_stopping_metric] - best_eval_metric\n < args.early_stopping_delta\n ):\n best_eval_metric = results[args.early_stopping_metric]\n if args.save_best_model:\n self.save_model(\n args.best_model_dir,\n optimizer,\n scheduler,\n context_model=context_model,\n 
query_model=query_model,\n results=results,\n )\n early_stopping_counter = 0\n else:\n if args.use_early_stopping:\n if (\n early_stopping_counter\n < args.early_stopping_patience\n ):\n early_stopping_counter += 1\n if verbose:\n logger.info(\n f\" No improvement in {args.early_stopping_metric}\"\n )\n logger.info(\n f\" Current step: {early_stopping_counter}\"\n )\n logger.info(\n f\" Early stopping patience: {args.early_stopping_patience}\"\n )\n else:\n if verbose:\n logger.info(\n f\" Patience of {args.early_stopping_patience} steps reached\"\n )\n logger.info(\" Training terminated.\")\n train_iterator.close()\n return (\n global_step,\n tr_loss / global_step\n if not self.args.evaluate_during_training\n else training_progress_scores,\n )\n else:\n if (\n results[args.early_stopping_metric] - best_eval_metric\n > args.early_stopping_delta\n ):\n best_eval_metric = results[args.early_stopping_metric]\n if args.save_best_model:\n self.save_model(\n args.best_model_dir,\n optimizer,\n scheduler,\n context_model=context_model,\n query_model=query_model,\n results=results,\n )\n early_stopping_counter = 0\n else:\n if args.use_early_stopping:\n if (\n early_stopping_counter\n < args.early_stopping_patience\n ):\n early_stopping_counter += 1\n if verbose:\n logger.info(\n f\" No improvement in {args.early_stopping_metric}\"\n )\n logger.info(\n f\" Current step: {early_stopping_counter}\"\n )\n logger.info(\n f\" Early stopping patience: {args.early_stopping_patience}\"\n )\n else:\n if verbose:\n logger.info(\n f\" Patience of {args.early_stopping_patience} steps reached\"\n )\n logger.info(\" Training terminated.\")\n train_iterator.close()\n return (\n global_step,\n tr_loss / global_step\n if not self.args.evaluate_during_training\n else training_progress_scores,\n )\n context_model.train()\n query_model.train()\n\n epoch_number += 1\n output_dir_current = os.path.join(\n output_dir, \"checkpoint-{}-epoch-{}\".format(global_step, epoch_number)\n )\n\n if args.save_model_every_epoch or args.evaluate_during_training:\n os.makedirs(output_dir_current, exist_ok=True)\n\n if args.save_model_every_epoch:\n self.save_model(\n output_dir_current,\n optimizer,\n scheduler,\n context_model=context_model,\n query_model=query_model,\n )\n\n if args.evaluate_during_training and args.evaluate_each_epoch:\n results, *_ = self.eval_model(\n eval_data,\n additional_passages=additional_eval_passages,\n verbose=verbose and args.evaluate_during_training_verbose,\n silent=args.evaluate_during_training_silent,\n **kwargs,\n )\n\n if args.save_eval_checkpoints:\n self.save_model(\n output_dir_current, optimizer, scheduler, results=results\n )\n\n training_progress_scores[\"global_step\"].append(global_step)\n training_progress_scores[\"train_loss\"].append(current_loss)\n for key in results:\n training_progress_scores[key].append(results[key])\n report = pd.DataFrame(training_progress_scores)\n report.to_csv(\n os.path.join(args.output_dir, \"training_progress_scores.csv\"),\n index=False,\n )\n\n if args.wandb_project or self.is_sweeping:\n wandb.log(self._get_last_metrics(training_progress_scores))\n\n if not best_eval_metric:\n best_eval_metric = results[args.early_stopping_metric]\n if args.save_best_model:\n self.save_model(\n args.best_model_dir,\n optimizer,\n scheduler,\n context_model=context_model,\n query_model=query_model,\n results=results,\n )\n if best_eval_metric and args.early_stopping_metric_minimize:\n if (\n results[args.early_stopping_metric] - best_eval_metric\n < 
args.early_stopping_delta\n ):\n best_eval_metric = results[args.early_stopping_metric]\n if args.save_best_model:\n self.save_model(\n args.best_model_dir,\n optimizer,\n scheduler,\n context_model=context_model,\n query_model=query_model,\n results=results,\n )\n early_stopping_counter = 0\n else:\n if (\n args.use_early_stopping\n and args.early_stopping_consider_epochs\n ):\n if early_stopping_counter < args.early_stopping_patience:\n early_stopping_counter += 1\n if verbose:\n logger.info(\n f\" No improvement in {args.early_stopping_metric}\"\n )\n logger.info(\n f\" Current step: {early_stopping_counter}\"\n )\n logger.info(\n f\" Early stopping patience: {args.early_stopping_patience}\"\n )\n else:\n if verbose:\n logger.info(\n f\" Patience of {args.early_stopping_patience} steps reached\"\n )\n logger.info(\" Training terminated.\")\n train_iterator.close()\n return (\n global_step,\n tr_loss / global_step\n if not self.args.evaluate_during_training\n else training_progress_scores,\n )\n else:\n if (\n results[args.early_stopping_metric] - best_eval_metric\n > args.early_stopping_delta\n ):\n best_eval_metric = results[args.early_stopping_metric]\n if args.save_best_model:\n self.save_model(\n args.best_model_dir,\n optimizer,\n scheduler,\n context_model=context_model,\n query_model=query_model,\n results=results,\n )\n early_stopping_counter = 0\n else:\n if (\n args.use_early_stopping\n and args.early_stopping_consider_epochs\n ):\n if early_stopping_counter < args.early_stopping_patience:\n early_stopping_counter += 1\n if verbose:\n logger.info(\n f\" No improvement in {args.early_stopping_metric}\"\n )\n logger.info(\n f\" Current step: {early_stopping_counter}\"\n )\n logger.info(\n f\" Early stopping patience: {args.early_stopping_patience}\"\n )\n else:\n if verbose:\n logger.info(\n f\" Patience of {args.early_stopping_patience} steps reached\"\n )\n logger.info(\" Training terminated.\")\n train_iterator.close()\n return (\n global_step,\n tr_loss / global_step\n if not self.args.evaluate_during_training\n else training_progress_scores,\n )\n\n return (\n global_step,\n tr_loss / global_step\n if not self.args.evaluate_during_training\n else training_progress_scores,\n )\n\n def eval_model(\n self,\n eval_data,\n evaluate_with_all_passages=True,\n additional_passages=None,\n top_k_values=None,\n retrieve_n_docs=None,\n return_doc_dicts=True,\n passage_dataset=None,\n qa_evaluation=False,\n output_dir=None,\n verbose=True,\n silent=False,\n **kwargs,\n ):\n \"\"\"\n Evaluates the model on eval_data. Saves results to output_dir.\n\n Args:\n eval_data: Pandas DataFrame containing the 2 columns - `query_text`, 'gold_passage'.\n - `query_text`: The Query text sequence\n - `gold_passage`: The gold passage text sequence\n If `use_hf_datasets` is True, then this may also be the path to a TSV file with the same columns.\n evaluate_with_all_passages: If True, evaluate with all passages. If False, evaluate only with in-batch negatives.\n additional_passages: Additional passages to be used during evaluation.\n This may be a list of passages, a pandas DataFrame with the column \"passages\", or a TSV file with the column \"passages\".\n top_k_values: List of top-k values to be used for evaluation.\n retrieve_n_docs: Number of documents to retrieve for each query. Overrides `args.retrieve_n_docs` for this evaluation.\n return_doc_dicts: If True, return the doc dicts for the retrieved passages. 
Setting this to False can speed up evaluation.\n passage_dataset: Path to a saved Huggingface dataset (containing generated embeddings) for both the eval_data and additional passages\n qa_evaluation: If True, evaluation is done by checking if the retrieved passages contain the gold passage.\n output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.\n verbose: If verbose, results will be printed to the console on completion of evaluation.\n silent: If silent, tqdm progress bars will be hidden.\n **kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).\n A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs\n will be lists of strings. Note that this will slow down evaluation significantly as the predicted sequences need to be generated.\n Returns:\n results: Dictionary containing evaluation results.\n \"\"\" # noqa: ignore flake8\"\n\n if not output_dir:\n output_dir = self.args.output_dir\n\n self._move_model_to_device()\n\n if self.prediction_passages is None:\n passage_dataset = get_evaluation_passage_dataset(\n eval_data,\n additional_passages,\n self.context_encoder,\n self.context_tokenizer,\n self.context_config,\n self.args,\n self.device,\n passage_dataset=passage_dataset,\n )\n else:\n passage_dataset = self.prediction_passages\n\n eval_dataset, gold_passages = load_hf_dataset(\n eval_data,\n self.context_tokenizer,\n self.query_tokenizer,\n self.args,\n evaluate=True,\n )\n\n result, doc_ids, doc_vectors, doc_dicts = self.evaluate(\n eval_dataset,\n gold_passages,\n evaluate_with_all_passages,\n passage_dataset,\n qa_evaluation,\n top_k_values,\n return_doc_dicts,\n output_dir,\n verbose=verbose,\n silent=silent,\n retrieve_n_docs=retrieve_n_docs,\n **kwargs,\n )\n\n if verbose:\n logger.info(result)\n\n return result, doc_ids, doc_vectors, doc_dicts\n\n def evaluate(\n self,\n eval_dataset,\n gold_passages,\n evaluate_with_all_passages=True,\n passage_dataset=None,\n qa_evaluation=False,\n top_k_values=None,\n return_doc_dicts=True,\n output_dir=None,\n verbose=True,\n silent=False,\n retrieve_n_docs=None,\n **kwargs,\n ):\n \"\"\"\n Evaluates the model on eval_dataset.\n\n Utility function to be used by the eval_model() method. 
Not intended to be used directly.\n \"\"\"\n\n context_model = self.context_encoder\n query_model = self.query_encoder\n args = self.args\n eval_output_dir = output_dir\n\n results = {}\n\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(\n eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size\n )\n\n if args.n_gpu > 1:\n context_model = torch.nn.DataParallel(context_model)\n query_model = torch.nn.DataParallel(query_model)\n\n nb_eval_steps = 0\n eval_loss = 0\n context_model.eval()\n query_model.eval()\n\n criterion = torch.nn.NLLLoss(reduction=\"mean\")\n\n if self.args.fp16:\n from torch.cuda import amp\n\n all_query_embeddings = np.zeros(\n (\n len(eval_dataset),\n self.query_config.hidden_size\n if \"projection_dim\" not in self.query_config.to_dict()\n or not self.query_config.projection_dim\n else self.query_config.projection_dim,\n )\n )\n for i, batch in enumerate(\n tqdm(\n eval_dataloader,\n disable=args.silent or silent,\n desc=\"Running Evaluation\",\n )\n ):\n # batch = tuple(t.to(device) for t in batch)\n\n context_inputs, query_inputs, labels = self._get_inputs_dict(\n batch, evaluate=True\n )\n with torch.no_grad():\n if self.args.fp16:\n with amp.autocast():\n (\n tmp_eval_loss,\n _,\n query_outputs,\n correct_count,\n ) = self._calculate_loss(\n context_model,\n query_model,\n context_inputs,\n query_inputs,\n labels,\n criterion,\n )\n else:\n (\n tmp_eval_loss,\n _,\n query_outputs,\n correct_count,\n ) = self._calculate_loss(\n context_model,\n query_model,\n context_inputs,\n query_inputs,\n labels,\n criterion,\n )\n if self.args.n_gpu > 1:\n tmp_eval_loss = tmp_eval_loss.mean()\n\n eval_loss += tmp_eval_loss.item()\n all_query_embeddings[\n i * args.eval_batch_size : (i + 1) * args.eval_batch_size\n ] = (query_outputs.cpu().detach().numpy())\n nb_eval_steps += 1\n\n eval_loss = eval_loss / nb_eval_steps\n\n results[\"eval_loss\"] = eval_loss\n\n if evaluate_with_all_passages:\n doc_ids, doc_vectors, doc_dicts = self.retrieve_docs_from_query_embeddings(\n all_query_embeddings,\n passage_dataset,\n retrieve_n_docs,\n return_doc_dicts=True,\n )\n\n doc_texts = [doc_dict[\"passages\"] for doc_dict in doc_dicts]\n\n scores = self.compute_metrics(\n gold_passages,\n doc_texts,\n self.args,\n qa_evaluation,\n top_k_values,\n **kwargs,\n )\n\n results.update(scores)\n\n output_eval_file = os.path.join(eval_output_dir, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n for key in sorted(results.keys()):\n writer.write(\"{} = {}\\n\".format(key, str(results[key])))\n\n if args.wandb_project:\n if not wandb.setup().settings.sweep_id:\n logger.info(\" Initializing WandB run for evaluation.\")\n wandb.init(\n project=args.wandb_project,\n config={**asdict(args)},\n **args.wandb_kwargs,\n )\n wandb.run._label(repo=\"simpletransformers\")\n self.wandb_run_id = wandb.run.id\n wandb.log(results)\n\n return results, doc_ids, doc_vectors, doc_dicts\n\n def predict(\n self,\n to_predict,\n prediction_passages=None,\n retrieve_n_docs=None,\n passages_only=False,\n ):\n \"\"\"\n Retrieve the relevant documents from the prediction passages for a list of queries.\n\n Args:\n to_predict (list): A list of strings containing the queries to be predicted.\n prediction_passages (Union[str, DataFrame], optional): Path to a directory containing a passage dataset, a JSON/TSV file containing the passages, or a Pandas DataFrame. Defaults to None.\n retrieve_n_docs (int, optional): Number of docs to retrieve per query. 
Defaults to None.\n passages_only (bool, optional): If True, return only the retrieved passages and skip the doc ids, vectors, and doc dicts. Defaults to False.\n\n Raises:\n ValueError: Raised if prediction_passages is None and the model does not already contain a prediction passage index.\n\n Returns:\n passages: List of lists containing the retrieved passages per query. (Shape: `(len(to_predict), retrieve_n_docs)`)\n doc_ids: List of lists containing the retrieved doc ids per query. (Shape: `(len(to_predict), retrieve_n_docs)`)\n doc_vectors: List of lists containing the retrieved doc vectors per query. (Shape: `(len(to_predict), retrieve_n_docs)`)\n doc_dicts: List of dicts containing the retrieved doc dicts per query.\n \"\"\" # noqa: ignore flake8\"\n if self.prediction_passages is None:\n if prediction_passages is None:\n raise ValueError(\n \"prediction_passages cannot be None if the model does not contain a prediction passage index.\"\n )\n else:\n self.context_encoder.to(self.device)\n self.context_encoder.eval()\n self.prediction_passages = self.get_updated_prediction_passages(\n prediction_passages\n )\n self.context_encoder.to(self.device)\n\n all_query_embeddings = np.zeros(\n (\n len(to_predict),\n self.query_config.hidden_size\n if \"projection_dim\" not in self.query_config.to_dict()\n or not self.query_config.projection_dim\n else self.query_config.projection_dim,\n )\n )\n\n query_model = self.query_encoder\n query_model.to(self.device)\n\n if self.args.n_gpu > 1:\n query_model = torch.nn.DataParallel(query_model)\n\n if self.args.fp16:\n from torch.cuda import amp\n\n query_model.eval()\n\n # Batching\n for i, batch in tqdm(\n enumerate(\n [\n to_predict[i : i + self.args.eval_batch_size]\n for i in range(0, len(to_predict), self.args.eval_batch_size)\n ]\n ),\n desc=\"Generating query embeddings\",\n disable=self.args.silent,\n ):\n query_batch = self.query_tokenizer(\n batch,\n max_length=self.args.max_seq_length,\n padding=\"max_length\",\n truncation=True,\n return_tensors=\"pt\",\n )\n\n query_inputs = {\n \"input_ids\": query_batch[\"input_ids\"].to(self.device),\n \"attention_mask\": query_batch[\"attention_mask\"].to(self.device),\n }\n\n with torch.no_grad():\n if self.args.fp16:\n with amp.autocast():\n query_outputs = query_model(**query_inputs).pooler_output\n else:\n query_outputs = query_model(**query_inputs).pooler_output\n\n all_query_embeddings[\n i * self.args.eval_batch_size : (i + 1) * self.args.eval_batch_size\n ] = (query_outputs.cpu().detach().numpy())\n\n if not passages_only:\n doc_ids, doc_vectors, doc_dicts = self.retrieve_docs_from_query_embeddings(\n all_query_embeddings, self.prediction_passages, retrieve_n_docs\n )\n passages = [d[\"passages\"] for d in doc_dicts]\n\n return passages, doc_ids, doc_vectors, doc_dicts\n else:\n passages = self.retrieve_docs_from_query_embeddings(\n all_query_embeddings,\n self.prediction_passages,\n retrieve_n_docs,\n passages_only=True,\n )\n return passages\n\n def compute_metrics(\n self,\n gold_passages,\n doc_texts,\n args,\n qa_evaluation=False,\n top_k_values=None,\n **kwargs,\n ):\n \"\"\"\n Computes the metrics for the evaluation data.\n \"\"\"\n if top_k_values is None:\n top_k_values = [1, 2, 3, 5, 10]\n\n top_k_values = [k for k in top_k_values if k <= args.retrieve_n_docs]\n\n relevance_list = np.zeros((len(gold_passages), args.retrieve_n_docs))\n for i, (docs, truth) in enumerate(zip(doc_texts, gold_passages)):\n for j, d in enumerate(docs):\n if qa_evaluation:\n if truth.strip().lower().replace(\" \", \"\").translate(\n str.maketrans(\"\", \"\", string.punctuation)\n ) in d.strip().lower().replace(\" \", \"\").translate(\n str.maketrans(\"\", \"\", string.punctuation)\n ):\n relevance_list[i, j] = 1\n break\n else:\n if 
d.strip().lower().translate(\n str.maketrans(\"\", \"\", string.punctuation)\n ) == truth.strip().lower().translate(\n str.maketrans(\"\", \"\", string.punctuation)\n ):\n relevance_list[i, j] = 1\n break\n\n mrr = {\n f\"mrr@{k}\": mean_reciprocal_rank_at_k(relevance_list, k)\n for k in top_k_values\n }\n\n top_k_accuracy = {\n f\"top_{k}_accuracy\": np.mean(np.sum(relevance_list[:, :k], axis=-1))\n for k in top_k_values\n }\n\n extra_metrics = {}\n for metric, func in kwargs.items():\n extra_metrics[metric] = func(gold_passages, doc_texts)\n\n results = {**mrr, **top_k_accuracy, **extra_metrics}\n\n return results\n\n def retrieve_docs_from_query_embeddings(\n self,\n query_embeddings,\n passage_dataset,\n retrieve_n_docs=None,\n return_doc_dicts=True,\n passages_only=False,\n ):\n \"\"\"\n Retrieves documents from the index using the given query embeddings.\n \"\"\"\n args = self.args\n if retrieve_n_docs is None:\n retrieve_n_docs = args.retrieve_n_docs\n\n query_embeddings_batched = [\n query_embeddings[i : i + args.retrieval_batch_size]\n for i in range(0, len(query_embeddings), args.retrieval_batch_size)\n ]\n\n if passages_only:\n passages = []\n for i, query_embeddings in enumerate(\n tqdm(\n query_embeddings_batched,\n desc=\"Retrieving docs\",\n disable=args.silent,\n )\n ):\n _, _, doc_dicts_batch = passage_dataset.get_top_docs(\n query_embeddings.astype(np.float32), retrieve_n_docs\n )\n\n passages.extend([d[\"passages\"] for d in doc_dicts_batch])\n\n return passages\n else:\n ids_batched = np.zeros((len(query_embeddings), retrieve_n_docs))\n vectors_batched = np.zeros(\n (\n len(query_embeddings),\n retrieve_n_docs,\n self.context_config.hidden_size\n if \"projection_dim\" not in self.context_config.to_dict()\n or not self.context_config.projection_dim\n else self.context_config.projection_dim,\n )\n )\n doc_dicts = []\n\n for i, query_embeddings in enumerate(\n tqdm(\n query_embeddings_batched,\n desc=\"Retrieving docs\",\n disable=args.silent,\n )\n ):\n ids, vectors, doc_dicts_batch = passage_dataset.get_top_docs(\n query_embeddings.astype(np.float32), retrieve_n_docs\n )\n ids_batched[\n i * args.retrieval_batch_size : (i * args.retrieval_batch_size)\n + len(ids)\n ] = ids\n vectors_batched[\n i * args.retrieval_batch_size : (i * args.retrieval_batch_size)\n + len(ids)\n ] = vectors\n\n if return_doc_dicts:\n doc_dicts.extend(doc_dicts_batch)\n\n if not return_doc_dicts:\n doc_dicts = None\n\n return ids_batched, vectors_batched, doc_dicts\n\n def build_hard_negatives(\n self,\n queries,\n passage_dataset=None,\n retrieve_n_docs=None,\n write_to_disk=True,\n hard_negatives_save_file_path=None,\n ):\n hard_negatives, *_ = self.predict(\n to_predict=queries,\n prediction_passages=passage_dataset,\n retrieve_n_docs=retrieve_n_docs,\n )\n\n if retrieve_n_docs is None:\n retrieve_n_docs = self.args.retrieve_n_docs\n\n column_names = [f\"hard_negatives_{i}\" for i in range(retrieve_n_docs)]\n\n # Build hard negative df from list of lists\n hard_negative_df = pd.DataFrame(hard_negatives, columns=column_names)\n\n if write_to_disk:\n if hard_negatives_save_file_path is None:\n os.makedirs(self.args.output_dir, exist_ok=True)\n hard_negatives_save_file_path = os.path.join(\n self.args.output_dir, \"hard_negatives.tsv\"\n )\n hard_negative_df.to_csv(\n hard_negatives_save_file_path,\n index=False,\n sep=\"\\t\",\n )\n\n return hard_negative_df\n\n def load_and_cache_examples(\n self, data, evaluate=False, no_cache=False, verbose=True, silent=False\n ):\n \"\"\"\n Creates a 
IRDataset from data\n \"\"\"\n\n if not no_cache:\n no_cache = self.args.no_cache\n\n if not no_cache:\n os.makedirs(self.args.cache_dir, exist_ok=True)\n\n if self.args.use_hf_datasets:\n dataset = load_hf_dataset(\n data, self.context_tokenizer, self.query_tokenizer, self.args\n )\n\n return dataset\n else:\n # Retrieval models can only be used with hf datasets\n raise ValueError(\"Retrieval models can only be used with hf datasets.\")\n\n def get_optimizer_parameters(self, context_model, query_model, args):\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n\n optimizer_grouped_parameters = []\n custom_parameter_names = set()\n for group in self.args.custom_parameter_groups:\n params = group.pop(\"params\")\n custom_parameter_names.update(params)\n param_group = {**group}\n param_group[\"params\"] = [\n p for n, p in context_model.named_parameters() if n in params\n ]\n param_group[\"params\"].extend(\n [p for n, p in query_model.named_parameters() if n in params]\n )\n optimizer_grouped_parameters.append(param_group)\n\n for group in self.args.custom_layer_parameters:\n layer_number = group.pop(\"layer\")\n layer = f\"layer.{layer_number}.\"\n group_d = {**group}\n group_nd = {**group}\n group_nd[\"weight_decay\"] = 0.0\n params_d = []\n params_nd = []\n for n, p in context_model.named_parameters():\n if n not in custom_parameter_names and layer in n:\n if any(nd in n for nd in no_decay):\n params_nd.append(p)\n else:\n params_d.append(p)\n custom_parameter_names.add(n)\n for n, p in query_model.named_parameters():\n if n not in custom_parameter_names and layer in n:\n if any(nd in n for nd in no_decay):\n params_nd.append(p)\n else:\n params_d.append(p)\n custom_parameter_names.add(n)\n group_d[\"params\"] = params_d\n group_nd[\"params\"] = params_nd\n\n optimizer_grouped_parameters.append(group_d)\n optimizer_grouped_parameters.append(group_nd)\n\n if not self.args.train_custom_parameters_only:\n if self.args.train_context_encoder:\n optimizer_grouped_parameters.extend(\n [\n {\n \"params\": [\n p\n for n, p in context_model.named_parameters()\n if n not in custom_parameter_names\n and not any(nd in n for nd in no_decay)\n ],\n \"weight_decay\": args.weight_decay,\n },\n {\n \"params\": [\n p\n for n, p in context_model.named_parameters()\n if n not in custom_parameter_names\n and any(nd in n for nd in no_decay)\n ],\n \"weight_decay\": 0.0,\n },\n ]\n )\n if self.args.train_query_encoder:\n optimizer_grouped_parameters.extend(\n [\n {\n \"params\": [\n p\n for n, p in query_model.named_parameters()\n if n not in custom_parameter_names\n and not any(nd in n for nd in no_decay)\n ],\n \"weight_decay\": args.weight_decay,\n },\n {\n \"params\": [\n p\n for n, p in query_model.named_parameters()\n if n not in custom_parameter_names\n and any(nd in n for nd in no_decay)\n ],\n \"weight_decay\": 0.0,\n },\n ]\n )\n\n return optimizer_grouped_parameters\n\n def get_scheduler(self, optimizer, args, t_total):\n if args.scheduler == \"constant_schedule\":\n scheduler = get_constant_schedule(optimizer)\n\n elif args.scheduler == \"constant_schedule_with_warmup\":\n scheduler = get_constant_schedule_with_warmup(\n optimizer, num_warmup_steps=args.warmup_steps\n )\n\n elif args.scheduler == \"linear_schedule_with_warmup\":\n scheduler = get_linear_schedule_with_warmup(\n optimizer,\n num_warmup_steps=args.warmup_steps,\n num_training_steps=t_total,\n )\n\n elif args.scheduler == \"cosine_schedule_with_warmup\":\n scheduler = get_cosine_schedule_with_warmup(\n optimizer,\n 
num_warmup_steps=args.warmup_steps,\n num_training_steps=t_total,\n num_cycles=args.cosine_schedule_num_cycles,\n )\n\n elif args.scheduler == \"cosine_with_hard_restarts_schedule_with_warmup\":\n scheduler = get_cosine_with_hard_restarts_schedule_with_warmup(\n optimizer,\n num_warmup_steps=args.warmup_steps,\n num_training_steps=t_total,\n num_cycles=args.cosine_schedule_num_cycles,\n )\n\n elif args.scheduler == \"polynomial_decay_schedule_with_warmup\":\n scheduler = get_polynomial_decay_schedule_with_warmup(\n optimizer,\n num_warmup_steps=args.warmup_steps,\n num_training_steps=t_total,\n lr_end=args.polynomial_decay_schedule_lr_end,\n power=args.polynomial_decay_schedule_power,\n )\n else:\n raise ValueError(\"{} is not a valid scheduler.\".format(args.scheduler))\n\n return scheduler\n\n def get_updated_prediction_passages(self, prediction_passages):\n \"\"\"\n Update the model passage dataset with a new passage dataset.\n This is typically only useful for prediction.\n\n Args:\n prediction_passages (str): Path to new passage dataset.\n \"\"\"\n prediction_passages = get_prediction_passage_dataset(\n prediction_passages,\n self.context_encoder,\n self.context_tokenizer,\n self.context_config,\n self.args,\n self.device,\n )\n\n return prediction_passages\n\n def _calculate_loss(\n self,\n context_model,\n query_model,\n context_inputs,\n query_inputs,\n labels,\n criterion,\n ):\n context_outputs = context_model(**context_inputs).pooler_output\n query_outputs = query_model(**query_inputs).pooler_output\n\n context_outputs = torch.nn.functional.dropout(context_outputs, p=0.1)\n query_outputs = torch.nn.functional.dropout(query_outputs, p=0.1)\n\n similarity_score = torch.matmul(query_outputs, context_outputs.t())\n softmax_score = torch.nn.functional.log_softmax(similarity_score, dim=-1)\n\n criterion = torch.nn.NLLLoss(reduction=\"mean\")\n\n loss = criterion(softmax_score, labels)\n\n max_score, max_idxs = torch.max(softmax_score, 1)\n correct_predictions_count = (\n (max_idxs == torch.tensor(labels)).sum().cpu().detach().numpy().item()\n )\n\n return loss, context_outputs, query_outputs, correct_predictions_count\n\n def _get_inputs_dict(self, batch, evaluate=False):\n device = self.device\n\n labels = [i for i in range(len(batch[\"context_ids\"]))]\n labels = torch.tensor(labels, dtype=torch.long)\n\n if not evaluate:\n labels = labels.to(device)\n if self.args.hard_negatives:\n shuffled_indices = torch.randperm(len(labels))\n context_ids = torch.cat(\n [\n batch[\"context_ids\"],\n batch[\"hard_negative_ids\"][shuffled_indices],\n ],\n dim=0,\n )\n context_masks = torch.cat(\n [\n batch[\"context_mask\"],\n batch[\"hard_negatives_mask\"][shuffled_indices],\n ],\n dim=0,\n )\n else:\n context_ids = batch[\"context_ids\"]\n context_masks = batch[\"context_mask\"]\n context_input = {\n \"input_ids\": context_ids.to(device),\n \"attention_mask\": context_masks.to(device),\n }\n query_input = {\n \"input_ids\": batch[\"query_ids\"].to(device),\n \"attention_mask\": batch[\"query_mask\"].to(device),\n }\n else:\n shuffled_indices = torch.randperm(len(labels))\n\n labels = labels[shuffled_indices].to(device)\n\n if self.args.hard_negatives:\n context_ids = torch.cat(\n [\n batch[\"context_ids\"][shuffled_indices],\n batch[\"hard_negative_ids\"],\n ],\n dim=0,\n )\n context_masks = torch.cat(\n [\n batch[\"context_mask\"][shuffled_indices],\n batch[\"hard_negatives_mask\"],\n ],\n dim=0,\n )\n else:\n context_ids = batch[\"context_ids\"][shuffled_indices]\n context_masks = 
batch[\"context_mask\"][shuffled_indices]\n\n context_input = {\n \"input_ids\": context_ids.to(device),\n \"attention_mask\": context_masks.to(device),\n }\n query_input = {\n \"input_ids\": batch[\"query_ids\"].to(device),\n \"attention_mask\": batch[\"query_mask\"].to(device),\n }\n\n return context_input, query_input, labels\n\n def _create_training_progress_scores(self, **kwargs):\n # TODO: top_k_values should be part of the model. Probably.\n top_k_values = [1, 2, 3, 5, 10]\n extra_metrics = {key: [] for key in kwargs}\n training_progress_scores = {\n \"global_step\": [],\n \"eval_loss\": [],\n \"train_loss\": [],\n **extra_metrics,\n }\n training_progress_scores = {\n **training_progress_scores,\n **{f\"mrr@{k}\": [] for k in top_k_values},\n }\n training_progress_scores = {\n **training_progress_scores,\n **{f\"top_{k}_accuracy\": [] for k in top_k_values},\n }\n\n return training_progress_scores\n\n def _get_last_metrics(self, metric_values):\n return {metric: values[-1] for metric, values in metric_values.items()}\n\n def save_model(\n self,\n output_dir=None,\n optimizer=None,\n scheduler=None,\n context_model=None,\n query_model=None,\n results=None,\n ):\n if not output_dir:\n output_dir = self.args.output_dir\n\n if context_model and query_model and not self.args.no_save:\n os.makedirs(output_dir, exist_ok=True)\n\n logger.info(f\"Saving model into {output_dir}\")\n # Take care of distributed/parallel training\n context_model_to_save = (\n context_model.module\n if hasattr(context_model, \"module\")\n else context_model\n )\n query_model_to_save = (\n query_model.module if hasattr(query_model, \"module\") else query_model\n )\n self.save_model_args(output_dir)\n\n os.makedirs(os.path.join(output_dir, \"context_encoder\"), exist_ok=True)\n os.makedirs(os.path.join(output_dir, \"query_encoder\"), exist_ok=True)\n self.context_config.save_pretrained(\n os.path.join(output_dir, \"context_encoder\")\n )\n self.query_config.save_pretrained(os.path.join(output_dir, \"query_encoder\"))\n\n context_model_to_save.save_pretrained(\n os.path.join(output_dir, \"context_encoder\")\n )\n query_model_to_save.save_pretrained(\n os.path.join(output_dir, \"query_encoder\")\n )\n\n self.context_tokenizer.save_pretrained(\n os.path.join(output_dir, \"context_encoder\")\n )\n self.query_tokenizer.save_pretrained(\n os.path.join(output_dir, \"query_encoder\")\n )\n\n torch.save(self.args, os.path.join(output_dir, \"training_args.bin\"))\n if optimizer and scheduler and self.args.save_optimizer_and_scheduler:\n torch.save(\n optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\")\n )\n torch.save(\n scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\")\n )\n\n if results:\n os.makedirs(output_dir, exist_ok=True)\n output_eval_file = os.path.join(output_dir, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n for key in sorted(results.keys()):\n writer.write(\"{} = {}\\n\".format(key, str(results[key])))\n\n def _move_model_to_device(self):\n self.context_encoder.to(self.device)\n self.query_encoder.to(self.device)\n\n def save_model_args(self, output_dir):\n os.makedirs(output_dir, exist_ok=True)\n self.args.save(output_dir)\n\n def _load_model_args(self, input_dir):\n args = RetrievalArgs()\n args.load(input_dir)\n return args\n\n def get_named_parameters(self):\n return [n for n, p in self.context_encoder.named_parameters()] + [\n n for n, p in self.query_encoder.named_parameters()\n ]\n" ]
[ [ "torch.max", "torch.nn.functional.dropout", "torch.cat", "torch.utils.data.DataLoader", "pandas.DataFrame", "torch.cuda.amp.autocast", "torch.no_grad", "torch.cuda.is_available", "torch.cuda.manual_seed_all", "torch.device", "torch.tensor", "torch.nn.NLLLoss", "torch.cuda.amp.GradScaler", "numpy.sum", "torch.nn.parallel.DistributedDataParallel", "torch.nn.functional.log_softmax", "numpy.random.seed", "torch.manual_seed", "torch.utils.data.SequentialSampler", "torch.utils.data.RandomSampler", "torch.nn.DataParallel", "torch.multiprocessing.set_sharing_strategy" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
RCBiczok/cmapPy
[ "580b0d656892e72f58047666a94e2769ddf63b3f" ]
[ "cmapPy/math/tests/test_agg_wt_avg.py" ]
[ "import unittest\nimport logging\nimport pandas as pd\nimport cmapPy.pandasGEXpress.setup_GCToo_logger as setup_logger\nimport cmapPy.math.agg_wt_avg as agg_wt_avg\n\nlogger = logging.getLogger(setup_logger.LOGGER_NAME)\n\ntest_mat = pd.DataFrame({'A':[1,2,3], 'B': [2,8,6], 'C': [6,8,9]})\ntest_mat_corr = test_mat.corr()\n\n\nclass TestAggWtAvg(unittest.TestCase):\n def test_calculate_weights(self):\n # happy path\n raw_weights, weights = agg_wt_avg.calculate_weights(test_mat_corr, min_wt=0.1)\n self.assertTrue(len(weights == 3))\n self.assertTrue(raw_weights.tolist() == [0.8183, 0.7202, 0.8838])\n self.assertTrue(weights.tolist() == [0.3378, 0.2973, 0.3649])\n\n # test that min_wt works\n raw_weights2, weights2 = agg_wt_avg.calculate_weights(test_mat_corr, min_wt=0.85)\n self.assertEqual(raw_weights2[1], 0.85)\n\n def test_get_upper_triangle(self):\n # happy path\n upper_tri_df = agg_wt_avg.get_upper_triangle(test_mat_corr)\n self.assertTrue(upper_tri_df['corr'].tolist() == [0.6547, 0.982, 0.7857])\n self.assertTrue(upper_tri_df['rid'].tolist() == ['B', 'C', 'C'])\n self.assertTrue(upper_tri_df['index'].tolist() == ['A', 'A', 'B'])\n\n def test_agg_wt_avg(self):\n # use spearman\n out_sig, upper_tri_df, raw_weights, weights = agg_wt_avg.agg_wt_avg(test_mat)\n self.assertTrue(out_sig.tolist() == [3.125, 5.75, 6.0])\n self.assertAlmostEqual(upper_tri_df.loc[upper_tri_df.index[0], \"corr\"], 0.5)\n self.assertAlmostEqual(raw_weights[0], 0.75)\n self.assertAlmostEqual(weights[0], 0.375)\n\n # test on a single signature\n out_sig2, _, _, _ = agg_wt_avg.agg_wt_avg(test_mat[[\"C\"]])\n pd.util.testing.assert_frame_equal(out_sig2, test_mat[[\"C\"]])\n\n # should break if empty input\n with self.assertRaises(AssertionError) as e:\n agg_wt_avg.agg_wt_avg(test_mat[[]])\n self.assertIn(\"mat is empty!\", str(e.exception))\n\nif __name__ == \"__main__\":\n setup_logger.setup(verbose=True)\n unittest.main()\n\n" ]
[ [ "pandas.util.testing.assert_frame_equal", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
aicentral/pytorch_cluster
[ "75216b3c22278a3cbc906a1e83f6e4710ff58b41" ]
[ "torch_cluster/knn.py" ]
[ "from typing import Optional\n\nimport torch\n\n\[email protected]\ndef knn(x: torch.Tensor, y: torch.Tensor, k: int,\n batch_x: Optional[torch.Tensor] = None,\n batch_y: Optional[torch.Tensor] = None, cosine: bool = False,\n num_workers: int = 1) -> torch.Tensor:\n r\"\"\"Finds for each element in :obj:`y` the :obj:`k` nearest points in\n :obj:`x`.\n\n Args:\n x (Tensor): Node feature matrix\n :math:`\\mathbf{X} \\in \\mathbb{R}^{N \\times F}`.\n y (Tensor): Node feature matrix\n :math:`\\mathbf{X} \\in \\mathbb{R}^{M \\times F}`.\n k (int): The number of neighbors.\n batch_x (LongTensor, optional): Batch vector\n :math:`\\mathbf{b} \\in {\\{ 0, \\ldots, B-1\\}}^N`, which assigns each\n node to a specific example. :obj:`batch_x` needs to be sorted.\n (default: :obj:`None`)\n batch_y (LongTensor, optional): Batch vector\n :math:`\\mathbf{b} \\in {\\{ 0, \\ldots, B-1\\}}^M`, which assigns each\n node to a specific example. :obj:`batch_y` needs to be sorted.\n (default: :obj:`None`)\n cosine (boolean, optional): If :obj:`True`, will use the Cosine\n distance instead of the Euclidean distance to find nearest\n neighbors. (default: :obj:`False`)\n num_workers (int): Number of workers to use for computation. Has no\n effect in case :obj:`batch_x` or :obj:`batch_y` is not\n :obj:`None`, or the input lies on the GPU. (default: :obj:`1`)\n\n :rtype: :class:`LongTensor`\n\n .. code-block:: python\n\n import torch\n from torch_cluster import knn\n\n x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]])\n batch_x = torch.tensor([0, 0, 0, 0])\n y = torch.Tensor([[-1, 0], [1, 0]])\n batch_x = torch.tensor([0, 0])\n assign_index = knn(x, y, 2, batch_x, batch_y)\n \"\"\"\n\n x = x.view(-1, 1) if x.dim() == 1 else x\n y = y.view(-1, 1) if y.dim() == 1 else y\n x, y = x.contiguous(), y.contiguous()\n\n ptr_x: Optional[torch.Tensor] = None\n if batch_x is not None:\n assert x.size(0) == batch_x.numel()\n batch_size = int(batch_x.max()) + 1\n\n deg = x.new_zeros(batch_size, dtype=torch.long)\n deg.scatter_add_(0, batch_x, torch.ones_like(batch_x))\n\n ptr_x = deg.new_zeros(batch_size + 1)\n torch.cumsum(deg, 0, out=ptr_x[1:])\n\n ptr_y: Optional[torch.Tensor] = None\n if batch_y is not None:\n assert y.size(0) == batch_y.numel()\n batch_size = int(batch_y.max()) + 1\n\n deg = y.new_zeros(batch_size, dtype=torch.long)\n deg.scatter_add_(0, batch_y, torch.ones_like(batch_y))\n\n ptr_y = deg.new_zeros(batch_size + 1)\n torch.cumsum(deg, 0, out=ptr_y[1:])\n\n return torch.ops.torch_cluster.knn(x, y, ptr_x, ptr_y, k, cosine,\n num_workers)\n\n\[email protected]\ndef knn_graph(x: torch.Tensor, k: int, batch: Optional[torch.Tensor] = None,\n loop: bool = False, flow: str = 'source_to_target',\n cosine: bool = False, num_workers: int = 1) -> torch.Tensor:\n r\"\"\"Computes graph edges to the nearest :obj:`k` points.\n\n Args:\n x (Tensor): Node feature matrix\n :math:`\\mathbf{X} \\in \\mathbb{R}^{N \\times F}`.\n k (int): The number of neighbors.\n batch (LongTensor, optional): Batch vector\n :math:`\\mathbf{b} \\in {\\{ 0, \\ldots, B-1\\}}^N`, which assigns each\n node to a specific example. :obj:`batch` needs to be sorted.\n (default: :obj:`None`)\n loop (bool, optional): If :obj:`True`, the graph will contain\n self-loops. (default: :obj:`False`)\n flow (string, optional): The flow direction when used in combination\n with message passing (:obj:`\"source_to_target\"` or\n :obj:`\"target_to_source\"`). 
(default: :obj:`\"source_to_target\"`)\n cosine (boolean, optional): If :obj:`True`, will use the Cosine\n distance instead of Euclidean distance to find nearest neighbors.\n (default: :obj:`False`)\n num_workers (int): Number of workers to use for computation. Has no\n effect in case :obj:`batch` is not :obj:`None`, or the input lies\n on the GPU. (default: :obj:`1`)\n\n :rtype: :class:`LongTensor`\n\n .. code-block:: python\n\n import torch\n from torch_cluster import knn_graph\n\n x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]])\n batch = torch.tensor([0, 0, 0, 0])\n edge_index = knn_graph(x, k=2, batch=batch, loop=False)\n \"\"\"\n\n assert flow in ['source_to_target', 'target_to_source']\n edge_index = knn(x, x, k if loop else k + 1, batch, batch, cosine,\n num_workers)\n\n if flow == 'source_to_target':\n row, col = edge_index[1], edge_index[0]\n else:\n row, col = edge_index[0], edge_index[1]\n\n if not loop:\n mask = row != col\n row, col = row[mask], col[mask]\n\n return torch.stack([row, col], dim=0)\n" ]
[ [ "torch.stack", "torch.cumsum", "torch.ops.torch_cluster.knn", "torch.ones_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Asap7772/rail-rl-franka-eval
[ "4bf99072376828193d05b53cf83c7e8f4efbd3ba", "4bf99072376828193d05b53cf83c7e8f4efbd3ba", "4bf99072376828193d05b53cf83c7e8f4efbd3ba", "4bf99072376828193d05b53cf83c7e8f4efbd3ba", "4bf99072376828193d05b53cf83c7e8f4efbd3ba", "4bf99072376828193d05b53cf83c7e8f4efbd3ba", "4bf99072376828193d05b53cf83c7e8f4efbd3ba" ]
[ "railrl/torch/mpc/controller.py", "experiments/avi/eric_grasp_sac_pixel.py", "experiments/ashvin/vae/old/pointmass/test_vae_goal2.py", "railrl/torch/ddpg/multi_step_ql.py", "visualization/grill/pick_place_baselines.py", "scripts/compute_rewards_on_video.py", "experiments/sac/profile.py" ]
[ "import numpy as np\nfrom torch import optim\nimport torch\n\nfrom railrl.policies.base import ExplorationPolicy\nfrom railrl.state_distance.policies import UniversalPolicy, \\\n SampleBasedUniversalPolicy\nfrom railrl.state_distance.util import merge_into_flat_obs, split_flat_obs\nfrom railrl.torch.core import PyTorchModule\nimport railrl.torch.pytorch_util as ptu\nimport matplotlib.pyplot as plt\n\n\nclass MPCController(PyTorchModule, ExplorationPolicy):\n def __init__(\n self,\n env,\n dynamics_model,\n cost_fn,\n num_simulated_paths=10000,\n mpc_horizon=15,\n ):\n \"\"\"\n Optimization is done by a shooting method.\n\n :param env:\n :param dynamics_model: Dynamics model. See dagger/model.py\n :param cost_fn: Function of the form:\n\n ```\n def cost_fn(self, states, actions, next_states):\n :param states: (BATCH_SIZE x state_dim) numpy array\n :param actions: (BATCH_SIZE x action_dim) numpy array\n :param next_states: (BATCH_SIZE x state_dim) numpy array\n :return: (BATCH_SIZE, ) numpy array\n ```\n :param num_simulated_paths: How many rollouts to do internally.\n :param mpc_horizon: How long to plan for.\n \"\"\"\n assert mpc_horizon >= 1\n super().__init__()\n self.env = env\n self.dynamics_model = dynamics_model\n self.cost_fn = cost_fn\n self.num_simulated_paths = num_simulated_paths\n self.mpc_horizon = mpc_horizon\n self.action_low = self.env.action_space.low\n self.action_high = self.env.action_space.high\n self.action_dim = self.env.action_space.low.shape[0]\n fig, (self.ax1, self.ax2) = plt.subplots(1, 2)\n\n def forward(self, *input):\n raise NotImplementedError()\n\n def expand_np_to_var(self, array):\n array_expanded = np.repeat(\n np.expand_dims(array, 0),\n self.num_simulated_paths,\n axis=0\n )\n return ptu.np_to_var(array_expanded, requires_grad=False)\n\n def sample_actions(self):\n return np.random.uniform(\n self.action_low,\n self.action_high,\n (self.num_simulated_paths, self.action_dim)\n )\n\n def get_action(self, obs):\n sampled_actions = self.sample_actions()\n first_sampled_actions = sampled_actions.copy()\n all_actions_np = [first_sampled_actions]\n actions = ptu.np_to_var(sampled_actions)\n next_obs = self.expand_np_to_var(obs)\n all_obs_torch = [next_obs]\n costs = 0\n all_costs = []\n for i in range(self.mpc_horizon):\n curr_obs = next_obs\n if i > 0:\n sampled_actions = self.sample_actions()\n all_actions_np.append(sampled_actions)\n actions = ptu.np_to_var(sampled_actions)\n next_obs = curr_obs + self.dynamics_model(curr_obs, actions)\n all_obs_torch.append(next_obs)\n new_costs = self.cost_fn(\n ptu.get_numpy(curr_obs),\n ptu.get_numpy(actions),\n ptu.get_numpy(next_obs),\n )\n costs = costs + new_costs\n all_costs.append(new_costs)\n\n # Reward sum of costs or just last time step?\n # min_i = np.argmin(costs)\n min_costs = np.array(all_costs).min(0)\n min_i = np.argmin(min_costs)\n\n # For Point2d u-shaped wall\n # best_action_seq = [action_t[min_i, :] for action_t in all_actions_np]\n # best_obs_seq = [\n # ptu.get_numpy(ob_t[min_i, :]) for ob_t in all_obs_torch\n # ]\n #\n # real_obs_seq = self.env.wrapped_env.wrapped_env.true_states(obs, best_action_seq)\n # self.ax1.clear()\n # self.env.wrapped_env.wrapped_env.plot_trajectory(\n # self.ax1,\n # np.array(best_obs_seq),\n # np.array(best_action_seq),\n # goal=self.env.wrapped_env.wrapped_env._target_position,\n # )\n # self.ax1.set_title(\"imagined\")\n # self.ax2.clear()\n # self.env.wrapped_env.wrapped_env.plot_trajectory(\n # self.ax2,\n # np.array(real_obs_seq),\n # np.array(best_action_seq),\n 
# goal=self.env.wrapped_env.wrapped_env._target_position,\n # )\n # self.ax2.set_title(\"real\")\n # plt.draw()\n # plt.pause(0.001)\n\n return first_sampled_actions[min_i], {}\n\n\nclass DebugQfToMPCController(PyTorchModule, ExplorationPolicy):\n def __init__(\n self,\n env,\n debug_qf,\n num_simulated_paths=10000,\n mpc_horizon=15,\n ):\n \"\"\"\n Optimization is done by a shooting method.\n\n :param env:\n :param dynamics_model: Dynamics model. See dagger/model.py\n :param cost_fn: Function of the form:\n\n ```\n def cost_fn(self, states, actions, next_states):\n :param states: (BATCH_SIZE x state_dim) numpy array\n :param actions: (BATCH_SIZE x action_dim) numpy array\n :param next_states: (BATCH_SIZE x state_dim) numpy array\n :return: (BATCH_SIZE, ) numpy array\n ```\n :param num_simulated_paths: How many rollouts to do internally.\n :param mpc_horizon: How long to plan for.\n \"\"\"\n assert mpc_horizon >= 1\n super().__init__()\n self.env = env\n self.debug_qf = debug_qf\n self.num_simulated_paths = num_simulated_paths\n self.mpc_horizon = mpc_horizon\n self.action_low = self.env.action_space.low\n self.action_high = self.env.action_space.high\n self.action_dim = self.env.action_space.low.shape[0]\n\n def expand_np_to_var(self, array):\n array_expanded = np.repeat(\n np.expand_dims(array, 0),\n self.num_simulated_paths,\n axis=0\n )\n return ptu.np_to_var(array_expanded, requires_grad=False)\n\n def sample_actions(self):\n return np.random.uniform(\n self.action_low,\n self.action_high,\n (self.num_simulated_paths, self.action_dim)\n )\n\n def get_action(self, obs):\n obs, goals, taus = split_flat_obs(\n obs[None],\n self.env.observation_space.low.size,\n self.env.goal_dim\n )\n sampled_actions = self.sample_actions()\n first_sampled_actions = sampled_actions.copy()\n actions = ptu.np_to_var(sampled_actions)\n next_obs = self.expand_np_to_var(obs[0])\n goals = self.expand_np_to_var(goals[0])\n taus = self.expand_np_to_var(taus[0])\n costs = 0\n for i in range(self.mpc_horizon):\n curr_obs = next_obs\n if i > 0:\n sampled_actions = self.sample_actions()\n actions = ptu.np_to_var(sampled_actions)\n flat_obs = merge_into_flat_obs(\n curr_obs,\n goals,\n taus,\n )\n obs_delta = self.debug_qf(\n flat_obs, actions, return_internal_prediction=True\n )\n next_obs = curr_obs + obs_delta\n next_features = self.env.convert_obs_to_goals(next_obs)\n costs += (next_features[:, :7] - goals[:, :7])**2\n costs_np = ptu.get_numpy(costs).sum(1)\n min_i = np.argmin(costs_np)\n return first_sampled_actions[min_i], {}\n\n\n# TODO(vitchyr): stop hardcoding this\n# GOAL_SLICE = slice(0, 2)\nGOAL_SLICE = slice(0, 7)\n\n\nclass GradientBasedMPCController(PyTorchModule, ExplorationPolicy):\n \"\"\"\n Optimization is done with gradient descent\n \"\"\"\n\n def __init__(\n self,\n env,\n dynamics_model,\n mpc_horizon=15,\n learning_rate=1e-1,\n num_grad_steps=10,\n warm_start=False,\n ):\n \"\"\"\n Optimization is done by a shooting method.\n\n :param env:\n :param dynamics_model: Dynamics model. 
See dagger/model.py\n :param mpc_horizon: How long to plan for.\n \"\"\"\n assert mpc_horizon >= 1\n super().__init__()\n self.env = env\n self.dynamics_model = dynamics_model\n self.mpc_horizon = mpc_horizon\n self.action_low_repeated = np.repeat(\n self.env.action_space.low,\n self.mpc_horizon,\n )\n self.action_high_repeated = np.repeat(\n self.env.action_space.high,\n self.mpc_horizon\n )\n self.action_dim = self.env.action_space.low.shape[0]\n self.last_actions_np = None\n self.learning_rate = learning_rate\n self.num_grad_steps = num_grad_steps\n self.warm_start = warm_start\n\n def forward(self, *input):\n raise NotImplementedError()\n\n def expand_np_to_var(self, array):\n array_expanded = np.repeat(\n np.expand_dims(array, 0),\n self.num_simulated_paths,\n axis=0\n )\n return ptu.np_to_var(array_expanded, requires_grad=False)\n\n def reset(self):\n self.last_actions_np = None\n\n def cost_function(self, states, all_actions):\n \"\"\"\n Everything is batch-wise.\n \"\"\"\n loss = 0\n for i in range(self.mpc_horizon):\n actions = (\n all_actions[:, i * self.action_dim:(i + 1) * self.action_dim]\n )\n actions = torch.clamp(actions, -1, 1)\n next_states = states + self.dynamics_model(states, actions)\n next_features_predicted = next_states[:, GOAL_SLICE]\n desired_features = ptu.np_to_var(\n self.env.multitask_goal[GOAL_SLICE][None]\n * np.ones(next_features_predicted.shape)\n )\n diff = next_features_predicted - desired_features\n loss += (diff ** 2).sum(dim=1, keepdim=True)\n return loss\n\n def get_action(self, obs):\n if self.last_actions_np is None or not self.warm_start:\n init_actions = np.hstack([\n self.env.action_space.sample()\n for _ in range(self.mpc_horizon)\n ])\n else:\n init_actions = self.last_actions_np\n all_actions = ptu.np_to_var(init_actions[None], requires_grad=True)\n obs = ptu.np_to_var(obs[None])\n optimizer = optim.Adam([all_actions], lr=self.learning_rate)\n for i in range(self.num_grad_steps):\n loss = self.cost_function(obs, all_actions)\n optimizer.zero_grad()\n loss.sum().backward()\n optimizer.step()\n\n self.last_actions_np = np.clip(\n ptu.get_numpy(all_actions)[0],\n self.action_low_repeated,\n self.action_high_repeated,\n )\n action = self.last_actions_np[:self.action_dim]\n return action, {}\n", "import copy\n\nimport gym\nimport numpy as np\nimport torch.nn as nn\n\nimport railrl.misc.hyperparameter as hyp\nimport railrl.torch.pytorch_util as ptu\nfrom railrl.data_management.obs_dict_replay_buffer import \\\n ObsDictReplayBuffer\nfrom railrl.launchers.launcher_util import run_experiment\n# from railrl.samplers.data_collector import MdpPathCollector\n# from railrl.samplers.data_collector.step_collector import MdpStepCollector\nfrom railrl.samplers.data_collector.path_collector import ObsDictPathCollector\nfrom railrl.samplers.data_collector.step_collector import ObsDictStepCollector\nfrom railrl.visualization.video import VideoSaveFunctionBullet\nfrom railrl.misc.buffer_save import BufferSaveFunction\n\nfrom railrl.torch.networks import (\n CNN,\n MlpQfWithObsProcessor,\n Split,\n FlattenEach,\n Concat,\n Flatten,\n)\nfrom railrl.torch.sac.policies import (\n MakeDeterministic, TanhGaussianPolicyAdapter,\n)\nfrom railrl.torch.sac.sac import SACTrainer\nfrom railrl.torch.torch_rl_algorithm import (\n TorchBatchRLAlgorithm,\n TorchOnlineRLAlgorithm,\n)\n\nimport os.path as osp\nfrom experiments.avi.env_wrappers import FlatEnv\n\nPARENT_DIR = '/media/avi/data/Work/github/'\nimport sys\nenv_file = osp.join(PARENT_DIR, 
'avisingh599/google-research/dql_grasping/')\nsys.path.insert(1, env_file)\nfrom grasping_env import KukaGraspingProceduralEnv\n\n\ndef experiment(variant):\n\n env_params = dict(\n block_random=0.3,\n camera_random=0,\n simple_observations=False,\n continuous=True,\n remove_height_hack=True,\n render_mode=\"DIRECT\",\n # render_mode=\"GUI\",\n num_objects=5,\n max_num_training_models=900,\n target=False,\n test=False,\n )\n expl_env = FlatEnv(KukaGraspingProceduralEnv(**env_params))\n eval_env = expl_env\n img_width, img_height = eval_env.image_shape\n num_channels = 3\n\n action_dim = int(np.prod(eval_env.action_space.shape))\n cnn_params = variant['cnn_params']\n cnn_params.update(\n input_width=img_width,\n input_height=img_height,\n input_channels=num_channels,\n added_fc_input_size=0,\n output_conv_channels=True,\n output_size=None,\n )\n\n qf_cnn = CNN(**cnn_params)\n qf_obs_processor = nn.Sequential(\n qf_cnn,\n Flatten(),\n )\n\n qf_kwargs = copy.deepcopy(variant['qf_kwargs'])\n qf_kwargs['obs_processor'] = qf_obs_processor\n qf_kwargs['output_size'] = 1\n qf_kwargs['input_size'] = (\n action_dim + qf_cnn.conv_output_flat_size\n )\n qf1 = MlpQfWithObsProcessor(**qf_kwargs)\n qf2 = MlpQfWithObsProcessor(**qf_kwargs)\n\n target_qf_cnn = CNN(**cnn_params)\n target_qf_obs_processor = nn.Sequential(\n target_qf_cnn,\n Flatten(),\n )\n\n target_qf_kwargs = copy.deepcopy(variant['qf_kwargs'])\n target_qf_kwargs['obs_processor'] = target_qf_obs_processor\n target_qf_kwargs['output_size'] = 1\n target_qf_kwargs['input_size'] = (\n action_dim + target_qf_cnn.conv_output_flat_size\n )\n\n target_qf1 = MlpQfWithObsProcessor(**target_qf_kwargs)\n target_qf2 = MlpQfWithObsProcessor(**target_qf_kwargs)\n\n action_dim = int(np.prod(eval_env.action_space.shape))\n policy_cnn = CNN(**cnn_params)\n policy_obs_processor = nn.Sequential(\n policy_cnn,\n Flatten(),\n )\n policy = TanhGaussianPolicyAdapter(\n policy_obs_processor,\n policy_cnn.conv_output_flat_size,\n action_dim,\n **variant['policy_kwargs']\n )\n\n observation_key = 'image'\n eval_policy = MakeDeterministic(policy)\n eval_path_collector = ObsDictPathCollector(\n eval_env,\n eval_policy,\n observation_key=observation_key,\n **variant['eval_path_collector_kwargs']\n )\n replay_buffer = ObsDictReplayBuffer(\n variant['replay_buffer_size'],\n expl_env,\n observation_key=observation_key,\n )\n\n trainer = SACTrainer(\n env=eval_env,\n policy=policy,\n qf1=qf1,\n qf2=qf2,\n target_qf1=target_qf1,\n target_qf2=target_qf2,\n **variant['trainer_kwargs']\n )\n if variant['collection_mode'] == 'batch':\n expl_path_collector = ObsDictPathCollector(\n expl_env,\n policy,\n observation_key=observation_key,\n **variant['expl_path_collector_kwargs']\n )\n algorithm = TorchBatchRLAlgorithm(\n trainer=trainer,\n exploration_env=expl_env,\n evaluation_env=eval_env,\n exploration_data_collector=expl_path_collector,\n evaluation_data_collector=eval_path_collector,\n replay_buffer=replay_buffer,\n **variant['algo_kwargs']\n )\n elif variant['collection_mode'] == 'online':\n expl_path_collector = ObsDictStepCollector(\n expl_env,\n policy,\n observation_key=observation_key,\n **variant['expl_path_collector_kwargs']\n )\n algorithm = TorchOnlineRLAlgorithm(\n trainer=trainer,\n exploration_env=expl_env,\n evaluation_env=eval_env,\n exploration_data_collector=expl_path_collector,\n evaluation_data_collector=eval_path_collector,\n replay_buffer=replay_buffer,\n **variant['algo_kwargs']\n )\n else:\n raise NotImplementedError\n\n video_func = 
VideoSaveFunctionBullet(variant)\n algorithm.post_train_funcs.append(video_func)\n\n # dump_buffer_func = BufferSaveFunction(variant)\n # algorithm.post_train_funcs.append(dump_buffer_func)\n\n algorithm.to(ptu.device)\n algorithm.train()\n\n\nif __name__ == \"__main__\":\n variant = dict(\n trainer_kwargs=dict(\n discount=0.99,\n # soft_target_tau=5e-3,\n # target_update_period=1,\n soft_target_tau=1.0,\n target_update_period=1000,\n policy_lr=3E-4,\n qf_lr=3E-4,\n reward_scale=1,\n use_automatic_entropy_tuning=True,\n ),\n algo_kwargs=dict(\n batch_size=256,\n max_path_length=15,\n num_epochs=5000,\n num_eval_steps_per_epoch=45,\n num_expl_steps_per_train_loop=300,\n num_trains_per_train_loop=300,\n min_num_steps_before_training=10*300,\n # max_path_length=10,\n # num_epochs=100,\n # num_eval_steps_per_epoch=100,\n # num_expl_steps_per_train_loop=100,\n # num_trains_per_train_loop=100,\n # min_num_steps_before_training=100,\n ),\n cnn_params=dict(\n kernel_sizes=[3, 3],\n n_channels=[4, 4],\n strides=[1, 1],\n hidden_sizes=[32, 32],\n paddings=[1, 1],\n pool_type='max2d',\n pool_sizes=[2, 2],\n pool_strides=[2, 2],\n pool_paddings=[0, 0],\n ),\n # replay_buffer_size=int(1E6),\n qf_kwargs=dict(\n hidden_sizes=[256, 256],\n ),\n policy_kwargs=dict(\n hidden_sizes=[256, 256],\n ),\n dump_video_kwargs=dict(\n imsize=48,\n save_video_period=1,\n ),\n logger_config=dict(\n snapshot_gap=10,\n ),\n dump_buffer_kwargs=dict(\n dump_buffer_period=50,\n ),\n replay_buffer_size=int(5E5),\n expl_path_collector_kwargs=dict(),\n eval_path_collector_kwargs=dict(),\n shared_qf_conv=False,\n use_robot_state=False,\n randomize_env=True,\n )\n\n import argparse\n parser = argparse.ArgumentParser()\n # parser.add_argument(\"--env\", type=str, required=True,\n # choices=('SawyerReach-v0', 'SawyerGraspOne-v0'))\n # parser.add_argument(\"--obs\", required=True, type=str, choices=('pixels', 'pixels_debug'))\n parser.add_argument(\"--gpu\", type=int, default=1)\n args = parser.parse_args()\n\n variant['env'] = 'KukaGraspingProceduralEnv'\n variant['obs'] = 'pixels'\n\n n_seeds = 1\n mode = 'local'\n exp_prefix = 'dev-{}'.format(\n __file__.replace('/', '-').replace('_', '-').split('.')[0]\n )\n exp_prefix = 'railrl-bullet-{}-{}'.format(variant['env'], variant['obs'])\n\n # n_seeds = 5\n # mode = 'ec2'\n # exp_prefix = 'railrl-bullet-sawyer-image-reach'\n\n search_space = {\n 'shared_qf_conv': [\n True,\n # False,\n ],\n 'collection_mode': [\n # 'batch',\n 'online',\n ]\n }\n sweeper = hyp.DeterministicHyperparameterSweeper(\n search_space, default_parameters=variant,\n )\n\n for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):\n for _ in range(n_seeds):\n run_experiment(\n experiment,\n exp_name=exp_prefix,\n mode=mode,\n variant=variant,\n use_gpu=True,\n gpu_id=args.gpu,\n unpack_variant=False,\n )\n", "from railrl.envs.multitask.multitask_env import MultitaskToFlatEnv\nfrom railrl.envs.multitask.point2d import MultitaskImagePoint2DEnv\nfrom railrl.envs.mujoco.pusher2d import Pusher2DEnv\nfrom railrl.envs.wrappers import NormalizedBoxEnv\nfrom railrl.exploration_strategies.base import (\n PolicyWrappedWithExplorationStrategy\n)\nfrom railrl.exploration_strategies.epsilon_greedy import EpsilonGreedy\nfrom railrl.exploration_strategies.gaussian_strategy import GaussianStrategy\nfrom railrl.exploration_strategies.ou_strategy import OUStrategy\nfrom railrl.launchers.launcher_util import run_experiment\nfrom railrl.torch.networks import FlattenMlp, TanhMlpPolicy\nimport railrl.torch.pytorch_util as 
ptu\nfrom railrl.torch.td3.td3 import TD3\nimport railrl.misc.hyperparameter as hyp\nfrom railrl.launchers.arglauncher import run_variants\n\nfrom railrl.envs.vae_wrappers import VAEWrappedImageGoalEnv\nimport torch\n\ndef experiment(variant):\n rdim = variant[\"rdim\"]\n vae_paths = {\n 2: \"/home/ashvin/data/s3doodad/ashvin/vae/point2d-conv-sweep2/run1/id0/params.pkl\",\n 4: \"/home/ashvin/data/s3doodad/ashvin/vae/point2d-conv-sweep2/run1/id1/params.pkl\",\n 8: \"/home/ashvin/data/s3doodad/ashvin/vae/point2d-conv-sweep2/run1/id2/params.pkl\",\n 16: \"/home/ashvin/data/s3doodad/ashvin/vae/point2d-conv-sweep2/run1/id3/params.pkl\"\n }\n vae_path = vae_paths[rdim]\n vae = torch.load(vae_path)\n print(\"loaded\", vae_path)\n\n if variant['multitask']:\n env = MultitaskImagePoint2DEnv(**variant['env_kwargs'])\n env = VAEWrappedImageGoalEnv(env, vae, use_vae_obs=True,\n use_vae_reward=True, use_vae_goals=True,)\n # render_goals=True, render_rollouts=True)\n env = MultitaskToFlatEnv(env)\n # else:\n # env = Pusher2DEnv(**variant['env_kwargs'])\n if variant['normalize']:\n env = NormalizedBoxEnv(env)\n exploration_type = variant['exploration_type']\n if exploration_type == 'ou':\n es = OUStrategy(action_space=env.action_space)\n elif exploration_type == 'gaussian':\n es = GaussianStrategy(\n action_space=env.action_space,\n max_sigma=0.1,\n min_sigma=0.1, # Constant sigma\n )\n elif exploration_type == 'epsilon':\n es = EpsilonGreedy(\n action_space=env.action_space,\n prob_random_action=0.1,\n )\n else:\n raise Exception(\"Invalid type: \" + exploration_type)\n obs_dim = env.observation_space.low.size\n action_dim = env.action_space.low.size\n qf1 = FlattenMlp(\n input_size=obs_dim + action_dim,\n output_size=1,\n hidden_sizes=[400, 300],\n )\n qf2 = FlattenMlp(\n input_size=obs_dim + action_dim,\n output_size=1,\n hidden_sizes=[400, 300],\n )\n policy = TanhMlpPolicy(\n input_size=obs_dim,\n output_size=action_dim,\n hidden_sizes=[400, 300],\n )\n exploration_policy = PolicyWrappedWithExplorationStrategy(\n exploration_strategy=es,\n policy=policy,\n )\n algorithm = TD3(\n env,\n training_env=env,\n qf1=qf1,\n qf2=qf2,\n policy=policy,\n exploration_policy=exploration_policy,\n **variant['algo_kwargs']\n )\n print(\"use_gpu\", variant[\"use_gpu\"], bool(variant[\"use_gpu\"]))\n if variant[\"use_gpu\"]:\n gpu_id = variant[\"gpu_id\"]\n ptu.set_gpu_mode(True)\n ptu.set_device(gpu_id)\n algorithm.to(ptu.device)\n env._wrapped_env.vae.to(ptu.device)\n algorithm.train()\n\n\nif __name__ == \"__main__\":\n # noinspection PyTypeChecker\n variant = dict(\n algo_kwargs=dict(\n num_epochs=100,\n num_steps_per_epoch=1000,\n num_steps_per_eval=1000,\n tau=1e-2,\n batch_size=128,\n max_path_length=100,\n discount=0.99,\n # qf_learning_rate=1e-3,\n # policy_learning_rate=1e-4,\n ),\n env_kwargs=dict(\n render_onscreen=False,\n render_size=84,\n ignore_multitask_goal=True,\n ball_radius=1,\n ),\n algorithm='TD3',\n multitask=True,\n normalize=False,\n rdim=4,\n )\n\n n_seeds = 1\n mode = 'local'\n exp_prefix = 'dev'\n\n n_seeds = 1\n mode = 'ec2'\n exp_prefix = 'pusher-2d-state-baselines-h100-multitask-less-shaped'\n\n search_space = {\n 'exploration_type': [\n 'ou',\n ],\n 'algo_kwargs.reward_scale': [0.1],\n 'rdim': [2, 4, 8, 16],\n 'seedid': range(n_seeds),\n }\n sweeper = hyp.DeterministicHyperparameterSweeper(\n search_space, default_parameters=variant,\n )\n run_variants(experiment, sweeper.iterate_hyperparameters())\n", "from collections import OrderedDict\n\nimport torch\nimport numpy as np\n\nimport 
railrl.torch.pytorch_util as ptu\nfrom railrl.data_management.split_buffer import SplitReplayBuffer\nfrom railrl.data_management.subtraj_replay_buffer import SubtrajReplayBuffer\nfrom railrl.misc.eval_util import create_stats_ordered_dict\nfrom railrl.torch.ddpg import DDPG\nfrom railrl.misc import np_util\n\n\ndef flatten_subtraj_batch(subtraj_batch):\n return {\n k: array.view(-1, array.size()[-1])\n for k, array in subtraj_batch.items()\n }\n\n\nclass MultiStepDdpg(DDPG):\n def __init__(self, *args, subtraj_length=10, **kwargs):\n super().__init__(*args, **kwargs)\n self.subtraj_length = subtraj_length\n self.gammas = self.discount * torch.ones(self.subtraj_length)\n discount_factors = torch.cumprod(self.gammas, dim=0)\n self.discount_factors = ptu.Variable(\n discount_factors.view(-1, 1),\n requires_grad=False,\n )\n self.replay_buffer = SplitReplayBuffer(\n SubtrajReplayBuffer(\n max_replay_buffer_size=self.replay_buffer_size,\n env=self.env,\n subtraj_length=self.subtraj_length,\n ),\n SubtrajReplayBuffer(\n max_replay_buffer_size=self.replay_buffer_size,\n env=self.env,\n subtraj_length=self.subtraj_length,\n ),\n fraction_paths_in_train=0.8,\n )\n\n def get_train_dict(self, subtraj_batch):\n subtraj_rewards = subtraj_batch['rewards']\n subtraj_rewards_np = ptu.get_numpy(subtraj_rewards).squeeze(2)\n returns = np_util.batch_discounted_cumsum(\n subtraj_rewards_np, self.discount\n )\n returns = np.expand_dims(returns, 2)\n returns = np.ascontiguousarray(returns).astype(np.float32)\n returns = ptu.Variable(ptu.from_numpy(returns))\n subtraj_batch['returns'] = returns\n batch = flatten_subtraj_batch(subtraj_batch)\n # rewards = batch['rewards']\n returns = batch['returns']\n terminals = batch['terminals']\n obs = batch['observations']\n actions = batch['actions']\n next_obs = batch['next_observations']\n\n \"\"\"\n Policy operations.\n \"\"\"\n policy_actions = self.policy(obs)\n q = self.qf(obs, policy_actions)\n policy_loss = - q.mean()\n\n \"\"\"\n Critic operations.\n \"\"\"\n next_actions = self.policy(next_obs)\n # TODO: try to get this to work\n # next_actions = None\n q_target = self.target_qf(\n next_obs,\n next_actions,\n )\n # y_target = self.reward_scale * rewards + (1. - terminals) * self.discount * v_target\n batch_size = q_target.size()[0]\n discount_factors = self.discount_factors.repeat(\n batch_size // self.subtraj_length, 1,\n )\n y_target = self.reward_scale * returns + (1. 
- terminals) * discount_factors * q_target\n # noinspection PyUnresolvedReferences\n y_target = y_target.detach()\n y_pred = self.qf(obs, actions)\n bellman_errors = (y_pred - y_target)**2\n qf_loss = self.qf_criterion(y_pred, y_target)\n\n return OrderedDict([\n ('Policy Actions', policy_actions),\n ('Policy Loss', policy_loss),\n ('Policy Q Values', q),\n ('Target Y', y_target),\n ('Predicted Y', y_pred),\n ('Bellman Errors', bellman_errors),\n ('Y targets', y_target),\n ('Y predictions', y_pred),\n ('QF Loss', qf_loss),\n ])\n\n def _statistics_from_batch(self, batch, stat_prefix):\n statistics = OrderedDict()\n\n train_dict = self.get_train_dict(batch)\n for name in [\n 'QF Loss',\n 'Policy Loss',\n ]:\n tensor = train_dict[name]\n statistics_name = \"{} {} Mean\".format(stat_prefix, name)\n statistics[statistics_name] = np.mean(ptu.get_numpy(tensor))\n\n for name in [\n 'Bellman Errors',\n 'Target Y',\n 'Predicted Y',\n 'Policy Q Values',\n ]:\n tensor = train_dict[name]\n statistics.update(create_stats_ordered_dict(\n '{} {}'.format(stat_prefix, name),\n ptu.get_numpy(tensor)\n ))\n\n return statistics\n\n def _paths_to_np_batch(self, paths):\n eval_replay_buffer = SubtrajReplayBuffer(\n len(paths) * (self.max_path_length + 1),\n self.env,\n self.subtraj_length,\n )\n for path in paths:\n eval_replay_buffer.add_trajectory(path)\n return eval_replay_buffer.get_all_valid_subtrajectories()\n\n def get_batch(self, training=True):\n replay_buffer = self.replay_buffer.get_replay_buffer(training)\n sample_size = min(\n replay_buffer.num_steps_can_sample(),\n self.batch_size\n )\n batch = replay_buffer.random_batch(sample_size)\n torch_batch = {\n k: ptu.Variable(ptu.from_numpy(array).float(), requires_grad=False)\n for k, array in batch.items()\n }\n rewards = torch_batch['rewards']\n terminals = torch_batch['terminals']\n torch_batch['rewards'] = rewards.unsqueeze(-1)\n torch_batch['terminals'] = terminals.unsqueeze(-1)\n return torch_batch\n", "import matplotlib\nfrom visualization.grill.config import (\n output_dir,\n ashvin_base_dir,\n format_func,\n configure_matplotlib,\n)\nimport matplotlib.pyplot as plt\nfrom railrl.visualization import plot_util as plot\n\nconfigure_matplotlib(matplotlib)\n\ndirs = [\n ashvin_base_dir + 's3doodad/share/camera_ready_pick',\n]\npick_exps = plot.load_exps(dirs, suppress_output=True)\n\nplot.comparison(pick_exps, [\"Final hand_and_obj_distance Mean\"],\n [\n# \"seed\",\n \"exp_prefix\",\n \"train_vae_variant.vae_type\",\n ],\n default_vary={\"train_vae_variant.vae_type\": True},\n smooth=plot.padded_ma_filter(10),\n print_final=False, print_min=False, print_plot=True,\n xlim=(0, 500000),\n# ylim=(0, 0.35),\n figsize=(7.5,4),\n method_order=(2, 1, 3, 4, 0),\n )\nplt.gca().xaxis.set_major_formatter(plt.FuncFormatter(format_func))\nplt.xlabel(\"Timesteps\")\nplt.ylabel(\"\")\n# plt.legend()\nplt.legend(\n [\n \"RIG\",\n \"DSAE\",\n \"HER\",\n \"Oracle\",\n \"L&R\",\n ],\n # bbox_to_anchor=(0.49, -0.2), loc=\"upper center\", ncol=5, handlelength=1)\n bbox_to_anchor=(1.0, 0.5), loc=\"center left\",\n)\nplt.tight_layout()\nplt.title(\"Visual Pick and Place Baselines\")\n# L&R\n# Throw away (SVAE)\n# RIG\n# DSAE\n# HER\n# Oracle\nplt.savefig(output_dir + \"pick_baselines_viz.pdf\")\n", "# from multiworld.envs.mujoco.sawyer_xyz.sawyer_push_multiobj_subset import SawyerMultiobjectEnv\n# from multiworld.envs.mujoco.sawyer_xyz.sawyer_reach import SawyerReachXYZEnv\n\nimport sys\nfrom multiworld.core.image_env import ImageEnv\nfrom 
multiworld.envs.real_world.sawyer.sawyer_reaching import SawyerReachXYZEnv\n# from sawyer_control.envs.sawyer_reaching import SawyerReachXYZEnv\n\n# import railrl.util.hyperparameter as hyp\nfrom railrl.launchers.experiments.ashvin.rfeatures.encoder_wrapped_env import EncoderWrappedEnv\n\nimport torch\n\nfrom railrl.launchers.experiments.ashvin.rfeatures.rfeatures_model import TimestepPredictionModel\nimport numpy as np\n\nimport railrl.torch.pytorch_util as ptu\n\n# from railrl.launchers.experiments.ashvin.rfeatures.rfeatures_trainer import TimePredictionTrainer\n\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\n\nfrom torchvision.utils import save_image\nimport pickle\n\ndemo_trajectory_rewards = []\n\nimport torchvision\nimport random\n\nimport skvideo.io\n\nRANDOM_CROP_X = 16\nRANDOM_CROP_Y = 16\nWIDTH = 456\nHEIGHT = 256\nCROP_WIDTH = WIDTH - RANDOM_CROP_X\nCROP_HEIGHT = HEIGHT - RANDOM_CROP_Y\n\neps = 1e-5\n\nt_to_pil = torchvision.transforms.ToPILImage()\nt_random_resize = torchvision.transforms.RandomResizedCrop(\n size=(CROP_WIDTH, CROP_HEIGHT,),\n scale=(0.9, 1.0),\n ratio=(1.0, 1.0), # don't change aspect ratio\n)\nt_color_jitter = torchvision.transforms.ColorJitter(\n brightness=0.2, # (0.8, 1.2),\n contrast=0.2, # (0.8, 1.2),\n saturation=0.2, # (0.8, 1.2),\n hue=0.1, # (-0.2, 0.2),\n)\nt_to_tensor = torchvision.transforms.ToTensor()\n\ndef get_random_crop_params(img, scale_x, scale_y):\n \"\"\"Get parameters for ``crop`` for a random sized crop.\n\n Args:\n img (PIL Image): Image to be cropped.\n scale (tuple): range of size of the origin size cropped\n ratio (tuple): range of aspect ratio of the origin aspect ratio cropped\n\n Returns:\n tuple: params (i, j, h, w) to be passed to ``crop`` for a random\n sized crop.\n \"\"\"\n w = int(random.uniform(*scale_x) * CROP_WIDTH)\n h = int(random.uniform(*scale_y) * CROP_HEIGHT)\n\n i = random.randint(0, img.size[1] - h)\n j = random.randint(0, img.size[0] - w)\n\n return i, j, h, w\n\ndef load_path(data, demo_path):\n H = len(data)\n print(\"loading video, size\", data.shape)\n\n rewards = []\n\n # import ipdb; ipdb.set_trace()\n\n t_color_jitter_instance = None\n\n # obs_batch = data[:, 60:, :440, :] # right\n obs_batch = data[:, 60:, 60:, :] # left\n # np.zeros((num_obs, 240, 440, 3))\n obs_batch = obs_batch.transpose(0, 3, 1, 2) / 255.0\n zs = env._encode(obs_batch)\n\n z0 = zs[0, :]\n zT = zs[-1, :]\n\n for reward_type in [\"latent_distance\", \"regression_distance\"]:\n\n for i in range(len(zs)):\n dt = zs[i, :] - z0\n dT = zT - z0\n\n if reward_type == \"regression_distance\":\n regression_pred_yt = (dt * dT).sum() / ((dT ** 2).sum() + eps)\n r = -np.abs(1-regression_pred_yt)\n if reward_type == \"latent_distance\":\n r = -np.linalg.norm(dt - dT)\n\n rewards.append(r)\n\n reward_filename = demo_path[:-4] + \"_%s_imagenet.png\" % reward_type\n plt.figure(figsize=(8, 8))\n plt.plot(rewards)\n plt.savefig(reward_filename)\n\n # # Order of these two lines matters\n # env.zT = goal_image_transformed\n # env.initialize(zs)\n # # print(\"z0\", env.z0, \"zT\", env.zT, \"dT\", env.dT)\n\n # for i in range(H):\n # ob = path[i]\n # action = path[\"actions\"][i]\n # reward = path[\"rewards\"][i]\n # next_ob = path[\"next_observations\"][i]\n # terminal = path[\"terminals\"][i]\n # agent_info = path[\"agent_infos\"][i]\n # env_info = path[\"env_infos\"][i]\n\n # # goal = path[\"goal\"][\"state_desired_goal\"][0, :]\n # # import ipdb; ipdb.set_trace()\n # # print(goal.shape, ob[\"state_observation\"])\n # # 
state_observation = np.concatenate((ob[\"state_observation\"], goal))\n # # action = action[:2]\n\n # # update_obs_with_latent(ob)\n # # update_obs_with_latent(next_ob)\n # env._update_obs_latent(ob, zs[i, :])\n # env._update_obs_latent(next_ob, zs[i+1, :])\n # reward = env.compute_reward(\n # action,\n # next_ob,\n # )\n # path[\"rewards\"][i] = reward\n # # reward = np.array([reward])\n # # terminal = np.array([terminal])\n\n # # print(reward)\n # rewards.append(reward)\n # demo_trajectory_rewards.append(rewards)\n\ndef load_demos(demo_paths, processed_demo_path, reference_path, name):\n datas = []\n for demo_path in demo_paths:\n data = skvideo.io.vread(demo_path)\n load_path(data, demo_path)\n print(\"Finished loading demo: \" + demo_path)\n\n # np.save(processed_demo_path, data)\n print(\"Dumping data\")\n # pickle.dump(datas, open(processed_demo_path, \"wb\"), protocol=4)\n\n plt.figure(figsize=(8, 8))\n print(\"Demo trajectory rewards len: \", len(demo_trajectory_rewards), \"Data len: \", len(datas))\n pickle.dump(demo_trajectory_rewards, open(\"demos/rlbench/demo_rewards_%s.p\" % name, \"wb\"), protocol=4)\n for r in demo_trajectory_rewards:\n plt.plot(r)\n plt.savefig(\"demos/rlbench/demo_rewards_%s.png\" %name)\n\ndef update_obs_with_latent(obs):\n latent_obs = env._encode_one(obs[\"image_observation\"])\n latent_goal = np.zeros([]) # env._encode_one(obs[\"image_desired_goal\"])\n obs['latent_observation'] = latent_obs\n obs['latent_achieved_goal'] = latent_goal\n obs['latent_desired_goal'] = latent_goal\n obs['observation'] = latent_obs\n obs['achieved_goal'] = latent_goal\n obs['desired_goal'] = latent_goal\n return obs\n\nif __name__ == \"__main__\":\n use_imagenet = \"imagenet\" in sys.argv\n variant = dict(\n env_class=SawyerReachXYZEnv,\n env_kwargs=dict(\n action_mode=\"position\",\n max_speed = 0.05,\n camera=\"sawyer_head\"\n ),\n # algo_kwargs=dict(\n # num_epochs=3000,\n # max_path_length=20,\n # batch_size=128,\n # num_eval_steps_per_epoch=1000,\n # num_expl_steps_per_train_loop=1000,\n # num_trains_per_train_loop=1000,\n # min_num_steps_before_training=1000,\n # ),\n algo_kwargs=dict(\n num_epochs=3000,\n max_path_length=10,\n batch_size=5,\n num_eval_steps_per_epoch=10,\n num_expl_steps_per_train_loop=10,\n num_trains_per_train_loop=10,\n min_num_steps_before_training=10,\n ),\n model_kwargs=dict(\n decoder_distribution='gaussian_identity_variance',\n input_channels=3,\n imsize=224,\n architecture=dict(\n hidden_sizes=[200, 200],\n ),\n delta_features=True,\n pretrained_features=False,\n ),\n trainer_kwargs=dict(\n discount=0.99,\n demo_path=\"/home/anair/ros_ws/src/railrl-private/demo_v2_2.npy\",\n add_demo_latents=True,\n bc_num_pretrain_steps=100,\n ),\n replay_buffer_kwargs=dict(\n max_size=100000,\n fraction_goals_rollout_goals=1.0,\n fraction_goals_env_goals=0.0,\n ),\n qf_kwargs=dict(\n hidden_sizes=[400, 300],\n ),\n policy_kwargs=dict(\n hidden_sizes=[400, 300],\n ),\n\n save_video=True,\n dump_video_kwargs=dict(\n save_period=1,\n # imsize=(3, 500, 300),\n )\n )\n\n ptu.set_gpu_mode(\"gpu\")\n\n representation_size = 128\n output_classes = 20\n\n model_class = variant.get('model_class', TimestepPredictionModel)\n model = model_class(\n representation_size,\n # decoder_output_activation=decoder_activation,\n output_classes=output_classes,\n **variant['model_kwargs'],\n )\n # model = torch.nn.DataParallel(model)\n\n imagenets = [True, False]\n reg_types = [\"regression_distance\", \"latent_distance\"]\n for use_imagenet in [True]:\n for reg_type in 
[\"latent_distance\"]:\n print(\"Processing with imagenet: %s, type: %s\" %(str(use_imagenet), reg_type))\n if use_imagenet:\n model_path = \"/home/anair/data/s3doodad/facebook/models/rfeatures/multitask1/run2/id0/itr_0.pt\" # imagenet\n else:\n model_path = \"/home/anair/data/s3doodad/facebook/models/rfeatures/multitask1/run2/id2/itr_4000.pt\"\n\n # model = load_local_or_remote_file(model_path)\n state_dict = torch.load(model_path)\n model.load_state_dict(state_dict)\n model.to(ptu.device)\n model.eval()\n\n for color in [\"grey\"]:\n reference_path = \"demos/door_demos_v3/demo_v3_%s_0.pkl\"%color\n traj = np.load(\"demos/door_demos_v3/demo_v3_%s_0.pkl\"%color, allow_pickle=True)[0]\n\n goal_image_flat = traj[\"observations\"][-1][\"image_observation\"]\n goal_image = goal_image_flat.reshape(1, 3, 500, 300).transpose([0, 1, 3, 2]) / 255.0\n # goal_image = goal_image[:, ::-1, :, :].copy() # flip bgr\n goal_image = goal_image[:, :, 60:300, 30:470]\n goal_image_pt = ptu.from_numpy(goal_image)\n save_image(goal_image_pt.data.cpu(), 'goal.png', nrow=1)\n goal_latent = model.encode(goal_image_pt).detach().cpu().numpy().flatten()\n\n initial_image_flat = traj[\"observations\"][0][\"image_observation\"]\n initial_image = initial_image_flat.reshape(1, 3, 500, 300).transpose([0, 1, 3, 2]) / 255.0\n # initial_image = initial_image[:, ::-1, :, :].copy() # flip bgr\n initial_image = initial_image[:, :, 60:300, 30:470]\n initial_image_pt = ptu.from_numpy(initial_image)\n save_image(initial_image_pt.data.cpu(), 'initial.png', nrow=1)\n initial_latent = model.encode(initial_image_pt).detach().cpu().numpy().flatten()\n print(\"Finished initial_latent\")\n reward_params = dict(\n goal_latent=goal_latent,\n initial_latent=initial_latent,\n goal_image=goal_image_flat,\n initial_image=initial_image_flat,\n # type=\"latent_distance\"\n # type=\"regression_distance\"\n type=reg_type\n )\n config_params = dict(\n initial_type=\"use_initial_from_trajectory\",\n # initial_type=\"use_initial_from_trajectory\",\n # goal_type=\"use_goal_from_trajectory\",\n goal_type=\"\",\n use_initial=True\n )\n\n env = variant['env_class'](**variant['env_kwargs'])\n env = ImageEnv(env,\n recompute_reward=False,\n transpose=True,\n image_length=450000,\n reward_type=\"image_distance\",\n # init_camera=sawyer_pusher_camera_upright_v2,\n )\n env = EncoderWrappedEnv(env, model, reward_params, config_params)\n print(\"Finished creating env\")\n demo_paths=[\"/home/anair/ros_ws/src/railrl-private/demos/rlbench/demo_left_%i.mp4\" % i for i in range(10)]\n # demo_paths+=[\"/home/anair/ros_ws/src/railrl-private/demos/rlbench/demo_left_%i.pkl\" % i for i in range(10)]\n\n # processed_demo_path = \"/home/anair/ros_ws/src/railrl-private/demos/door_demos_v3/processed_demos_imagenet2.pkl\" # use this for imagenet\n # processed_demo_path = \"/home/anair/ros_ws/src/railrl-private/demos/door_demos_v3/processed_demos_imagenet_jitter2.pkl\"\n if use_imagenet:\n processed_demo_path = \"/home/anair/ros_ws/src/railrl-private/demos/rlbench/demo_right_%s_imagenet_jitter2.pkl\" % color\n else:\n processed_demo_path = \"/home/anair/ros_ws/src/railrl-private/demos/door_demos_v3/processed_demos_%s_jitter2.pkl\" % color\n name = color\n if use_imagenet:\n name = \"_imagenet_%s\"%color\n name = \"demos/rlbench/%s_%s\"%(name,reward_params[\"type\"])\n print(\"Loading demos for: \", name)\n load_demos(demo_paths, processed_demo_path, reference_path, name)\n demo_trajectory_rewards = []", "\"\"\"\nProfile SAC\n\"\"\"\n\nimport numpy as np\n\nimport 
railrl.torch.pytorch_util as ptu\nfrom railrl.envs.multigoal import MultiGoalEnv\nfrom railrl.envs.wrappers import NormalizedBoxEnv\nfrom railrl.launchers.launcher_util import setup_logger\nfrom railrl.torch.sac.policies import TanhGaussianPolicy\nfrom railrl.torch.sac.sac import SoftActorCritic\nfrom railrl.torch.networks import FlattenMlp\nimport torch\n\n\ndef experiment(variant):\n env = NormalizedBoxEnv(MultiGoalEnv(\n actuation_cost_coeff=10,\n distance_cost_coeff=1,\n goal_reward=10,\n ))\n\n obs_dim = int(np.prod(env.observation_space.shape))\n action_dim = int(np.prod(env.action_space.shape))\n\n qf = FlattenMlp(\n hidden_sizes=[100, 100],\n input_size=obs_dim + action_dim,\n output_size=1,\n )\n vf = FlattenMlp(\n hidden_sizes=[100, 100],\n input_size=obs_dim,\n output_size=1,\n )\n policy = TanhGaussianPolicy(\n hidden_sizes=[100, 100],\n obs_dim=obs_dim,\n action_dim=action_dim,\n )\n algorithm = SoftActorCritic(\n env=env,\n policy=policy,\n qf=qf,\n vf=vf,\n **variant['algo_params']\n )\n algorithm.to(ptu.device)\n with torch.autograd.profiler.profile() as prof:\n algorithm.train()\n prof.export_chrome_trace(\"tmp-torch-chrome-trace.prof\")\n\n\nif __name__ == \"__main__\":\n # noinspection PyTypeChecker\n variant = dict(\n algo_params=dict(\n num_epochs=10,\n num_steps_per_epoch=1000,\n num_steps_per_eval=300,\n batch_size=64,\n max_path_length=30,\n reward_scale=0.3,\n discount=0.99,\n soft_target_tau=0.001,\n ),\n )\n setup_logger(\"11-24-profile\")\n experiment(variant)\n" ]
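
The final source string in the list above wraps SoftActorCritic training in torch.autograd.profiler.profile() and exports a Chrome trace. A minimal standalone sketch of that same profiling pattern, using a small hypothetical model rather than anything from the record, looks like this:

    import torch
    import torch.nn as nn

    # Hypothetical stand-in for the profiled training step.
    model = nn.Sequential(nn.Linear(8, 64), nn.ReLU(), nn.Linear(64, 1))
    x = torch.randn(256, 8)

    # Record every op executed inside the context, as the script above does.
    with torch.autograd.profiler.profile() as prof:
        for _ in range(10):
            loss = model(x).pow(2).mean()
            loss.backward()

    # Produces a trace file viewable at chrome://tracing, plus a text summary.
    prof.export_chrome_trace("tmp-torch-chrome-trace.prof")
    print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=5))

As in the script, the context manager only records while it is open, so anything outside the `with` block (setup, logging) is excluded from the trace.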
[ [ "torch.optim.Adam", "torch.clamp", "numpy.expand_dims", "matplotlib.pyplot.subplots", "numpy.ones", "numpy.argmin", "numpy.random.uniform", "numpy.repeat", "numpy.array" ], [ "numpy.prod" ], [ "torch.load" ], [ "numpy.ascontiguousarray", "numpy.expand_dims", "torch.ones", "torch.cumprod" ], [ "matplotlib.pyplot.legend", "matplotlib.pyplot.gca", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.title", "matplotlib.pyplot.FuncFormatter", "matplotlib.pyplot.savefig", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylabel" ], [ "numpy.abs", "torch.load", "matplotlib.use", "numpy.linalg.norm", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "numpy.load", "numpy.zeros", "matplotlib.pyplot.figure" ], [ "torch.autograd.profiler.profile", "numpy.prod" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aviplane/runmanager
[ "25fd87ad2fb1232e8484eb8b8643263a81ba588c" ]
[ "runmanager/__init__.py" ]
[ "#####################################################################\n# #\n# /__init__.py #\n# #\n# Copyright 2013, Monash University #\n# #\n# This file is part of the program runmanager, in the labscript #\n# suite (see http://labscriptsuite.org), and is licensed under the #\n# Simplified BSD License. See the license.txt file in the root of #\n# the project for the full license. #\n# #\n#####################################################################\n\nimport itertools\nimport os\nimport sys\nimport random\nimport time\nimport subprocess\nimport types\nimport threading\nimport traceback\nimport datetime\nimport errno\nimport json\nimport tokenize\nimport io\n\nimport labscript_utils.h5_lock\nimport h5py\nimport numpy as np\n\nfrom labscript_utils.ls_zprocess import ProcessTree, zmq_push_multipart\nfrom labscript_utils.labconfig import LabConfig\nprocess_tree = ProcessTree.instance()\n\nfrom .__version__ import __version__\n\n\ndef _ensure_str(s):\n \"\"\"convert bytestrings and numpy strings to python strings. Also converts lambda expressions to strings.\"\"\"\n a = s.decode() if isinstance(s, bytes) else str(s)\n if callable(s):\n print(\"Function: {}\".format(s))\n return \"<function>\"\n return a\n\ndef is_valid_python_identifier(name):\n # No whitespace allowed. Do this check here because an actual newline in the source\n # is not easily distinguished from a NEWLINE token in the produced tokens, which is\n # produced even when there is no newline character in the string. So since we ignore\n # NEWLINE later, we must check for it now.\n if name != \"\".join(name.split()):\n return False\n try:\n tokens = list(tokenize.generate_tokens(io.StringIO(name).readline))\n except tokenize.TokenError:\n return False\n token_types = [\n t[0] for t in tokens if t[0] not in [tokenize.NEWLINE, tokenize.ENDMARKER]\n ]\n if len(token_types) == 1:\n return token_types[0] == tokenize.NAME\n return False\n\n\ndef is_valid_hdf5_group_name(name):\n \"\"\"Ensure that a string is a valid name for an hdf5 group.\n\n The names of hdf5 groups may only contain ASCII characters. Furthermore, the\n characters \"/\" and \".\" are not allowed.\n\n Args:\n name (str): The potential name for an hdf5 group.\n\n Returns:\n bool: Whether or not `name` is a valid name for an hdf5 group. This will\n be `True` if it is a valid name or `False` otherwise.\n \"\"\"\n # Ensure only ASCII characters are used.\n for char in name:\n if ord(char) >= 128:\n return False\n\n # Ensure forbidden ASCII characters are not used.\n forbidden_characters = ['.', '/']\n for character in forbidden_characters:\n if character in name:\n return False\n return True\n\n\nclass ExpansionError(Exception):\n\n \"\"\"An exception class so that error handling code can tell when a\n parsing exception was caused by a mismatch with the expansion mode\"\"\"\n pass\n\n\nclass TraceDictionary(dict):\n\n def __init__(self, *args, **kwargs):\n self.trace_data = None\n dict.__init__(self, *args, **kwargs)\n\n def start_trace(self):\n self.trace_data = []\n\n def __getitem__(self, key):\n if self.trace_data is not None:\n if key not in self.trace_data:\n self.trace_data.append(key)\n return dict.__getitem__(self, key)\n\n def stop_trace(self):\n trace_data = self.trace_data\n self.trace_data = None\n return trace_data\n\n\ndef new_globals_file(filename):\n with h5py.File(filename, 'w') as f:\n f.create_group('globals')\n\n\ndef add_expansion_groups(filename):\n \"\"\"backward compatability, for globals files which don't have\n expansion groups. 
Create them if they don't exist. Guess expansion\n settings based on datatypes, if possible.\"\"\"\n # DEPRECATED\n # Don't open in write mode unless we have to:\n with h5py.File(filename, 'r') as f:\n requires_expansion_group = []\n for groupname in f['globals']:\n group = f['globals'][groupname]\n if not 'expansion' in group:\n requires_expansion_group.append(groupname)\n if requires_expansion_group:\n group_globalslists = [get_globalslist(filename, groupname) for groupname in requires_expansion_group]\n with h5py.File(filename, 'a') as f:\n for groupname, globalslist in zip(requires_expansion_group, group_globalslists):\n group = f['globals'][groupname]\n subgroup = group.create_group('expansion')\n # Initialise all expansion settings to blank strings:\n for name in globalslist:\n subgroup.attrs[name] = ''\n groups = {group_name: filename for group_name in get_grouplist(filename)}\n sequence_globals = get_globals(groups)\n evaled_globals, global_hierarchy, expansions = evaluate_globals(sequence_globals, raise_exceptions=False)\n for group_name in evaled_globals:\n for global_name in evaled_globals[group_name]:\n value = evaled_globals[group_name][global_name]\n expansion = guess_expansion_type(value)\n set_expansion(filename, group_name, global_name, expansion)\n\n\ndef get_grouplist(filename):\n # For backward compatability, add 'expansion' settings to this\n # globals file, if it doesn't contain any. Guess expansion settings\n # if possible.\n # DEPRECATED\n add_expansion_groups(filename)\n with h5py.File(filename, 'r') as f:\n grouplist = f['globals']\n # File closes after this function call, so have to\n # convert the grouplist generator to a list of strings\n # before its file gets dereferenced:\n return list(grouplist)\n\n\ndef new_group(filename, groupname):\n if not is_valid_hdf5_group_name(groupname):\n raise ValueError(\n 'Invalid group name. Group names must contain only ASCII '\n 'characters and cannot include \"/\" or \".\".'\n )\n with h5py.File(filename, 'a') as f:\n if groupname in f['globals']:\n raise Exception('Can\\'t create group: target name already exists.')\n group = f['globals'].create_group(groupname)\n group.create_group('units')\n group.create_group('expansion')\n\n\ndef copy_group(source_globals_file, source_groupname, dest_globals_file, delete_source_group=False):\n \"\"\" This function copies the group source_groupname from source_globals_file\n to dest_globals_file and renames the new group so that there is no name\n collision. 
If delete_source_group is False the copyied files have\n a suffix '_copy'.\"\"\"\n with h5py.File(source_globals_file, 'a') as source_f:\n # check if group exists\n if source_groupname not in source_f['globals']:\n raise Exception('Can\\'t copy there is no group \"{}\"!'.format(source_groupname))\n\n # Are we coping from one file to another?\n if dest_globals_file is not None and source_globals_file != dest_globals_file:\n dest_f = h5py.File(dest_globals_file, 'a') # yes -> open dest_globals_file\n else:\n dest_f = source_f # no -> dest files is source file\n\n # rename Group until there is no name collisions\n i = 0 if not delete_source_group else 1\n dest_groupname = source_groupname\n while dest_groupname in dest_f['globals']:\n dest_groupname = \"{}({})\".format(dest_groupname, i) if i > 0 else \"{}_copy\".format(dest_groupname)\n i += 1\n\n # copy group\n dest_f.copy(source_f['globals'][source_groupname], '/globals/%s' % dest_groupname)\n\n # close opend file\n if dest_f != source_f:\n dest_f.close()\n\n return dest_groupname\n\n\ndef rename_group(filename, oldgroupname, newgroupname):\n if oldgroupname == newgroupname:\n # No rename!\n return\n if not is_valid_hdf5_group_name(newgroupname):\n raise ValueError(\n 'Invalid group name. Group names must contain only ASCII '\n 'characters and cannot include \"/\" or \".\".'\n )\n with h5py.File(filename, 'a') as f:\n if newgroupname in f['globals']:\n raise Exception('Can\\'t rename group: target name already exists.')\n f.copy(f['globals'][oldgroupname], '/globals/%s' % newgroupname)\n del f['globals'][oldgroupname]\n\n\ndef delete_group(filename, groupname):\n with h5py.File(filename, 'a') as f:\n del f['globals'][groupname]\n\n\ndef get_globalslist(filename, groupname):\n with h5py.File(filename, 'r') as f:\n group = f['globals'][groupname]\n # File closes after this function call, so have to convert\n # the attrs to a dict before its file gets dereferenced:\n return dict(group.attrs)\n\n\ndef new_global(filename, groupname, globalname):\n if not is_valid_python_identifier(globalname):\n raise ValueError('%s is not a valid Python variable name'%globalname)\n with h5py.File(filename, 'a') as f:\n group = f['globals'][groupname]\n if globalname in group.attrs:\n raise Exception('Can\\'t create global: target name already exists.')\n group.attrs[globalname] = ''\n f['globals'][groupname]['units'].attrs[globalname] = ''\n f['globals'][groupname]['expansion'].attrs[globalname] = ''\n\n\ndef rename_global(filename, groupname, oldglobalname, newglobalname):\n if oldglobalname == newglobalname:\n # No rename!\n return\n if not is_valid_python_identifier(newglobalname):\n raise ValueError('%s is not a valid Python variable name'%newglobalname)\n value = get_value(filename, groupname, oldglobalname)\n units = get_units(filename, groupname, oldglobalname)\n expansion = get_expansion(filename, groupname, oldglobalname)\n with h5py.File(filename, 'a') as f:\n group = f['globals'][groupname]\n if newglobalname in group.attrs:\n raise Exception('Can\\'t rename global: target name already exists.')\n group.attrs[newglobalname] = value\n group['units'].attrs[newglobalname] = units\n group['expansion'].attrs[newglobalname] = expansion\n del group.attrs[oldglobalname]\n del group['units'].attrs[oldglobalname]\n del group['expansion'].attrs[oldglobalname]\n\n\ndef get_value(filename, groupname, globalname):\n with h5py.File(filename, 'r') as f:\n value = f['globals'][groupname].attrs[globalname]\n # Replace numpy strings with python unicode strings.\n # 
DEPRECATED, for backward compat with old files\n value = _ensure_str(value)\n return value\n\n\ndef set_value(filename, groupname, globalname, value):\n with h5py.File(filename, 'a') as f:\n f['globals'][groupname].attrs[globalname] = value\n\n\ndef get_units(filename, groupname, globalname):\n with h5py.File(filename, 'r') as f:\n value = f['globals'][groupname]['units'].attrs[globalname]\n # Replace numpy strings with python unicode strings.\n # DEPRECATED, for backward compat with old files\n value = _ensure_str(value)\n return value\n\n\ndef set_units(filename, groupname, globalname, units):\n with h5py.File(filename, 'a') as f:\n f['globals'][groupname]['units'].attrs[globalname] = units\n\n\ndef get_expansion(filename, groupname, globalname):\n with h5py.File(filename, 'r') as f:\n value = f['globals'][groupname]['expansion'].attrs[globalname]\n # Replace numpy strings with python unicode strings.\n # DEPRECATED, for backward compat with old files\n value = _ensure_str(value)\n return value\n\n\ndef set_expansion(filename, groupname, globalname, expansion):\n with h5py.File(filename, 'a') as f:\n f['globals'][groupname]['expansion'].attrs[globalname] = expansion\n\n\ndef delete_global(filename, groupname, globalname):\n with h5py.File(filename, 'a') as f:\n group = f['globals'][groupname]\n del group.attrs[globalname]\n\n\ndef guess_expansion_type(value):\n if isinstance(value, np.ndarray) or isinstance(value, list):\n return u'outer'\n else:\n return u''\n\n\ndef iterator_to_tuple(iterator, max_length=1000000):\n # We want to prevent infinite length tuples, but we cannot know\n # whether they are infinite or not in advance. So we'll convert to\n # a tuple only if the length is less than max_length:\n temp_list = []\n for i, element in enumerate(iterator):\n temp_list.append(element)\n if i == max_length:\n raise ValueError('This iterator is very long, possibly infinite. ' +\n 'Runmanager cannot create an infinite number of shots. ' +\n 'If you really want an iterator longer than %d, ' % max_length +\n 'please modify runmanager.iterator_to_tuple and increase max_length.')\n return tuple(temp_list)\n\n\ndef get_all_groups(h5_files):\n \"\"\"returns a dictionary of group_name: h5_path pairs from a list of h5_files.\"\"\"\n if isinstance(h5_files, bytes) or isinstance(h5_files, str):\n h5_files = [h5_files]\n groups = {}\n for path in h5_files:\n for group_name in get_grouplist(path):\n if group_name in groups:\n raise ValueError('Error: group %s is defined in both %s and %s. ' % (group_name, groups[group_name], path) +\n 'Only uniquely named groups can be used together '\n 'to make a run file.')\n groups[group_name] = path\n return groups\n\n\ndef get_globals(groups):\n \"\"\"Takes a dictionary of group_name: h5_file pairs and pulls the\n globals out of the groups in their files. The globals are strings\n storing python expressions at this point. 
All these globals are\n packed into a new dictionary, keyed by group_name, where the values\n are dictionaries which look like {global_name: (expression, units, expansion), ...}\"\"\"\n # get a list of filepaths:\n filepaths = set(groups.values())\n sequence_globals = {}\n for filepath in filepaths:\n groups_from_this_file = [g for g, f in groups.items() if f == filepath]\n with h5py.File(filepath, 'r') as f:\n for group_name in groups_from_this_file:\n sequence_globals[group_name] = {}\n globals_group = f['globals'][group_name]\n values = dict(globals_group.attrs)\n units = dict(globals_group['units'].attrs)\n expansions = dict(globals_group['expansion'].attrs)\n for global_name, value in values.items():\n unit = units[global_name]\n expansion = expansions[global_name]\n # Replace numpy strings with python unicode strings.\n # DEPRECATED, for backward compat with old files\n value = _ensure_str(value)\n unit = _ensure_str(unit)\n expansion = _ensure_str(expansion)\n sequence_globals[group_name][global_name] = value, unit, expansion\n return sequence_globals\n\n\ndef evaluate_globals(sequence_globals, raise_exceptions=True):\n \"\"\"Takes a dictionary of globals as returned by get_globals. These\n globals are unevaluated strings. Evaluates them all in the same\n namespace so that the expressions can refer to each other. Iterates\n to allow for NameErrors to be resolved by subsequently defined\n globals. Throws an exception if this does not result in all errors\n going away. The exception contains the messages of all exceptions\n which failed to be resolved. If raise_exceptions is False, any\n evaluations resulting in an exception will instead return the\n exception object in the results dictionary\"\"\"\n\n # Flatten all the groups into one dictionary of {global_name:\n # expression} pairs. Also create the group structure of the results\n # dict, which has the same structure as sequence_globals:\n all_globals = {}\n results = {}\n expansions = {}\n global_hierarchy = {}\n # Pre-fill the results dictionary with groups, this is needed for\n # storing exceptions in the case of globals with the same name being\n # defined in multiple groups (all of them get the exception):\n for group_name in sequence_globals:\n results[group_name] = {}\n multiply_defined_globals = set()\n for group_name in sequence_globals:\n for global_name in sequence_globals[group_name]:\n if global_name in all_globals:\n # The same global is defined twice. 
Either raise an\n # exception, or store the exception for each place it is\n # defined, depending on whether raise_exceptions is True:\n groups_with_same_global = []\n for other_group_name in sequence_globals:\n if global_name in sequence_globals[other_group_name]:\n groups_with_same_global.append(other_group_name)\n exception = ValueError('Global named \\'%s\\' is defined in multiple active groups:\\n ' % global_name +\n '\\n '.join(groups_with_same_global))\n if raise_exceptions:\n raise exception\n for other_group_name in groups_with_same_global:\n results[other_group_name][global_name] = exception\n multiply_defined_globals.add(global_name)\n all_globals[global_name], units, expansion = sequence_globals[group_name][global_name]\n expansions[global_name] = expansion\n\n # Do not attempt to evaluate globals which are multiply defined:\n for global_name in multiply_defined_globals:\n del all_globals[global_name]\n\n # Eval the expressions in the same namespace as each other:\n evaled_globals = {}\n # we use a \"TraceDictionary\" to track which globals another global depends on\n sandbox = TraceDictionary()\n exec('from pylab import *', sandbox, sandbox)\n exec('from runmanager.functions import *', sandbox, sandbox)\n globals_to_eval = all_globals.copy()\n previous_errors = -1\n while globals_to_eval:\n errors = []\n for global_name, expression in globals_to_eval.copy().items():\n # start the trace to determine which globals this global depends on\n sandbox.start_trace()\n try:\n code = compile(expression, '<string>', 'eval')\n value = eval(code, sandbox)\n # Need to know the length of any generators, convert to tuple:\n if isinstance(value, types.GeneratorType):\n value = iterator_to_tuple(value)\n # Make sure if we're zipping or outer-producting this value, that it can\n # be iterated over:\n if expansions[global_name] == 'outer':\n try:\n iter(value)\n except Exception as e:\n raise ExpansionError(str(e))\n except Exception as e:\n # Don't raise, just append the error to a list, we'll display them all later.\n errors.append((global_name, e))\n sandbox.stop_trace()\n continue\n # Put the global into the namespace so other globals can use it:\n sandbox[global_name] = value\n del globals_to_eval[global_name]\n evaled_globals[global_name] = value\n\n # get the results from the global trace\n trace_data = sandbox.stop_trace()\n # Only store names of globals (not other functions)\n for key in list(trace_data): # copy the list before iterating over it\n if key not in all_globals:\n trace_data.remove(key)\n if trace_data:\n global_hierarchy[global_name] = trace_data\n\n if len(errors) == previous_errors:\n # Since some globals may refer to others, we expect maybe\n # some NameErrors to have occured. There should be fewer\n # NameErrors each iteration of this while loop, as globals\n # that are required become defined. 
If there are not fewer\n # errors, then there is something else wrong and we should\n # raise it.\n if raise_exceptions:\n message = 'Error parsing globals:\\n'\n for global_name, exception in errors:\n message += '%s: %s: %s\\n' % (global_name, exception.__class__.__name__, str(exception))\n raise Exception(message)\n else:\n for global_name, exception in errors:\n evaled_globals[global_name] = exception\n break\n previous_errors = len(errors)\n\n # Assemble results into a dictionary of the same format as sequence_globals:\n for group_name in sequence_globals:\n for global_name in sequence_globals[group_name]:\n # Do not attempt to override exception objects already stored\n # as the result of multiply defined globals:\n if not global_name in results[group_name]:\n results[group_name][global_name] = evaled_globals[global_name]\n\n return results, global_hierarchy, expansions\n\n\ndef expand_globals(sequence_globals, evaled_globals, expansion_config = None, return_dimensions = False):\n \"\"\"Expands iterable globals according to their expansion\n settings. Creates a number of 'axes' which are to be outer product'ed\n together. Some of these axes have only one element, these are globals\n that do not vary. Some have a set of globals being zipped together,\n iterating in lock-step. Others contain a single global varying\n across its values (the globals set to 'outer' expansion). Returns\n a list of shots, each element of which is a dictionary for that\n shot's globals.\"\"\"\n\n if expansion_config is None:\n order = {}\n shuffle = {}\n else:\n order = {k:v['order'] for k,v in expansion_config.items() if 'order' in v}\n shuffle = {k:v['shuffle'] for k,v in expansion_config.items() if 'shuffle' in v}\n\n values = {}\n expansions = {}\n for group_name in sequence_globals:\n for global_name in sequence_globals[group_name]:\n expression, units, expansion = sequence_globals[group_name][global_name]\n value = evaled_globals[group_name][global_name]\n values[global_name] = value\n expansions[global_name] = expansion\n\n # Get a list of the zip keys in use:\n zip_keys = set(expansions.values())\n try:\n zip_keys.remove('outer')\n except KeyError:\n pass\n\n axes = {}\n global_names = {}\n dimensions = {}\n for zip_key in zip_keys:\n axis = []\n zip_global_names = []\n for global_name in expansions:\n if expansions[global_name] == zip_key:\n value = values[global_name]\n if isinstance(value, Exception):\n continue\n if not zip_key:\n # Wrap up non-iterating globals (with zip_key = '') in a\n # one-element list. When zipped and then outer product'ed,\n # this will give us the result we want:\n value = [value]\n axis.append(value)\n zip_global_names.append(global_name)\n axis = list(zip(*axis))\n dimensions['zip '+zip_key] = len(axis)\n axes['zip '+zip_key] = axis\n global_names['zip '+zip_key] = zip_global_names\n\n # Give each global being outer-product'ed its own axis. 
It gets\n # wrapped up in a list and zipped with itself so that it is in the\n # same format as the zipped globals, ready for outer-producting\n # together:\n for global_name in expansions:\n if expansions[global_name] == 'outer':\n value = values[global_name]\n if isinstance(value, Exception):\n continue\n axis = [value]\n axis = list(zip(*axis))\n dimensions['outer '+global_name] = len(axis)\n axes['outer '+global_name] = axis\n global_names['outer '+global_name] = [global_name]\n\n # add any missing items to order and dimensions\n for key, value in axes.items():\n if key not in order:\n order[key] = -1\n if key not in shuffle:\n shuffle[key] = False\n if key not in dimensions:\n dimensions[key] = 1\n\n # shuffle relevant axes\n for axis_name, axis_values in axes.items():\n if shuffle[axis_name]:\n random.shuffle(axis_values)\n\n # sort axes and global names by order\n axes = [axes.get(key) for key in sorted(order, key=order.get)]\n global_names = [global_names.get(key) for key in sorted(order, key=order.get)]\n\n # flatten the global names\n global_names = [global_name for global_list in global_names for global_name in global_list]\n\n\n shots = []\n for axis_values in itertools.product(*axes):\n # values here is a tuple of tuples, with the outer list being over\n # the axes. We need to flatten it to get our individual values out\n # for each global, since we no longer care what axis they are on:\n global_values = [value for axis in axis_values for value in axis]\n shot_globals = dict(zip(global_names, global_values))\n shots.append(shot_globals)\n\n if return_dimensions:\n return shots, dimensions\n else:\n return shots\n\ndef next_sequence_index(shot_basedir, dt, increment=True):\n \"\"\"Return the next sequence index for sequences in the given base directory (i.e.\n <experiment_shot_storage>/<script_basename>) and the date of the given datetime\n object, and increment the sequence index atomically on disk if increment=True. If\n not setting increment=True, then the result is indicative only and may be used by\n other code at any time. One must increment the sequence index prior to use.\"\"\"\n from labscript_utils.ls_zprocess import Lock\n from labscript_utils.shared_drive import path_to_agnostic\n\n DATE_FORMAT = '%Y-%m-%d'\n # The file where we store the next sequence index on disk:\n sequence_index_file = os.path.join(shot_basedir, '.next_sequence_index')\n # Open with zlock to prevent race conditions with other code:\n with Lock(path_to_agnostic(sequence_index_file), read_only=not increment):\n try:\n with open(sequence_index_file) as f:\n datestr, sequence_index = json.load(f)\n if datestr != dt.strftime(DATE_FORMAT):\n # New day, start from zero again:\n sequence_index = 0\n except (OSError, IOError) as exc:\n if exc.errno != errno.ENOENT:\n raise\n # File doesn't exist yet, start from zero\n sequence_index = 0\n if increment:\n # Write the new file with the incremented sequence index\n os.makedirs(os.path.dirname(sequence_index_file), exist_ok=True)\n with open(sequence_index_file, 'w') as f:\n json.dump([dt.strftime(DATE_FORMAT), sequence_index + 1], f)\n return sequence_index\n\n\ndef new_sequence_details(script_path, config=None, increment_sequence_index=True):\n \"\"\"Generate the details for a new sequence: the toplevel attrs sequence_date,\n sequence_index, sequence_id; and the the output directory and filename prefix for\n the shot files, according to labconfig settings. 
If increment_sequence_index=True,\n then we are claiming the resulting sequence index for use such that it cannot be\n used by anyone else. This should be done if the sequence details are immediately\n about to be used to compile a sequence. Otherwise, set increment_sequence_index to\n False, but in that case the results are indicative only and one should call this\n function again with increment_sequence_index=True before compiling the sequence, as\n otherwise the sequence_index may be used by other code in the meantime.\"\"\"\n if config is None:\n config = LabConfig()\n script_basename = os.path.splitext(os.path.basename(script_path))[0]\n shot_storage = config.get('DEFAULT', 'experiment_shot_storage')\n shot_name_last = config.get('runmanager', 'script_name_last')\n if not shot_name_last:\n shot_basedir = os.path.join(shot_storage, script_basename)\n else:\n shot_basedir = shot_storage\n now = datetime.datetime.now()\n sequence_timestamp = now.strftime('%Y%m%dT%H%M%S')\n\n # Toplevel attributes to be saved to the shot files:\n sequence_date = now.strftime('%Y-%m-%d')\n sequence_id = sequence_timestamp + '_' + script_basename\n sequence_index = next_sequence_index(shot_basedir, now, increment_sequence_index)\n\n sequence_attrs = {\n 'script_basename': script_basename,\n 'sequence_date': sequence_date,\n 'sequence_index': sequence_index,\n 'sequence_id': sequence_id,\n }\n\n # Compute the output directory based on labconfig settings:\n try:\n subdir_format = config.get('runmanager', 'output_folder_format')\n except (LabConfig.NoOptionError, LabConfig.NoSectionError):\n subdir_format = os.path.join('%Y', '%m', '%d', '{sequence_index:05d}')\n\n # Format the output directory according to the current timestamp, sequence index and\n # sequence_timestamp, if present in the format string:\n subdir = now.strftime(subdir_format).format(\n sequence_index=sequence_index, sequence_timestamp=sequence_timestamp\n )\n shot_output_dir = os.path.join(shot_basedir, subdir)\n if shot_name_last:\n shot_output_dir = os.path.join(shot_output_dir, script_basename)\n # Compute the shot filename prefix according to labconfig settings:\n try:\n filename_prefix_format = config.get('runmanager', 'filename_prefix_format')\n except (LabConfig.NoOptionError, LabConfig.NoSectionError):\n # Default, for backward compatibility:\n filename_prefix_format = '{sequence_timestamp}_{script_basename}'\n # Format the filename prefix according to the current timestamp, sequence index,\n # sequence_timestamp, and script_basename, if present in the format string:\n filename_prefix = now.strftime(filename_prefix_format).format(\n sequence_index=sequence_index,\n sequence_timestamp=sequence_timestamp,\n script_basename=script_basename,\n )\n\n return sequence_attrs, shot_output_dir, filename_prefix\n\n\ndef make_run_files(\n output_folder,\n sequence_globals,\n shots,\n sequence_attrs,\n filename_prefix,\n shuffle=False,\n):\n \"\"\"Does what it says. sequence_globals and shots are of the datatypes returned by\n get_globals and get_shots, one is a nested dictionary with string values, and the\n other a flat dictionary. sequence_attrs is a dict of the attributes pertaining to\n this sequence to be initially set at the top-level group of the h5 file, as returned\n by new_sequence_details. output_folder and filename_prefix determine the directory\n shot files will be output to, as well as their filenames (this function will\n generate filenames with the shot number and .h5 extension appended to\n filename_prefix). 
Sensible defaults for these are also returned by\n new_sequence_details(), so preferably these should be used.\n\n Shuffle will randomise the order that the run files are generated in with respect to\n which element of shots they come from. This function returns a *generator*. The run\n files are not actually created until you loop over this generator (which gives you\n the filepaths). This is useful for not having to clean up as many unused files in\n the event of failed compilation of labscripts. If you want all the run files to be\n created at some point, simply convert the returned generator to a list. The\n filenames the run files are given is simply the sequence_id with increasing integers\n appended.\"\"\"\n basename = os.path.join(output_folder, filename_prefix)\n nruns = len(shots)\n ndigits = int(np.ceil(np.log10(nruns)))\n if shuffle:\n random.shuffle(shots)\n for i, shot_globals in enumerate(shots):\n runfilename = ('%s_%0' + str(ndigits) + 'd.h5') % (basename, i)\n make_single_run_file(\n runfilename, sequence_globals, shot_globals, sequence_attrs, i, nruns\n )\n yield runfilename\n\n\ndef make_single_run_file(filename, sequenceglobals, runglobals, sequence_attrs, run_no, n_runs):\n \"\"\"Does what it says. runglobals is a dict of this run's globals, the format being\n the same as that of one element of the list returned by expand_globals.\n sequence_globals is a nested dictionary of the type returned by get_globals.\n sequence_attrs is a dict of attributes pertaining to this sequence, as returned by\n new_sequence_details. run_no and n_runs must be provided, if this run file is part\n of a sequence, then they should reflect how many run files are being generated in\n this sequence, all of which must have identical sequence_attrs.\"\"\"\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n with h5py.File(filename, 'w') as f:\n f.attrs.update(sequence_attrs)\n f.attrs['run number'] = run_no\n f.attrs['n_runs'] = n_runs\n f.create_group('globals')\n if sequenceglobals is not None:\n for groupname, groupvars in sequenceglobals.items():\n group = f['globals'].create_group(groupname)\n unitsgroup = group.create_group('units')\n expansiongroup = group.create_group('expansion')\n for name, (value, units, expansion) in groupvars.items():\n group.attrs[name] = value\n unitsgroup.attrs[name] = units\n expansiongroup.attrs[name] = expansion\n for name, value in runglobals.items():\n if value is None:\n # Store it as a null object reference:\n value = h5py.Reference()\n try:\n f['globals'].attrs[name] = \"<function>\" if callable(value) else value\n except Exception as e:\n message = (f'Global {name} cannot be saved as an hdf5 attribute. ' +\n 'Globals can only have relatively simple datatypes, with no nested structures.' +\n 'If you really want something complicated, remember that lambda expressions are allowed' +\n 'Original error was:\\n' +\n '%s: %s' % (e.__class__.__name__, str(e)))\n raise ValueError(message)\n\n\ndef make_run_file_from_globals_files(labscript_file, globals_files, output_path, config=None):\n \"\"\"Creates a run file output_path, using all the globals from globals_files. 
Uses\n labscript_file to determine the sequence_attrs only\"\"\"\n groups = get_all_groups(globals_files)\n sequence_globals = get_globals(groups)\n evaled_globals, global_hierarchy, expansions = evaluate_globals(sequence_globals)\n shots = expand_globals(sequence_globals, evaled_globals)\n if len(shots) > 1:\n scanning_globals = []\n for global_name in expansions:\n if expansions[global_name]:\n scanning_globals.append(global_name)\n raise ValueError('Cannot compile to a single run file: The following globals are a sequence: ' +\n ', '.join(scanning_globals))\n\n sequence_attrs, _, _ = new_sequence_details(\n labscript_file, config=config, increment_sequence_index=True\n )\n make_single_run_file(output_path, sequence_globals, shots[0], sequence_attrs, 1, 1)\n\n\ndef compile_labscript(labscript_file, run_file):\n \"\"\"Compiles labscript_file with the run file, returning\n the processes return code, stdout and stderr.\"\"\"\n proc = subprocess.Popen([sys.executable, labscript_file, run_file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = proc.communicate()\n return proc.returncode, stdout, stderr\n\n\ndef compile_labscript_with_globals_files(labscript_file, globals_files, output_path):\n \"\"\"Creates a run file output_path, using all the globals from\n globals_files. Compiles labscript_file with the run file, returning\n the processes return code, stdout and stderr.\"\"\"\n make_run_file_from_globals_files(labscript_file, globals_files, output_path)\n returncode, stdout, stderr = compile_labscript(labscript_file, output_path)\n return returncode, stdout, stderr\n\n\ndef compile_labscript_async(labscript_file, run_file, stream_port, done_callback):\n \"\"\"Compiles labscript_file with run_file. This function is designed to be called in\n a thread. The stdout and stderr from the compilation will be shovelled into\n stream_port via zmq push as it spews forth, and when compilation is complete,\n done_callback will be called with a boolean argument indicating success. Note that\n the zmq communication will be encrypted, or not, according to security settings in\n labconfig. If you want to receive the data on a zmq socket, do so using a PULL\n socket created from a labscript_utils.ls_zprocess.Context, or using a\n labscript_utils.ls_zprocess.ZMQServer. These subclasses will also be configured\n with the appropriate security settings and will be able to receive the messages.\n \"\"\"\n compiler_path = os.path.join(os.path.dirname(__file__), 'batch_compiler.py')\n to_child, from_child, child = process_tree.subprocess(\n compiler_path, output_redirection_port=stream_port\n )\n to_child.put(['compile', [labscript_file, run_file]])\n while True:\n signal, data = from_child.get()\n if signal == 'done':\n success = data\n to_child.put(['quit', None])\n child.communicate()\n done_callback(success)\n break\n else:\n raise RuntimeError((signal, data))\n\n\ndef compile_multishot_async(labscript_file, run_files, stream_port, done_callback):\n \"\"\"Compiles labscript_file with run_files. This function is designed to be called in\n a thread. The stdout and stderr from the compilation will be shovelled into\n stream_port via zmq push as it spews forth, and when each compilation is complete,\n done_callback will be called with a boolean argument indicating success. Compilation\n will stop after the first failure. 
If you want to receive the data on a zmq socket,\n do so using a PULL socket created from a labscript_utils.ls_zprocess.Context, or\n using a labscript_utils.ls_zprocess.ZMQServer. These subclasses will also be\n configured with the appropriate security settings and will be able to receive the\n messages.\"\"\"\n compiler_path = os.path.join(os.path.dirname(__file__), 'batch_compiler.py')\n to_child, from_child, child = process_tree.subprocess(\n compiler_path, output_redirection_port=stream_port\n )\n try:\n for run_file in run_files:\n to_child.put(['compile', [labscript_file, run_file]])\n while True:\n signal, data = from_child.get()\n if signal == 'done':\n success = data\n done_callback(data)\n break\n if not success:\n break\n except Exception:\n error = traceback.format_exc()\n zmq_push_multipart(stream_port, data=[b'stderr', error.encode('utf-8')])\n to_child.put(['quit', None])\n child.communicate()\n raise\n to_child.put(['quit', None])\n child.communicate()\n\n\ndef compile_labscript_with_globals_files_async(labscript_file, globals_files, output_path, stream_port, done_callback):\n \"\"\"Same as compile_labscript_with_globals_files, except it launches a thread to do\n the work and does not return anything. Instead, stderr and stdout will be put to\n stream_port via zmq push in the multipart message format ['stdout','hello, world\\n']\n etc. When compilation is finished, the function done_callback will be called a\n boolean argument indicating success or failure. If you want to receive the data on\n a zmq socket, do so using a PULL socket created from a\n labscript_utils.ls_zprocess.Context, or using a\n labscript_utils.ls_zprocess.ZMQServer. These subclasses will also be configured with\n the appropriate security settings and will be able to receive the messages.\"\"\"\n try:\n make_run_file_from_globals_files(labscript_file, globals_files, output_path)\n thread = threading.Thread(\n target=compile_labscript_async, args=[labscript_file, output_path, stream_port, done_callback])\n thread.daemon = True\n thread.start()\n except Exception:\n error = traceback.format_exc()\n zmq_push_multipart(stream_port, data=[b'stderr', error.encode('utf-8')])\n t = threading.Thread(target=done_callback, args=(False,))\n t.daemon = True\n t.start()\n\n\ndef get_shot_globals(filepath):\n \"\"\"Returns the evaluated globals for a shot, for use by labscript or lyse.\n Simple dictionary access as in dict(h5py.File(filepath).attrs) would be fine\n except we want to apply some hacks, so it's best to do that in one place.\"\"\"\n params = {}\n with h5py.File(filepath, 'r') as f:\n for name, value in f['globals'].attrs.items():\n # Convert numpy bools to normal bools:\n if isinstance(value, np.bool_):\n value = bool(value)\n # Convert null HDF references to None:\n if isinstance(value, h5py.Reference) and not value:\n value = None\n # Convert numpy strings to Python ones.\n # DEPRECATED, for backward compat with old files.\n if isinstance(value, np.str_):\n value = str(value)\n if isinstance(value, bytes):\n value = value.decode()\n params[name] = value\n return params\n\n\ndef dict_diff(dict1, dict2):\n \"\"\"Return the difference between two dictionaries as a dictionary of key: [val1, val2] pairs.\n Keys unique to either dictionary are included as key: [val1, '-'] or key: ['-', val2].\"\"\"\n diff_keys = []\n common_keys = np.intersect1d(list(dict1.keys()), list(dict2.keys()))\n for key in common_keys:\n if np.iterable(dict1[key]) or np.iterable(dict2[key]):\n if not np.array_equal(dict1[key], 
dict2[key]):\n diff_keys.append(key)\n else:\n if dict1[key] != dict2[key]:\n diff_keys.append(key)\n\n dict1_unique = [key for key in dict1.keys() if key not in common_keys]\n dict2_unique = [key for key in dict2.keys() if key not in common_keys]\n\n diff = {}\n for key in diff_keys:\n diff[key] = [dict1[key], dict2[key]]\n\n for key in dict1_unique:\n diff[key] = [dict1[key], '-']\n\n for key in dict2_unique:\n diff[key] = ['-', dict2[key]]\n\n return diff\n\n\ndef find_comments(src):\n \"\"\"Return a list of start and end indices for where comments are in given Python\n source. Comments on separate lines with only whitespace in between them are\n coalesced. Whitespace preceding a comment is counted as part of the comment.\"\"\"\n line_start = 0\n comments = []\n tokens = tokenize.generate_tokens(io.StringIO(src).readline)\n try:\n for token_type, token_value, (_, start), (_, end), _ in tokens:\n if token_type == tokenize.COMMENT:\n comments.append((line_start + start, line_start + end))\n if token_value == '\\n':\n line_start += end\n except tokenize.TokenError:\n pass\n # coalesce comments with only whitespace between them:\n to_merge = []\n for i, ((start1, end1), (start2, end2)) in enumerate(zip(comments, comments[1:])):\n if not src[end1:start2].strip():\n to_merge.append(i)\n # Reverse order so deletion doesn't change indices:\n for i in reversed(to_merge):\n start1, end1 = comments[i]\n start2, end2 = comments[i + 1]\n comments[i] = (start1, end2)\n del comments[i + 1]\n # Extend each comment block to the left to include whitespace:\n for i, (start, end) in enumerate(comments):\n n_whitespace_chars = len(src[:start]) - len(src[:start].rstrip())\n comments[i] = start - n_whitespace_chars, end\n # Extend the final comment to the right to include whitespace:\n if comments:\n start, end = comments[-1]\n n_whitespace_chars = len(src[end:]) - len(src[end:].rstrip())\n comments[-1] = (start, end + n_whitespace_chars)\n return comments\n\n\ndef remove_comments_and_tokenify(src):\n \"\"\"Removes comments from source code, leaving it otherwise intact,\n and returns it. Also returns the raw tokens for the code, allowing\n comparisons between source to be made without being sensitive to\n whitespace.\"\"\"\n # Remove comments\n for (start, end) in reversed(find_comments(src)):\n src = src[:start] + src[end:]\n # Tokenify:\n tokens = []\n tokens_iter = tokenize.generate_tokens(io.StringIO(src).readline)\n try:\n for _, token_value, _, _, _ in tokens_iter:\n if token_value:\n tokens.append(token_value)\n except tokenize.TokenError:\n pass\n return src, tokens\n\n\ndef flatten_globals(sequence_globals, evaluated=False):\n \"\"\"Flattens the data structure of the globals. 
If evaluated=False,\n saves only the value expression string of the global, not the\n units or expansion.\"\"\"\n flattened_sequence_globals = {}\n for globals_group in sequence_globals.values():\n for name, value in globals_group.items():\n if evaluated:\n flattened_sequence_globals[name] = value\n else:\n value_expression, units, expansion = value\n flattened_sequence_globals[name] = value_expression\n return flattened_sequence_globals\n\n\ndef globals_diff_groups(active_groups, other_groups, max_cols=1000, return_string=True):\n \"\"\"Given two sets of globals groups, perform a diff of the raw\n and evaluated globals.\"\"\"\n our_sequence_globals = get_globals(active_groups)\n other_sequence_globals = get_globals(other_groups)\n\n # evaluate globals\n our_evaluated_sequence_globals, _, _ = evaluate_globals(our_sequence_globals, raise_exceptions=False)\n other_evaluated_sequence_globals, _, _ = evaluate_globals(other_sequence_globals, raise_exceptions=False)\n\n # flatten globals dictionaries\n our_globals = flatten_globals(our_sequence_globals, evaluated=False)\n other_globals = flatten_globals(other_sequence_globals, evaluated=False)\n our_evaluated_globals = flatten_globals(our_evaluated_sequence_globals, evaluated=True)\n other_evaluated_globals = flatten_globals(other_evaluated_sequence_globals, evaluated=True)\n\n # diff the *evaluated* globals\n value_differences = dict_diff(other_evaluated_globals, our_evaluated_globals)\n\n # We are interested only in displaying globals where *both* the\n # evaluated global *and* its unevaluated expression (ignoring comments\n # and whitespace) differ. This will minimise false positives where a\n # slight change in an expression still leads to the same value, or\n # where an object has a poorly defined equality operator that returns\n # False even when the two objects are identical.\n filtered_differences = {}\n for name, (other_value, our_value) in value_differences.items():\n our_expression = our_globals.get(name, '-')\n other_expression = other_globals.get(name, '-')\n # Strip comments, get tokens so we can diff without being sensitive to comments or whitespace:\n our_expression, our_tokens = remove_comments_and_tokenify(our_expression)\n other_expression, other_tokens = remove_comments_and_tokenify(other_expression)\n if our_tokens != other_tokens:\n filtered_differences[name] = [repr(other_value), repr(our_value), other_expression, our_expression]\n if filtered_differences:\n import pandas as pd\n df = pd.DataFrame.from_dict(filtered_differences, 'index')\n df = df.sort_index()\n df.columns = ['Prev (Eval)', 'Current (Eval)', 'Prev (Raw)', 'Current (Raw)']\n df_string = df.to_string(max_cols=max_cols)\n payload = df_string + '\\n\\n'\n else:\n payload = 'Evaluated globals are identical to those of selected file.\\n'\n if return_string:\n return payload\n else:\n print(payload)\n return df\n\n\ndef globals_diff_shots(file1, file2, max_cols=100):\n # Get file's globals groups\n active_groups = get_all_groups(file1)\n\n # Get other file's globals groups\n other_groups = get_all_groups(file2)\n\n print('Globals diff between:\\n%s\\n%s\\n\\n' % (file1, file2))\n return globals_diff_groups(active_groups, other_groups, max_cols=max_cols, return_string=False)\n" ]
[ [ "numpy.iterable", "numpy.log10", "numpy.array_equal", "pandas.DataFrame.from_dict" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
bartek-bartlomiej/qiskit-terra
[ "247f44ef87b08302514e512e4ed36601e95f33cd" ]
[ "qiskit/opflow/primitive_ops/pauli_op.py" ]
[ "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2020.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"PauliOp Class \"\"\"\n\nfrom typing import Dict, List, Optional, Set, Union, cast\n\nimport numpy as np\nfrom scipy.sparse import spmatrix\n\nfrom qiskit import QuantumCircuit\nfrom qiskit.circuit import Instruction, ParameterExpression\nfrom qiskit.circuit.library import IGate, RXGate, RYGate, RZGate, XGate, YGate, ZGate\nfrom qiskit.opflow.exceptions import OpflowError\nfrom qiskit.opflow.list_ops.summed_op import SummedOp\nfrom qiskit.opflow.list_ops.tensored_op import TensoredOp\nfrom qiskit.opflow.operator_base import OperatorBase\nfrom qiskit.opflow.primitive_ops.primitive_op import PrimitiveOp\nfrom qiskit.quantum_info import Pauli, SparsePauliOp, Statevector\n\nPAULI_GATE_MAPPING = {\"X\": XGate(), \"Y\": YGate(), \"Z\": ZGate(), \"I\": IGate()}\n\n\nclass PauliOp(PrimitiveOp):\n \"\"\"Class for Operators backed by Terra's ``Pauli`` module.\"\"\"\n\n primitive: Pauli\n\n def __init__(self, primitive: Pauli, coeff: Union[complex, ParameterExpression] = 1.0) -> None:\n \"\"\"\n Args:\n primitive: The Pauli which defines the behavior of the underlying function.\n coeff: A coefficient multiplying the primitive.\n\n Raises:\n TypeError: invalid parameters.\n \"\"\"\n if not isinstance(primitive, Pauli):\n raise TypeError(f\"PauliOp can only be instantiated with Paulis, not {type(primitive)}\")\n super().__init__(primitive, coeff=coeff)\n\n def primitive_strings(self) -> Set[str]:\n return {\"Pauli\"}\n\n @property\n def num_qubits(self) -> int:\n return len(self.primitive)\n\n def add(self, other: OperatorBase) -> OperatorBase:\n if not self.num_qubits == other.num_qubits:\n raise ValueError(\n \"Sum over operators with different numbers of qubits, {} and {}, is not well \"\n \"defined\".format(self.num_qubits, other.num_qubits)\n )\n\n if isinstance(other, PauliOp) and self.primitive == other.primitive:\n return PauliOp(self.primitive, coeff=self.coeff + other.coeff)\n\n # pylint: disable=cyclic-import\n from .pauli_sum_op import PauliSumOp\n\n if (\n isinstance(other, PauliOp)\n and isinstance(self.coeff, (int, float, complex))\n and isinstance(other.coeff, (int, float, complex))\n ):\n return PauliSumOp(\n SparsePauliOp(self.primitive, coeffs=[self.coeff])\n + SparsePauliOp(other.primitive, coeffs=[other.coeff])\n )\n\n if isinstance(other, PauliSumOp) and isinstance(self.coeff, (int, float, complex)):\n return PauliSumOp(SparsePauliOp(self.primitive, coeffs=[self.coeff])) + other\n\n return SummedOp([self, other])\n\n def adjoint(self) -> \"PauliOp\":\n return PauliOp(self.primitive, coeff=self.coeff.conjugate())\n\n def equals(self, other: OperatorBase) -> bool:\n if not isinstance(other, PauliOp) or not self.coeff == other.coeff:\n return False\n\n return self.primitive == other.primitive\n\n def _expand_dim(self, num_qubits: int) -> \"PauliOp\":\n return PauliOp(Pauli(\"I\" * num_qubits).expand(self.primitive), coeff=self.coeff)\n\n def tensor(self, other: OperatorBase) -> OperatorBase:\n # Both Paulis\n if isinstance(other, PauliOp):\n return PauliOp(self.primitive.tensor(other.primitive), 
coeff=self.coeff * other.coeff)\n\n # pylint: disable=cyclic-import\n from .pauli_sum_op import PauliSumOp\n\n if isinstance(other, PauliSumOp):\n new_primitive = SparsePauliOp(self.primitive).tensor(other.primitive)\n return PauliSumOp(new_primitive, coeff=self.coeff * other.coeff)\n\n from .circuit_op import CircuitOp\n\n if isinstance(other, CircuitOp):\n return self.to_circuit_op().tensor(other)\n\n return TensoredOp([self, other])\n\n def permute(self, permutation: List[int]) -> \"PauliOp\":\n \"\"\"Permutes the sequence of Pauli matrices.\n\n Args:\n permutation: A list defining where each Pauli should be permuted. The Pauli at index\n j of the primitive should be permuted to position permutation[j].\n\n Returns:\n A new PauliOp representing the permuted operator. For operator (X ^ Y ^ Z) and\n indices=[1,2,4], it returns (X ^ I ^ Y ^ Z ^ I).\n\n Raises:\n OpflowError: if indices do not define a new index for each qubit.\n \"\"\"\n pauli_string = self.primitive.__str__()\n length = max(permutation) + 1 # size of list must be +1 larger then its max index\n new_pauli_list = [\"I\"] * length\n if len(permutation) != self.num_qubits:\n raise OpflowError(\n \"List of indices to permute must \" \"have the same size as Pauli Operator\"\n )\n for i, index in enumerate(permutation):\n new_pauli_list[-index - 1] = pauli_string[-i - 1]\n return PauliOp(Pauli(\"\".join(new_pauli_list)), self.coeff)\n\n def compose(\n self, other: OperatorBase, permutation: Optional[List[int]] = None, front: bool = False\n ) -> OperatorBase:\n\n new_self, other = self._expand_shorter_operator_and_permute(other, permutation)\n new_self = cast(PauliOp, new_self)\n\n if front:\n return other.compose(new_self)\n # If self is identity, just return other.\n if not any(new_self.primitive.x + new_self.primitive.z):\n return other * new_self.coeff\n\n # Both Paulis\n if isinstance(other, PauliOp):\n product = new_self.primitive.dot(other.primitive)\n return PrimitiveOp(product, coeff=new_self.coeff * other.coeff)\n\n # pylint: disable=cyclic-import\n from .pauli_sum_op import PauliSumOp\n\n if isinstance(other, PauliSumOp):\n return PauliSumOp(\n SparsePauliOp(new_self.primitive).dot(other.primitive),\n coeff=new_self.coeff * other.coeff,\n )\n\n # pylint: disable=cyclic-import\n from .circuit_op import CircuitOp\n from ..state_fns.circuit_state_fn import CircuitStateFn\n\n if isinstance(other, (CircuitOp, CircuitStateFn)):\n return new_self.to_circuit_op().compose(other)\n\n return super(PauliOp, new_self).compose(other)\n\n def to_matrix(self, massive: bool = False) -> np.ndarray:\n OperatorBase._check_massive(\"to_matrix\", True, self.num_qubits, massive)\n return self.primitive.to_matrix() * self.coeff\n\n def to_spmatrix(self) -> spmatrix:\n \"\"\"Returns SciPy sparse matrix representation of the Operator.\n\n Returns:\n CSR sparse matrix representation of the Operator.\n\n Raises:\n ValueError: invalid parameters.\n \"\"\"\n return self.primitive.to_matrix(sparse=True) * self.coeff\n\n def __str__(self) -> str:\n prim_str = str(self.primitive)\n if self.coeff == 1.0:\n return prim_str\n else:\n return f\"{self.coeff} * {prim_str}\"\n\n def eval(\n self,\n front: Optional[\n Union[str, Dict[str, complex], np.ndarray, OperatorBase, Statevector]\n ] = None,\n ) -> Union[OperatorBase, complex]:\n if front is None:\n return self.to_matrix_op()\n\n # pylint: disable=cyclic-import\n from ..state_fns.state_fn import StateFn\n from ..state_fns.dict_state_fn import DictStateFn\n from ..state_fns.circuit_state_fn import 
CircuitStateFn\n from ..list_ops.list_op import ListOp\n from .circuit_op import CircuitOp\n\n new_front = None\n\n # For now, always do this. If it's not performant, we can be more granular.\n if not isinstance(front, OperatorBase):\n front = StateFn(front, is_measurement=False)\n\n if isinstance(front, ListOp) and front.distributive:\n new_front = front.combo_fn(\n [self.eval(front.coeff * front_elem) for front_elem in front.oplist]\n )\n\n else:\n\n if self.num_qubits != front.num_qubits:\n raise ValueError(\n \"eval does not support operands with differing numbers of qubits, \"\n \"{} and {}, respectively.\".format(self.num_qubits, front.num_qubits)\n )\n\n if isinstance(front, DictStateFn):\n\n new_dict: Dict[str, complex] = {}\n corrected_x_bits = self.primitive.x[::-1]\n corrected_z_bits = self.primitive.z[::-1]\n\n for bstr, v in front.primitive.items():\n bitstr = np.fromiter(bstr, dtype=int).astype(bool)\n new_b_str = np.logical_xor(bitstr, corrected_x_bits)\n new_str = \"\".join(map(str, 1 * new_b_str))\n z_factor = np.product(1 - 2 * np.logical_and(bitstr, corrected_z_bits))\n y_factor = np.product(\n np.sqrt(1 - 2 * np.logical_and(corrected_x_bits, corrected_z_bits) + 0j)\n )\n new_dict[new_str] = (v * z_factor * y_factor) + new_dict.get(new_str, 0)\n # The coefficient consists of:\n # 1. the coefficient of *this* PauliOp (self)\n # 2. the coefficient of the evaluated DictStateFn (front)\n # 3. AND acquires the phase of the internal primitive. This is necessary to\n # ensure that (X @ Z) and (-iY) return the same result.\n new_front = StateFn(\n new_dict, coeff=self.coeff * front.coeff * (-1j) ** self.primitive.phase\n )\n\n elif isinstance(front, StateFn) and front.is_measurement:\n raise ValueError(\"Operator composed with a measurement is undefined.\")\n\n # Composable types with PauliOp\n elif isinstance(front, (PauliOp, CircuitOp, CircuitStateFn)):\n new_front = self.compose(front)\n\n # Covers VectorStateFn and OperatorStateFn\n elif isinstance(front, StateFn):\n new_front = self.to_matrix_op().eval(front.to_matrix_op())\n\n return new_front\n\n def exp_i(self) -> OperatorBase:\n \"\"\"Return a ``CircuitOp`` equivalent to e^-iH for this operator H.\"\"\"\n # if only one qubit is significant, we can perform the evolution\n corrected_x = self.primitive.x[::-1]\n corrected_z = self.primitive.z[::-1]\n sig_qubits = np.logical_or(corrected_x, corrected_z)\n if np.sum(sig_qubits) == 0:\n # e^I is just a global phase, but we can keep track of it! 
Should we?\n # For now, just return identity\n return PauliOp(self.primitive)\n if np.sum(sig_qubits) == 1:\n sig_qubit_index = sig_qubits.tolist().index(True)\n coeff = (\n np.real(self.coeff)\n if not isinstance(self.coeff, ParameterExpression)\n else self.coeff\n )\n\n from .circuit_op import CircuitOp\n\n # Y rotation\n if corrected_x[sig_qubit_index] and corrected_z[sig_qubit_index]:\n rot_op = CircuitOp(RYGate(2 * coeff))\n # Z rotation\n elif corrected_z[sig_qubit_index]:\n rot_op = CircuitOp(RZGate(2 * coeff))\n # X rotation\n elif corrected_x[sig_qubit_index]:\n rot_op = CircuitOp(RXGate(2 * coeff))\n\n # pylint: disable=cyclic-import\n from ..operator_globals import I\n\n left_pad = I.tensorpower(sig_qubit_index)\n right_pad = I.tensorpower(self.num_qubits - sig_qubit_index - 1)\n # Need to use overloaded operators here in case left_pad == I^0\n return left_pad ^ rot_op ^ right_pad\n else:\n from ..evolutions.evolved_op import EvolvedOp\n\n return EvolvedOp(self)\n\n def to_circuit(self) -> QuantumCircuit:\n # If Pauli equals identity, don't skip the IGates\n is_identity = sum(self.primitive.x + self.primitive.z) == 0\n\n # Note: Reversing endianness!!\n qc = QuantumCircuit(len(self.primitive))\n for q, pauli_str in enumerate(reversed(self.primitive.to_label())):\n gate = PAULI_GATE_MAPPING[pauli_str]\n if not pauli_str == \"I\" or is_identity:\n qc.append(gate, qargs=[q])\n return qc\n\n def to_instruction(self) -> Instruction:\n # TODO should we just do the following because performance of adding and deleting IGates\n # doesn't matter?\n # (Reduce removes extra IGates).\n # return PrimitiveOp(self.primitive.to_instruction(), coeff=self.coeff).reduce()\n\n return self.to_circuit().to_instruction()\n\n def to_pauli_op(self, massive: bool = False) -> \"PauliOp\":\n return self\n" ]
[ [ "numpy.logical_xor", "numpy.logical_or", "numpy.real", "numpy.fromiter", "numpy.logical_and", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
axcochrane/gpt-2
[ "d51d19e74ffb1b2557e952cad6b7f31b5d99af60" ]
[ "train-horovod.py" ]
[ "#!/usr/bin/env python3\n# Usage:\n# PYTHONPATH=src ./train --dataset <file|directory|glob>\n\nimport fire\nimport json\nimport os\nimport numpy as np\nimport tensorflow as tf\nimport random\nimport time\n\nimport horovod.tensorflow as hvd\n\nimport model, sample, encoder\nfrom load_dataset import load_dataset, Sampler\n\nCHECKPOINT_DIR = 'checkpoint'\nSAMPLE_DIR = 'samples'\n\nhvd.init()\n\ndef maketree(path):\n try:\n os.makedirs(path)\n except:\n pass\n\n\ndef train_main(dataset,\n model_name='117M',\n seed=None,\n batch_size=2,\n sample_length=1023,\n sample_num=1,\n sample_every=4500,\n run_name='run1',\n restore_from='latest',\n save_every=2000,\n combine=50000):\n\n enc = encoder.get_encoder(model_name)\n hparams = model.default_hparams()\n with open(os.path.join('models', model_name, 'hparams.json')) as f:\n hparams.override_from_dict(json.load(f))\n\n if sample_length is None:\n sample_length = hparams.n_ctx // 2\n elif sample_length > hparams.n_ctx:\n raise ValueError(\n \"Can't get samples longer than window size: %s\" % hparams.n_ctx)\n\n # TF config\n\n config = tf.compat.v1.ConfigProto()\n config.gpu_options.visible_device_list = str(hvd.local_rank())\n config.gpu_options.allow_growth = True\n\n with tf.compat.v1.Session(config=config) as sess:\n context = tf.compat.v1.placeholder(tf.int32, [batch_size, None])\n np.random.seed(seed)\n tf.set_random_seed(seed)\n output = model.model(hparams=hparams, X=context)\n loss = tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=context[:, 1:], logits=output['logits'][:, :-1]))\n\n tf_sample = sample.sample_sequence(\n hparams=hparams,\n length=sample_length,\n context=context,\n batch_size=batch_size,\n temperature=0.8,\n top_k=40)\n\n train_vars = [v for v in tf.compat.v1.trainable_variables() if 'model' in v.name]\n\n opt = tf.compat.v1.train.AdamOptimizer()\n opt = hvd.DistributedOptimizer(opt)\n train_op = opt.minimize(loss, var_list=train_vars)\n\n # Horovod: broadcast initial variable states from rank 0 to all other processes.\n # This is necessary to ensure consistent initialization of all workers when\n # training is started with random weights or restored from a checkpoint.\n bcast = hvd.broadcast_global_variables(0)\n\n saver = tf.compat.v1.train.Saver(\n var_list=train_vars,\n max_to_keep=5,\n keep_checkpoint_every_n_hours=2)\n\n sess.run(tf.compat.v1.global_variables_initializer())\n\n\n if restore_from == 'latest':\n ckpt = tf.train.latest_checkpoint(\n os.path.join(CHECKPOINT_DIR, run_name))\n if ckpt is None:\n # Get fresh GPT weights if new run.\n ckpt = tf.train.latest_checkpoint(\n os.path.join('models', model_name))\n elif restore_from == 'fresh':\n ckpt = tf.train.latest_checkpoint(\n os.path.join('models', model_name))\n else:\n ckpt = tf.train.latest_checkpoint(restore_from)\n print(str(hvd.local_rank()), 'Loading checkpoint', ckpt)\n saver.restore(sess, ckpt)\n\n bcast.run()\n\n print(str(hvd.local_rank()), 'Loading dataset...')\n chunks = load_dataset(enc, dataset, combine)\n data_sampler = Sampler(chunks)\n print(str(hvd.local_rank()), 'dataset has', data_sampler.total_size, 'tokens')\n print(str(hvd.local_rank()), 'Training...')\n\n counter = 1\n if os.path.exists(os.path.join(CHECKPOINT_DIR, run_name, 'counter')):\n # Load the step number if we're resuming a run\n # Add 1 so we don't immediately try to save again\n with open(os.path.join(CHECKPOINT_DIR, run_name, 'counter'),\n 'r') as fp:\n counter = int(fp.read()) + 1\n\n def save():\n maketree(os.path.join(CHECKPOINT_DIR, 
run_name))\n print(\n 'Saving',\n os.path.join(CHECKPOINT_DIR, run_name,\n 'model-{}').format(counter))\n saver.save(\n sess,\n os.path.join(CHECKPOINT_DIR, run_name, 'model'),\n global_step=counter)\n with open(os.path.join(CHECKPOINT_DIR, run_name, 'counter'),\n 'w') as fp:\n fp.write(str(counter) + '\\n')\n\n def generate_samples():\n context_tokens = data_sampler.sample(1)\n all_text = []\n index = 0\n while index < sample_num:\n out = sess.run(\n tf_sample, feed_dict={context: batch_size*[context_tokens]})\n for i in range(min(sample_num - index, batch_size)):\n text = enc.decode(out[i])\n text = '======== SAMPLE {} ========\\n{}\\n'.format(index + 1, text)\n all_text.append(text)\n index += 1\n print(text)\n maketree(os.path.join(SAMPLE_DIR, run_name))\n with open(\n os.path.join(SAMPLE_DIR, run_name,\n 'samples-{}').format(counter), 'w') as fp:\n fp.write('\\n'.join(all_text))\n\n avg_loss = (0.0, 0.0)\n start_time = time.time()\n\n try:\n while True:\n\n batch = [data_sampler.sample(1024) for _ in range(batch_size)]\n\n _, lv = sess.run((train_op, loss), feed_dict={context: batch})\n\n avg_loss = (avg_loss[0] * 0.99 + lv, avg_loss[1] * 0.99 + 1.0)\n\n if hvd.rank() == 0:\n if counter % save_every == 0:\n save()\n if counter % sample_every == 0:\n generate_samples()\n\n print(\n '[{counter} | {time:2.2f}] loss={loss:2.2f} avg={avg:2.2f}'\n .format(\n counter=counter,\n time=time.time() - start_time,\n loss=lv,\n avg=avg_loss[0] / avg_loss[1]))\n\n counter += 1\n\n except KeyboardInterrupt:\n print('interrupted')\n if hvd.rank() == 0:\n save()\n\n\nif __name__ == '__main__':\n fire.Fire(train_main)\n" ]
[ [ "tensorflow.compat.v1.ConfigProto", "tensorflow.compat.v1.train.AdamOptimizer", "tensorflow.train.latest_checkpoint", "numpy.random.seed", "tensorflow.compat.v1.trainable_variables", "tensorflow.compat.v1.global_variables_initializer", "tensorflow.compat.v1.Session", "tensorflow.compat.v1.placeholder", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.set_random_seed", "tensorflow.compat.v1.train.Saver" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
valueanalyticslabs/covid-19-germany-gae
[ "75542cdab7df1e2977ca5bbdef2ec8c54c0680a4" ]
[ "tools/plot-compare-sources.py" ]
[ "# MIT License\n\n# Copyright (c) 2020 Dr. Jan-Philip Gehrcke\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\nThis program is part of https://github.com/jgehrcke/covid-19-germany-gae\n\"\"\"\n\nimport os\nimport logging\nimport sys\nfrom datetime import datetime, timedelta\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport matplotlib.ticker as ticker\n\nimport bokeh.plotting\nimport bokeh.models\nfrom bokeh.layouts import column, layout\nimport bokeh.io\nimport bokeh.embed\nimport bokeh.resources\n\nimport jinja2\n\nlog = logging.getLogger()\nlogging.basicConfig(\n level=logging.INFO,\n format=\"%(asctime)s.%(msecs)03d %(levelname)s: %(message)s\",\n datefmt=\"%y%m%d-%H:%M:%S\",\n)\n\n\nNOW = datetime.utcnow()\n\n\ndef main():\n\n # About \"Meldedatum\", vom RKI dashboard: Für die Darstellung der\n # neuübermittelten Fälle pro Tag wird das Meldedatum verwendet – das Datum,\n # an dem das lokale Gesundheitsamt Kenntnis über den Fall erlangt und ihn\n # elektronisch erfasst hat.\n\n # Zwischen der Meldung durch die Ärzte und Labore an das Gesundheitsamt und\n # der Übermittlung der Fälle an die zuständigen Landesbehörden und das RKI\n # können einige Tage vergehen (Melde- und Übermittlungsverzug). Jeden Tag\n # werden dem RKI neue Fälle übermittelt, die am gleichen Tag oder bereits\n # an früheren Tagen an das Gesundheitsamt gemeldet worden sind. Diese Fälle\n # werden in der Grafik Neue COVID-19-Fälle/Tag dann bei dem jeweiligen\n # Datum ergänzt.\n\n START_DATE = \"2020-03-09\"\n\n def _build_case_rate(df):\n # Get time differences (unit: seconds) in the df's datetimeindex. `dt`\n # is a magic accessor that yields an array of time deltas.\n dt_seconds = pd.Series(df.index).diff().dt.total_seconds()\n # Construct new series with original datetimeindex as index and time\n # differences (unit: days) as values.\n dt_days = pd.Series(dt_seconds) / 86400.0\n dt_days.index = df.index\n # print(dt_days)\n cases_change_per_day = df[\"sum_cases\"].diff().div(dt_days)\n df[\"cases_change_per_day\"] = cases_change_per_day\n print(df)\n\n # increase resolution and forward-fill values. 
Could also use\n # `interpolate()` but that's too artificial, I think it's fair to see\n # the actual discrete jumps in data as of \"batch processing\".\n df_case_change = df[\"cases_change_per_day\"].resample(\"1H\").pad()\n\n print(type(df_case_change))\n # sys.exit()\n\n # Should be >= 7 to be meaningful.\n window_width_days = 5\n window = df_case_change.rolling(window=\"%sD\" % window_width_days)\n\n # Manually build rolling window mean.\n wdw_norm = window.sum() / (window_width_days * 24.0)\n\n # During the rolling window analysis the value derived from the current\n # window position is assigned to the right window boundary (i.e. to the\n # newest timestamp in the window). For presentation it is more convenient\n # and intuitive to have it assigned to the temporal center of the time\n # window. Invoking `rolling(..., center=True)` however yields\n # `NotImplementedError: center is not implemented for datetimelike and\n # offset based windows`. As a workaround, shift the data by half the window\n # size to 'the left': shift the timestamp index by a constant / offset.\n offset = pd.DateOffset(days=window_width_days / 2.0)\n wdw_norm.index = wdw_norm.index - offset\n print(wdw_norm)\n # sys.exit()\n\n # cut the last 2 days worth of data, at least for RKI this is just too\n # much affected by Meldeverzug\n d_end = NOW - timedelta(days=3)\n # t_end = f\"{d_end.strftime('%Y-%m-%d')} 23:59:59\"\n return wdw_norm[:f\"{d_end.strftime('%Y-%m-%d')}\"]\n\n df_mixed_data = pd.read_csv(\n \"data.csv\",\n index_col=[\"time_iso8601\"],\n parse_dates=[\"time_iso8601\"],\n date_parser=lambda col: pd.to_datetime(col, utc=True),\n )[START_DATE:]\n df_mixed_data.index.name = \"time\"\n df_mixed_case_rate_rw = _build_case_rate(df_mixed_data)[START_DATE:]\n\n df_rl = pd.read_csv(\n \"cases-rl-crowdsource-by-state.csv\",\n index_col=[\"time_iso8601\"],\n parse_dates=[\"time_iso8601\"],\n )[START_DATE:]\n df_rl.index.name = \"time\"\n df_rl_case_rate_rw = _build_case_rate(df_rl)[START_DATE:]\n\n df_rki = pd.read_csv(\n \"cases-rki-by-state.csv\",\n index_col=[\"time_iso8601\"],\n parse_dates=[\"time_iso8601\"],\n )[START_DATE:]\n df_rki.index.name = \"time\"\n df_rki_case_rate_rw = _build_case_rate(df_rki)[START_DATE:]\n\n df_jhu = jhu_csse_csv_to_dataframe(os.environ[\"JHU_TS_CSV_PATH\"], \"germany\")[\n START_DATE:\n ]\n df_jhu.index.name = \"time\"\n df_jhu_case_rate_rw = _build_case_rate(df_jhu)[START_DATE:]\n\n # Normalize for 'sum_cases' plots\n for _df in [df_rki, df_jhu, df_mixed_data, df_rl]:\n _df[\"sum_cases\"] = _df[\"sum_cases\"] / 10000\n\n plt.figure()\n\n ax = df_rki[\"sum_cases\"].plot(linestyle=\"solid\", marker=\"x\", color=\"red\",)\n df_rl[\"sum_cases\"].plot(linestyle=\"solid\", marker=\"x\", color=\"black\", ax=ax)\n df_mixed_data[\"sum_cases\"].plot(\n linestyle=\"dashdot\", marker=\"x\", color=\"black\", ax=ax\n )\n df_jhu[\"sum_cases\"].plot(linestyle=\"dashdot\", marker=\"x\", color=\"gray\", ax=ax)\n\n ax.legend(\n [\n \"RKI data, by Meldedatum\",\n \"Risklayer/Tagesspiegel crowdsource data, daily snapshots\",\n \"ZEIT ONLINE, daily snapshots\",\n \"JHU (GitHub CSSEGISandData/COVID-19)\",\n ],\n numpoints=4,\n handlelength=8,\n )\n\n ax.xaxis.set_major_locator(mdates.DayLocator(interval=2))\n\n plt.xlabel(\"Time\")\n plt.ylabel(\"cumulative case count, all Germany / 10^4\")\n # plt.title(\"COVID-19 case count, Germany, comparison of data sources\")\n # set_title('Override command rate (from both DC/OS repositories)')\n # set_subtitle('Arithmetic mean over rolling time window')\n # 
plt.tight_layout(rect=(0, 0, 1, 0.95))\n\n plt.tight_layout()\n fig_filepath_wo_ext = (\n f\"gae/static/data-sources-comparison-{NOW.strftime('%Y-%m-%d')}\"\n )\n plt.savefig(fig_filepath_wo_ext + \".png\", dpi=150)\n plt.savefig(fig_filepath_wo_ext + \".pdf\")\n\n # -----------\n\n plt.figure(figsize=(16.0, 9.0))\n\n ax = df_rki[\"cases_change_per_day\"].plot(\n linestyle=\"None\", marker=\"x\", color=\"black\",\n )\n df_rki_case_rate_rw.plot(linestyle=\"solid\", marker=None, color=\"black\", ax=ax)\n df_jhu_case_rate_rw.plot(linestyle=\"dashdot\", marker=None, color=\"gray\", ax=ax)\n\n ax.legend(\n [\n 'raw RKI data, by date of report (\"Meldedatum\")',\n \"RKI rolling window mean (width: 5 days)\",\n \"JHU rolling window mean (width: 5 days)\",\n ],\n numpoints=4,\n handlelength=8,\n loc=\"upper left\",\n )\n plt.xlabel(\"\")\n plt.ylabel(\"COVID-19 cumulative case count, change per day (all Germany)\")\n plt.tight_layout()\n fig_filepath_wo_ext = f\"gae/static/case-rate-rw-{NOW.strftime('%Y-%m-%d')}\"\n plt.savefig(fig_filepath_wo_ext + \".png\", dpi=150)\n plt.savefig(fig_filepath_wo_ext + \".pdf\")\n # plt.show()\n plot_with_bokeh(df_rki, df_jhu, df_mixed_data, df_rl)\n\n\ndef _set_common_bokeh_fig_props(fig):\n fig.toolbar.active_drag = None\n fig.toolbar.active_scroll = None\n fig.toolbar.active_tap = None\n fig.outline_line_color = \"#333333\"\n fig.outline_line_width = 1\n fig.outline_line_alpha = 0.7\n\n fig.title.text_font_size = \"10px\"\n\n fig.legend.label_text_font_size = \"10px\"\n # fig.legend.label_text_font = \"'Open Sans Condensed', sans-serif\"\n fig.legend.spacing = 0\n fig.legend.margin = 3\n fig.legend.label_standoff = 5\n fig.legend.label_height = 0\n\n # import json\n # print(json.dumps(dir(fig.legend), indent=2))\n\n # fig.text_font_size = \"12pt\"\n fig.xaxis.ticker.desired_num_ticks = 21\n\n fig.xaxis.formatter = bokeh.models.DatetimeTickFormatter(days=[\"%b-%d\"])\n fig.xaxis.major_label_orientation = 3.1415 / 4 + 0.5\n\n # fig.xaxis.axis_label = \"Date\"\n fig.xaxis.axis_label_text_font_size = \"16px\"\n fig.xaxis.major_label_text_font_size = \"10px\"\n fig.xaxis.axis_label_text_font_style = \"normal\"\n\n fig.y_range.start = 0\n # fig.yaxis.axis_label = \"confirmed cases / 10000\"\n fig.yaxis.axis_label_text_font_size = \"10px\"\n fig.yaxis.axis_label_text_font_style = \"normal\"\n fig.yaxis.major_label_text_font_size = \"10px\"\n\n\ndef plot_with_bokeh(df_rki, df_jhu, df_mixed_data, df_rl):\n\n # html_file_path = 'bokeh-comp-plot.html'\n # bokeh.plotting.output_file(html_file_path)\n # bokeh.io.curdoc().theme = \"dark_minimal\"\n\n cname = \"sum_cases\"\n\n fig = bokeh.plotting.figure(\n # title=f\"Generated at {now.strftime('%Y-%m-%d %H:%M UTC')}\",\n title=\"Germany, cumulative cases / 10000\",\n x_axis_type=\"datetime\",\n toolbar_location=None,\n background_fill_color=\"#eeeeee\",\n height=450,\n )\n\n # Scatter and line seemingly need to be done separately.\n # RKI\n fig.line(\n \"time\",\n cname,\n line_color=\"red\",\n line_width=2,\n line_dash=\"solid\",\n legend_label=\"RKI data, by Meldedatum\",\n source=bokeh.models.ColumnDataSource(data=df_rki),\n )\n fig.scatter(\n \"time\",\n cname,\n marker=\"x\",\n line_color=\"red\",\n line_width=2,\n size=8,\n source=bokeh.models.ColumnDataSource(data=df_rki),\n )\n\n # JHU\n fig.line(\n \"time\",\n \"sum_cases\",\n line_color=\"black\",\n line_width=1,\n line_dash=\"solid\",\n legend_label=\"JHU (GitHub)\",\n source=bokeh.models.ColumnDataSource(data=df_jhu),\n )\n fig.scatter(\n \"time\",\n 
\"sum_cases\",\n marker=\"x\",\n line_color=\"black\",\n line_width=1,\n size=8,\n source=bokeh.models.ColumnDataSource(data=df_jhu),\n )\n\n # Risklayer\n fig.line(\n \"time\",\n \"sum_cases\",\n line_color=\"gray\",\n line_width=1,\n line_dash=\"dashdot\",\n legend_label=\"Risklayer / Tagesspiegel\",\n source=bokeh.models.ColumnDataSource(data=df_rl),\n )\n fig.scatter(\n \"time\",\n \"sum_cases\",\n marker=\"x\",\n line_color=\"gray\",\n line_width=1,\n size=8,\n source=bokeh.models.ColumnDataSource(data=df_rl),\n )\n\n fig.line(\n \"time\",\n \"sum_cases\",\n line_color=\"gray\",\n line_width=1,\n line_dash=\"solid\",\n legend_label=\"ZEIT ONLINE\",\n source=bokeh.models.ColumnDataSource(data=df_mixed_data),\n )\n fig.scatter(\n \"time\",\n \"sum_cases\",\n marker=\"x\",\n line_color=\"gray\",\n line_width=1,\n size=8,\n source=bokeh.models.ColumnDataSource(data=df_mixed_data),\n )\n\n # fig.line(\n # \"time\",\n # \"sum_cases\",\n # marker=\"x\",\n # size=8,\n # line_color=\"black\",\n # line_width=3,\n # ,\n # source=bokeh.models.ColumnDataSource(data=df_jhu),\n # )\n\n _set_common_bokeh_fig_props(fig)\n\n fig.legend.location = \"top_left\"\n\n templ_env = jinja2.Environment(loader=jinja2.FileSystemLoader(searchpath=\"./\"))\n template = templ_env.get_template(\"gae/static/index.html.template\")\n\n html = bokeh.embed.file_html(\n column(fig, fig, sizing_mode=\"stretch_both\"),\n template=template,\n resources=bokeh.resources.CDN,\n template_variables={\"today_string\": NOW.strftime(\"%Y-%m-%d\"),},\n )\n\n with open(\"gae/static/index.html\", \"wb\") as f:\n f.write(html.encode(\"utf-8\"))\n\n\ndef jhu_csse_csv_to_dataframe(data_file_path, location_name):\n \"\"\"\n data_file_path: expect an instance of `time_series_19-covid-Confirmed.csv`\n from https://github.com/CSSEGISandData/COVID-19/\n\n location_name: the lower-cased version of this must be a column in the\n processed data set.\n \"\"\"\n log.info(\"parse JHU data file\")\n df = pd.read_csv(data_file_path)\n\n log.info(\"process JHU data file\")\n # Merge location names into somewhat more managable identifiers.\n countries = [\n \"_\".join(c.lower().split()) if c != \"nan\" else \"\"\n for c in list(df[\"Country/Region\"].astype(\"str\"))\n ]\n provinces = [\n \"_\".join(p.lower().split()) if p != \"nan\" else \"\"\n for p in list(df[\"Province/State\"].astype(\"str\"))\n ]\n\n countries = [c.replace(\",\", \"\").replace(\".\", \"\") for c in countries]\n provinces = [p.replace(\",\", \"\").replace(\".\", \"\") for p in provinces]\n\n df[\"where\"] = [f\"{c}_{p}\" if p else c for c, p in zip(countries, provinces)]\n\n # Make each column represent a location, and each row represent a day\n # (date).\n\n df.drop([\"Lat\", \"Long\", \"Country/Region\", \"Province/State\"], axis=1, inplace=True)\n\n df = df.set_index(\"where\")\n df = df.transpose()\n\n # Parse date strings into pandas DateTime objects, set proper\n # DateTimeIndex.\n normalized_date_strings = [\n \"/\".join(t.zfill(2) for t in o.split(\"/\")) for o in list(df.index)\n ]\n df.index = normalized_date_strings\n df.index = pd.to_datetime(df.index, format=\"%m/%d/%y\")\n\n df.index.name = \"date\"\n # df.sort_index(inplace=True)\n\n # Only return series for specific location\n\n loc = location_name.lower()\n # rename column for consistency with other dfs\n df[\"sum_cases\"] = df[loc]\n return df[\"sum_cases\"].to_frame()\n\n\ndef matplotlib_config():\n plt.style.use(\"ggplot\")\n matplotlib.rcParams[\"figure.figsize\"] = [10.5, 7.0]\n 
matplotlib.rcParams[\"figure.dpi\"] = 100\n matplotlib.rcParams[\"savefig.dpi\"] = 150\n # mpl.rcParams['font.size'] = 12\n\n\nif __name__ == \"__main__\":\n matplotlib_config()\n main()\n" ]
[ [ "matplotlib.pyplot.tight_layout", "pandas.read_csv", "pandas.to_datetime", "pandas.DateOffset", "pandas.Series", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "matplotlib.dates.DayLocator", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.style.use", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
whathisface/Vision-FPGA-SoM
[ "eab5a291983c95bcee844b187addde2d42ffd896" ]
[ "SoM/RTL/vision/sw/read_himax.py" ]
[ "from __future__ import division\nimport serial\nimport time\nimport threading\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\n\n\nser = serial.Serial(port='COM17', baudrate=230400, timeout=1.0)\nser.set_buffer_size(rx_size = 25000, tx_size = 12800)\n\nprint(ser.name)\nser.flushInput()\nser.flushOutput()\n\n# \"Warm up\" the AEC\n#ser.write(b'x')\n#ser.write(b'x')\n#time.sleep(1)\n\n#plt.ion()\n\n# Throw away bad pixels\nwhile(True):\n ser.flushInput()\n ser.write(b'x')\n resp = ser.read(50000) # Max length to be read is a frame\n image = np.asarray(list(resp))\n\n cols = 162\n rows = int(np.floor(len(image)/cols))\n print(rows)\n image = image[0:rows*cols]\n image = image.reshape(rows, cols)\n\n plt.imshow(image, cmap='gray', vmin=0, vmax=255)\n plt.show()\n time.sleep(0.1)" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
debojyoti007/OpenCV
[ "6810e2242ce7c9ea5e492d4d951c45cc99782785" ]
[ "Chapter09/create_features.py" ]
[ "import os \nimport sys \nimport argparse \nimport _pickle as pickle \nimport json \n \nimport cv2 \nimport numpy as np \nfrom sklearn.cluster import KMeans \n \nclass DenseDetector(): \n def __init__(self, step_size=20, feature_scale=20, img_bound=20): \n # Create a dense feature detector \n self.initXyStep = step_size\n self.initFeatureScale = feature_scale\n self.initImgBound = img_bound\n \n def detect(self, img):\n keypoints = []\n rows, cols = img.shape[:2]\n for x in range(self.initImgBound, rows, self.initFeatureScale):\n for y in range(self.initImgBound, cols, self.initFeatureScale):\n keypoints.append(cv2.KeyPoint(float(x), float(y), self.initXyStep))\n return keypoints \n\nclass SIFTExtractor():\n def __init__(self):\n self.extractor = cv2.xfeatures2d.SIFT_create()\n\n def compute(self, image, kps): \n if image is None: \n print(\"Not a valid image\")\n raise TypeError \n \n gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) \n kps, des = self.extractor.detectAndCompute(gray_image, None) \n return kps, des \n\n# Vector quantization \nclass Quantizer(object): \n def __init__(self, num_clusters=32): \n self.num_dims = 128 \n self.extractor = SIFTExtractor() \n self.num_clusters = num_clusters \n self.num_retries = 10 \n \n def quantize(self, datapoints): \n # Create KMeans object \n kmeans = KMeans(self.num_clusters, \n n_init=max(self.num_retries, 1), \n max_iter=10, tol=1.0) \n \n # Run KMeans on the datapoints \n res = kmeans.fit(datapoints) \n \n # Extract the centroids of those clusters \n centroids = res.cluster_centers_\n \n return kmeans, centroids \n \n def normalize(self, input_data): \n sum_input = np.sum(input_data) \n if sum_input > 0: \n return input_data / sum_input \n else: \n return input_data \n \n # Extract feature vector from the image \n def get_feature_vector(self, img, kmeans, centroids): \n kps = DenseDetector().detect(img) \n kps, fvs = self.extractor.compute(img, kps) \n labels = kmeans.predict(fvs) \n fv = np.zeros(self.num_clusters) \n \n for i, item in enumerate(fvs): \n fv[labels[i]] += 1 \n \n fv_image = np.reshape(fv, ((1, fv.shape[0]))) \n return self.normalize(fv_image)\n\n\nclass FeatureExtractor(object): \n def extract_image_features(self, img): \n # Dense feature detector \n kps = DenseDetector().detect(img) \n \n # SIFT feature extractor \n kps, fvs = SIFTExtractor().compute(img, kps) \n \n return fvs \n \n # Extract the centroids from the feature points \n def get_centroids(self, input_map, num_samples_to_fit=10): \n kps_all = [] \n \n count = 0 \n cur_label = '' \n for item in input_map: \n if count >= num_samples_to_fit: \n if cur_label != item['label']: \n count = 0 \n else: \n continue \n \n count += 1 \n \n if count == num_samples_to_fit: \n print(\"Built centroids for\", item['label'])\n \n cur_label = item['label'] \n img = cv2.imread(item['image']) \n img = resize_to_size(img, 150) \n \n num_dims = 128 \n fvs = self.extract_image_features(img) \n kps_all.extend(fvs) \n \n kmeans, centroids = Quantizer().quantize(kps_all) \n return kmeans, centroids \n \n def get_feature_vector(self, img, kmeans, centroids): \n return Quantizer().get_feature_vector(img, kmeans, centroids) \n \n \ndef build_arg_parser(): \n parser = argparse.ArgumentParser(description='Creates features for given images')\n parser.add_argument(\"--samples\", dest=\"cls\", nargs=\"+\", action=\"append\", required=True,\\\n help=\"Folders containing the training images.\\nThe first element needs to be the class label.\") \n parser.add_argument(\"--codebook-file\", 
dest='codebook_file', required=True, \n help=\"Base file name to store the codebook\") \n parser.add_argument(\"--feature-map-file\", dest='feature_map_file', required=True,\\\n help=\"Base file name to store the feature map\") \n \n return parser \n \n# Loading the images from the input folder \ndef load_input_map(label, input_folder): \n combined_data = [] \n \n if not os.path.isdir(input_folder): \n raise IOError(\"The folder \" + input_folder + \" doesn't exist\") \n \n # Parse the input folder and assign the labels \n for root, dirs, files in os.walk(input_folder): \n for filename in (x for x in files if x.endswith('.jpg')): \n combined_data.append({'label': label, 'image': \n os.path.join(root, filename)}) \n \n return combined_data \n \ndef extract_feature_map(input_map, kmeans, centroids): \n feature_map = [] \n \n for item in input_map: \n temp_dict = {} \n temp_dict['label'] = item['label'] \n \n print(\"Extracting features for\", item['image'])\n img = cv2.imread(item['image']) \n img = resize_to_size(img, 150) \n \n temp_dict['feature_vector'] = FeatureExtractor().get_feature_vector(img, kmeans, centroids) \n \n if temp_dict['feature_vector'] is not None: \n feature_map.append(temp_dict) \n \n return feature_map \n \n# Resize the shorter dimension to 'new_size' \n# while maintaining the aspect ratio \ndef resize_to_size(input_image, new_size=150): \n h, w = input_image.shape[0], input_image.shape[1] \n ds_factor = new_size / float(h) \n \n if w < h: \n ds_factor = new_size / float(w) \n \n new_size = (int(w * ds_factor), int(h * ds_factor)) \n return cv2.resize(input_image, new_size) \n \nif __name__=='__main__': \n args = build_arg_parser().parse_args() \n \n input_map = [] \n for cls in args.cls:\n assert len(cls) >= 2, \"Format for classes is `<label> file`\" \n label = cls[0] \n input_map += load_input_map(label, cls[1]) \n \n # Building the codebook \n print(\"===== Building codebook =====\")\n kmeans, centroids = FeatureExtractor().get_centroids(input_map) \n if args.codebook_file: \n with open(args.codebook_file, 'wb') as f: \n print('kmeans', kmeans)\n print('centroids', centroids)\n pickle.dump((kmeans, centroids), f) \n \n # Input data and labels \n print(\"===== Building feature map =====\")\n feature_map = extract_feature_map(input_map, kmeans, \n centroids) \n if args.feature_map_file: \n with open(args.feature_map_file, 'wb') as f: \n pickle.dump(feature_map, f)" ]
[ [ "numpy.reshape", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
htem/cb2_project_analysis
[ "a677cbadc7e3bf0074975a94ed1d06b4801899c0", "a677cbadc7e3bf0074975a94ed1d06b4801899c0", "a677cbadc7e3bf0074975a94ed1d06b4801899c0", "a677cbadc7e3bf0074975a94ed1d06b4801899c0", "a677cbadc7e3bf0074975a94ed1d06b4801899c0" ]
[ "analysis/dimensionalty_sim/analysis2.py", "analysis/gen_db/mf_grc/gen_mf_locs_210518.py", "analysis/dimensionalty_sim/run_tests_210617.py", "analysis/dimensionalty_sim/sim_lite2.py", "analysis/dimensionalty_sim/neurons.py" ]
[ "# from bitarray import bitarray\n# import random\nimport math\nimport statistics\n# import copy\nimport numpy as np\n# import logging\nimport collections\nfrom numpy import linalg as LA\n\ndef get_covariance_matrix(data):\n arr = np.array(data)\n return np.cov(arr, bias=False)\n # return np.cov(arr, bias=True)\n # covMatrix = np.cov(data, bias=True)\n\ndef get_eigenvalues(arr):\n w, _ = LA.eig(arr)\n return np.real(w)\n\ndef get_dim_from_eigenvalues(evs):\n square_of_sum = 0\n sum_of_square = 0\n for ev in evs:\n square_of_sum += ev\n sum_of_square += ev*ev\n square_of_sum = square_of_sum*square_of_sum\n return square_of_sum/sum_of_square\n\ndef get_population_correlation_from_eigenvalues(evs):\n n = len(evs)\n sqrt_evs = [math.sqrt(abs(e)) for e in evs]\n max_ev = max(sqrt_evs)\n sum_of_root = 0\n for ev in sqrt_evs:\n # print(ev)\n sum_of_root += ev\n max_ev /= sum_of_root\n max_ev -= 1/n\n max_ev *= n/(n-1)\n return max_ev\n\ndef get_dim_from_acts(acts, ret_population_correlation=False):\n cov = get_covariance_matrix(acts)\n ev = get_eigenvalues(cov)\n dim = get_dim_from_eigenvalues(ev)\n if ret_population_correlation:\n pop_corr = get_population_correlation_from_eigenvalues(ev)\n return dim, pop_corr\n return dim\n\n# def get_average_hamming_distance(acts, ref_output):\n# dist_sum = 0\n# ref_output = [neuron_out[0] for neuron_out in ref_output]\n# num_samples = len(acts[0])\n# pattern_len = len(ref_output)\n# for i in range(num_samples):\n# pattern = [neuron_out[i] for neuron_out in acts]\n# for j in range(pattern_len):\n# if ref_output[j] != pattern[j]:\n# dist_sum += 1\n# return dist_sum/num_samples\n\ndef get_average_hamming_distance2(acts, ref_output):\n dist_sum = 0\n assert len(acts[0]) == len(ref_output)\n num_samples = len(acts)\n pattern_len = len(ref_output)\n for grc_pattern in acts:\n for i, j in zip(grc_pattern, ref_output):\n if i != j:\n dist_sum += 1\n return dist_sum/num_samples\n\ndef get_hamming_distance_hist(acts, ref_output):\n hist = collections.defaultdict(int)\n for grc_pattern in acts:\n for n, ij in enumerate(zip(grc_pattern, ref_output)):\n i, j = ij\n if i != j:\n hist[n] += 1\n\n hist = [(k, v) for k, v in hist.items()]\n hist.sort(key=lambda x: x[1], reverse=True)\n hist = [x[1] for x in hist]\n # print(hist[0:40])\n # print(hist)\n # print(hist[100:140])\n return hist\n\n\ndef get_normalized_mean_squared_distance(norm_hamming, f):\n # return norm_hamming / (2*f*(1-f))\n return norm_hamming/(2*f)/(1-f)\n # return norm_hamming/(2*f)\n\ndef get_binary_similarity(a, b):\n # print(a)\n # print(b)\n same = 0\n total = 0\n for j in range(len(a)):\n if a[j]:\n total += 1\n if b[j]:\n same += 1\n if total > 0:\n similarity = same/total\n # print(similarity)\n return similarity\n else:\n return 1\n\ndef variation_of_information(X, Y):\n # https://gist.github.com/jwcarr/626cbc80e0006b526688\n # print(X)\n # print(Y)\n n = float(sum([len(x) for x in X]))\n sigma = 0.0\n for x in X:\n p = len(x) / n\n for y in Y:\n q = len(y) / n\n r = len(set(x) & set(y)) / n\n if r > 0.0:\n sigma += r * (math.log(r / p, 2) + math.log(r / q, 2))\n return abs(sigma)\n\ndef get_binary_voi(a, b):\n assignments = []\n for v in [a, b]:\n assignment = [[], []]\n for i, e in enumerate(v):\n if e:\n assignment[0].append(i)\n else:\n assignment[1].append(i)\n assignments.append(assignment)\n return variation_of_information(assignments[0], assignments[1])\n\n# def get_average_metric(acts, ref_output, metric):\n# # ref_output = [neuron_out[0] for neuron_out in ref_output]\n# num_samples = 
len(acts[0])\n# total = 0\n# for i in range(num_samples):\n# pattern = [neuron_out[i] for neuron_out in acts]\n# if metric == 'voi':\n# total += get_binary_voi(ref_output, pattern)\n# elif metric == 'binary_similarity':\n# total += get_binary_similarity(ref_output, pattern)\n# return total/num_samples\n\n\ndef get_optimal_weights_change(act0, act1,\n valence_dir='01',\n irrelevant_bits='0',\n seed=0):\n weights = []\n assert len(act0) == len(act1)\n for a0, a1 in zip(act0, act1):\n if a0 < a1:\n weights.append(1 if valence_dir == '01' else 0)\n elif a0 > a1:\n weights.append(1 if valence_dir == '10' else 0)\n else:\n if irrelevant_bits == '0':\n weights.append(0)\n elif irrelevant_bits == '1':\n weights.append(1)\n elif irrelevant_bits == 'random':\n weights.append(random.randint(0, 1))\n elif irrelevant_bits == 'plus':\n # set weight where there is potential for even more difference in the valence_dir\n if valence_dir == '01':\n weights.append(1 if a0 == 0 else 0)\n elif valence_dir == '10':\n weights.append(1 if a0 == 1 else 0)\n else: assert 0\n else: assert 0\n\n assert len(act0) == len(weights)\n return weights\n\n\ndef get_directional_distance(a, b, valence_dir='01'):\n weights = get_optimal_weights_change(a, b, irrelevant_bits='0', valence_dir=valence_dir)\n return sum(weights)\n\n\ndef get_output_deviation(acts):\n sums = []\n for act in acts:\n sums.append(sum(act))\n mean = statistics.mean(sums)\n return mean, statistics.stdev(sums, mean)\n\ndef get_average_metric2(acts, ref_output, metric):\n # ref_output = [neuron_out[0] for neuron_out in ref_output]\n num_samples = len(acts)\n total = 0\n for pattern in acts:\n if metric == 'voi':\n total += get_binary_voi(ref_output, pattern)\n elif metric == 'binary_similarity':\n total += get_binary_similarity(ref_output, pattern)\n elif metric == 'dir_distance_01':\n total += get_directional_distance(ref_output, pattern, valence_dir='01')\n elif metric == 'dir_distance_10':\n total += get_directional_distance(ref_output, pattern, valence_dir='10')\n return total/num_samples\n\n", "import collections\nfrom collections import defaultdict\nimport sys\nimport json\nimport random\nfrom jsmin import jsmin\nfrom io import StringIO\nimport numpy as np\nimport copy\nimport os\n\nscript_n = os.path.basename(__file__).split('.')[0]\nscript_n = script_n.split('_', 1)[1]\n\ndef to_ng(loc):\n return (int(loc[0]/4), int(loc[1]/4), int(loc[2]/40))\n\n'''Load data'''\nimport compress_pickle\nfname = 'gen_210518_setup01_v2_syndb_threshold_20_coalesced.gz'\ngrc_mfs_locs = compress_pickle.load(fname)\n\nmfs_locs = defaultdict(list)\nfor grc in grc_mfs_locs:\n for mf in grc_mfs_locs[grc]:\n for syn in grc_mfs_locs[grc][mf]:\n mfs_locs[mf].append(syn['syn_loc0'])\n\n# print(mfs_locs[mf]); asdf\nasdff = (172644, 113468, 89)\nasdfff = (137580, 101824, 369)\n\n# white list for big boutons\nwhitelist = set([\n (172644, 113468, 89),\n (163520, 98364, 83),\n (113008, 109372, 1154),\n (70424, 116512, 71),\n (186536, 100020, 130),\n (86780, 110184, 81),\n (177992, 108528, 1164),\n (127368, 101716, 1143),\n (155036, 103252, 71),\n (97884, 104152, 1160),\n (109476, 104808, 76),\n (82936, 122484, 76),\n (113532, 104660, 1150),\n (78904, 115540, 1158),\n (190684, 91276, 1015),\n (160500, 99828, 1165),\n (109020, 115476, 74),\n (93516, 101476, 858),\n (126728, 104988, 86),\n (173456, 106376, 71),\n (197436, 95688, 898),\n (122752, 110608, 85),\n (122192, 119344, 70),\n (122396, 118840, 83),\n (204868, 103452, 145),\n (94212, 107860, 1137),\n (92360, 105844, 1162),\n (84704, 
115452, 119),\n (54036, 105484, 394),\n (110624, 105800, 70),\n (170512, 99132, 107),\n (71200, 114308, 1123),\n (106588, 98692, 1160),\n (70164, 107908, 1015),\n (144772, 106812, 105),\n (asdff),\n (asdff),\n (asdff),\n])\n\n\nblacklist = set([\n (137580, 101824, 369),\n (127384, 115252, 746),\n (155268, 99276, 918),\n (182000, 91966, 716),\n (119828, 107400, 312),\n (171384, 94244, 573),\n (asdfff),\n (asdfff),\n (asdfff),\n (asdfff),\n (asdfff),\n (asdfff),\n])\n\n'''Cluster and extract locations of MF boutons'''\nfrom sklearn.cluster import DBSCAN\n\nmfs_bouton_locs = {}\n\n'''if a bouton location has less than this many synapses then it won't be considered in order to reduce false positives'''\n# bouton_synapse_threshold = 6 # safe for determining big bouton locations\nbouton_synapse_threshold = 2\nbouton_synapse_threshold = 3\nbouton_synapse_threshold = 4 # 4 is a bit iffy, since it has some semi big boutons\nbouton_synapse_threshold = 5\n# bouton_synapse_threshold = 6 # this threshold has quite a bit of FPs\n\nfor mf in mfs_locs:\n dbscan = DBSCAN(eps=8000, min_samples=2) # max dist set to 8um\n # dbscan = DBSCAN(eps=10000, min_samples=2) # max dist set to 8um\n dbscan.fit(mfs_locs[mf])\n loc_by_label = defaultdict(list)\n for loc, label in zip(mfs_locs[mf], dbscan.labels_):\n loc_by_label[label].append(loc)\n mf_bouton_locs = []\n for label in loc_by_label:\n if len(loc_by_label[label]) <= bouton_synapse_threshold:\n whitelisted = False\n for loc in loc_by_label[label]:\n if to_ng(loc) in whitelist:\n whitelisted = True\n if not whitelisted:\n if len(loc_by_label[label]) >= 2:\n print(f'Ignoring {mf} due to insufficient synapses')\n for loc in loc_by_label[label]:\n print(to_ng(loc))\n continue\n sum = [0, 0, 0]\n for loc in loc_by_label[label]:\n sum = [sum[0]+loc[0], sum[1]+loc[1], sum[2]+loc[2]]\n center = [\n int(sum[0]/len(loc_by_label[label])),\n int(sum[1]/len(loc_by_label[label])),\n int(sum[2]/len(loc_by_label[label])),\n ]\n mf_bouton_locs.append(center)\n mfs_bouton_locs[mf] = mf_bouton_locs\n # print(mf_bouton_locs)\n # for loc in mf_bouton_locs:\n # print([int(loc[0]/4), int(loc[1]/4), int(loc[2]/40)])\n\nmfs_bouton_count = defaultdict(list)\nfor mf in mfs_bouton_locs:\n mfs_bouton_count[len(mfs_bouton_locs[mf])].append(mf)\n\nfor count in sorted(mfs_bouton_count.keys()):\n print(f'{count}: {mfs_bouton_count[count]}')\n\n'''save mfs_bouton_locs'''\nimport compress_pickle\ncompress_pickle.dump((\n mfs_bouton_locs\n ), f\"{script_n}.gz\")\n\n\nasdf\n\nfor loc in mfs_bouton_locs['mf_431']:\n print([int(loc[0]/4), int(loc[1]/4), int(loc[2]/40)])\n\nfor loc in mfs_locs['mf_41']:\n print([int(loc[0]/4), int(loc[1]/4), int(loc[2]/40)])\n", "import random\nimport copy\nimport logging\nimport sys\nfrom collections import defaultdict\nimport itertools\nfrom sim_lite2 import SimulationLite\nimport compress_pickle\nimport functools\nimport random_patterns\nimport time\nimport numpy as np\nimport os\nfrom random_patterns import generate_patterns\n\nSILENT_MODE = False\nif \"SILENT_MODE\" in os.environ:\n SILENT_MODE = True\n\nNO_DIM_SIM = False\nnp.set_printoptions(linewidth=180, edgeitems=30)\n\nsys.path.insert(0, '/n/groups/htem/Segmentation/shared-nondev/cb2_segmentation/analysis_mf_grc')\n\n# from neurons2 import GranuleCell, MossyFiber, Simulation, generate_binary_patterns, add_noise_binary_patterns, add_noise_to_core_patterns, make_noisy_patterns_float\nfrom neurons2 import Simulation\nimport analysis2 as analysis\nfrom analysis import get_optimal_weights_change\n# 
logging.basicConfig(level=logging.DEBUG)\n\ndef make_sim(\n input_graph,\n sim=None,\n per_bouton=False,\n ):\n if sim is None:\n # removed = input_graph.remove_empty_mfs()\n # print(f'Removed {len(removed)} mfs')\n sim = Simulation(\n input_graph=input_graph, per_bouton=per_bouton,\n )\n return sim\n\ncalibrations = defaultdict(lambda: None)\ndefault_pattern_generator = random_patterns.generate_patterns\n\ndef calibrate_sim(\n sim,\n activation_level,\n pattern_generator=None,\n seed=1,\n ):\n global calibrations\n if calibrations[activation_level] is None:\n n_pattern = 512\n # n_pattern = 4096\n if pattern_generator is None:\n pattern_generator = default_pattern_generator\n patterns = pattern_generator(sim.num_mfs, count=n_pattern)\n sim.set_failure_rate(0, seed=seed)\n sim.evaluate(patterns, no_random=True,\n calibrate_activation_level=activation_level)\n calibrations[activation_level] = sim.get_activation_levels()\n\n random.seed(seed)\n np.random.seed(seed)\n\n try:\n sim.set_activation_levels(calibrations[activation_level])\n except:\n # calibrated values are missing some keys\n # rerunning from scratch\n calibrations[activation_level] = None\n return calibrate_sim(sim, activation_level, pattern_generator, seed)\n print(f'New calibrations: {sim.get_activation_levels()}')\n\ndef analyze_dims(\n grc_acts,\n mf_acts,\n sim,\n activation_level,\n ref_output=None,\n print_output=True,\n ):\n\n mf_dim = 1\n mf_pop_corr = 1\n mf_act_lv = 1\n if not NO_DIM_SIM and mf_acts is not None:\n mf_dim, mf_pop_corr = analysis.get_dim_from_acts(mf_acts, ret_population_correlation=True)\n mf_act_lv = sum(mf_acts[0])/len(mf_acts[0])\n\n voi = analysis.get_average_metric2(grc_acts, ref_output, metric='voi')\n binary_similarity = analysis.get_average_metric2(grc_acts, ref_output, metric='binary_similarity')\n hamming_distance = analysis.get_average_hamming_distance2(grc_acts, ref_output)\n normalized_mse = analysis.get_normalized_mean_squared_distance(hamming_distance/len(grc_acts), f=activation_level)\n # dir_distance_01 = analysis.get_average_metric2(grc_acts, ref_output, metric='dir_distance_01')\n # dir_distance_10 = analysis.get_average_metric2(grc_acts, ref_output, metric='dir_distance_10')\n # print(mf_acts); asdf\n # print(ref_output)\n # print(grc_acts[0:20]); asdf\n # print(grc_acts[0:20])\n\n grc_dim = 1\n grc_pop_corr = 1\n if not NO_DIM_SIM:\n grc_dim, grc_pop_corr = analysis.get_dim_from_acts(grc_acts, ret_population_correlation=True)\n\n grc_mean, grc_stdev = analysis.get_output_deviation(grc_acts)\n grc_stdev_pct = grc_stdev / grc_mean\n\n # grc_act_lv = sum(grc_acts[0])/len(grc_acts[0])\n grc_act_lv = grc_mean/len(grc_acts[0])\n pct_grc = int(grc_dim*1000/sim.num_grcs)/10\n pct_mfs = int(grc_dim*1000/sim.num_mfs)/10\n pct_mf_dim = int(grc_dim*1000/mf_dim)/10\n if print_output:\n # print(f'voi: {voi}')\n # print(f'binary_similarity: {binary_similarity}')\n print(f'hamming_distance: {hamming_distance}')\n # print(f'normalized_mse: {normalized_mse}')\n print(f'grc_act_lv: {grc_act_lv}')\n if not NO_DIM_SIM:\n print(f'mf_act_lv: {mf_act_lv}')\n print(f'grc_pop_corr: {grc_pop_corr} ({1/grc_pop_corr})')\n print(f'mf_pop_corr: {mf_pop_corr} ({1/mf_pop_corr})')\n print(f'Dim MFs: {mf_dim}')\n print(f'Dim GRCs: {grc_dim}')\n print(f'grc_mean: {grc_mean}')\n print(f'grc_stdev: {grc_stdev}')\n print(f'grc_stdev_pct: {grc_stdev_pct}')\n # print(f'dir_distance_01: {dir_distance_01}')\n # print(f'dir_distance_10: {dir_distance_10}')\n # print(f' = {pct_mfs}% of MFs')\n # print(f' = {pct_mf_dim}% of MF 
dim')\n # print(f' = {pct_grc}% of GrCs')\n print()\n res = {}\n res['voi'] = voi\n res['binary_similarity'] = binary_similarity\n res['hamming_distance'] = hamming_distance\n res['normalized_mse'] = normalized_mse\n res['grc_pop_corr'] = grc_pop_corr\n res['mf_dim'] = mf_dim\n res['mf_pop_corr'] = mf_pop_corr\n res['grc_dim'] = grc_dim\n res['pct_grc'] = pct_grc\n res['pct_mfs'] = pct_mfs\n res['pct_mf_dim'] = pct_mf_dim\n res['num_grcs'] = sim.num_grcs\n res['num_mfs'] = sim.num_mfs \n res['grc_mean'] = grc_mean\n res['grc_stdev'] = grc_stdev\n res['grc_stdev_pct'] = grc_stdev_pct\n # res['dir_distance_01'] = dir_distance_01\n return res\n\ndef add_synapse_fail_rate(\n fail_rate,\n act,\n acts=None,\n ):\n if fail_rate is None or fail_rate == 1.0:\n return act, acts\n assert fail_rate >= 0 and fail_rate <= 1\n pattern_len = len(act)\n fail_mask = np.ones(pattern_len, dtype=np.bool_)\n fail_mask[0:int((1-fail_rate)*pattern_len+.5)] = 0\n np.random.shuffle(fail_mask)\n act &= fail_mask\n for i, o in enumerate(acts):\n np.random.shuffle(fail_mask)\n acts[i] = o & fail_mask\n return act, acts\n\ndef test_across_grc_pcts(\n sim,\n activation_level,\n grc_pcts,\n # noise_probs,\n # failure_rates,\n pattern_generator=None,\n noise_generator=None,\n seed=0,\n test_len=512,\n print_output=True,\n synapse_fail_rate=None,\n grc_scale=1,\n ):\n noise = 1.0\n if pattern_generator is None:\n pattern_generator = default_pattern_generator\n patterns = pattern_generator(sim.num_mfs, count=1, seed=seed)\n test_patterns = [patterns[0]]\n calibrate_sim(sim, activation_level,\n pattern_generator=pattern_generator,\n )\n redundant_patterns = noise_generator(test_patterns, prob=noise, n=test_len, seed=seed)\n ref_pattern = patterns[0]\n sim.evaluate([ref_pattern], no_random=True)\n ref_output = sim.get_grc_activities()[0]\n ref_output = copy.deepcopy(ref_output)\n sim.evaluate(redundant_patterns, no_random=True)\n # mf_acts = sim.get_mfs_activities()\n grc_acts = sim.get_grc_activities()\n all_res = {}\n for grc_pct in grc_pcts:\n print(f'grc_pct={grc_pct}')\n pattern_len = len(ref_output)\n grc_mask = np.zeros(pattern_len, dtype=np.bool_)\n grc_mask[0:int(grc_pct*pattern_len*grc_scale+.5)] = 1\n np.random.shuffle(grc_mask)\n grc_acts_sub = grc_acts[:, grc_mask]\n ref_output_sub = ref_output[grc_mask]\n add_synapse_fail_rate(synapse_fail_rate, act=ref_output_sub, acts=grc_acts_sub)\n all_res[grc_pct] = analyze_dims(\n ref_output=ref_output_sub,\n grc_acts=grc_acts_sub,\n mf_acts=None,\n sim=sim,\n activation_level=activation_level,\n print_output=print_output,\n )\n return all_res\n\ndef test_across_synapse_fail_rates(\n sim,\n activation_level,\n synapse_fail_rates,\n # noise_probs,\n # failure_rates,\n pattern_generator=None,\n noise_generator=None,\n seed=0,\n test_len=512,\n print_output=True,\n # synapse_fail_rate=None,\n grc_scale=1,\n ):\n noise = 1.0\n if pattern_generator is None:\n pattern_generator = default_pattern_generator\n patterns = pattern_generator(sim.num_mfs, count=1, seed=seed)\n test_patterns = [patterns[0]]\n calibrate_sim(sim, activation_level,\n pattern_generator=pattern_generator,\n )\n redundant_patterns = noise_generator(test_patterns, prob=noise, n=test_len, seed=seed)\n ref_pattern = patterns[0]\n sim.evaluate([ref_pattern], no_random=True)\n ref_output = sim.get_grc_activities()[0]\n ref_output = copy.deepcopy(ref_output)\n sim.evaluate(redundant_patterns, no_random=True)\n # mf_acts = sim.get_mfs_activities()\n grc_acts = sim.get_grc_activities()\n all_res = {}\n for 
synapse_fail_rate in synapse_fail_rates:\n print(f'synapse_fail_rate={synapse_fail_rate}')\n pattern_len = len(ref_output)\n grc_mask = np.zeros(pattern_len, dtype=np.bool_)\n grc_mask[0:int(pattern_len*grc_scale+.5)] = 1\n np.random.shuffle(grc_mask)\n grc_acts_sub = grc_acts[:, grc_mask]\n ref_output_sub = ref_output[grc_mask]\n if synapse_fail_rate is not None:\n if synapse_fail_rate != 1.0:\n assert synapse_fail_rate >= 0 and synapse_fail_rate <= 1\n pattern_len = len(ref_output_sub)\n fail_mask = np.ones(pattern_len, dtype=np.bool_)\n fail_mask[0:int(synapse_fail_rate*pattern_len+.5)] = 0\n np.random.shuffle(fail_mask)\n ref_output_sub &= fail_mask\n for o in grc_acts_sub:\n np.random.shuffle(fail_mask)\n o &= fail_mask\n all_res[synapse_fail_rate] = analyze_dims(\n ref_output=ref_output_sub,\n grc_acts=grc_acts_sub,\n mf_acts=None,\n sim=sim,\n activation_level=activation_level,\n print_output=print_output,\n )\n return all_res\n\n\n\n# def get_optimal_weights_same(act0, act1,\n# valence_dir='0',\n# seed=0):\n# weights = []\n# assert len(act0) == len(act1)\n# weight_len = len(act0)\n# num_zeroes = int(weight_len/2)\n# num_ones = num_zeroes\n# count_zeroes = 0\n# count_ones = 0\n# for a0, a1 in zip(act0, act1):\n# if a0 == a1:\n# if valence_dir == '0':\n# if a0 == 1:\n# weights.append(0)\n# count_zeroes += 1\n# else:\n# weights.append(1)\n# count_ones += 1\n# elif valence_dir == '1':\n# if a0 == 1:\n# weights.append(1)\n# count_ones += 1\n# else:\n# weights.append(0)\n# count_zeroes += 1\n# else:\n# assert False\n# else:\n# weights.append(2)\n# assert len(act0) == len(weights)\n# print(f'sum(act0): {sum(act0)}')\n# print(f'sum(act1): {sum(act1)}')\n# print(f'sum(act0&act1): {sum(act0&act1)}')\n# print(f'count_ones: {count_ones}')\n# print(f'count_zeroes: {count_zeroes}')\n# print(f'len(weights): {len(weights)}')\n# asdf\n# num_ones -= count_ones\n# num_zeroes -= count_zeroes\n# prob_ones = num_ones / (num_ones+num_zeroes)\n# assert prob_ones <= 1.0\n# for i, w in enumerate(weights):\n# if w == 2:\n# weights[i] = random.random() < prob_ones\n# return weights\n\ndef get_optimal_weights_same(act0, act1,\n valence_dir='0',\n seed=0):\n weights = []\n assert len(act0) == len(act1)\n weight_len = len(act0)\n\n pass0_count = 0\n pass1_idx = []\n for i, (a0, a1) in enumerate(zip(act0, act1)):\n if a0 == a1:\n if a0 == 1:\n weights.append(0 if valence_dir == '0' else 1)\n pass0_count += 1\n else:\n weights.append(2)\n pass1_idx.append(i)\n else:\n weights.append(3)\n assert len(act0) == len(weights)\n assert pass0_count <= int(weight_len/2) # if act_level >= .5, need another algorithm\n\n random.shuffle(pass1_idx)\n for i in range(pass0_count):\n idx = pass1_idx[i]\n assert weights[idx] == 2\n weights[idx] = 1 if valence_dir == '0' else 0\n\n # number of ones and zeros should be balanced now\n # randomly assign the rest of weights to either 0 or 1\n for i, w in enumerate(weights):\n if w == 2 or w == 3:\n weights[i] = 1 if random.random() < .5 else 0\n\n assert 2 not in weights\n assert 3 not in weights\n\n # print(f'pass0_count: {pass0_count}')\n # print(f'len(pass1_idx): {len(pass1_idx)}')\n # print(f'sum(act0): {sum(act0)}')\n # print(f'sum(act1): {sum(act1)}')\n # print(f'sum(act0&act1): {sum(act0&act1)}')\n # print(f'len(weights): {len(weights)}')\n # print(f'sum(weights): {sum(weights)}')\n # asdf\n return weights\n\n\ndef get_output_delta(act0, act1, weights):\n out0 = 0\n out1 = 0\n for a0, a1, w in zip(act0, act1, weights):\n if w:\n out0 += a0\n out1 += a1\n return out1 - out0\n\ndef 
get_output_with_weights(act0, weights):\n out0 = 0\n for a0, w in zip(act0, weights):\n if w:\n out0 += a0\n return out0\n\ndef get_sum_hist(grc_acts, weights, ref_sum0):\n hist_sum = defaultdict(int)\n hist_delta = defaultdict(int)\n for grc_act in grc_acts:\n out_sum = get_output_with_weights(grc_act, weights)\n hist_sum[out_sum] += 1\n d = out_sum - ref_sum0\n hist_delta[d] += 1\n return hist_sum, hist_delta\n\n\ndef test_consistency_across_variations(\n sim,\n activation_level,\n variation_sizes,\n make_weights_fn,\n noise_scaling=None,\n noise_level=None,\n # failure_rates,\n pattern_generator=None,\n variation_generator=None,\n noise_generator=None,\n seed=0,\n test_len=512,\n print_output=True,\n ):\n if pattern_generator is None:\n pattern_generator = default_pattern_generator\n\n assert noise_scaling is not None or noise_level is not None\n assert noise_scaling is None or noise_level is None\n\n patterns = pattern_generator(sim.num_mfs, count=1, seed=seed)\n test_pattern = patterns[0]\n calibrate_sim(sim, activation_level,\n pattern_generator=pattern_generator,\n )\n all_res = {}\n for variation_size in variation_sizes:\n print(f'variation_size={variation_size}')\n\n ref_pattern = patterns[0]\n\n redundant_patterns = variation_generator([test_pattern], prob=variation_size, n=2, seed=seed)\n redundant_pattern = redundant_patterns[1]\n\n noise_mask = (redundant_pattern[0]^test_pattern[0])\n noise_mask = 1-noise_mask\n # print(f'test_pattern : {test_pattern[0:60]}')\n # print(f'redundant_pattern: {redundant_pattern[0:60]}')\n # print(f'noise_mask : {np.array(noise_mask)[0:60]}')\n noise_prob = noise_level\n if noise_scaling:\n noise_prob = variation_size*noise_scaling\n\n noisy_patterns = noise_generator(\n [redundant_pattern], prob=noise_prob, n=test_len, seed=seed,\n noise_mask=noise_mask,\n )\n noisy_ref_patterns = noise_generator(\n [ref_pattern], prob=noise_prob, n=int(test_len/4), seed=seed,\n noise_mask=noise_mask,\n )\n\n random_patterns = noise_generator(\n [ref_pattern], prob=1, n=int(test_len/8), seed=seed,\n # noise_mask=noise_mask,\n )\n\n random_masked_patterns = noise_generator(\n [ref_pattern], prob=1, n=int(test_len/8), seed=seed,\n noise_mask=noise_mask,\n )\n\n sim.evaluate([ref_pattern], no_random=True)\n ref_output = sim.get_grc_activities()[0]\n ref_output = copy.deepcopy(ref_output)\n sim.evaluate([redundant_pattern], no_random=True)\n grc_acts = sim.get_grc_activities()\n ref_output1 = grc_acts[0]\n weights = make_weights_fn(ref_output, ref_output1, seed=seed)\n ref_sum0 = get_output_with_weights(ref_output, weights)\n ref_sum1 = get_output_with_weights(ref_output1, weights)\n ref_delta = ref_sum1 - ref_sum0\n # print(f'ref_output : {ref_output[0:60]}')\n # print(f'ref_output1: {ref_output1[0:60]}')\n # print(f'weights : {np.array(weights)[0:60]}')\n\n sim.evaluate(noisy_patterns, no_random=True)\n sum_hist, delta_hist = get_sum_hist(sim.get_grc_activities(), weights, ref_sum0)\n\n sim.evaluate(noisy_ref_patterns, no_random=True)\n noisy_ref_sum_hist, noisy_ref_delta_hist = get_sum_hist(sim.get_grc_activities(), weights, ref_sum0)\n\n sim.evaluate(random_patterns, no_random=True)\n random_sum_hist, _ = get_sum_hist(sim.get_grc_activities(), weights, ref_sum0)\n\n sim.evaluate(random_masked_patterns, no_random=True)\n random_masked_sum_hist, _ = get_sum_hist(sim.get_grc_activities(), weights, ref_sum0)\n\n\n if not SILENT_MODE:\n print(f'ref_sum0: {ref_sum0}')\n print(f'ref_sum1: {ref_sum1}')\n print(f'ref_delta: {ref_delta}')\n # for k in 
sorted(hist.keys()):\n # print(f'{k}: {hist[k]}')\n print('noisy_ref_delta_hist')\n for k in sorted(noisy_ref_delta_hist.keys()):\n print(f'{k}: {noisy_ref_delta_hist[k]}')\n print('delta_hist')\n for k in sorted(delta_hist.keys()):\n print(f'{k}: {delta_hist[k]}')\n print('random_sum_hist')\n for k in sorted(random_sum_hist.keys()):\n print(f'{k}: {random_sum_hist[k]}')\n print('random_masked_sum_hist')\n for k in sorted(random_masked_sum_hist.keys()):\n print(f'{k}: {random_masked_sum_hist[k]}')\n # print(hist_sum)\n res = {}\n res['ref_sum0'] = ref_sum0\n res['ref_sum1'] = ref_sum1\n res['ref_delta'] = ref_delta\n # res['hist_raw'] = hist_raw\n res['sum_hist'] = dict(sum_hist)\n res['noisy_ref_sum_hist'] = dict(noisy_ref_sum_hist)\n res['random_sum_hist'] = dict(random_sum_hist)\n res['random_masked_sum_hist'] = dict(random_masked_sum_hist)\n # res['hist'] = hist\n all_res[variation_size] = res\n return all_res\n\n\ndef test_across_noise(\n sim,\n activation_level,\n noise_probs,\n # failure_rates,\n pattern_generator=None,\n noise_generator=None,\n seed=0,\n test_len=512,\n print_output=True,\n grc_pct=None,\n synapse_fail_rate=None,\n grc_pct_learned=False,\n ):\n if pattern_generator is None:\n pattern_generator = default_pattern_generator\n patterns = pattern_generator(sim.num_mfs, count=1, seed=seed)\n test_patterns = [patterns[0]]\n calibrate_sim(sim, activation_level,\n pattern_generator=pattern_generator,\n )\n all_res = {}\n for noise in noise_probs:\n print(f'noise={noise}')\n redundant_patterns = noise_generator(test_patterns, prob=noise, n=test_len, seed=seed)\n ref_pattern = patterns[0]\n sim.evaluate([ref_pattern], no_random=True)\n ref_output = sim.get_grc_activities()[0]\n ref_output = copy.deepcopy(ref_output)\n sim.evaluate(redundant_patterns, no_random=True)\n mf_acts = sim.get_mfs_activities()\n grc_acts = sim.get_grc_activities()\n\n if grc_pct is not None:\n pattern_len = len(ref_output)\n grc_mask = np.zeros(pattern_len, dtype=np.bool_)\n keep_n = int(grc_pct*pattern_len+.5)\n if grc_pct_learned:\n grc_mask = get_learned_mask2(ref_output, grc_acts, grc_mask, grc_pct)\n else:\n # drop random grcs \n grc_mask[0:keep_n] = 1\n np.random.shuffle(grc_mask)\n grc_acts = grc_acts[:, grc_mask]\n ref_output = ref_output[grc_mask]\n\n # print(ref_output)\n # print(grc_acts)\n ref_output, grc_acts = add_synapse_fail_rate(synapse_fail_rate, act=ref_output, acts=grc_acts)\n # print(ref_output); asdf\n # print(grc_acts); asdf\n\n all_res[noise] = analyze_dims(\n ref_output=ref_output,\n grc_acts=grc_acts,\n mf_acts=mf_acts,\n sim=sim,\n activation_level=activation_level,\n print_output=print_output,\n )\n return all_res\n\ndef test_across_failure(\n input_graph,\n activation_level,\n # noise_probs,\n failure_rates,\n seed=0,\n test_len=512,\n # core_noise=False,\n print_output=True,\n # scaled_noise=False,\n signal_mask=None,\n sim=None,\n ):\n random.seed(seed)\n if sim is None:\n removed = input_graph.remove_empty_mfs()\n print(f'Removed {len(removed)} mfs')\n sim = Simulation(\n input_graph=input_graph,\n )\n n_pattern = 1024*4 # 309\n # n_pattern = 512 # 309\n patterns = sim.generate_patterns(count=n_pattern)\n # print(patterns); asdf\n sim.set_failure_rate(0, seed=seed)\n sim.evaluate(patterns, no_random=True,\n calibrate_activation_level=activation_level)\n test_patterns = [patterns[0]]\n all_res = {}\n for failure_rate in failure_rates:\n print(f'failure_rate={failure_rate}')\n redundant_patterns = make_noisy_patterns_float(test_patterns, prob=1.0, n=test_len, seed=seed, 
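\n            # with signal_mask set, inputs flagged by the mask keep their values; noise only hits unmasked positions (see add_input_noise_float)\n            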
signal_mask=signal_mask)\n # redundant_patterns = add_noise_to_core_patterns(test_patterns, prob=noise, n=2)\n # for p in redundant_patterns[0:2]:\n # print(p[0][0:20])\n # asdf\n # print(redundant_patterns)\n ref_pattern = patterns[0]\n sim.set_failure_rate(failure_rate, seed=seed)\n sim.evaluate([ref_pattern], no_random=True)\n ref_output = sim.get_grc_activities()[0]\n ref_output = copy.deepcopy(ref_output)\n\n sim.evaluate(redundant_patterns, no_random=True)\n mf_acts = sim.get_mfs_activities()\n # for act in mf_acts[0:2]:\n # print(act[0:18])\n mf_dim, mf_pop_corr = analysis.get_dim_from_acts(mf_acts, ret_population_correlation=True)\n grc_acts = sim.get_grc_activities()\n # for act in grc_acts[0:2]:\n # print(act[0:37])\n # print(f'ref_output:\\n{ref_output[0:37]}')\n # print(f'ref_pattern:\\n{ref_pattern[0][0:37]}')\n # print(f'redundant_patterns:\\n{redundant_patterns[0][0][0:37]}')\n # print(f'test_patterns:\\n{test_patterns[0][0][0:37]}')\n # asdf\n voi = analysis.get_average_metric2(grc_acts, ref_output, metric='voi')\n binary_similarity = analysis.get_average_metric2(grc_acts, ref_output, metric='binary_similarity')\n hamming_distance = analysis.get_average_hamming_distance2(grc_acts, ref_output)\n normalized_mse = analysis.get_normalized_mean_squared_distance(hamming_distance/len(grc_acts), f=activation_level)\n grc_dim, grc_pop_corr = analysis.get_dim_from_acts(grc_acts, ret_population_correlation=True)\n pct_grc = int(grc_dim*1000/sim.num_grcs)/10\n pct_mfs = int(grc_dim*1000/sim.num_mfs)/10\n pct_mf_dim = int(grc_dim*1000/mf_dim)/10\n if print_output:\n # print(f'voi: {voi}')\n # print(f'binary_similarity: {binary_similarity}')\n # print(f'hamming_distance: {hamming_distance}')\n # print(f'normalized_mse: {normalized_mse}')\n print(f'grc_pop_corr: {grc_pop_corr}')\n print(f'mf_pop_corr: {mf_pop_corr}')\n print(f'Dim MFs: {mf_dim}')\n print(f'Dim GRCs: {grc_dim}')\n print(f' = {pct_mfs}% of MFs')\n print(f' = {pct_mf_dim}% of MF dim')\n print(f' = {pct_grc}% of GrCs')\n res = {}\n res['voi'] = voi\n res['binary_similarity'] = binary_similarity\n res['hamming_distance'] = hamming_distance\n res['normalized_mse'] = normalized_mse\n res['grc_pop_corr'] = grc_pop_corr\n res['mf_dim'] = mf_dim\n res['mf_pop_corr'] = mf_pop_corr\n res['grc_dim'] = grc_dim\n res['pct_grc'] = pct_grc\n res['pct_mfs'] = pct_mfs\n res['pct_mf_dim'] = pct_mf_dim\n res['num_grcs'] = sim.num_grcs\n res['num_mfs'] = sim.num_mfs\n all_res[failure_rate] = res\n return all_res\n\n\ndef get_learned_mask(ref_output, grc_acts, grc_mask, grc_pct):\n pattern_len = len(ref_output)\n keep_n = int(grc_pct*pattern_len+.5)\n per_grc_sum = np.sum(grc_acts, axis=0)\n per_grc_f = per_grc_sum / len(grc_acts)\n grc_fs = [(i, f) for i, f in enumerate(per_grc_f)]\n # remove non-active grcs\n grc_fs = [(i, f) for i, f in enumerate(per_grc_f) if (f > 0.01 and f < 0.99)]\n # get the center keep_n grcs\n grc_fs.sort(key=lambda x: x[1])\n # print(grc_fs)\n # for k, v in grc_fs:\n # print(v)\n # asdf\n remove_n = max(0, len(grc_fs) - keep_n)\n grc_fs = grc_fs[int(remove_n/2):int(-remove_n/2)-1]\n # if keep_n >= len(grc_fs):\n # keep_start = 0\n # keep_end = len(grc_fs)\n # else:\n # keep_start = int(keep_n/2+.5)\n # keep_end = int(keep_n)\n for i, _ in grc_fs:\n grc_mask[i] = 1\n # for k, v in grc_fs:\n # print(v)\n # print(grc_mask); asdf\n # print(sum(grc_mask))\n return grc_mask\n\n\ndef get_learned_mask2(ref_output, grc_acts, grc_mask, grc_pct):\n\n pattern_len = len(ref_output)\n keep_n = int(grc_pct*pattern_len+.5)\n 
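# rank grcs by how often their activity flips vs. the reference output; the top keep_n most-changing grcs are kept in the mask\n    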
change_count = defaultdict(int)\n for act in grc_acts:\n for i, (a, b), in enumerate(zip(act, ref_output)):\n if a != b:\n change_count[i] += 1\n change_count = [(k, v) for k, v in change_count.items()]\n change_count.sort(key=lambda x: x[1], reverse=True)\n # for k, v in change_count:\n # print(v)\n for k, v in change_count[0:keep_n]:\n grc_mask[k] = 1\n return grc_mask\n", "import random\nimport math\nimport copy\nimport numpy as np\nimport logging\nimport collections\n\nimport pyximport\n# pyximport.install()\npyximport.install(setup_args={\n # \"script_args\":[\"--compiler=mingw32\"],\n \"include_dirs\":np.get_include()},\n # reload_support=True\n )\nimport cython_encode2 as cython_encode\n\n\ndef cython_normalize(input_pattern, normalize_f, binary_sum):\n assert input_pattern.data.c_contiguous\n assert input_pattern.dtype == np.uint8\n normalize = int(normalize_f*len(input_pattern))\n cython_encode.cython_normalize(\n np.ravel(input_pattern, order='A'),\n normalize,\n len(input_pattern),\n binary_sum,\n )\n return input_pattern\n\n\nclass SimulationLite():\n\n def __init__(self, sim):\n self.dendrite_counts = []\n self.dendrite_mf_map = []\n self.thresholds = []\n for grc in sim.grcs:\n self.dendrite_counts.append(len(grc.claws))\n self.thresholds.append(grc.act_lv_scale)\n for claw in grc.claws:\n assert claw <= 65535\n self.dendrite_mf_map.append(claw)\n\n self.dendrite_mf_map = np.array(self.dendrite_mf_map, dtype=np.uint16)\n self.dendrite_counts = np.array(self.dendrite_counts, dtype=np.uint8)\n self.thresholds = np.array(self.thresholds, dtype=np.float32)\n\n def encode(self, input_pattern, out_array=None, use_cython=True,\n normalize_f=None):\n n_grcs = len(self.dendrite_counts)\n if out_array is None:\n out_array = np.empty(n_grcs, dtype=np.uint8)\n\n if use_cython:\n assert input_pattern.data.c_contiguous\n assert out_array.data.c_contiguous\n assert self.dendrite_mf_map.data.c_contiguous\n assert self.dendrite_counts.data.c_contiguous\n assert self.thresholds.data.c_contiguous\n assert input_pattern.dtype == np.float32\n assert out_array.dtype == np.uint8\n assert self.dendrite_mf_map.dtype == np.uint16\n assert self.dendrite_counts.dtype == np.uint8\n assert self.thresholds.dtype == np.float32\n normalize = 0\n if normalize_f is not None:\n normalize = int(normalize_f*n_grcs)\n cython_encode.cython_encode(\n np.ravel(input_pattern, order='A'),\n np.ravel(self.dendrite_counts, order='A'),\n np.ravel(self.dendrite_mf_map, order='A'),\n np.ravel(self.thresholds, order='A'),\n n_grcs,\n np.ravel(out_array, order='A'),\n normalize\n )\n return out_array\n\n assert normalize_f is None\n\n dendrite_pos = 0\n for i, dendrite_count in enumerate(self.dendrite_counts):\n s = 0.0\n for j in range(dendrite_count):\n s += input_pattern[self.dendrite_mf_map[dendrite_pos]]\n dendrite_pos += 1\n if s >= self.thresholds[i]:\n out_array[i] = 1\n else:\n out_array[i] = 0\n return out_array\n\n", "import random\nimport math\nimport copy\nimport numpy as np\nimport logging\nimport collections\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass GranuleCell():\n def __init__(\n self,\n # num_mfs,\n # num_dendrite,\n claws,\n claw_weights=None,\n # act_threshold,\n # grc_act_on_failure_rate,\n # grc_act_off_failure_rate,\n # max_weight=255,\n # act_lv=0.01,\n ):\n self.activations = []\n self.inputs = []\n # self.act_threshold = act_threshold\n # self.max_weight = max_weight\n # self.output_weight = int(max_weight/2)\n # self.grc_act_on_failure_rate = grc_act_on_failure_rate\n # 
self.grc_act_off_failure_rate = grc_act_off_failure_rate\n # if act_threshold < 1:\n # act_threshold = act_threshold*num_dendrite\n # self.act_threshold = act_threshold\n self.claws = claws\n # while len(self.claws) < num_dendrite:\n # mf_id = random.randint(0, num_mfs-1)\n # if mf_id not in self.claws:\n # self.claws.append(mf_id)\n # self.claws.sort()\n # print(self.claws)\n self.claws.sort()\n if claw_weights:\n self.claw_weights = claw_weights\n assert False, \"Untested\"\n else:\n self.claw_weights = [1]*len(self.claws)\n self.activated = False\n self.act_lv_scale = 1\n self.broken = False\n # self.act_lv = act_lv\n\n def activate(\n self, pattern,\n # grc_act_off_failure_rate=None,\n ):\n if self.broken:\n self.activations.append(0)\n self.activated = False\n return False\n sum = 0.0\n for i, claw in enumerate(self.claws):\n sum += pattern[claw]\n self.inputs.append(sum)\n if sum >= self.act_lv_scale:\n activated = True\n self.activations.append(1)\n else:\n activated = False\n self.activations.append(0)\n self.activated = activated\n return activated\n\n def train(self, input_mfs, output):\n act = False\n act = self.activate(input_mfs, grc_act_off_failure_rate=0)\n if act:\n if output:\n self.output_weight = min(\n self.output_weight+1, self.max_weight)\n else:\n self.output_weight = max(\n self.output_weight-1, 0)\n def reset(self):\n self.inputs = []\n self.activations = []\n def calibrate_activation_level(self, act_lv):\n self.inputs.sort()\n idx = int((1-act_lv)*len(self.inputs))\n if self.inputs[idx] == 0:\n # print(self.inputs)\n print(self.claws)\n # scale = 1.0 / self.inputs[idx]\n # self.act_lv_scale = scale\n self.act_lv_scale = self.inputs[idx]\n\n\nclass MossyFiber():\n def __init__(self, mf_id):\n self.mf_id = mf_id\n self.activations = []\n pass\n def reset(self):\n self.activations = []\n def activate(self, pattern):\n self.activations.append(pattern[self.mf_id])\n\n\nclass Simulation():\n\n def __init__(\n self,\n input_graph,\n # num_grc=None,\n # num_mfs=None,\n # num_dendrite=None,\n # grc_act_threshold=None,\n # grc_act_on_failure_rate=0,\n # grc_act_off_failure_rate=0,\n # max_synapse_weight=255,\n # min_train_it=15000,\n min_eval_it=5000,\n # default_input_noise=0.05,\n # default_decoder_error_margin=0.10,\n # n_evaluate_sampling=1,\n # evaluate_sampling_majority=False,\n ):\n self.num_mfs = len(input_graph.mfs)\n self.num_grcs = len(input_graph.grcs)\n self.min_eval_it = min_eval_it\n self.init_mfs()\n self.init_grcs(input_graph)\n self.failure_rate = None\n\n def reset(self):\n for grc in self.grcs:\n grc.reset()\n for mf in self.mfs:\n mf.reset()\n # random.seed(0)\n\n def init_mfs(self):\n self.mfs = []\n for i in range(self.num_mfs):\n self.mfs.append(MossyFiber(mf_id=i))\n\n def init_grcs(self, input_graph):\n self.grcs = []\n mapping = {}\n counter = 0\n for mf_id, mf in input_graph.mfs.items():\n mapping[mf_id] = counter\n counter += 1\n for grc_id, grc in input_graph.grcs.items():\n claws = [mapping[mf_id] for mf_id, _ in grc.edges]\n self.grcs.append(\n GranuleCell(\n claws=claws,\n )\n )\n\n def set_failure_rate(self, failure_rate, seed):\n random.seed(seed)\n for grc in self.grcs:\n grc.broken = True if random.random() < failure_rate else False\n\n def generate_patterns(\n self,\n count,\n type='random',\n # independent_noise=0,\n ):\n patterns = []\n # outputs = []\n pattern_len = self.num_mfs\n\n for i in range(count):\n if type == 'random':\n b = [None]*pattern_len\n for k in range(pattern_len):\n b[k] = random.random()\n elif type == 
'gaussian':\n mu, sigma = 0.5, 0.2 # mean and standard deviation\n b = np.random.normal(mu, sigma, pattern_len)\n output = random.randint(0, 1)\n # outputs.append(output)\n patterns.append((b, output))\n return patterns\n\n def add_input_noise(cls, pattern, input_noise, scaled_noise=False):\n if input_noise > 0:\n pattern = copy.deepcopy(pattern)\n if scaled_noise:\n p0 = 1-input_noise\n for i in range(len(pattern)):\n r = random.random()\n pattern[i] = pattern[i]*p0 + r*input_noise\n else:\n for i in range(len(pattern)):\n if random.random() < input_noise:\n pattern[i] = random.random()\n return pattern\n\n def train(\n self,\n patterns,\n n_iteration=None,\n # input_noise=None,\n seed=0\n ):\n\n if n_iteration is None:\n n_iteration = len(patterns)*10\n # if n_iteration < self.min_train_it:\n # n_iteration = self.min_train_it\n # if input_noise is None:\n # input_noise = self.default_input_noise\n\n # stats\n activated_grcs = 0\n random.seed(seed)\n\n for i in range(n_iteration):\n # print(patterns[random.randint(0, len(patterns)-1)])\n ind = random.randint(0, len(patterns)-1)\n # print(ind)\n # print(patterns[ind])\n pattern, output = patterns[ind]\n pattern = self.add_input_noise(pattern, input_noise)\n for grc in self.grcs:\n grc.train(pattern, output)\n if grc.activated:\n activated_grcs += 1\n\n # if i % 1000 == 0:\n # print(f'{i}..')\n\n activated_grcs_level = activated_grcs / len(self.grcs) / n_iteration\n logger.debug(f'activated_grcs_level: {activated_grcs_level} ({activated_grcs / n_iteration} grcs out of {len(self.grcs)})')\n\n def encode(self, input_pattern, out_array=None):\n if out_array is None:\n out_array = np.empty(len(self.grcs), dtype=np.uint8)\n for i, grc in enumerate(self.grcs):\n if grc.activate(input_pattern):\n out_array[i] = 1\n else:\n out_array[i] = 0\n return out_array\n\n def evaluate(\n self,\n patterns,\n n_iteration=None,\n no_random=False,\n # input_noise=None,\n # decoder_error_margin=None,\n seed=0,\n calibrate_activation_level=False,\n # output_act_lv=False,\n ):\n if n_iteration is None:\n n_iteration = 10*len(patterns)\n n_iteration = max(self.min_eval_it, n_iteration)\n\n if no_random:\n n_iteration = len(patterns)\n self.reset()\n\n # for grc in self.grcs[0:20]:\n # print(f'len: {len(grc.claws)}, scale: {grc.act_lv_scale:.2f}')\n\n random.seed(seed)\n for i in range(n_iteration):\n if no_random:\n pattern, output = patterns[i]\n else:\n pattern, output = patterns[random.randint(0, len(patterns)-1)]\n self.set_mfs_pattern(pattern)\n for grc in self.grcs:\n act = grc.activate(pattern)\n if calibrate_activation_level is not False:\n self.calibrate_grc_activation_level(calibrate_activation_level)\n return\n\n\n def print_grc_weights(self, count=200):\n\n weights = []\n for i, grc in enumerate(self.grcs):\n weights.append(grc.output_weight)\n if i > count:\n break\n print(weights)\n\n def set_mfs_pattern(self, pattern):\n for mf in self.mfs:\n mf.activate(pattern)\n\n def get_mfs_activities(self):\n # ret = []\n # for mf in self.mfs:\n # ret.append(mf.activations)\n\n for mf in self.mfs:\n xlen = len(self.mfs)\n ylen = len(mf.activations)\n break\n ret = np.empty((ylen, xlen), dtype=np.float32)\n for i, mf in enumerate(self.mfs):\n for j, val in enumerate(mf.activations):\n ret[j][i] = val\n\n return ret\n\n def get_grc_activities(self):\n # ret = []\n # for grc in self.grcs:\n # ret.append(grc.activations)\n # return ret\n\n for mf in self.grcs:\n xlen = len(self.grcs)\n ylen = len(mf.activations)\n break\n ret = np.empty((ylen, xlen), 
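\n            # shape (num evaluated patterns, num grcs); filled below as ret[pattern_idx][grc_idx]\n            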
dtype=np.uint8)\n for i, mf in enumerate(self.grcs):\n for j, val in enumerate(mf.activations):\n ret[j][i] = val\n\n return ret\n\n\n def calibrate_grc_activation_level(self, act_lv=None):\n if act_lv is None:\n act_lv = self.act_lv\n for grc in self.grcs:\n grc.calibrate_activation_level(act_lv)\n\n def add_noise_patterns(\n self, patterns, prob, n, seed=None, scaled_noise=False):\n if seed is not None:\n random.seed(seed)\n out_arr = []\n for pattern_output in patterns:\n # print(pattern_output)\n pattern, output = pattern_output\n for i in range(n):\n new_pattern = self.add_input_noise(pattern, prob, scaled_noise)\n out_arr.append((new_pattern, output))\n return out_arr\n\n def print_grc_act_lv_scale(self):\n # scales = []\n # for grc in self.grcs:\n # scales.append(grc.act_lv_scale)\n # print(scales)\n print([grc.act_lv_scale for grc in self.grcs])\n\n\nfrom collections import defaultdict\nimport itertools\ndef count_redundancy(g):\n pos = 0\n grcs_claws = []\n mf_to_grcs = defaultdict(set)\n for grc_id, dendrite_count in enumerate(g.dendrite_counts):\n claws = []\n for j in range(dendrite_count):\n mf_id = g.dendrite_mf_map[pos]\n pos += 1\n claws.append(mf_id)\n mf_to_grcs[mf_id].add(grc_id)\n grcs_claws.append(set(claws))\n nshares = defaultdict(int)\n for mf_id, grcs in mf_to_grcs.items():\n for pair in itertools.combinations(grcs, 2):\n nshare = len(grcs_claws[pair[0]] & grcs_claws[pair[1]])\n nshares[nshare] += 1\n for n in sorted(nshares.keys()):\n print(f'{n}: {nshares[n]/len(g.dendrite_counts)}')\n\n# count_redundancy(sim_lite)\n\n\ndef generate_binary_patterns(pattern_len, count, f):\n patterns = []\n # np.random.seed(seed)\n # random.seed(seed)\n threshold = int(pattern_len*f+0.5)\n base = np.zeros(pattern_len, dtype=np.uint8)\n base[0:threshold] = 1\n for i in range(count):\n np.random.shuffle(base)\n b = base.copy()\n output = random.randint(0, 1)\n patterns.append((b, output))\n return patterns\n\n# def add_input_noise(pattern, input_noise):\n# if input_noise > 0:\n# pattern = copy.deepcopy(pattern)\n# for i in range(len(pattern)):\n# if random.random() < input_noise:\n# # pattern[i] = not pattern[i]\n# pattern[i] = random.random()\n# return pattern\n\n# def add_noise_binary_patterns(pattern, prob, f=None, n=1, seed=0):\n# if f is None:\n# f = pattern.sum() / len(pattern)\n# ones = []\n# zeros = []\n# for i, b in enumerate(pattern):\n# if b:\n# ones.append(i)\n# else:\n# zeros.append(i)\n# ones = np.array(ones, dtype=np.uint32)\n# zeros = np.array(zeros, dtype=np.uint32)\n# ret = []\n# num_flips = int(prob*f*len(pattern)+.5)\n# for i in range(n):\n# new_pat = pattern.copy()\n# np.random.shuffle(ones)\n# for j in range(num_flips):\n# new_pat[ones[j]] = 0\n# np.random.shuffle(zeros)\n# for j in range(num_flips):\n# new_pat[zeros[j]] = 1\n# ret.append(new_pat)\n# return ret\n\ndef add_noise_binary_patterns(pattern, prob, f=None, n=1, seed=0):\n if f is None:\n f = pattern.sum() / len(pattern)\n ret = []\n for i in range(n):\n noisy_pattern = copy.deepcopy(pattern)\n for i in range(len(noisy_pattern)):\n if random.random() < prob:\n r = random.random()\n if r < f:\n noisy_pattern[i] = 1\n else:\n noisy_pattern[i] = 0\n ret.append(noisy_pattern)\n return ret\n\ndef generate_random_pattern(pattern_len, type='random'):\n b = [None]*pattern_len\n for k in range(pattern_len):\n b[k] = random.random()\n return b\n\n\ndef make_noisy_patterns_float(\n patterns, prob, n, seed=None, scaled_noise=False, signal_mask=None):\n if signal_mask:\n assert not scaled_noise\n if seed is not 
None:\n random.seed(seed)\n out_arr = []\n for pattern_output in patterns:\n # print(pattern_output)\n pattern, output = pattern_output\n for i in range(n):\n new_pattern = add_input_noise_float(pattern, prob, scaled_noise, signal_mask)\n out_arr.append((new_pattern, output))\n return out_arr\n\ndef add_input_noise_float(\n pattern, input_noise, scaled_noise=False, signal_mask=None):\n if input_noise > 0:\n pattern = copy.deepcopy(pattern)\n if scaled_noise:\n p0 = 1-input_noise\n for i in range(len(pattern)):\n r = random.random()\n pattern[i] = pattern[i]*p0 + r*input_noise\n elif signal_mask:\n for i in range(len(pattern)):\n if not signal_mask[i]:\n if random.random() < input_noise:\n pattern[i] = random.random()\n else:\n for i in range(len(pattern)):\n if random.random() < input_noise:\n pattern[i] = random.random()\n return pattern\n\ndef add_noise_to_core_patterns(\n patterns, prob, n,\n seed=None):\n if seed:\n random.seed(seed)\n np.random.seed(seed)\n out_arr = []\n pattern_len = len(patterns[0][0])\n assert pattern_len <= 65535\n\n noise_mask_len = int(prob*pattern_len+0.5)\n noise_mask = np.zeros(pattern_len, dtype=np.uint8)\n noise_mask[0:noise_mask_len] = 1\n\n for pattern_output in patterns:\n pattern, output = pattern_output\n np.random.shuffle(noise_mask)\n for i in range(n):\n random_pat = copy.deepcopy(pattern)\n for j in range(pattern_len):\n if noise_mask[j]:\n random_pat[j] = random.random()\n out_arr.append((random_pat, output))\n return out_arr\n\n# def add_noise_to_core_patterns(\n# patterns, prob, n,\n# seed=0):\n# random.seed(seed)\n# np.random.seed(seed)\n# out_arr = []\n# pattern_len = len(patterns[0][0])\n# core_len = int(pattern_len*(1-prob))\n# random_core_indices = [k for k in range(pattern_len)]\n# assert pattern_len <= 65535\n# random_core_indices = np.array(random_core_indices, dtype=np.uint16)\n# for pattern_output in patterns:\n# pattern, output = pattern_output\n# np.random.shuffle(random_core_indices)\n# for i in range(n):\n# random_pat = generate_random_pattern(pattern_len)\n# for j in range(core_len):\n# random_pat[random_core_indices[j]] = pattern[j]\n# out_arr.append((random_pat, output))\n# return out_arr\n\n\n\n\n" ]
[ [ "numpy.linalg.eig", "numpy.cov", "numpy.array", "numpy.real" ], [ "sklearn.cluster.DBSCAN" ], [ "numpy.random.seed", "numpy.set_printoptions", "numpy.random.shuffle", "numpy.ones", "numpy.zeros", "numpy.sum" ], [ "numpy.ravel", "numpy.array", "numpy.get_include", "numpy.empty" ], [ "numpy.random.seed", "numpy.random.shuffle", "numpy.random.normal", "numpy.zeros", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
microfluidix/Griottes
[ "31881fcf2c247e0816e1484c3190923c73599674" ]
[ "griottes/analyse/cell_property_extraction.py" ]
[ "import numpy as np\nimport skimage.measure\nimport skimage\nimport pandas\nfrom scipy.spatial import Delaunay\nfrom scipy.spatial import Voronoi\nfrom sklearn.decomposition import PCA\nfrom tqdm import tqdm\n\n# IMPORTANT CONVENTIONS: Following standard practice,\n# all images hvae shapes Z, X, Y, C where C in the\n# fluo channel.\n\n\ndef get_nuclei_properties(image, mask_channel):\n\n \"\"\"\n Get properties of nuclei in image.\n\n Parameters\n ----------\n image : numpy.ndarray\n Image with nuclei masks.\n mask_channel : int\n Channel of the mask.\n\n Returns\n -------\n pandas.DataFrame\n \"\"\"\n\n if mask_channel is None:\n\n properties = pandas.DataFrame(\n skimage.measure.regionprops_table(\n image, properties=[\"centroid\", \"area\", \"label\"]\n )\n )\n\n else:\n\n properties = pandas.DataFrame(\n skimage.measure.regionprops_table(\n image[..., mask_channel], properties=[\"centroid\", \"area\", \"label\"]\n )\n )\n\n return properties\n\ndef get_shape_properties(properties, image, mask_channel, min_area, ndim):\n\n for ind in tqdm(properties.index, leave=False):\n\n if (properties.loc[ind, \"area\"] > min_area) & (ndim == 3):\n\n label = properties.loc[ind, \"label\"]\n loc_mask = (image[..., mask_channel] == label) * 1\n nonzero = np.nonzero(loc_mask)\n\n pca = PCA(n_components=3)\n Y = np.c_[nonzero[0], nonzero[1], nonzero[2]]\n\n pca.fit(Y)\n vec = pca.components_[0]\n var = pca.explained_variance_\n\n properties.loc[ind, \"vec_0\"] = vec[0]\n properties.loc[ind, \"vec_1\"] = vec[1]\n properties.loc[ind, \"vec_2\"] = vec[2]\n properties.loc[ind, \"theta\"] = np.arctan2(vec[1], vec[2])\n properties.loc[ind, \"psi\"] = np.arctan2(\n vec[0], np.sqrt(vec[1] ** 2 + vec[2] ** 2)\n )\n properties.loc[ind, \"eccentricity\"] = np.abs(var[0]) / np.sqrt(\n var[1] * var[2]\n )\n\n if (properties.loc[ind, \"area\"] > min_area) & (ndim == 2):\n\n loc_mask = (image[0, ..., mask_channel] == ind) * 1\n nonzero = np.nonzero(loc_mask)\n\n pca = PCA(n_components=2)\n Y = np.c_[nonzero[0], nonzero[1]]\n pca.fit(Y)\n vec = pca.components_[0]\n var = pca.explained_variance_\n\n properties.loc[ind, \"vec_0\"] = vec[0]\n properties.loc[ind, \"vec_1\"] = vec[1]\n properties.loc[ind, \"theta\"] = np.arctan2(vec[0], vec[1])\n properties.loc[ind, \"eccentricity\"] = np.abs(var[0]) / np.sqrt(var[1])\n\n return properties\n\n\ndef get_fluo_properties(image, fluo_channel, mask_channel=0):\n\n properties_fluo = pandas.DataFrame(\n skimage.measure.regionprops_table(\n image[..., mask_channel],\n intensity_image=image[..., fluo_channel],\n properties=[\"mean_intensity\", \"label\"],\n )\n )\n\n return properties_fluo\n\n\ndef basic_fluo_prop_analysis(properties, image, mask_channel):\n\n for i in range(0, image.shape[-1], 1):\n\n if i != mask_channel:\n\n properties_fluo = get_fluo_properties(\n image=image, fluo_channel=i, mask_channel=mask_channel\n )\n\n properties_fluo = properties_fluo.rename(\n columns={\"mean_intensity\": \"mean_intensity_\" + str(i)}\n )\n\n properties = properties.merge(properties_fluo, how=\"outer\", on=\"label\")\n\n return properties\n\n\n## SPHERE ##\n\n\ndef sphere_mean_intensity(intensity_image, position, radius, percentile):\n\n n_Z, n_X, n_Y = np.shape(intensity_image)\n Z, X, Y = np.ogrid[:n_Z, :n_X, :n_Y]\n\n z_nuc, x_nuc, y_nuc = position\n\n mask = (\n np.sqrt((Z - z_nuc) ** 2 + (X - x_nuc) ** 2 + (Y - y_nuc) ** 2) < radius\n ).astype(int)\n\n points_in_sphere = np.argwhere(mask)\n\n return np.mean(\n intensity_image[tuple(points_in_sphere[points_in_sphere].T)]\n ), 
np.percentile(\n intensity_image[tuple(points_in_sphere[points_in_sphere].T)], percentile\n )\n\n\ndef get_fluo_properties_sphere(\n properties, image, fluo_channel, radius, mask_channel, percentile\n):\n\n for ind in properties.index:\n\n position = (\n int(properties.loc[ind, \"z\"]),\n int(properties.loc[ind, \"x\"]),\n int(properties.loc[ind, \"y\"]),\n )\n\n mean, percentile = sphere_mean_intensity(\n intensity_image=image[..., fluo_channel],\n position=position,\n radius=radius,\n percentile=percentile,\n )\n\n properties.loc[ind, \"mean_intensity\"] = mean\n properties.loc[ind, \"percentile_intensity\"] = percentile\n\n return properties[[\"mean_intensity\", \"percentile_intensity\"]]\n\n\ndef sphere_fluo_property_analysis(properties, image, mask_channel, radius, percentile):\n\n for i in range(0, image.shape[-1], 1):\n\n if i != mask_channel:\n\n properties_fluo = get_fluo_properties_sphere(\n properties=properties,\n image=image,\n fluo_channel=i,\n mask_channel=mask_channel,\n radius=radius,\n percentile=percentile,\n )\n\n properties_fluo = properties_fluo.rename(\n columns={\"mean_intensity\": \"mean_intensity_\" + str(i)}\n )\n\n properties_fluo = properties_fluo.rename(\n columns={\"percentile_intensity\": \"percentile_intensity_\" + str(i)}\n )\n\n properties = properties.merge(properties_fluo, how=\"outer\", on=\"label\")\n\n del properties[\"mean_intensity\"]\n del properties[\"percentile_intensity\"]\n\n return properties\n\n\n### VORONOI ###\n\n\ndef in_hull(p, hull):\n\n \"\"\"\n Test if points in `p` are in `hull`\n\n `p` should be a `NxK` coordinates of `N` points in `K` dimensions\n `hull` is either a scipy.spatial.Delaunay object or the `MxK` array of the\n coordinates of `M` points in `K`dimensions for which Delaunay triangulation\n will be computed\n \"\"\"\n\n if not isinstance(hull, Delaunay):\n hull = Delaunay(hull)\n\n return hull.find_simplex(p) >= 0\n\n\ndef make_spherical_mask(image, point_coordinates, radius):\n\n n_Z, n_X, n_Y = np.shape(image)\n Z, X, Y = np.ogrid[:n_Z, :n_X, :n_Y]\n\n z_nuc, x_nuc, y_nuc = point_coordinates\n\n mask = np.sqrt((Z - z_nuc) ** 2 + (X - x_nuc) ** 2 + (Y - y_nuc) ** 2) < radius\n\n return mask.astype(int)\n\n\ndef make_voronoi_mask(properties, image, mask_channel, radius):\n\n intensity_image = image[..., mask_channel]\n\n label_matrix = np.zeros_like(intensity_image)\n vor = Voronoi(properties[[\"z\", \"x\", \"y\"]])\n\n print(\"Calculating voronoi\")\n\n for cell_label, point_number in tqdm(\n zip(properties.index, np.arange(len(properties)))\n ):\n\n region_label = properties.index[point_number]\n region_number = vor.point_region[point_number]\n voronoi_vertices = vor.regions[region_number]\n\n point_coordinates = vor.points[point_number]\n vertice_array = [\n vor.vertices[vertice_number] for vertice_number in voronoi_vertices\n ]\n\n spherical_mask = make_spherical_mask(\n image=intensity_image, point_coordinates=point_coordinates, radius=radius\n )\n\n points_in_sphere = np.argwhere(spherical_mask)\n\n sphere_points_in_vor = in_hull(points_in_sphere, vertice_array)\n\n # creating the label matrix\n label_matrix[tuple(points_in_sphere[sphere_points_in_vor].T)] = cell_label\n\n return label_matrix\n\n\ndef get_fluo_properties_voronoi(\n properties, image, fluo_channel, label_matrix, percentile\n):\n\n intensity_image = image[..., fluo_channel]\n\n for ind, cell_label in tqdm(zip(properties.index, properties.label)):\n\n # mask for quantification\n mask = np.zeros_like(label_matrix)\n mask[label_matrix == cell_label] = 
1\n\n points_in_intersection = np.argwhere(mask)\n\n try:\n properties.loc[ind, \"mean_intensity\"] = np.mean(\n intensity_image[tuple(points_in_intersection[points_in_intersection].T)]\n )\n properties.loc[ind, \"percentile_intensity\"] = np.percentile(\n intensity_image[\n tuple(points_in_intersection[points_in_intersection].T)\n ],\n percentile,\n )\n\n except:\n properties.loc[ind, \"mean_intensity\"] = np.nan\n properties.loc[ind, \"percentile_intensity\"] = np.nan\n\n return properties[\n [\"mean_intensity\", \"percentile_intensity\", \"label\"]\n ]\n\n\ndef voronoi_fluo_property_analysis(\n properties, image, mask_channel, radius, labeled_voronoi_tesselation, percentile\n):\n\n \"\"\"\n\n Calculate the voronoi mask, then use the mask to\n estimate the intensities inside the mask.\n\n \"\"\"\n\n label_matrix = make_voronoi_mask(properties, image, mask_channel, radius)\n\n for i in range(0, image.shape[-1], 1):\n\n if i != mask_channel:\n\n properties_fluo = get_fluo_properties_voronoi(\n properties=properties,\n image=image,\n fluo_channel=i,\n label_matrix=label_matrix,\n percentile=percentile,\n )\n\n properties_fluo = properties_fluo.rename(\n columns={\"mean_intensity\": \"mean_intensity_\" + str(i)}\n )\n\n properties_fluo = properties_fluo.rename(\n columns={\"percentile_intensity\": \"percentile_intensity_\" + str(i)}\n )\n\n properties = properties.merge(properties_fluo, how=\"outer\", on=\"label\")\n\n del properties[\"mean_intensity\"]\n\n if labeled_voronoi_tesselation:\n\n return properties, label_matrix\n\n return properties\n\n\n### ALL ###\n\n\ndef get_cell_properties(\n image,\n mask_channel=0,\n analyze_fluo_channels=False,\n fluo_channel_analysis_method=\"basic\",\n cell_geometry_properties=False,\n labeled_voronoi_tesselation=False,\n radius=5,\n min_area=50,\n percentile=95,\n ndim=3,\n):\n\n if image.ndim - ndim < 0:\n print(\n \"the input image has less dimensions than it should. Please check that 'ndim' is correct.\"\n )\n return False\n elif image.ndim - ndim > 1:\n print(\n \"the input image has more dimensions than it should. 
Please check that 'ndim' is correct.\"\n )\n return False\n elif image.ndim - ndim == 0:\n print(\n \"The input image has the same number of dimensions as 'ndim', it will be analyzed as a labeled image.\"\n )\n analyze_fluo_channels = False\n mask_channel = None\n\n if ndim == 2:\n image = image[np.newaxis, ...]\n\n properties = get_nuclei_properties(image=image, mask_channel=mask_channel)\n\n print(\"nuclei properties extracted\")\n\n properties = properties.rename(\n columns={\"centroid-0\": \"z\", \"centroid-1\": \"x\", \"centroid-2\": \"y\"}\n )\n\n properties = properties[properties.area > min_area]\n\n if cell_geometry_properties:\n\n print(\"Calculating geometrical properties\")\n\n properties = get_shape_properties(\n properties=properties,\n image=image,\n mask_channel=mask_channel,\n min_area=min_area,\n ndim=ndim,\n )\n\n print(\"Done geometrical properties\")\n\n if analyze_fluo_channels:\n\n if fluo_channel_analysis_method == \"basic\":\n\n properties = basic_fluo_prop_analysis(properties, image, mask_channel)\n\n properties = properties.dropna()\n properties.index = np.arange(len(properties))\n\n return properties\n\n if fluo_channel_analysis_method == \"local_sphere\":\n\n properties = sphere_fluo_property_analysis(\n properties, image, mask_channel, radius, percentile\n )\n\n properties = properties.dropna()\n properties.index = np.arange(len(properties))\n\n return properties\n\n if fluo_channel_analysis_method == \"local_voronoi\":\n\n # Need to create voronoi tesselation, then store the\n # vertixes and use them to mark the convex hull\n # corresponding to each cell nuclei. Once the region\n # obtained use this area as a label for regionprops.\n\n # ATTENTION: verify that the area used to calculate\n # the properties corresponds to the intersection of\n # the voronoi and a sphere of radius R.\n\n if labeled_voronoi_tesselation:\n\n properties, label_matrix = voronoi_fluo_property_analysis(\n properties,\n image,\n mask_channel,\n radius,\n labeled_voronoi_tesselation,\n percentile,\n )\n\n properties = properties.dropna()\n # properties.index = np.arange(len(properties))\n\n return properties, label_matrix\n\n else:\n\n properties = voronoi_fluo_property_analysis(\n properties,\n image,\n mask_channel,\n radius,\n labeled_voronoi_tesselation,\n percentile,\n )\n\n properties = properties.dropna()\n properties.index = np.arange(len(properties))\n\n return properties\n\n else:\n\n properties = properties.dropna()\n properties.index = np.arange(len(properties))\n\n return properties\n" ]
[ [ "scipy.spatial.Voronoi", "numpy.sqrt", "numpy.nonzero", "numpy.abs", "scipy.spatial.Delaunay", "numpy.argwhere", "numpy.arctan2", "numpy.shape", "numpy.zeros_like", "sklearn.decomposition.PCA" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
afranck64/keras-easy
[ "a27c0fefe8f9796dc22eca7aa3123548ac5a4646" ]
[ "keras_easy/models/tools/generators.py" ]
[ "import os\nimport numpy as np\nimport pandas as pd\nimport multiprocessing as mp\nfrom keras import backend as K\nfrom keras.preprocessing.image import (ImageDataGenerator as _ImageDataGenerator, \n DirectoryIterator as _DirectoryIterator,\n Iterator as _Iterator,\n load_img, img_to_array, array_to_img)\nfrom keras.preprocessing import image\n\n\ndef _apply_func(args):\n func = args[0]\n args = args[1]\n return func(*args)\n\nPOOL = None\n\nclass CSVDataIterator(_Iterator):\n def __init__(self, labels_file, image_data_generator,\n target_size=(256, 256), color_mode='rgb',\n classes=None, class_mode='categorical',\n batch_size=32, shuffle=True, seed=None,\n data_format=None,\n save_to_dir=None, save_prefix='', save_format='png',\n follow_links=False,\n subset=None,\n interpolation='nearest',\n row_processor=None):\n if row_processor is None:\n #row_processor = lambda row, idg, color_mode, target_size, interpolation, data_format: (row['id'], row['class'])\n raise ValueError('row_processor should be callable function')\n self.row_processor = row_processor\n self.samples = pd.read_csv(labels_file)\n super().__init__(len(self.samples), batch_size, shuffle, seed)\n self.image_data_generator = image_data_generator\n self.row_processor = row_processor\n\n self.target_size = tuple(target_size)\n if color_mode not in {'rgb', 'rgba', 'grayscale'}:\n raise ValueError('Invalid color mode:', color_mode,\n '; expected \"rgb\", \"rgba\", or \"grayscale\".')\n self.color_mode = color_mode\n self.data_format = data_format\n if self.color_mode == 'rgba':\n if self.data_format == 'channels_last':\n self.image_shape = self.target_size + (4,)\n else:\n self.image_shape = (4,) + self.target_size\n elif self.color_mode == 'rgb':\n if self.data_format == 'channels_last':\n self.image_shape = self.target_size + (3,)\n else:\n self.image_shape = (3,) + self.target_size\n else:\n if self.data_format == 'channels_last':\n self.image_shape = self.target_size + (1,)\n else:\n self.image_shape = (1,) + self.target_size\n self.classes = classes\n if class_mode not in {'categorical', 'binary', 'sparse',\n 'input', None}:\n raise ValueError('Invalid class_mode:', class_mode,\n '; expected one of \"categorical\", '\n '\"binary\", \"sparse\", \"input\"'\n ' or None.')\n self.class_mode = class_mode\n self.save_to_dir = save_to_dir\n self.save_prefix = save_prefix\n self.save_format = save_format\n self.interpolation = interpolation\n #super().__init__('', *args, **kwargs)\n \n\n def _get_batches_of_transformed_samples(self, index_array):\n global POOL\n batch_x = np.zeros(\n (len(index_array),) + self.image_shape,\n dtype=K.floatx())\n batch_y = [None] * len(index_array)\n \n args = [(self.row_processor, (self.samples.loc[i], self.image_data_generator, self.color_mode, self.target_size, self.interpolation, self.data_format, self.class_mode)) for i in index_array]\n # build batch of image data\n #args = [self.row_processor, (self.samples.loc[j], self.image_data_generator, self.color_mode, self.target_size, self.interpolation, self.data_format, self.class_mode)]\n # if POOL is None:\n # POOL = mp.Pool()\n # x_y = POOL.map(_apply_func, args)\n x_y = [_apply_func(arg) for arg in args] ##Consume less memory than multi-processing\n for i, j in enumerate(index_array):\n #x, y = self.row_processor(self.samples.loc[j], self.image_data_generator, self.color_mode, self.target_size, self.interpolation, self.data_format, self.class_mode)\n # params = self.image_data_generator.get_random_transform(x.shape)\n # x = 
self.image_data_generator.apply_transform(x, params)\n # x = self.image_data_generator.standardize(x)\n x, y = x_y[i]\n batch_x[i] = x\n batch_y[i] = y\n\n batch_y = np.asarray(batch_y, dtype=K.floatx())\n # optionally save augmented images to disk for debugging purposes\n if self.save_to_dir:\n for i, j in enumerate(index_array):\n img = array_to_img(batch_x[i], self.data_format, scale=True)\n fname = '{prefix}_{index}_{hash}.{format}'.format(\n prefix=self.save_prefix,\n index=j,\n hash=np.random.randint(1e7),\n format=self.save_format)\n img.save(os.path.join(self.save_to_dir, fname))\n # build batch of labels\n # if self.class_mode == 'input':\n # batch_y = batch_x.copy()\n # elif self.class_mode == 'sparse':\n # batch_y = np.asarray(raw_y)\n # elif self.class_mode == 'binary':\n # batch_y = np.asarray(raw_y, dtype=K.floatx())\n # elif self.class_mode == 'categorical':\n # batch_y = np.zeros(\n # (len(batch_x), self.num_classes),\n # dtype=K.floatx())\n # for i, label in enumerate(raw_y[index_array]):\n # batch_y[i, label] = 1.\n # else:\n # return batch_x\n return batch_x, batch_y\n\n\n def next(self):\n \"\"\"For python 2.x.\n\n # Returns\n The next batch.\n \"\"\"\n # Keeps under lock only the mechanism which advances\n # the indexing of each batch.\n with self.lock:\n index_array = next(self.index_generator)\n # The transformation of images is not under thread lock\n # so it can be done in parallel\n return self._get_batches_of_transformed_samples(index_array)\n\n\nclass DataGenerator(_ImageDataGenerator):\n def __init__(self, *args, row_processor=None, **kwargs):\n super().__init__(*args, **kwargs)\n self.row_processor = row_processor\n \n def flow_from_file(self, labels_file,\n target_size=(256,256), color_mode='rgb',\n classes=None, class_mode='categorical',\n batch_size=32, shuffle=True, seed=None,\n save_to_dir=None,\n save_prefix='',\n save_format='png',\n follow_links=False,\n subset=None,\n interpolation='nearest'):\n return CSVDataIterator(\n labels_file=labels_file, image_data_generator=self,\n target_size=target_size, color_mode=color_mode,\n classes=classes, class_mode=class_mode,\n data_format=self.data_format,\n batch_size=batch_size, shuffle=shuffle, seed=seed,\n save_to_dir=save_to_dir,\n save_prefix=save_prefix,\n save_format=save_format,\n follow_links=follow_links,\n subset=subset,\n interpolation=interpolation,\n row_processor=self.row_processor)\n\nclass CSVMemDataIterator(CSVDataIterator):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._cache_x = None\n self._cache_y = None\n\n def prefetch(self):\n #raise value\n raise NotImplementedError()\n self._cache_x, self._cache_y = super()._get_batches_of_transformed_samples(np.arange(len(self.samples)))\n\n def _get_batches_of_transformed_samples(self, index_array):\n return self._cache_x[index_array], self._cache_y[index_array]\n\nclass MemDataGenerator(DataGenerator):\n def __init__(self, *args, row_processor=None, **kwargs):\n super().__init__(*args, row_processor=row_processor, **kwargs)\n\n def flow_from_file(self,labels_file,\n target_size=(256,256), color_mode='rgb',\n classes=None, class_mode='categorical',\n batch_size=32, shuffle=True, seed=None,\n save_to_dir=None,\n save_prefix='',\n save_format='png',\n follow_links=False,\n subset=None,\n interpolation='nearest'):\n mem_iterator = CSVMemDataIterator(\n labels_file=labels_file, image_data_generator=self,\n target_size=target_size, color_mode=color_mode,\n classes=classes, class_mode=class_mode,\n 
data_format=self.data_format,\n batch_size=batch_size, shuffle=shuffle, seed=seed,\n save_to_dir=save_to_dir,\n save_prefix=save_prefix,\n save_format=save_format,\n follow_links=follow_links,\n subset=subset,\n interpolation=interpolation,\n row_processor=self.row_processor)\n #TODO: remove after test\n raise Exception(\"Should not prefetch -_-\")\n mem_iterator.prefetch()\n return mem_iterator\n \n\nclass CSVImageIterator(_DirectoryIterator):\n \"\"\"Iterator capable of reading images listed in a csv file.\n\n # Arguments\n labels_file: str\n path to a CSV file containing path to images <filename> and\n the corresponding label <class> \n image_data_generator: Instance of `ImageDataGenerator`\n to use for random transformations and normalization.\n target_size: tuple of integers, dimensions to resize input images to.\n color_mode: One of `\"rgb\"`, `\"rgba\"`, `\"grayscale\"`.\n Color mode to read images.\n classes: Optional list of strings, names of subdirectories\n containing images from each class (e.g. `[\"dogs\", \"cats\"]`).\n It will be computed automatically if not set.\n class_mode: Mode for yielding the targets:\n `\"binary\"`: binary targets (if there are only two classes),\n `\"categorical\"`: categorical targets,\n `\"sparse\"`: integer targets,\n `\"input\"`: targets are images identical to input images (mainly\n used to work with autoencoders),\n `None`: no targets get yielded (only input images are yielded).\n batch_size: Integer, size of a batch.\n shuffle: Boolean, whether to shuffle the data between epochs.\n seed: Random seed for data shuffling.\n data_format: String, one of `channels_first`, `channels_last`.\n save_to_dir: Optional directory where to save the pictures\n being yielded, in a viewable format. This is useful\n for visualizing the random transformations being\n applied, for debugging purposes.\n save_prefix: String prefix to use for saving sample\n images (if `save_to_dir` is set).\n save_format: Format to use for saving sample images\n (if `save_to_dir` is set).\n subset: Subset of data (`\"training\"` or `\"validation\"`) if\n validation_split is set in ImageDataGenerator.\n interpolation: Interpolation method used to resample the image if the\n target size is different from that of the loaded image.\n Supported methods are \"nearest\", \"bilinear\", and \"bicubic\".\n If PIL version 1.1.3 or newer is installed, \"lanczos\" is also\n supported. If PIL version 3.4.0 or newer is installed, \"box\" and\n \"hamming\" are also supported. 
By default, \"nearest\" is used.\n \"\"\"\n def __init__(self, labels_file, *args, **kwargs):\n df = pd.read_csv(labels_file)\n self.filenames = df['filename'].values\n self.classes = kwargs.get('classes') or df['class'].values\n super().__init__(*args, **kwargs)\n\n def _get_batches_of_transformed_samples(self, index_array):\n batch_x = np.zeros(\n (len(index_array),) + self.image_shape,\n dtype=K.floatx())\n # build batch of image data\n for i, j in enumerate(index_array):\n fname = self.filenames[j]\n img = image.load_img(fname,\n color_mode=self.color_mode,\n target_size=self.target_size,\n interpolation=self.interpolation)\n x = image.img_to_array(img, data_format=self.data_format)\n # Pillow images should be closed after `load_img`,\n # but not PIL images.\n if hasattr(img, 'close'):\n img.close()\n params = self.image_data_generator.get_random_transform(x.shape)\n x = self.image_data_generator.apply_transform(x, params)\n x = self.image_data_generator.standardize(x)\n batch_x[i] = x\n # optionally save augmented images to disk for debugging purposes\n if self.save_to_dir:\n for i, j in enumerate(index_array):\n img = array_to_img(batch_x[i], self.data_format, scale=True)\n fname = '{prefix}_{index}_{hash}.{format}'.format(\n prefix=self.save_prefix,\n index=j,\n hash=np.random.randint(1e7),\n format=self.save_format)\n img.save(os.path.join(self.save_to_dir, fname))\n # build batch of labels\n if self.class_mode == 'input':\n batch_y = batch_x.copy()\n elif self.class_mode == 'sparse':\n batch_y = self.classes[index_array]\n elif self.class_mode == 'binary':\n batch_y = self.classes[index_array].astype(K.floatx())\n elif self.class_mode == 'categorical':\n batch_y = np.zeros(\n (len(batch_x), self.num_classes),\n dtype=K.floatx())\n for i, label in enumerate(self.classes[index_array]):\n batch_y[i, label] = 1.\n else:\n return batch_x\n return batch_x, batch_y\n\n\n\nclass ImageDataGenerator(_ImageDataGenerator):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n\n def flow_from_file(self, labels_file,\n target_size=(256, 256), color_mode='rgb',\n classes=None, class_mode='categorical',\n batch_size=32, shuffle=True, seed=None,\n save_to_dir=None,\n save_prefix='',\n save_format='png',\n follow_links=False,\n subset=None,\n interpolation='nearest'):\n return CSVImageIterator(\n labels_file, self,\n target_size=target_size, color_mode=color_mode,\n classes=classes, class_mode=class_mode,\n data_format=self.data_format,\n batch_size=batch_size, shuffle=shuffle, seed=seed,\n save_to_dir=save_to_dir,\n save_prefix=save_prefix,\n save_format=save_format,\n follow_links=follow_links,\n subset=subset,\n interpolation=interpolation)\n\n\nif __name__ == \"__main__\":\n fic = \"./../data/datasets/titanic/train.csv\"\n dg = DataGenerator()\n it = dg.flow_from_file(fic)\n for i in it:\n print(it)" ]
[ [ "pandas.read_csv", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
mmmats/Projet_final_AJC
[ "cc903331ef6cb7a1144f5c9b1aea74bfbab65e11" ]
[ "web_app/__init__.py" ]
[ "from flask import Flask\nfrom flask import render_template\nfrom flask import request\nfrom flask import jsonify\nfrom flask import send_file\nfrom flask import redirect\nfrom flask import url_for\nimport json\nimport os\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.cuda.amp as amp\nimport torchvision.transforms as transforms\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\nval_df = pd.read_csv('web_app/Human_protein_atlas/val.csv', nrows=25)\n\n\ndef create_app():\n\n #-----------------------------------------------------------------------------------#\n # INITIALISATION DE L'APPLICATION #\n #-----------------------------------------------------------------------------------#\n\n UPLOAD_FOLDER = 'web_app/static/uploads/'\n app = Flask(__name__)\n app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n #-----------------------------------------------------------------------------------#\n # Modèle #\n #-----------------------------------------------------------------------------------#\n\n device = torch.device(torch.device(\n 'cuda' if torch.cuda.is_available() else 'cpu'))\n use_amp = True\n\n class ModelWithAttention(nn.Module):\n\n def __init__(self):\n super().__init__()\n resnext = torch.hub.load(\n 'facebookresearch/semi-supervised-ImageNet1K-models', 'resnext101_32x8d_swsl')\n self.resnext = nn.Sequential(*list(resnext.children())[:-2])\n self.resnext.requires_grad_(False)\n self.attention_nn = nn.Sequential(\n nn.Linear(2048, 10),\n nn.Softmax(dim=1)\n )\n self.multi_label_classifier = nn.Conv1d(\n 10, 10, kernel_size=2048, groups=10)\n self.top = nn.ModuleList(\n [self.attention_nn, self.multi_label_classifier])\n\n @amp.autocast(enabled=use_amp)\n def forward(self, imgs):\n # shape : batch_size x 2048 x H x W\n encoded_imgs = self.resnext(imgs)\n # shape: batch_size x (HxW) x 2048\n encoded_imgs = encoded_imgs.reshape(\n *encoded_imgs.shape[:2], -1).swapaxes(1, 2)\n # shape: batch_size x (HxW) x 10\n weights = self.attention_nn(encoded_imgs)\n encoded_imgs = encoded_imgs.unsqueeze(dim=1).repeat(\n 1, 10, 1, 1) # shape: batch_size x 10 x (HxW) x 2048\n weights = weights.swapaxes(1, 2).unsqueeze(\n dim=-1) # shape: batch_size x 10 x (HxW) x 1\n # shape: batch_size x 10 x (HxW) x 2048\n outputs = weights * encoded_imgs\n outputs = outputs.sum(dim=2) # shape: batch_size x 10 x 2048\n # shape: batch_size x 10 x 1 => batch_size x 10 (after squeezing)\n outputs = self.multi_label_classifier(outputs).squeeze()\n return outputs, weights\n\n model = ModelWithAttention()\n model.to(device)\n model.load_state_dict(torch.load(\n 'web_app/model_checkpoints/model_epoch_32.pth'))\n\n thresholds = torch.tensor([0.866, 0.28, 0.95, 0.27599999, 0.52200001,\n 0.45899999, 0.68699998, 0.81699997, 0.75999999, 0.61299998], device=device)\n\n def visualize_att_mask(img_path, model, root_path, device=device, threshold=thresholds):\n\n tmp_files = os.listdir(root_path)\n for file in tmp_files:\n path = os.path.join(root_path, file)\n if os.path.isfile(path):\n os.remove(path)\n\n img = Image.open(img_path).convert('RGB')\n img_to_tensor = transforms.ToTensor()\n img = img_to_tensor(img)\n img = img.unsqueeze(dim=0).to(device) # shape : 1 x 3 x 512 x 512\n with torch.no_grad():\n with amp.autocast(enabled=use_amp):\n model.eval()\n logits, weights = model(img)\n probs = torch.sigmoid(logits)\n labels = probs >= threshold\n labels = torch.arange(10)[labels]\n if labels.shape == (0,):\n labels = probs.argmax(dim=-1, 
keepdim=True)\n labels = labels.cpu()\n weights = weights.squeeze()[labels].unsqueeze(\n dim=0).reshape(1, labels.shape[0], 16, 16).cpu()\n upsampled_weights = F.upsample(weights, size=512, mode='bilinear')\n img = img.cpu()\n for i, protein_idx in enumerate(labels):\n idx = protein_idx.item()\n fig = plt.figure(figsize=(13, 13))\n plt.imshow(img[0].permute(1, 2, 0), cmap='Greys_r')\n plt.imshow(upsampled_weights[0, i, :, :],\n cmap='Greys_r', alpha=0.6)\n plt.axis('off')\n plt.savefig(os.path.join(\n root_path, f'protein_{idx}.png'), bbox_inches='tight')\n plt.close(fig)\n return probs.tolist(), labels.tolist()\n #-----------------------------------------------------------------------------------#\n # PAGES #\n #-----------------------------------------------------------------------------------#\n\n @app.route('/')\n def homePage():\n return render_template(\"index.html\")\n\n @app.route('/', methods=['GET', 'POST'])\n def upload_file():\n if request.method == 'POST':\n if 'client_img' not in request.files:\n return 'there is no client_img in form!'\n clientImg = request.files['client_img']\n path = os.path.join(\n app.config['UPLOAD_FOLDER'], clientImg.filename)\n clientImg.save(path)\n messages = json.dumps({\"main\": clientImg.filename})\n return redirect(url_for(\"homePage\", messages=messages))\n\n return 'ok'\n return '''\n <h1>Upload new File</h1>\n <form method=\"post\" enctype=\"multipart/form-data\">\n <input type=\"file\" name=\"client_img\">\n <input type=\"submit\">\n </form>\n '''\n\n @app.route('/images/<image>')\n def get_image(image):\n if image[0] == 'p':\n filename = f'tmp_predictions/{image}'\n else:\n filename = f'Human_protein_atlas/train/{image}'\n return send_file(filename, mimetype='/images/png')\n\n @app.route('/uploads/<image>')\n def get_uploads(image):\n filename = f'static/uploads/{image}'\n return send_file(filename, mimetype='/images/png')\n\n #-----------------------------------------------------------------------------------#\n # APIs #\n #-----------------------------------------------------------------------------------#\n\n @app.route('/api/get_images')\n def get_images():\n data = val_df[['Image', 'Label']].head(25).to_dict('list')\n return jsonify(data)\n\n @app.route('/api/predict', methods=['POST'])\n def predict():\n data = request.json # {'Image': ____}\n img_path = os.path.join(\n 'web_app/Human_protein_atlas/train', str(data['Image'])+'.png')\n _, labels = visualize_att_mask(\n img_path, model, 'web_app/tmp_predictions', threshold=thresholds)\n\n return {\"classes\": labels}\n\n return app\n" ]
[ [ "torch.nn.functional.upsample", "torch.nn.Softmax", "matplotlib.pyplot.imshow", "pandas.read_csv", "torch.sigmoid", "torch.load", "torch.nn.ModuleList", "torch.arange", "torch.cuda.amp.autocast", "torch.tensor", "torch.nn.Linear", "torch.no_grad", "matplotlib.pyplot.axis", "matplotlib.pyplot.close", "torch.nn.Conv1d", "torch.cuda.is_available", "torch.hub.load", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
jjkver/deep-learning-from-scratch
[ "29c528a7d714d80bc59f020ff0134c36a9b218e6" ]
[ "common/multi_layer_net_extend.py" ]
[ "# coding: utf-8\nimport sys, os\nsys.path.append(os.pardir) # 부모 디렉터리의 파일을 가져올 수 있도록 설정\nimport numpy as np\nfrom collections import OrderedDict\nfrom common.layers import *\nfrom common.gradient import numerical_gradient\n\nclass MultiLayerNetExtend:\n \"\"\"완전 연결 다층 신경망(확장판)\n 가중치 감소, 드롭아웃, 배치 정규화 구현\n\n Parameters\n ----------\n input_size : 입력 크기(MNIST의 경우엔 784)\n hidden_size_list : 각 은닉층의 뉴런 수를 담은 리스트(e.g. [100, 100, 100])\n output_size : 출력 크기(MNIST의 경우엔 10)\n activation : 활성화 함수 - 'relu' 혹은 'sigmoid'\n weight_init_std : 가중치의 표준편차 지정(e.g. 0.01)\n 'relu'나 'he'로 지정하면 'He 초깃값'으로 설정\n 'sigmoid'나 'xavier'로 지정하면 'Xavier 초깃값'으로 설정\n weight_decay_lambda : 가중치 감소(L2 법칙)의 세기\n use_dropout : 드롭아웃 사용 여부\n dropout_ration : 드롭아웃 비율\n use_batchNorm : 배치 정규화 사용 여부\n \"\"\"\n def __init__(self, input_size, hidden_size_list, output_size,\n activation='relu', weight_init_std='relu', weight_decay_lambda=0, \n use_dropout = False, dropout_ration = 0.5, use_batchnorm=False):\n self.input_size = input_size\n self.output_size = output_size\n self.hidden_size_list = hidden_size_list\n self.hidden_layer_num = len(hidden_size_list)\n self.use_dropout = use_dropout\n self.weight_decay_lambda = weight_decay_lambda\n self.use_batchnorm = use_batchnorm\n self.params = {}\n\n # 가중치 초기화\n self.__init_weight(weight_init_std)\n\n # 계층 생성\n activation_layer = {'sigmoid': Sigmoid, 'relu': Relu}\n self.layers = OrderedDict()\n for idx in range(1, self.hidden_layer_num+1):\n self.layers['Affine' + str(idx)] = Affine(self.params['W' + str(idx)],\n self.params['b' + str(idx)])\n if self.use_batchnorm:\n self.params['gamma' + str(idx)] = np.ones(hidden_size_list[idx-1])\n self.params['beta' + str(idx)] = np.zeros(hidden_size_list[idx-1])\n self.layers['BatchNorm' + str(idx)] = BatchNormalization(self.params['gamma' + str(idx)], self.params['beta' + str(idx)])\n \n self.layers['Activation_function' + str(idx)] = activation_layer[activation]()\n \n if self.use_dropout:\n self.layers['Dropout' + str(idx)] = Dropout(dropout_ration)\n\n idx = self.hidden_layer_num + 1\n self.layers['Affine' + str(idx)] = Affine(self.params['W' + str(idx)], self.params['b' + str(idx)])\n\n self.last_layer = SoftmaxWithLoss()\n\n def __init_weight(self, weight_init_std):\n \"\"\"가중치 초기화\n \n Parameters\n ----------\n weight_init_std : 가중치의 표준편차 지정(e.g. 
0.01)\n 'relu'나 'he'로 지정하면 'He 초깃값'으로 설정\n 'sigmoid'나 'xavier'로 지정하면 'Xavier 초깃값'으로 설정\n \"\"\"\n all_size_list = [self.input_size] + self.hidden_size_list + [self.output_size]\n for idx in range(1, len(all_size_list)):\n scale = weight_init_std\n if str(weight_init_std).lower() in ('relu', 'he'):\n scale = np.sqrt(2.0 / all_size_list[idx - 1]) # ReLUを使う場合に推奨される初期値\n elif str(weight_init_std).lower() in ('sigmoid', 'xavier'):\n scale = np.sqrt(1.0 / all_size_list[idx - 1]) # sigmoidを使う場合に推奨される初期値\n self.params['W' + str(idx)] = scale * np.random.randn(all_size_list[idx-1], all_size_list[idx])\n self.params['b' + str(idx)] = np.zeros(all_size_list[idx])\n\n def predict(self, x, train_flg=False):\n for key, layer in self.layers.items():\n if \"Dropout\" in key or \"BatchNorm\" in key:\n x = layer.forward(x, train_flg)\n else:\n x = layer.forward(x)\n\n return x\n\n def loss(self, x, t, train_flg=False):\n \"\"\"손실 함수를 구한다.\n \n Parameters\n ----------\n x : 입력 데이터\n t : 정답 레이블 \n \"\"\"\n y = self.predict(x, train_flg)\n\n weight_decay = 0\n for idx in range(1, self.hidden_layer_num + 2):\n W = self.params['W' + str(idx)]\n weight_decay += 0.5 * self.weight_decay_lambda * np.sum(W**2)\n\n return self.last_layer.forward(y, t) + weight_decay\n\n def accuracy(self, X, T):\n Y = self.predict(X, train_flg=False)\n Y = np.argmax(Y, axis=1)\n if T.ndim != 1 : T = np.argmax(T, axis=1)\n\n accuracy = np.sum(Y == T) / float(X.shape[0])\n return accuracy\n\n def numerical_gradient(self, X, T):\n \"\"\"기울기를 구한다(수치 미분).\n \n Parameters\n ----------\n x : 입력 데이터\n t : 정답 레이블\n \n Returns\n -------\n 각 층의 기울기를 담은 사전(dictionary) 변수\n grads['W1']、grads['W2']、... 각 층의 가중치\n grads['b1']、grads['b2']、... 각 층의 편향\n \"\"\"\n loss_W = lambda W: self.loss(X, T, train_flg=True)\n\n grads = {}\n for idx in range(1, self.hidden_layer_num+2):\n grads['W' + str(idx)] = numerical_gradient(loss_W, self.params['W' + str(idx)])\n grads['b' + str(idx)] = numerical_gradient(loss_W, self.params['b' + str(idx)])\n \n if self.use_batchnorm and idx != self.hidden_layer_num+1:\n grads['gamma' + str(idx)] = numerical_gradient(loss_W, self.params['gamma' + str(idx)])\n grads['beta' + str(idx)] = numerical_gradient(loss_W, self.params['beta' + str(idx)])\n\n return grads\n \n def gradient(self, x, t):\n # forward\n self.loss(x, t, train_flg=True)\n\n # backward\n dout = 1\n dout = self.last_layer.backward(dout)\n\n layers = list(self.layers.values())\n layers.reverse()\n for layer in layers:\n dout = layer.backward(dout)\n\n # 결과 저장\n grads = {}\n for idx in range(1, self.hidden_layer_num+2):\n grads['W' + str(idx)] = self.layers['Affine' + str(idx)].dW + self.weight_decay_lambda * self.params['W' + str(idx)]\n grads['b' + str(idx)] = self.layers['Affine' + str(idx)].db\n\n if self.use_batchnorm and idx != self.hidden_layer_num+1:\n grads['gamma' + str(idx)] = self.layers['BatchNorm' + str(idx)].dgamma\n grads['beta' + str(idx)] = self.layers['BatchNorm' + str(idx)].dbeta\n\n return grads\n" ]
[ [ "numpy.sqrt", "numpy.ones", "numpy.argmax", "numpy.random.randn", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dboeckenhoff/tikzplotlib
[ "45ca7fe6c40c547116cf51063f16aa4ce05514f2" ]
[ "tikzplotlib/_patch.py" ]
[ "import matplotlib as mpl\n\nfrom . import _path as mypath\nfrom ._text import _get_arrow_style\n\n\ndef draw_patch(data, obj):\n \"\"\"Return the PGFPlots code for patches.\n \"\"\"\n if isinstance(obj, mpl.patches.FancyArrowPatch):\n data, draw_options = mypath.get_draw_options(\n data,\n obj,\n obj.get_edgecolor(),\n # get_fillcolor for the arrow refers to the head, not the path\n None,\n obj.get_linestyle(),\n obj.get_linewidth(),\n obj.get_hatch(),\n )\n return _draw_fancy_arrow(data, obj, draw_options)\n\n # Gather the draw options.\n data, draw_options = mypath.get_draw_options(\n data,\n obj,\n obj.get_edgecolor(),\n obj.get_facecolor(),\n obj.get_linestyle(),\n obj.get_linewidth(),\n obj.get_hatch(),\n )\n\n if isinstance(obj, mpl.patches.Rectangle):\n # rectangle specialization\n return _draw_rectangle(data, obj, draw_options)\n elif isinstance(obj, mpl.patches.Ellipse):\n # ellipse specialization\n return _draw_ellipse(data, obj, draw_options)\n else:\n # regular patch\n return _draw_polygon(data, obj, draw_options)\n\n\ndef _is_in_legend(obj):\n label = obj.get_label()\n leg = obj.axes.get_legend()\n if leg is None:\n return False\n return label in [txt.get_text() for txt in leg.get_texts()]\n\n\ndef _patch_legend(obj, draw_options, legend_type):\n \"\"\" Decorator for handling legend of mpl.Patch \"\"\"\n legend = \"\"\n if _is_in_legend(obj):\n # Unfortunately, patch legend entries need \\addlegendimage in Pgfplots.\n do = \", \".join([legend_type] + draw_options) if draw_options else \"\"\n legend += \"\\\\addlegendimage{{{}}}\\n\\\\addlegendentry{{{}}}\\n\\n\".format(\n do, obj.get_label()\n )\n\n return legend\n\n\ndef zip_modulo(*seqs):\n n = max(len(seq) for seq in seqs)\n for i in range(n):\n yield tuple(seq[i % len(seq)] for seq in seqs)\n\n\ndef draw_patchcollection(data, obj):\n \"\"\"Returns PGFPlots code for a number of patch objects.\n \"\"\"\n content = []\n\n # recompute the face colors\n obj.update_scalarmappable()\n\n def ensure_list(x):\n return [None] if len(x) == 0 else x\n\n ecs = ensure_list(obj.get_edgecolor())\n fcs = ensure_list(obj.get_facecolor())\n lss = ensure_list(obj.get_linestyle())\n ws = ensure_list(obj.get_linewidth())\n ts = ensure_list(obj.get_transforms())\n offs = obj.get_offsets()\n\n paths = obj.get_paths()\n for path, ec, fc, ls, w, t, off in zip_modulo(paths, ecs, fcs, lss, ws, ts, offs):\n if t is None:\n t = mpl.transforms.IdentityTransform()\n\n path = path.transformed(mpl.transforms.Affine2D(t).translate(*off))\n data, draw_options = mypath.get_draw_options(data, obj, ec, fc, ls, w)\n data, cont, draw_options, is_area = mypath.draw_path(\n data, path, draw_options=draw_options\n )\n content.append(cont)\n\n legend_type = \"area legend\" if is_area else \"line legend\"\n legend = _patch_legend(obj, draw_options, legend_type) or \"\\n\"\n content.append(legend)\n\n return data, content\n\n\ndef _draw_polygon(data, obj, draw_options):\n data, content, _, is_area = mypath.draw_path(\n data, obj.get_path(), draw_options=draw_options\n )\n legend_type = \"area legend\" if is_area else \"line legend\"\n content += _patch_legend(obj, draw_options, legend_type)\n\n return data, content\n\n\ndef _draw_rectangle(data, obj, draw_options):\n \"\"\"Return the PGFPlots code for rectangles.\n \"\"\"\n # Objects with labels are plot objects (from bar charts, etc). Even those without\n # labels explicitly set have a label of \"_nolegend_\". 
Everything else should be\n # skipped because they likely correspong to axis/legend objects which are handled by\n # PGFPlots\n label = obj.get_label()\n if label == \"\":\n return data, []\n\n # Get actual label, bar charts by default only give rectangles labels of\n # \"_nolegend_\". See <https://stackoverflow.com/q/35881290/353337>.\n handles, labels = obj.axes.get_legend_handles_labels()\n labelsFound = [\n label for h, label in zip(handles, labels) if obj in h.get_children()\n ]\n if len(labelsFound) == 1:\n label = labelsFound[0]\n\n left_lower_x = obj.get_x()\n left_lower_y = obj.get_y()\n ff = data[\"float format\"]\n do = \",\".join(draw_options)\n right_upper_x = left_lower_x + obj.get_width()\n right_upper_y = left_lower_y + obj.get_height()\n cont = (\n f\"\\\\draw[{do}] (axis cs:{left_lower_x:{ff}},{left_lower_y:{ff}}) \"\n f\"rectangle (axis cs:{right_upper_x:{ff}},{right_upper_y:{ff}});\\n\"\n )\n\n if label != \"_nolegend_\" and label not in data[\"rectangle_legends\"]:\n data[\"rectangle_legends\"].add(label)\n cont += \"\\\\addlegendimage{{ybar,ybar legend,{}}};\\n\".format(\n \",\".join(draw_options)\n )\n cont += f\"\\\\addlegendentry{{{label}}}\\n\\n\"\n return data, cont\n\n\ndef _draw_ellipse(data, obj, draw_options):\n \"\"\"Return the PGFPlots code for ellipses.\n \"\"\"\n if isinstance(obj, mpl.patches.Circle):\n # circle specialization\n return _draw_circle(data, obj, draw_options)\n x, y = obj.center\n ff = data[\"float format\"]\n\n if obj.angle != 0:\n draw_options.append(\n f\"rotate around={{{obj.angle:{ff}}:(axis cs:{x:{ff}},{y:{ff}})}}\"\n )\n\n do = \",\".join(draw_options)\n content = (\n f\"\\\\draw[{do}] (axis cs:{x:{ff}},{y:{ff}}) ellipse \"\n f\"({0.5 * obj.width:{ff}} and {0.5 * obj.height:{ff}});\\n\"\n )\n content += _patch_legend(obj, draw_options, \"area legend\")\n\n return data, content\n\n\ndef _draw_circle(data, obj, draw_options):\n \"\"\"Return the PGFPlots code for circles.\n \"\"\"\n x, y = obj.center\n ff = data[\"float format\"]\n do = \",\".join(draw_options)\n content = (\n f\"\\\\draw[{do}] (axis cs:{x:{ff}},{y:{ff}}) circle ({obj.get_radius():{ff}});\\n\"\n )\n content += _patch_legend(obj, draw_options, \"area legend\")\n return data, content\n\n\ndef _draw_fancy_arrow(data, obj, draw_options):\n style = _get_arrow_style(obj, data)\n ff = data[\"float format\"]\n if obj._posA_posB is not None:\n posA, posB = obj._posA_posB\n do = \",\".join(style)\n content = (\n f\"\\\\draw[{do}] (axis cs:{posA[0]:{ff}},{posA[1]:{ff}}) -- \"\n f\"(axis cs:{posB[0]:{ff}},{posB[1]:{ff}});\\n\"\n )\n else:\n data, content, _, _ = mypath.draw_path(\n data, obj._path_original, draw_options=draw_options + style\n )\n content += _patch_legend(obj, draw_options, \"line legend\")\n return data, content\n" ]
[ [ "matplotlib.transforms.Affine2D", "matplotlib.transforms.IdentityTransform" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
terhardt/DO-progression
[ "7ac2cdd5fb5ea48a66edb4fffd44285d607b1027" ]
[ "process_ramp_samplers.py" ]
[ "import numpy as np\nimport joblib as jl\nimport xarray as xr\nimport pandas as pd\nimport sys\nfrom os.path import exists\nfrom itertools import product\n\n\ndef get_traces(sampler, nthin):\n \"\"\"Extract traces from emcee.EnsebleSampler and apply\n invers transformation of parameters\n \"\"\"\n # load every nthin'th sample from the walkers and reshape to\n # final dimensions\n traces = sampler.chain[:, ::nthin, :].reshape(-1, sampler.dim).copy()\n # convert from sample space to meaningfull space\n traces[:, [1, 4, 5]] = np.exp(traces[:, [1, 4, 5]])\n return traces\n\n\nif __name__ == '__main__':\n try:\n CORE = sys.argv[1]\n except IndexError:\n print('Please give core name as argument')\n sys.exit(1)\n\n # Load event table\n events = pd.read_table('data/GIS_table.txt', usecols=(0, ),\n squeeze=True, comment='#').values\n # initiate the output data array\n events = np.array(events)\n params = np.array(('Ca', 'Na', 'lt', 'd18O'))\n model = np.array(('t0', 'dt', 'y0', 'dy', 'tau', 'sigma'))\n output_shape = (6000, len(model), len(params), len(events))\n da = xr.DataArray(np.full(output_shape, np.nan),\n dims=('sample', 'model', 'param', 'event'),\n coords={'model': model,\n 'param': params,\n 'event': events})\n\n sf_str = 'ramp_fits/sampler/{:s}_{:s}_{:s}.gz'\n # Load all samplers, extract traces and put into the DataArray\n for p, e in product(params, events):\n f = sf_str.format(CORE, e, p)\n if not exists(f):\n continue\n print('loading %s' % f)\n _, sampler = jl.load(f)\n if sampler.acceptance_fraction.mean() <= 0.3:\n print('\\t skipping %s' % f)\n print('\\t Acceptance fraction: %f' % (\n sampler.acceptance_fraction.mean()))\n continue\n else:\n traces = get_traces(sampler, nthin=600)\n da.sel(param=p, event=e)[:, :] = traces\n # Save data array to disk for later use\n trace_file = 'ramp_fits/traces/{:s}.gz'.format(CORE)\n print('saving traces to %s' % trace_file)\n jl.dump(da, trace_file, compress=3)\n" ]
[ [ "pandas.read_table", "numpy.array", "numpy.exp", "numpy.full" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
wang93/Decreasing-Momentum-BN
[ "6a8f01f69732085d1c54d7653cff260a5dd0a5a1" ]
[ "special_batchnorm/batchnorm0.py" ]
[ "import torch\nfrom torch.nn.modules.batchnorm import _BatchNorm as origin_BN\n\n'''reimplement BN in module but not function'''\n\n\nclass _BatchNorm(origin_BN):\n @staticmethod\n def expand(stat, target_size):\n if len(target_size) == 4:\n stat = stat.unsqueeze(1).unsqueeze(2).expand(target_size[1:])\n # stat = stat.unsqueeze(1).unsqueeze(2).unsqueeze(0).expand(target_size[0], -1, target_size[2], target_size[3])\n # stat = stat.unsqueeze(1).unsqueeze(2).unsqueeze(0).repeat(target_size[0], 1, target_size[2],target_size[3])\n elif len(target_size) == 2:\n pass\n # stat = stat.unsqueeze(0).expand(target_size[0], -1)\n # stat = stat.unsqueeze(0).repeat(target_size[0], 1)\n else:\n raise NotImplementedError\n\n return stat\n\n def _check_input_dim(self, input):\n raise NotImplementedError\n\n def forward(self, input: torch.Tensor):\n self._check_input_dim(input)\n\n sz = input.size()\n if input.dim() == 4:\n new_size = [1, sz[1], 1, 1]\n elif input.dim() == 2:\n new_size = [1, sz[1]]\n else:\n raise NotImplementedError\n if self.training:\n exponential_average_factor = 0.0\n if self.track_running_stats:\n if self.num_batches_tracked is not None:\n self.num_batches_tracked += 1\n if self.momentum is None: # use cumulative moving average\n exponential_average_factor = 1.0 / float(self.num_batches_tracked)\n else: # use exponential moving average\n exponential_average_factor = self.momentum\n\n if input.dim() == 4:\n reduced_dim = (0, 2, 3)\n elif input.dim() == 2:\n reduced_dim = (0, )\n else:\n raise NotImplementedError\n\n data = input.detach()\n # di_mean = torch.mean(data, dim=reduced_dim, keepdim=False)\n # di_var = torch.var(data, dim=reduced_dim, keepdim=False, unbiased=False)\n # di_var = torch.mean(data.square(), dim=reduced_dim, keepdim=False) - di_mean.square()\n di_var, di_mean = torch.var_mean(data, dim=reduced_dim, keepdim=False, unbiased=False)\n\n if self.track_running_stats:\n # self.running_mean = (1. - exponential_average_factor) * self.running_mean + (exponential_average_factor * di_mean)\n # self.running_var = (1. - exponential_average_factor) * self.running_var + (exponential_average_factor * di_var)\n self.running_mean.mul_(1. - exponential_average_factor).add_(di_mean, alpha=exponential_average_factor)\n self.running_var.mul_(1. 
- exponential_average_factor).add_(di_var, alpha=exponential_average_factor)\n else:\n self.running_mean = di_mean\n self.running_var = di_var\n\n # y = (input - self.expand(di_mean, sz)) \\\n # / self.expand(torch.sqrt(di_var + self.eps), sz)\n\n # y = (input - di_mean.view(new_size)) \\\n # / torch.full_like(di_var, 1e-2).max(di_var.sqrt()).view(new_size)\n\n y = (input - di_mean.view(new_size)) \\\n / torch.full_like(di_var, self.eps).add(di_var).sqrt().view(new_size)\n\n else:\n # y = (input - self.expand(self.running_mean, sz)) \\\n # / self.expand(torch.sqrt(self.running_var + self.eps), sz)\n\n # y = (input - self.running_mean.view(new_size)) \\\n # / torch.full_like(self.running_var, 1e-2).max(self.running_var.sqrt()).view(new_size)\n y = (input - self.running_mean.view(new_size)) \\\n / (torch.full_like(self.running_var, self.eps).add(self.running_var).sqrt().view(new_size))\n\n if self.affine:\n z = y * self.weight.view(new_size) + self.bias.view(new_size)\n else:\n z = y\n\n return z\n\n\nclass BatchNorm1d(_BatchNorm):\n def _check_input_dim(self, input):\n if input.dim() != 2 and input.dim() != 3:\n raise ValueError('expected 2D or 3D input (got {}D input)'\n .format(input.dim()))\n\n\nclass BatchNorm2d(_BatchNorm):\n def _check_input_dim(self, input):\n if input.dim() != 4:\n raise ValueError('expected 4D input (got {}D input)'\n .format(input.dim()))\n\n\ndef convert_model(module):\n if isinstance(module, torch.nn.DataParallel):\n mod = module.module\n mod = convert_model(mod)\n mod = torch.nn.DataParallel(mod, device_ids=module.device_ids)\n return mod\n\n mod = module\n for pth_module, id_module in zip([torch.nn.modules.batchnorm.BatchNorm1d,\n torch.nn.modules.batchnorm.BatchNorm2d],\n [BatchNorm1d,\n BatchNorm2d]):\n if isinstance(module, pth_module):\n mod = id_module(module.num_features, module.eps, module.momentum, module.affine, module.track_running_stats)\n mod.running_mean = module.running_mean\n mod.running_var = module.running_var\n if module.affine:\n mod.weight.data = module.weight.data\n mod.bias.data = module.bias.data\n\n for name, child in module.named_children():\n mod.add_module(name, convert_model(child))\n\n return mod" ]
[ [ "torch.full_like", "torch.nn.DataParallel", "torch.var_mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SConsul/FLITE
[ "7e3f462e66845a5c05e909d6a21dc1862a58579b" ]
[ "scripts/compute_avg_image.py" ]
[ "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport os\nimport glob\nimport argparse\nimport numpy as np\nfrom PIL import Image\n\ndef main():\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--data_path\", help=\"Path to ORBIT benchmark dataset root\")\n args = parser.parse_args()\n\n train_video_dirs = glob.glob( os.path.join(args.data_path, \"train/*/*/*/*\" ))\n\n avgs = []\n for video_dir in train_video_dirs:\n print ('processing ' + video_dir)\n frames = glob.glob(os.path.join(video_dir, \"*.jpg\"))\n for f in frames:\n pil_image = Image.open(f)\n if pil_image.mode != 'RGB':\n pil_image = pil_image.convert('RGB')\n arr_image = np.array(pil_image,dtype=np.float)\n mean_image = arr_image.reshape((-1, 3)).mean(axis=0)\n avgs.append(mean_image)\n\n arr_avgs = np.array(avgs)\n avg = np.mean( arr_avgs, axis=0) / 255.\n std = np.std( arr_avgs, axis=0) / 255.\n print('pixel stats for train frames in {:}: {:} (avg), {:} (std)'.format(args.data_path, avg, std))\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.std", "numpy.array", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rasmus-rudling/degree-thesis
[ "d74581491ec9618149c582059e290dca9957951d" ]
[ "ros_ws/src/crazyswarm/scripts/perceived-safety-study/trajectoryStuff/followTrajectory.py" ]
[ "#!/usr/bin/env python\nimport csv\nimport sys\nimport matplotlib.pyplot as plt\nfrom tracemalloc import start\nimport time\nfrom TrajectoryUtils import TrajectoryUtils\nfrom TrajectoryUtils import euclidianDistance\nsys.path.append(\"../..\")\n\nfrom planner.cbFunctions import heuristicFunction\n\nfrom pycrazyswarm import Crazyswarm\nimport numpy as np\n\nMOCAP_ROOM_HEIGHT = 2.57\nMOCAP_ROOM_LENGTH = 4.5\nMOCAP_ROOM_WIDTH = 3.55\n\nHUMAN_RADIUS = 0.5\nDRONE_RADIUS = 0.2\nMIN_ALLOWED_DISTANCE_TO_HUMAN = 0.45\n\n\ndef getMaxVelocity(dronePos):\n x, y, _ = dronePos\n distanceToHuman = euclidianDistance(x, y, 0, 0)\n maxAllowedVelocity = heuristicFunction(distanceToHuman)\n\n return maxAllowedVelocity\n\n\ndef getSleepTime(velocity, tentacleLength):\n return tentacleLength / velocity\n\n\ndef executeCustomTrajectory(timeHelper,\n drone,\n rate=100,\n trajectoryLogger=None,\n trajectoryToFollow=None,\n droneHeight=2):\n num_timestamps = len(trajectoryToFollow.timestamps)\n\n for event_idx in range(num_timestamps):\n currentPosition = trajectoryToFollow.positions[event_idx]\n currentPosition[2] = droneHeight\n\n currentVelocity = trajectoryToFollow.velocities[event_idx]\n currentAcceleration = trajectoryToFollow.accelerations[event_idx]\n currentYaw = trajectoryToFollow.yaws[event_idx]\n currentOmega = trajectoryToFollow.omegas[event_idx]\n\n currentTimestamp = trajectoryToFollow.timestamps[event_idx]\n\n drone.cmdFullState(pos=currentPosition,\n vel=currentVelocity,\n acc=currentAcceleration,\n yaw=currentYaw % (2 * np.pi),\n omega=currentOmega)\n\n # timeHelper.sleepForRate(rate)\n v = getMaxVelocity(currentPosition)\n sleepTime = getSleepTime(v, trajectoryToFollow.tentacleLength)\n\n timeHelper.sleep(sleepTime)\n trajectoryLogger.appendDroneEvent(currentTimestamp, drone)\n\n\nif __name__ == \"__main__\":\n trajectoryToFollow = TrajectoryUtils(\"csvs/PathPlanningTrajectory.csv\")\n\n startX = trajectoryToFollow.positions[0][0]\n startY = trajectoryToFollow.positions[0][1]\n startYaw = trajectoryToFollow.yaws[0]\n\n obstacleRadius = obstacleToPlot.radius - DRONE_RADIUS\n\n crazyflies_yaml = str({\n 'crazyflies': [{\n 'channel': 100,\n 'id': 7,\n 'initialPosition': [startX, startY, startYaw],\n 'type': 'default'\n }]\n })\n swarm = Crazyswarm(crazyflies_yaml=crazyflies_yaml,\n obstacleRadius=obstacleRadius)\n timeHelper = swarm.timeHelper\n drone = swarm.allcfs.crazyflies[0]\n\n plt.gca().view_init(elev=-90, azim=-90)\n\n droneHeight = 2\n\n drone.takeoff(targetHeight=droneHeight, duration=2)\n timeHelper.sleep(2)\n\n rate = 15 # In Hz\n trajectoryLogger = TrajectoryUtils()\n\n executeCustomTrajectory(timeHelper, drone, rate, trajectoryLogger,\n trajectoryToFollow, droneHeight)\n\n trajectoryLogger.saveTrajectoryToCsv('csvs/loggedLatticeTrajectory.csv')\n # trajectoryLogger.compareWithOtherTrajectory(trajectoryToFollow)\n\n print(\"Follower done!\")\n\n drone.notifySetpointsStop()\n drone.land(targetHeight=0.03, duration=0.5)\n timeHelper.sleep(0.5)\n plt.close()\n" ]
[ [ "matplotlib.pyplot.gca", "matplotlib.pyplot.close" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
thisIsMikeKane/building-controls-simulator
[ "c60ecb706bd3c008ada1d6d08c7f869b36d55ff8" ]
[ "src/python/BuildingControlsSimulator/StateEstimatorModels/LowPassFilter.py" ]
[ "# created by Tom Stesco [email protected]\n\nimport attr\nimport pandas as pd\nimport numpy as np\n\nfrom BuildingControlsSimulator.StateEstimatorModels.StateEstimatorModel import (\n StateEstimatorModel,\n)\nfrom BuildingControlsSimulator.DataClients.DataStates import STATES\nfrom BuildingControlsSimulator.Conversions.Conversions import Conversions\n\n\[email protected]\nclass LowPassFilter(StateEstimatorModel):\n \"\"\"LowPassFilter state estimator model\"\"\"\n\n # default is no filtering, use current measurement 100%\n alpha_temperature = attr.ib(default=1.0)\n alpha_humidity = attr.ib(default=1.0)\n step_output = attr.ib(factory=dict)\n step_size_seconds = attr.ib(default=None)\n current_t_idx = attr.ib(default=None)\n\n output = attr.ib(factory=dict)\n\n # for reference on how attr defaults wor for mutable types (e.g. list) see:\n # https://www.attrs.org/en/stable/init.html#defaults\n input_states = attr.ib()\n output_states = attr.ib()\n\n @input_states.default\n def get_input_states(self):\n return [\n STATES.THERMOSTAT_TEMPERATURE,\n STATES.THERMOSTAT_HUMIDITY,\n STATES.THERMOSTAT_MOTION,\n ]\n\n @output_states.default\n def get_output_states(self):\n return [\n STATES.THERMOSTAT_TEMPERATURE_ESTIMATE,\n STATES.THERMOSTAT_HUMIDITY_ESTIMATE,\n STATES.THERMOSTAT_MOTION_ESTIMATE,\n ]\n\n def get_model_name(self):\n _model_name = \"LowPass\"\n _model_name = _model_name.replace(\".\", \"_\")\n return _model_name\n\n def initialize(\n self,\n start_utc,\n t_start,\n t_end,\n t_step,\n data_spec,\n categories_dict,\n ):\n \"\"\"\"\"\"\n self.current_t_idx = 0\n self.step_size_seconds = t_step\n self.allocate_output_memory(\n t_start=t_start,\n t_end=t_end,\n t_step=t_step,\n data_spec=data_spec,\n categories_dict=categories_dict,\n )\n self.init_step_output()\n\n def allocate_output_memory(\n self, t_start, t_end, t_step, data_spec, categories_dict\n ):\n \"\"\"preallocate output memory to speed up simulation\"\"\"\n # reset output\n self.output = {}\n\n self.output = {\n STATES.SIMULATION_TIME: np.arange(\n t_start, t_end + t_step, t_step, dtype=\"int64\"\n )\n }\n n_s = len(self.output[STATES.SIMULATION_TIME])\n\n # add state variables\n for state in self.output_states:\n if data_spec.full.spec[state][\"dtype\"] == \"category\":\n self.output[state] = pd.Series(\n pd.Categorical(\n pd.Series(index=np.arange(n_s)),\n categories=categories_dict[state],\n )\n )\n else:\n (\n np_default_value,\n np_dtype,\n ) = Conversions.numpy_down_cast_default_value_dtype(\n data_spec.full.spec[state][\"dtype\"]\n )\n self.output[state] = np.full(\n n_s,\n np_default_value,\n dtype=np_dtype,\n )\n\n self.output[STATES.STEP_STATUS] = np.full(n_s, 0, dtype=\"int8\")\n\n def tear_down(self):\n \"\"\"tear down FMU\"\"\"\n pass\n\n def init_step_output(self):\n # initialize all off\n self.step_output = {state: None for state in self.output_states}\n\n def calc_t_control(self, step_sensor_input):\n t_ctrl = step_sensor_input[STATES.THERMOSTAT_TEMPERATURE]\n return t_ctrl\n\n @staticmethod\n def filter(state, prev_state_estimate, alpha):\n if prev_state_estimate:\n # y[i] := y[i-1] + α * (x[i] - y[i-1])\n state_estimate = prev_state_estimate + alpha * (state - prev_state_estimate)\n else:\n # cold start\n state_estimate = state\n return state_estimate\n\n def do_step(\n self,\n t_start,\n t_step,\n step_sensor_input,\n ):\n \"\"\"Simulate controller time step.\"\"\"\n self.step_output[STATES.STEP_STATUS] = 1\n\n self.step_output[STATES.THERMOSTAT_TEMPERATURE_ESTIMATE] = LowPassFilter.filter(\n 
state=step_sensor_input[STATES.THERMOSTAT_TEMPERATURE],\n prev_state_estimate=self.step_output[\n STATES.THERMOSTAT_TEMPERATURE_ESTIMATE\n ],\n alpha=self.alpha_temperature,\n )\n\n self.step_output[STATES.THERMOSTAT_HUMIDITY_ESTIMATE] = LowPassFilter.filter(\n state=step_sensor_input[STATES.THERMOSTAT_HUMIDITY],\n prev_state_estimate=self.step_output[STATES.THERMOSTAT_HUMIDITY_ESTIMATE],\n alpha=self.alpha_temperature,\n )\n\n # non filtered states\n self.step_output[STATES.THERMOSTAT_MOTION_ESTIMATE] = step_sensor_input[\n STATES.THERMOSTAT_MOTION\n ]\n\n self.step_output[STATES.STEP_STATUS] = 0\n self.add_step_to_output(self.step_output)\n self.current_t_idx += 1\n\n return self.step_output[STATES.STEP_STATUS]\n\n def add_step_to_output(self, step_output):\n for k, v in step_output.items():\n self.output[k][self.current_t_idx] = v\n\n def change_settings(self, new_settings):\n # this model has no settings\n pass\n" ]
[ [ "numpy.arange", "numpy.full" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
visualCalculus/neural-style-transfer
[ "96f98a642dc9bf7b1ae59729b3712ff467afa38d" ]
[ "nst/losses.py" ]
[ "import torch \nfrom torch import nn \nimport torch.nn.functional as F \n\nclass ContentLoss(nn.Module):\n \"\"\"\n Content Loss for the neural style transfer algorithm.\n \"\"\"\n def __init__(self, target: torch.Tensor, device: torch.device) -> None:\n super(ContentLoss, self).__init__()\n batch_size, channels, height, width = target.size()\n target = target.view(batch_size * channels, height * width)\n self.target = target.detach().to(device)\n\n def __str__(self) -> str:\n return \"Content loss\"\n\n def forward(self, input: torch.Tensor) -> torch.Tensor:\n batch_size, channels, height, width = input.size()\n input = input.view(batch_size * channels, height * width)\n return F.mse_loss(input, self.target)\n\n\nclass StyleLoss(nn.Module):\n \"\"\"\n Style loss for the neural style transfer algorithm.\n \"\"\"\n def __init__(self, target: torch.Tensor, device: torch.device) -> None:\n super(StyleLoss, self).__init__()\n self.target = self.compute_gram_matrix(target).detach().to(device)\n\n def __str__(self) -> str:\n return \"Style loss\"\n\n def forward(self, input: torch.Tensor) -> torch.Tensor:\n input = self.compute_gram_matrix(input)\n return F.mse_loss(input, self.target)\n\n def compute_gram_matrix(self, input: torch.Tensor) -> torch.Tensor:\n batch_size, channels, height, width = input.size()\n input = input.view(batch_size * channels, height * width)\n return torch.matmul(input, input.T).div(batch_size * channels * height * width)" ]
[ [ "torch.nn.functional.mse_loss", "torch.matmul" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jsenellart/OpenNMT-tf
[ "75f84906c4a5a8a40ed4eaec77bc5f5c1c8c4bff" ]
[ "opennmt/training.py" ]
[ "\"\"\"Training related classes and functions.\"\"\"\n\nimport abc\nimport contextlib\nimport time\n\nimport tensorflow as tf\n\nfrom opennmt.optimizers import utils as optimizer_util\nfrom opennmt.utils import misc\n\n\nclass Trainer(abc.ABC):\n \"\"\"Base class for model trainer.\"\"\"\n\n def __init__(self, checkpoint, is_master=True):\n \"\"\"Initializes the trainer.\n\n Args:\n checkpoint: A :class:`opennmt.utils.Checkpoint` instance.\n is_master: Whether this trainer instance is the master trainer.\n \"\"\"\n self._checkpoint = checkpoint\n self._is_master = is_master\n self._model = checkpoint.model\n self._summary_writer = tf.summary.create_file_writer(checkpoint.model_dir)\n\n optimizer = checkpoint.optimizer\n if optimizer is None:\n raise ValueError(\"No optimizer is defined\")\n graph_optimizer_options = tf.config.optimizer.get_experimental_options()\n mixed_precision_enabled = graph_optimizer_options.get(\"auto_mixed_precision\")\n if (mixed_precision_enabled\n and not isinstance(optimizer, tf.keras.mixed_precision.experimental.LossScaleOptimizer)):\n optimizer = tf.keras.mixed_precision.experimental.LossScaleOptimizer(optimizer, \"dynamic\")\n self._optimizer = optimizer\n\n def __call__(self,\n dataset,\n max_step=None,\n accum_steps=1,\n report_steps=100,\n save_steps=5000,\n evaluator=None,\n eval_steps=5000,\n moving_average_decay=None):\n \"\"\"Runs the training.\n\n Args:\n dataset: A ``tf.data.Dataset`` or a function taking a ``tf.distribute.InputContext``\n instance and returning a ``tf.data.Dataset``.\n max_step: The final training step.\n accum_steps: The number of gradient accumulation steps.\n report_steps: Report status every this many steps.\n save_steps: Save a checkpoint every this many steps.\n evaluator: A :class:`opennmt.evaluation.Evaluator` instance to call for\n evaluation.\n eval_steps: Evaluate every this many steps.\n moving_average_decay: If set, maintain an exponential moving average of the model\n variables using this decay value (usually close to 1, e.g. 0.9999). See\n https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage.\n \"\"\"\n if max_step is not None and self._optimizer.iterations.numpy() >= max_step:\n tf.get_logger().warning(\"Model already reached max_step = %d. Exiting.\", max_step)\n return\n if evaluator is not None and evaluator.should_stop():\n tf.get_logger().warning(\"Early stopping conditions are already met. 
Exiting.\")\n return\n\n with self._summary_writer.as_default():\n iterations = self._optimizer.iterations\n tf.summary.experimental.set_step(iterations)\n\n moving_average = None\n last_report_step = iterations.numpy()\n last_report_time = time.time()\n for loss in self._steps(dataset, accum_steps=accum_steps, report_steps=report_steps):\n if tf.math.is_nan(loss):\n raise RuntimeError(\"Model diverged with loss = NaN.\")\n\n if moving_average_decay is not None and self._is_master:\n if moving_average is None:\n moving_average = MovingAverage(\n self._model.trainable_variables,\n iterations,\n decay=moving_average_decay)\n else:\n moving_average.update()\n\n step = iterations.numpy()\n if step % report_steps == 0:\n _report_training_status(\n step,\n loss,\n self._optimizer.learning_rate,\n self._get_words_counters(),\n last_report_step,\n last_report_time)\n last_report_step = step\n last_report_time = time.time()\n if step == 1 or (save_steps is not None and step % save_steps == 0):\n self._save_checkpoint(step, moving_average=moving_average)\n if eval_steps is not None and step % eval_steps == 0:\n early_stop = self._evaluate(evaluator, step, moving_average=moving_average)\n if early_stop:\n tf.get_logger().warning(\"Early stopping conditions are met. Exiting.\")\n break\n if step == max_step:\n break\n\n self._save_checkpoint(step, moving_average=moving_average)\n self._evaluate(evaluator, step, moving_average=moving_average)\n\n @abc.abstractmethod\n def _steps(self, dataset, accum_steps=1, report_steps=None):\n \"\"\"Returns a generator over training steps (i.e. parameters update).\n\n Args:\n dataset: The training dataset.\n accum_steps: Accumulate the gradients of this many steps/batches.\n report_steps: Report summary statistics every this many steps. 
This should\n typically be used in a ``tf.summary.record_if`` context.\n\n Returns:\n A generator that yields a loss value to report for this step.\n \"\"\"\n raise NotImplementedError()\n\n def _get_words_counters(self):\n \"\"\"Returns the accumulated words counters and resets them.\n\n This is used to report the words per second in the training logs.\n\n Returns:\n A dictionary mapping a counter name to a Python value.\n \"\"\"\n return {}\n\n def _run_model(self, source, target):\n \"\"\"Computes the loss of the given source and target pair.\n\n Args:\n source: A nested structure of tensors.\n target: A nested structure of tensors.\n\n Returns:\n A tuple containing,\n\n - The loss to compute the gradients.\n - The loss to report.\n \"\"\"\n first_call = not self._model.built\n outputs, _ = self._model(\n source,\n labels=target,\n training=True,\n step=self._optimizer.iterations)\n loss = self._model.compute_loss(outputs, target, training=True)\n if isinstance(loss, tuple):\n training_loss = loss[0] / loss[1]\n reported_loss = loss[0] / loss[2] if len(loss) > 2 else training_loss\n else:\n training_loss, reported_loss = loss, loss\n training_loss = self._model.regularize_loss(\n training_loss, variables=self._model.trainable_variables)\n if first_call and self._is_master:\n self._model.visualize(self._checkpoint.model_dir)\n tf.get_logger().info(\"Number of model parameters: %d\", self._model.count_params())\n tf.get_logger().info(\n \"Number of model weights: %d (trainable = %d, non trainable = %d)\",\n len(self._model.weights),\n len(self._model.trainable_weights),\n len(self._model.non_trainable_weights))\n return training_loss, reported_loss\n\n def _save_checkpoint(self, step, moving_average=None):\n \"\"\"Saves a checkpoint for step.\"\"\"\n if not self._is_master or step == self._checkpoint.last_saved_step:\n return\n with moving_average.shadow_variables() if moving_average is not None else contextlib.suppress():\n self._checkpoint.save(step)\n\n def _evaluate(self, evaluator, step, moving_average=None):\n \"\"\"Runs evaluation for step. 
Returns ``True`` is early conditions are met.\"\"\"\n if not self._is_master or evaluator is None or step == evaluator.last_evaluated_step:\n return False\n with moving_average.shadow_variables() if moving_average is not None else contextlib.suppress():\n evaluator(step)\n return evaluator.should_stop()\n\n\nclass BasicTrainer(Trainer):\n \"\"\"Basic single GPU trainer.\"\"\"\n\n def _steps(self, dataset, accum_steps=1, report_steps=None):\n if accum_steps != 1:\n raise ValueError(\"BasicTrainer does not support gradient accumulation\")\n if callable(dataset):\n dataset = dataset(tf.distribute.InputContext())\n\n @tf.function(input_signature=dataset.element_spec)\n def _step(source, target):\n training_loss, reported_loss = self._run_model(source, target)\n variables = self._model.trainable_variables\n gradients = self._optimizer.get_gradients(training_loss, variables)\n self._optimizer.apply_gradients(list(zip(gradients, variables)))\n return reported_loss\n\n for source, target in dataset:\n yield _step(source, target)\n\n\nclass DistributionStrategyTrainer(Trainer):\n \"\"\"Trainer based on distribution strategies.\"\"\"\n\n def __init__(self, checkpoint, devices=None):\n \"\"\"Initializes the trainer.\n\n Args:\n checkpoint: A :class:`opennmt.utils.checkpoint.Checkpoint` instance.\n devices: List of device strings to use for training.\n \"\"\"\n super(DistributionStrategyTrainer, self).__init__(checkpoint)\n if not devices:\n devices = misc.get_devices(count=1) # Train with 1 device by default.\n self._strategy = tf.distribute.MirroredStrategy(devices=devices)\n self._words_counters = {}\n with self._strategy.scope():\n # Create some variables under the strategy scope.\n _ = self._optimizer.iterations\n self._gradient_accumulator = optimizer_util.GradientAccumulator()\n\n def _get_words_counters(self):\n return {name:value.numpy() for name, value in self._synchronize_words_counters().items()}\n\n def _steps(self, dataset, accum_steps=1, report_steps=None):\n self._gradient_accumulator.reset()\n self._words_counters.clear()\n for i, loss in enumerate(self._accumulate_next_gradients(dataset, report_steps=report_steps)):\n if tf.math.is_nan(loss):\n raise RuntimeError(\"Model diverged with loss = NaN.\")\n if i == 0 or (i + 1) % accum_steps == 0:\n self._apply_gradients()\n yield loss\n\n def _accumulate_next_gradients(self, dataset, report_steps=None):\n \"\"\"Accumulates the gradients from the next element in :obj:`dataset`.\"\"\"\n\n # We prefer not to use experimental_distribute_dataset here because it\n # sometimes fails to split the batches (noticed with tokens batch type).\n # We also assume for now that we are training with a single worker\n # otherwise we would need to correctly shard the input dataset.\n dataset_fn = dataset if callable(dataset) else lambda _: dataset\n distributed_dataset = self._strategy.experimental_distribute_datasets_from_function(\n dataset_fn)\n\n # Get the next element within the tf.function for more pipelining.\n # See: https://github.com/tensorflow/tensorflow/issues/29075#issuecomment-513390242\n iterator = iter(distributed_dataset)\n\n @tf.function\n def _accumulate_next():\n if report_steps is None:\n should_record_summaries = False\n else:\n should_record_summaries = tf.logical_and(\n tf.equal(self._optimizer.iterations % report_steps, 0),\n tf.equal(self._gradient_accumulator.step, 0))\n with tf.summary.record_if(should_record_summaries):\n per_replica_source, per_replica_target = next(iterator)\n return 
self._accumulate_gradients(per_replica_source, per_replica_target)\n\n while True:\n try:\n yield _accumulate_next()\n except tf.errors.OutOfRangeError:\n break\n\n def _accumulate_gradients(self, per_replica_source, per_replica_target):\n \"\"\"Accumulates the gradients (cross-replica).\"\"\"\n per_replica_loss = self._strategy.experimental_run_v2(\n self._accumulate_gradients_on_replica,\n args=(per_replica_source, per_replica_target))\n # TODO: this reduction could be delayed until _step is called.\n return self._strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, None)\n\n def _accumulate_gradients_on_replica(self, source, target):\n \"\"\"Accumulates the gradients (in replica).\"\"\"\n training_loss, reported_loss = self._run_model(source, target)\n variables = self._model.trainable_variables\n gradients = self._optimizer.get_gradients(training_loss, variables)\n self._gradient_accumulator(gradients)\n tf.summary.scalar(\"gradients/global_norm\", tf.linalg.global_norm(gradients))\n self._update_words_counter(\"source\", source)\n self._update_words_counter(\"target\", target)\n return reported_loss\n\n def _update_words_counter(self, name, features):\n \"\"\"Accumulates number of source and target tokens to report throughput.\"\"\"\n length = features.get(\"length\")\n if length is None:\n return\n num_words = tf.reduce_sum(length)\n counter = self._words_counters.get(name)\n if counter is None:\n counter = tf.Variable(\n tf.constant(0, dtype=tf.int64),\n trainable=False,\n synchronization=tf.VariableSynchronization.ON_READ,\n aggregation=tf.VariableAggregation.SUM)\n self._words_counters[name] = counter\n counter.assign_add(tf.cast(num_words, tf.int64), read_value=False)\n\n @tf.function\n def _synchronize_words_counters(self):\n \"\"\"Synchronizes and resets words counters values across replicas.\"\"\"\n sync_words_counters = {\n name:counter.read_value() for name, counter in self._words_counters.items()}\n self._strategy.experimental_run_v2(self._reset_words_counters_on_replica)\n return sync_words_counters\n\n def _reset_words_counters_on_replica(self):\n \"\"\"Resets the variables that count words (in replica).\"\"\"\n for counter in self._words_counters.values():\n counter.assign(tf.constant(0, dtype=tf.int64), read_value=False)\n\n @tf.function\n def _apply_gradients(self):\n \"\"\"Applies the gradients (cross-replica).\"\"\"\n self._strategy.experimental_run_v2(self._apply_gradients_on_replica)\n\n def _apply_gradients_on_replica(self):\n \"\"\"Applies the gradients (in replica).\"\"\"\n variables = self._model.trainable_variables\n # optimizer.apply_gradients will sum the gradients accross replicas.\n gradient_scale = self._gradient_accumulator.step * self._strategy.num_replicas_in_sync\n grads_and_vars = [\n (gradient / tf.cast(gradient_scale, gradient.dtype), variable)\n for gradient, variable in zip(self._gradient_accumulator.gradients, variables)]\n self._optimizer.apply_gradients(grads_and_vars)\n self._gradient_accumulator.reset()\n\n\ndef _report_training_status(step,\n loss,\n learning_rate,\n words_counters,\n last_report_step,\n last_report_time):\n elapsed_time = time.time() - last_report_time\n\n steps_per_sec = (step - last_report_step) / elapsed_time\n tf.summary.scalar(\"steps_per_sec\", steps_per_sec, description=\"Training steps per second\")\n steps_per_sec_fmt = \"steps/s = %0.2f\" % steps_per_sec\n\n words_per_sec_fmt = []\n for name, counter in words_counters.items():\n avg = int(counter / elapsed_time)\n tf.summary.scalar(\n 
\"words_per_sec/%s\" % name,\n avg,\n description=\"%s words per second\" % name.capitalize())\n words_per_sec_fmt.append(\"%s words/s = %d\" % (name, avg))\n\n if isinstance(learning_rate, tf.optimizers.schedules.LearningRateSchedule):\n learning_rate = learning_rate(step)\n elif isinstance(learning_rate, tf.Variable):\n learning_rate = learning_rate.value()\n\n tf.get_logger().info(\n \"Step = %d ; %s ; Learning rate = %f ; Loss = %f\",\n step,\n \", \".join([steps_per_sec_fmt] + list(sorted(words_per_sec_fmt))),\n learning_rate,\n loss)\n tf.summary.scalar(\"loss\", loss, description=\"Training loss\")\n tf.summary.scalar(\"optim/learning_rate\", learning_rate, description=\"Learning rate\")\n\n\nclass MovingAverage(object):\n \"\"\"Object holding an exponential moving average of variables.\"\"\"\n\n def __init__(self, variables, step, decay=0.9999):\n \"\"\"Initializes the moving average object.\n\n Args:\n variables: The list of variable for which to maintain a moving average.\n step: The training step counter as a ``tf.Variable``.\n decay: The decay rate of the exponential moving average. Usually close to\n 1, e.g. 0.9999, see the complete formula on\n https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage.\n\n Raises:\n TypeError: is :obj:`step` is not a ``tf.Variable``.\n \"\"\"\n if not isinstance(step, tf.Variable):\n raise TypeError(\"step should be a tf.Variable\")\n if decay < 0.9 or decay > 1:\n tf.get_logger().warning(\"Moving average decay should be close to 1 (e.g. 0.9999) but you \"\n \"passed %f, is it correct? See https://www.tensorflow.org/api_docs\"\n \"/python/tf/train/ExponentialMovingAverage for details about the \"\n \"formula and recommended decay values.\")\n self._ema = tf.train.ExponentialMovingAverage(decay, num_updates=step)\n self._variables = variables\n self.update()\n\n @tf.function\n def update(self):\n \"\"\"Updates the moving average of the variables.\"\"\"\n self._ema.apply(var_list=list(map(misc.get_primary_variable, self._variables)))\n\n @contextlib.contextmanager\n def shadow_variables(self):\n \"\"\"Returns a context manager that assigns the variables to their moving\n average value on enter and restores the previous value on exit.\n\n Returns:\n A context manager.\n \"\"\"\n # TODO: Do we want to shadow the values on all replicas?\n previous_values = []\n for variable in self._variables:\n previous_values.append(variable.value())\n variable.assign(self._ema.average(misc.get_primary_variable(variable)))\n yield\n for previous_value, variable in zip(previous_values, self._variables):\n variable.assign(previous_value)\n" ]
[ [ "tensorflow.config.optimizer.get_experimental_options", "tensorflow.keras.mixed_precision.experimental.LossScaleOptimizer", "tensorflow.constant", "tensorflow.distribute.InputContext", "tensorflow.reduce_sum", "tensorflow.summary.create_file_writer", "tensorflow.cast", "tensorflow.math.is_nan", "tensorflow.get_logger", "tensorflow.equal", "tensorflow.linalg.global_norm", "tensorflow.train.ExponentialMovingAverage", "tensorflow.function", "tensorflow.summary.experimental.set_step", "tensorflow.summary.record_if", "tensorflow.summary.scalar", "tensorflow.distribute.MirroredStrategy" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tianhm/zipline
[ "5343344929558ef42dc6ea75d433218471e91a0d", "5343344929558ef42dc6ea75d433218471e91a0d" ]
[ "zipline/pipeline/factors/factor.py", "zipline/lib/labelarray.py" ]
[ "\"\"\"\nfactor.py\n\"\"\"\nfrom functools import wraps\nfrom operator import attrgetter\nfrom numbers import Number\nfrom math import ceil\n\nfrom numpy import empty_like, inf, nan, where\nfrom scipy.stats import rankdata\n\nfrom zipline.errors import BadPercentileBounds, UnknownRankMethod\nfrom zipline.lib.normalize import naive_grouped_rowwise_apply\nfrom zipline.lib.rank import masked_rankdata_2d, rankdata_1d_descending\nfrom zipline.pipeline.api_utils import restrict_to_dtype\nfrom zipline.pipeline.classifiers import Classifier, Everything, Quantiles\nfrom zipline.pipeline.expression import (\n BadBinaryOperator,\n COMPARISONS,\n is_comparison,\n MATH_BINOPS,\n method_name_for_op,\n NumericalExpression,\n NUMEXPR_MATH_FUNCS,\n UNARY_OPS,\n unary_op_name,\n)\nfrom zipline.pipeline.filters import (\n Filter,\n NumExprFilter,\n PercentileFilter,\n NotNullFilter,\n NullFilter,\n)\nfrom zipline.pipeline.mixins import (\n AliasedMixin,\n CustomTermMixin,\n DownsampledMixin,\n LatestMixin,\n PositiveWindowLengthMixin,\n RestrictedDTypeMixin,\n SingleInputMixin,\n)\nfrom zipline.pipeline.sentinels import NotSpecified, NotSpecifiedType\nfrom zipline.pipeline.term import ComputableTerm, Term\nfrom zipline.utils.functional import with_doc, with_name\nfrom zipline.utils.input_validation import expect_types\nfrom zipline.utils.math_utils import nanmean, nanstd\nfrom zipline.utils.memoize import classlazyval\nfrom zipline.utils.numpy_utils import (\n bool_dtype,\n categorical_dtype,\n coerce_to_dtype,\n datetime64ns_dtype,\n float64_dtype,\n int64_dtype,\n)\n\n\n_RANK_METHODS = frozenset(['average', 'min', 'max', 'dense', 'ordinal'])\n\n\ndef coerce_numbers_to_my_dtype(f):\n \"\"\"\n A decorator for methods whose signature is f(self, other) that coerces\n ``other`` to ``self.dtype``.\n\n This is used to make comparison operations between numbers and `Factor`\n instances work independently of whether the user supplies a float or\n integer literal.\n\n For example, if I write::\n\n my_filter = my_factor > 3\n\n my_factor probably has dtype float64, but 3 is an int, so we want to coerce\n to float64 before doing the comparison.\n \"\"\"\n @wraps(f)\n def method(self, other):\n if isinstance(other, Number):\n other = coerce_to_dtype(self.dtype, other)\n return f(self, other)\n return method\n\n\ndef binop_return_type(op):\n if is_comparison(op):\n return NumExprFilter\n else:\n return NumExprFactor\n\n\ndef binop_return_dtype(op, left, right):\n \"\"\"\n Compute the expected return dtype for the given binary operator.\n\n Parameters\n ----------\n op : str\n Operator symbol, (e.g. 
'+', '-', ...).\n left : numpy.dtype\n Dtype of left hand side.\n right : numpy.dtype\n Dtype of right hand side.\n\n Returns\n -------\n outdtype : numpy.dtype\n The dtype of the result of `left <op> right`.\n \"\"\"\n if is_comparison(op):\n if left != right:\n raise TypeError(\n \"Don't know how to compute {left} {op} {right}.\\n\"\n \"Comparisons are only supported between Factors of equal \"\n \"dtypes.\".format(left=left, op=op, right=right)\n )\n return bool_dtype\n\n elif left != float64_dtype or right != float64_dtype:\n raise TypeError(\n \"Don't know how to compute {left} {op} {right}.\\n\"\n \"Arithmetic operators are only supported between Factors of \"\n \"dtype 'float64'.\".format(\n left=left.name,\n op=op,\n right=right.name,\n )\n )\n return float64_dtype\n\n\ndef binary_operator(op):\n \"\"\"\n Factory function for making binary operator methods on a Factor subclass.\n\n Returns a function, \"binary_operator\" suitable for implementing functions\n like __add__.\n \"\"\"\n # When combining a Factor with a NumericalExpression, we use this\n # attrgetter instance to defer to the commuted implementation of the\n # NumericalExpression operator.\n commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))\n\n @with_doc(\"Binary Operator: '%s'\" % op)\n @with_name(method_name_for_op(op))\n @coerce_numbers_to_my_dtype\n def binary_operator(self, other):\n # This can't be hoisted up a scope because the types returned by\n # binop_return_type aren't defined when the top-level function is\n # invoked in the class body of Factor.\n return_type = binop_return_type(op)\n if isinstance(self, NumExprFactor):\n self_expr, other_expr, new_inputs = self.build_binary_op(\n op, other,\n )\n return return_type(\n \"({left}) {op} ({right})\".format(\n left=self_expr,\n op=op,\n right=other_expr,\n ),\n new_inputs,\n dtype=binop_return_dtype(op, self.dtype, other.dtype),\n )\n elif isinstance(other, NumExprFactor):\n # NumericalExpression overrides ops to correctly handle merging of\n # inputs. 
Look up and call the appropriate reflected operator with\n # ourself as the input.\n return commuted_method_getter(other)(self)\n elif isinstance(other, Term):\n if self is other:\n return return_type(\n \"x_0 {op} x_0\".format(op=op),\n (self,),\n dtype=binop_return_dtype(op, self.dtype, other.dtype),\n )\n return return_type(\n \"x_0 {op} x_1\".format(op=op),\n (self, other),\n dtype=binop_return_dtype(op, self.dtype, other.dtype),\n )\n elif isinstance(other, Number):\n return return_type(\n \"x_0 {op} ({constant})\".format(op=op, constant=other),\n binds=(self,),\n # .dtype access is safe here because coerce_numbers_to_my_dtype\n # will convert any input numbers to numpy equivalents.\n dtype=binop_return_dtype(op, self.dtype, other.dtype)\n )\n raise BadBinaryOperator(op, self, other)\n\n return binary_operator\n\n\ndef reflected_binary_operator(op):\n \"\"\"\n Factory function for making binary operator methods on a Factor.\n\n Returns a function, \"reflected_binary_operator\" suitable for implementing\n functions like __radd__.\n \"\"\"\n assert not is_comparison(op)\n\n @with_name(method_name_for_op(op, commute=True))\n @coerce_numbers_to_my_dtype\n def reflected_binary_operator(self, other):\n\n if isinstance(self, NumericalExpression):\n self_expr, other_expr, new_inputs = self.build_binary_op(\n op, other\n )\n return NumExprFactor(\n \"({left}) {op} ({right})\".format(\n left=other_expr,\n right=self_expr,\n op=op,\n ),\n new_inputs,\n dtype=binop_return_dtype(op, other.dtype, self.dtype)\n )\n\n # Only have to handle the numeric case because in all other valid cases\n # the corresponding left-binding method will be called.\n elif isinstance(other, Number):\n return NumExprFactor(\n \"{constant} {op} x_0\".format(op=op, constant=other),\n binds=(self,),\n dtype=binop_return_dtype(op, other.dtype, self.dtype),\n )\n raise BadBinaryOperator(op, other, self)\n return reflected_binary_operator\n\n\ndef unary_operator(op):\n \"\"\"\n Factory function for making unary operator methods for Factors.\n \"\"\"\n # Only negate is currently supported.\n valid_ops = {'-'}\n if op not in valid_ops:\n raise ValueError(\"Invalid unary operator %s.\" % op)\n\n @with_doc(\"Unary Operator: '%s'\" % op)\n @with_name(unary_op_name(op))\n def unary_operator(self):\n if self.dtype != float64_dtype:\n raise TypeError(\n \"Can't apply unary operator {op!r} to instance of \"\n \"{typename!r} with dtype {dtypename!r}.\\n\"\n \"{op!r} is only supported for Factors of dtype \"\n \"'float64'.\".format(\n op=op,\n typename=type(self).__name__,\n dtypename=self.dtype.name,\n )\n )\n\n # This can't be hoisted up a scope because the types returned by\n # unary_op_return_type aren't defined when the top-level function is\n # invoked.\n if isinstance(self, NumericalExpression):\n return NumExprFactor(\n \"{op}({expr})\".format(op=op, expr=self._expr),\n self.inputs,\n dtype=float64_dtype,\n )\n else:\n return NumExprFactor(\n \"{op}x_0\".format(op=op),\n (self,),\n dtype=float64_dtype,\n )\n return unary_operator\n\n\ndef function_application(func):\n \"\"\"\n Factory function for producing function application methods for Factor\n subclasses.\n \"\"\"\n if func not in NUMEXPR_MATH_FUNCS:\n raise ValueError(\"Unsupported mathematical function '%s'\" % func)\n\n @with_name(func)\n def mathfunc(self):\n if isinstance(self, NumericalExpression):\n return NumExprFactor(\n \"{func}({expr})\".format(func=func, expr=self._expr),\n self.inputs,\n dtype=float64_dtype,\n )\n else:\n return NumExprFactor(\n 
\"{func}(x_0)\".format(func=func),\n (self,),\n dtype=float64_dtype,\n )\n return mathfunc\n\n\n# Decorators for Factor methods.\nif_not_float64_tell_caller_to_use_isnull = restrict_to_dtype(\n dtype=float64_dtype,\n message_template=(\n \"{method_name}() was called on a factor of dtype {received_dtype}.\\n\"\n \"{method_name}() is only defined for dtype {expected_dtype}.\"\n \"To filter missing data, use isnull() or notnull().\"\n )\n)\n\nfloat64_only = restrict_to_dtype(\n dtype=float64_dtype,\n message_template=(\n \"{method_name}() is only defined on Factors of dtype {expected_dtype},\"\n \" but it was called on a Factor of dtype {received_dtype}.\"\n )\n)\n\nFACTOR_DTYPES = frozenset([datetime64ns_dtype, float64_dtype, int64_dtype])\n\n\nclass Factor(RestrictedDTypeMixin, ComputableTerm):\n \"\"\"\n Pipeline API expression producing a numerical or date-valued output.\n\n Factors are the most commonly-used Pipeline term, representing the result\n of any computation producing a numerical result.\n\n Factors can be combined, both with other Factors and with scalar values,\n via any of the builtin mathematical operators (``+``, ``-``, ``*``, etc).\n This makes it easy to write complex expressions that combine multiple\n Factors. For example, constructing a Factor that computes the average of\n two other Factors is simply::\n\n >>> f1 = SomeFactor(...) # doctest: +SKIP\n >>> f2 = SomeOtherFactor(...) # doctest: +SKIP\n >>> average = (f1 + f2) / 2.0 # doctest: +SKIP\n\n Factors can also be converted into :class:`zipline.pipeline.Filter` objects\n via comparison operators: (``<``, ``<=``, ``!=``, ``eq``, ``>``, ``>=``).\n\n There are many natural operators defined on Factors besides the basic\n numerical operators. These include methods identifying missing or\n extreme-valued outputs (isnull, notnull, isnan, notnan), methods for\n normalizing outputs (rank, demean, zscore), and methods for constructing\n Filters based on rank-order properties of results (top, bottom,\n percentile_between).\n \"\"\"\n ALLOWED_DTYPES = FACTOR_DTYPES # Used by RestrictedDTypeMixin\n\n # Dynamically add functions for creating NumExprFactor/NumExprFilter\n # instances.\n clsdict = locals()\n clsdict.update(\n {\n method_name_for_op(op): binary_operator(op)\n # Don't override __eq__ because it breaks comparisons on tuples of\n # Factors.\n for op in MATH_BINOPS.union(COMPARISONS - {'=='})\n }\n )\n clsdict.update(\n {\n method_name_for_op(op, commute=True): reflected_binary_operator(op)\n for op in MATH_BINOPS\n }\n )\n clsdict.update(\n {\n unary_op_name(op): unary_operator(op)\n for op in UNARY_OPS\n }\n )\n\n clsdict.update(\n {\n funcname: function_application(funcname)\n for funcname in NUMEXPR_MATH_FUNCS\n }\n )\n\n __truediv__ = clsdict['__div__']\n __rtruediv__ = clsdict['__rdiv__']\n\n eq = binary_operator('==')\n\n @expect_types(\n mask=(Filter, NotSpecifiedType),\n groupby=(Classifier, NotSpecifiedType),\n )\n @float64_only\n def demean(self, mask=NotSpecified, groupby=NotSpecified):\n \"\"\"\n Construct a Factor that computes ``self`` and subtracts the mean from\n row of the result.\n\n If ``mask`` is supplied, ignore values where ``mask`` returns False\n when computing row means, and output NaN anywhere the mask is False.\n\n If ``groupby`` is supplied, compute by partitioning each row based on\n the values produced by ``groupby``, de-meaning the partitioned arrays,\n and stitching the sub-results back together.\n\n Parameters\n ----------\n mask : zipline.pipeline.Filter, optional\n A Filter defining 
values to ignore when computing means.\n groupby : zipline.pipeline.Classifier, optional\n A classifier defining partitions over which to compute means.\n\n Examples\n --------\n Let ``f`` be a Factor which would produce the following output::\n\n AAPL MSFT MCD BK\n 2017-03-13 1.0 2.0 3.0 4.0\n 2017-03-14 1.5 2.5 3.5 1.0\n 2017-03-15 2.0 3.0 4.0 1.5\n 2017-03-16 2.5 3.5 1.0 2.0\n\n Let ``c`` be a Classifier producing the following output::\n\n AAPL MSFT MCD BK\n 2017-03-13 1 1 2 2\n 2017-03-14 1 1 2 2\n 2017-03-15 1 1 2 2\n 2017-03-16 1 1 2 2\n\n Let ``m`` be a Filter producing the following output::\n\n AAPL MSFT MCD BK\n 2017-03-13 False True True True\n 2017-03-14 True False True True\n 2017-03-15 True True False True\n 2017-03-16 True True True False\n\n Then ``f.demean()`` will subtract the mean from each row produced by\n ``f``.\n\n ::\n\n AAPL MSFT MCD BK\n 2017-03-13 -1.500 -0.500 0.500 1.500\n 2017-03-14 -0.625 0.375 1.375 -1.125\n 2017-03-15 -0.625 0.375 1.375 -1.125\n 2017-03-16 0.250 1.250 -1.250 -0.250\n\n ``f.demean(mask=m)`` will subtract the mean from each row, but means\n will be calculated ignoring values on the diagonal, and NaNs will\n written to the diagonal in the output. Diagonal values are ignored\n because they are the locations where the mask ``m`` produced False.\n\n ::\n\n AAPL MSFT MCD BK\n 2017-03-13 NaN -1.000 0.000 1.000\n 2017-03-14 -0.500 NaN 1.500 -1.000\n 2017-03-15 -0.166 0.833 NaN -0.666\n 2017-03-16 0.166 1.166 -1.333 NaN\n\n ``f.demean(groupby=c)`` will subtract the group-mean of AAPL/MSFT and\n MCD/BK from their respective entries. The AAPL/MSFT are grouped\n together because both assets always produce 1 in the output of the\n classifier ``c``. Similarly, MCD/BK are grouped together because they\n always produce 2.\n\n ::\n\n AAPL MSFT MCD BK\n 2017-03-13 -0.500 0.500 -0.500 0.500\n 2017-03-14 -0.500 0.500 1.250 -1.250\n 2017-03-15 -0.500 0.500 1.250 -1.250\n 2017-03-16 -0.500 0.500 -0.500 0.500\n\n ``f.demean(mask=m, groupby=c)`` will also subtract the group-mean of\n AAPL/MSFT and MCD/BK, but means will be calculated ignoring values on\n the diagonal , and NaNs will be written to the diagonal in the output.\n\n ::\n\n AAPL MSFT MCD BK\n 2017-03-13 NaN 0.000 -0.500 0.500\n 2017-03-14 0.000 NaN 1.250 -1.250\n 2017-03-15 -0.500 0.500 NaN 0.000\n 2017-03-16 -0.500 0.500 0.000 NaN\n\n Notes\n -----\n Mean is sensitive to the magnitudes of outliers. When working with\n factor that can potentially produce large outliers, it is often useful\n to use the ``mask`` parameter to discard values at the extremes of the\n distribution::\n\n >>> base = MyFactor(...) # doctest: +SKIP\n >>> normalized = base.demean(\n ... mask=base.percentile_between(1, 99),\n ... 
) # doctest: +SKIP\n\n ``demean()`` is only supported on Factors of dtype float64.\n\n See Also\n --------\n :meth:`pandas.DataFrame.groupby`\n \"\"\"\n return GroupedRowTransform(\n transform=demean,\n transform_args=(),\n factor=self,\n groupby=groupby,\n dtype=self.dtype,\n missing_value=self.missing_value,\n window_safe=self.window_safe,\n mask=mask,\n )\n\n @expect_types(\n mask=(Filter, NotSpecifiedType),\n groupby=(Classifier, NotSpecifiedType),\n )\n @float64_only\n def zscore(self, mask=NotSpecified, groupby=NotSpecified):\n \"\"\"\n Construct a Factor that Z-Scores each day's results.\n\n The Z-Score of a row is defined as::\n\n (row - row.mean()) / row.stddev()\n\n If ``mask`` is supplied, ignore values where ``mask`` returns False\n when computing row means and standard deviations, and output NaN\n anywhere the mask is False.\n\n If ``groupby`` is supplied, compute by partitioning each row based on\n the values produced by ``groupby``, z-scoring the partitioned arrays,\n and stitching the sub-results back together.\n\n Parameters\n ----------\n mask : zipline.pipeline.Filter, optional\n A Filter defining values to ignore when Z-Scoring.\n groupby : zipline.pipeline.Classifier, optional\n A classifier defining partitions over which to compute Z-Scores.\n\n Returns\n -------\n zscored : zipline.pipeline.Factor\n A Factor producing that z-scores the output of self.\n\n Notes\n -----\n Mean and standard deviation are sensitive to the magnitudes of\n outliers. When working with factor that can potentially produce large\n outliers, it is often useful to use the ``mask`` parameter to discard\n values at the extremes of the distribution::\n\n >>> base = MyFactor(...) # doctest: +SKIP\n >>> normalized = base.zscore(\n ... mask=base.percentile_between(1, 99),\n ... ) # doctest: +SKIP\n\n ``zscore()`` is only supported on Factors of dtype float64.\n\n Examples\n --------\n See :meth:`~zipline.pipeline.factors.Factor.demean` for an in-depth\n example of the semantics for ``mask`` and ``groupby``.\n\n See Also\n --------\n :meth:`pandas.DataFrame.groupby`\n \"\"\"\n return GroupedRowTransform(\n transform=zscore,\n transform_args=(),\n factor=self,\n groupby=groupby,\n dtype=self.dtype,\n missing_value=self.missing_value,\n mask=mask,\n window_safe=True,\n )\n\n def rank(self,\n method='ordinal',\n ascending=True,\n mask=NotSpecified,\n groupby=NotSpecified):\n \"\"\"\n Construct a new Factor representing the sorted rank of each column\n within each row.\n\n Parameters\n ----------\n method : str, {'ordinal', 'min', 'max', 'dense', 'average'}\n The method used to assign ranks to tied elements. See\n `scipy.stats.rankdata` for a full description of the semantics for\n each ranking method. Default is 'ordinal'.\n ascending : bool, optional\n Whether to return sorted rank in ascending or descending order.\n Default is True.\n mask : zipline.pipeline.Filter, optional\n A Filter representing assets to consider when computing ranks.\n If mask is supplied, ranks are computed ignoring any asset/date\n pairs for which `mask` produces a value of False.\n groupby : zipline.pipeline.Classifier, optional\n A classifier defining partitions over which to perform ranking.\n\n Returns\n -------\n ranks : zipline.pipeline.factors.Rank\n A new factor that will compute the ranking of the data produced by\n `self`.\n\n Notes\n -----\n The default value for `method` is different from the default for\n `scipy.stats.rankdata`. 
See that function's documentation for a full\n description of the valid inputs to `method`.\n\n Missing or non-existent data on a given day will cause an asset to be\n given a rank of NaN for that day.\n\n See Also\n --------\n :func:`scipy.stats.rankdata`\n :class:`zipline.pipeline.factors.factor.Rank`\n \"\"\"\n\n if groupby is NotSpecified:\n return Rank(self, method=method, ascending=ascending, mask=mask)\n\n return GroupedRowTransform(\n transform=rankdata if ascending else rankdata_1d_descending,\n transform_args=(method,),\n factor=self,\n groupby=groupby,\n dtype=float64_dtype,\n missing_value=nan,\n mask=mask,\n window_safe=True,\n )\n\n @expect_types(\n target=Term, correlation_length=int, mask=(Filter, NotSpecifiedType),\n )\n def pearsonr(self, target, correlation_length, mask=NotSpecified):\n \"\"\"\n Construct a new Factor that computes rolling pearson correlation\n coefficients between `target` and the columns of `self`.\n\n This method can only be called on factors which are deemed safe for use\n as inputs to other factors. This includes `Returns` and any factors\n created from `Factor.rank` or `Factor.zscore`.\n\n Parameters\n ----------\n target : zipline.pipeline.Term with a numeric dtype\n The term used to compute correlations against each column of data\n produced by `self`. This may be a Factor, a BoundColumn or a Slice.\n If `target` is two-dimensional, correlations are computed\n asset-wise.\n correlation_length : int\n Length of the lookback window over which to compute each\n correlation coefficient.\n mask : zipline.pipeline.Filter, optional\n A Filter describing which assets should have their correlation with\n the target slice computed each day.\n\n Returns\n -------\n correlations : zipline.pipeline.factors.RollingPearson\n A new Factor that will compute correlations between `target` and\n the columns of `self`.\n\n Examples\n --------\n Suppose we want to create a factor that computes the correlation\n between AAPL's 10-day returns and the 10-day returns of all other\n assets, computing each correlation over 30 days. This can be achieved\n by doing the following::\n\n returns = Returns(window_length=10)\n returns_slice = returns[sid(24)]\n aapl_correlations = returns.pearsonr(\n target=returns_slice, correlation_length=30,\n )\n\n This is equivalent to doing::\n\n aapl_correlations = RollingPearsonOfReturns(\n target=sid(24), returns_length=10, correlation_length=30,\n )\n\n See Also\n --------\n :func:`scipy.stats.pearsonr`\n :class:`zipline.pipeline.factors.RollingPearsonOfReturns`\n :meth:`Factor.spearmanr`\n \"\"\"\n from .statistical import RollingPearson\n return RollingPearson(\n base_factor=self,\n target=target,\n correlation_length=correlation_length,\n mask=mask,\n )\n\n @expect_types(\n target=Term, correlation_length=int, mask=(Filter, NotSpecifiedType),\n )\n def spearmanr(self, target, correlation_length, mask=NotSpecified):\n \"\"\"\n Construct a new Factor that computes rolling spearman rank correlation\n coefficients between `target` and the columns of `self`.\n\n This method can only be called on factors which are deemed safe for use\n as inputs to other factors. This includes `Returns` and any factors\n created from `Factor.rank` or `Factor.zscore`.\n\n Parameters\n ----------\n target : zipline.pipeline.Term with a numeric dtype\n The term used to compute correlations against each column of data\n produced by `self`. 
This may be a Factor, a BoundColumn or a Slice.\n If `target` is two-dimensional, correlations are computed\n asset-wise.\n correlation_length : int\n Length of the lookback window over which to compute each\n correlation coefficient.\n mask : zipline.pipeline.Filter, optional\n A Filter describing which assets should have their correlation with\n the target slice computed each day.\n\n Returns\n -------\n correlations : zipline.pipeline.factors.RollingSpearman\n A new Factor that will compute correlations between `target` and\n the columns of `self`.\n\n Examples\n --------\n Suppose we want to create a factor that computes the correlation\n between AAPL's 10-day returns and the 10-day returns of all other\n assets, computing each correlation over 30 days. This can be achieved\n by doing the following::\n\n returns = Returns(window_length=10)\n returns_slice = returns[sid(24)]\n aapl_correlations = returns.spearmanr(\n target=returns_slice, correlation_length=30,\n )\n\n This is equivalent to doing::\n\n aapl_correlations = RollingSpearmanOfReturns(\n target=sid(24), returns_length=10, correlation_length=30,\n )\n\n See Also\n --------\n :func:`scipy.stats.spearmanr`\n :class:`zipline.pipeline.factors.RollingSpearmanOfReturns`\n :meth:`Factor.pearsonr`\n \"\"\"\n from .statistical import RollingSpearman\n return RollingSpearman(\n base_factor=self,\n target=target,\n correlation_length=correlation_length,\n mask=mask,\n )\n\n @expect_types(\n target=Term, regression_length=int, mask=(Filter, NotSpecifiedType),\n )\n def linear_regression(self, target, regression_length, mask=NotSpecified):\n \"\"\"\n Construct a new Factor that performs an ordinary least-squares\n regression predicting the columns of `self` from `target`.\n\n This method can only be called on factors which are deemed safe for use\n as inputs to other factors. This includes `Returns` and any factors\n created from `Factor.rank` or `Factor.zscore`.\n\n Parameters\n ----------\n target : zipline.pipeline.Term with a numeric dtype\n The term to use as the predictor/independent variable in each\n regression. This may be a Factor, a BoundColumn or a Slice. If\n `target` is two-dimensional, regressions are computed asset-wise.\n regression_length : int\n Length of the lookback window over which to compute each\n regression.\n mask : zipline.pipeline.Filter, optional\n A Filter describing which assets should be regressed with the\n target slice each day.\n\n Returns\n -------\n regressions : zipline.pipeline.factors.RollingLinearRegression\n A new Factor that will compute linear regressions of `target`\n against the columns of `self`.\n\n Examples\n --------\n Suppose we want to create a factor that regresses AAPL's 10-day returns\n against the 10-day returns of all other assets, computing each\n regression over 30 days. 
This can be achieved by doing the following::\n\n            returns = Returns(window_length=10)\n            returns_slice = returns[sid(24)]\n            aapl_regressions = returns.linear_regression(\n                target=returns_slice, regression_length=30,\n            )\n\n        This is equivalent to doing::\n\n            aapl_regressions = RollingLinearRegressionOfReturns(\n                target=sid(24), returns_length=10, regression_length=30,\n            )\n\n        See Also\n        --------\n        :func:`scipy.stats.linregress`\n        :class:`zipline.pipeline.factors.RollingLinearRegressionOfReturns`\n        \"\"\"\n        from .statistical import RollingLinearRegression\n        return RollingLinearRegression(\n            dependent=self,\n            independent=target,\n            regression_length=regression_length,\n            mask=mask,\n        )\n\n    @expect_types(\n        min_percentile=(int, float),\n        max_percentile=(int, float),\n        mask=(Filter, NotSpecifiedType),\n        groupby=(Classifier, NotSpecifiedType),\n    )\n    @float64_only\n    def winsorize(self,\n                  min_percentile,\n                  max_percentile,\n                  mask=NotSpecified,\n                  groupby=NotSpecified):\n        \"\"\"\n        Construct a Factor that returns a winsorized row. Winsorizing changes\n        values ranked less than the minimum percentile to the value at the\n        minimum percentile. Similarly, values ranking above the maximum\n        percentile will be changed to the value at the maximum percentile.\n        This is useful for limiting the impact of extreme values.\n\n        If ``mask`` is supplied, ignore values where ``mask`` returns False\n        when winsorizing, and output NaN anywhere the mask is False.\n\n        If ``groupby`` is supplied, compute by partitioning each row based on\n        the values produced by ``groupby``, winsorizing the partitioned arrays,\n        and stitching the sub-results back together.\n\n        Parameters\n        ----------\n        min_percentile: float, int\n            Entries with values at or below this percentile will be replaced\n            with the (len(inp) * min_percentile)th lowest value. If low values\n            should not be clipped, use 0.\n        max_percentile: float, int\n            Entries with values at or above this percentile will be replaced\n            with the (len(inp) * max_percentile)th lowest value. If high\n            values should not be clipped, use 1.\n        mask : zipline.pipeline.Filter, optional\n            A Filter defining values to ignore when winsorizing.\n        groupby : zipline.pipeline.Classifier, optional\n            A classifier defining partitions over which to winsorize.\n\n        Returns\n        -------\n        winsorized : zipline.pipeline.Factor\n            A Factor producing a winsorized version of self.\n\n        Examples\n        --------\n        .. 
code-block:: python\n\n            price = USEquityPricing.close.latest\n            columns={\n                'PRICE': price,\n                'WINSOR_1': price.winsorize(\n                    min_percentile=0.25, max_percentile=0.75\n                ),\n                'WINSOR_2': price.winsorize(\n                    min_percentile=0.50, max_percentile=1.0\n                ),\n                'WINSOR_3': price.winsorize(\n                    min_percentile=0.0, max_percentile=0.5\n                ),\n            }\n\n        Given a pipeline with the columns defined above, the result for a\n        given day could look like:\n\n        ::\n\n                     'PRICE' 'WINSOR_1' 'WINSOR_2' 'WINSOR_3'\n            Asset_1     1        2          4          3\n            Asset_2     2        2          4          3\n            Asset_3     3        3          4          3\n            Asset_4     4        4          4          4\n            Asset_5     5        5          5          4\n            Asset_6     6        5          5          4\n\n        See Also\n        --------\n        :func:`scipy.stats.mstats.winsorize`\n        :meth:`pandas.DataFrame.groupby`\n        \"\"\"\n        if not 0.0 <= min_percentile < max_percentile <= 1.0:\n            raise BadPercentileBounds(\n                min_percentile=min_percentile,\n                max_percentile=max_percentile,\n                upper_bound=1.0,\n            )\n        return GroupedRowTransform(\n            transform=winsorize,\n            transform_args=(min_percentile, max_percentile),\n            factor=self,\n            groupby=groupby,\n            dtype=self.dtype,\n            missing_value=self.missing_value,\n            mask=mask,\n            window_safe=self.window_safe,\n        )\n\n    @expect_types(bins=int, mask=(Filter, NotSpecifiedType))\n    def quantiles(self, bins, mask=NotSpecified):\n        \"\"\"\n        Construct a Classifier computing quantiles of the output of ``self``.\n\n        Every non-NaN data point in the output is labelled with an integer\n        value from 0 to (bins - 1). NaNs are labelled with -1.\n\n        If ``mask`` is supplied, ignore data points in locations for which\n        ``mask`` produces False, and emit a label of -1 at those locations.\n\n        Parameters\n        ----------\n        bins : int\n            Number of bin labels to compute.\n        mask : zipline.pipeline.Filter, optional\n            Mask of values to ignore when computing quantiles.\n\n        Returns\n        -------\n        quantiles : zipline.pipeline.classifiers.Quantiles\n            A Classifier producing integer labels ranging from 0 to (bins - 1).\n        \"\"\"\n        if mask is NotSpecified:\n            mask = self.mask\n        return Quantiles(inputs=(self,), bins=bins, mask=mask)\n\n    @expect_types(mask=(Filter, NotSpecifiedType))\n    def quartiles(self, mask=NotSpecified):\n        \"\"\"\n        Construct a Classifier computing quartiles over the output of ``self``.\n\n        Every non-NaN data point in the output is labelled with a value of\n        either 0, 1, 2, or 3, corresponding to the first, second, third, or\n        fourth quartile over each row. NaN data points are labelled with -1.\n\n        If ``mask`` is supplied, ignore data points in locations for which\n        ``mask`` produces False, and emit a label of -1 at those locations.\n\n        Parameters\n        ----------\n        mask : zipline.pipeline.Filter, optional\n            Mask of values to ignore when computing quartiles.\n\n        Returns\n        -------\n        quartiles : zipline.pipeline.classifiers.Quantiles\n            A Classifier producing integer labels ranging from 0 to 3.\n        \"\"\"\n        return self.quantiles(bins=4, mask=mask)\n\n    @expect_types(mask=(Filter, NotSpecifiedType))\n    def quintiles(self, mask=NotSpecified):\n        \"\"\"\n        Construct a Classifier computing quintile labels on ``self``.\n\n        Every non-NaN data point in the output is labelled with a value of\n        either 0, 1, 2, 3, or 4, corresponding to quintiles over each row. 
NaN data\n points are labelled with -1.\n\n If ``mask`` is supplied, ignore data points in locations for which\n ``mask`` produces False, and emit a label of -1 at those locations.\n\n Parameters\n ----------\n mask : zipline.pipeline.Filter, optional\n Mask of values to ignore when computing quintiles.\n\n Returns\n -------\n quintiles : zipline.pipeline.classifiers.Quantiles\n A Classifier producing integer labels ranging from 0 to 4.\n \"\"\"\n return self.quantiles(bins=5, mask=mask)\n\n @expect_types(mask=(Filter, NotSpecifiedType))\n def deciles(self, mask=NotSpecified):\n \"\"\"\n Construct a Classifier computing decile labels on ``self``.\n\n Every non-NaN data point the output is labelled with a value from 0 to\n 9 corresonding to deciles over each row. NaN data points are labelled\n with -1.\n\n If ``mask`` is supplied, ignore data points in locations for which\n ``mask`` produces False, and emit a label of -1 at those locations.\n\n Parameters\n ----------\n mask : zipline.pipeline.Filter, optional\n Mask of values to ignore when computing deciles.\n\n Returns\n -------\n deciles : zipline.pipeline.classifiers.Quantiles\n A Classifier producing integer labels ranging from 0 to 9.\n \"\"\"\n return self.quantiles(bins=10, mask=mask)\n\n def top(self, N, mask=NotSpecified, groupby=NotSpecified):\n \"\"\"\n Construct a Filter matching the top N asset values of self each day.\n\n If ``groupby`` is supplied, returns a Filter matching the top N asset\n values for each group.\n\n Parameters\n ----------\n N : int\n Number of assets passing the returned filter each day.\n mask : zipline.pipeline.Filter, optional\n A Filter representing assets to consider when computing ranks.\n If mask is supplied, top values are computed ignoring any\n asset/date pairs for which `mask` produces a value of False.\n groupby : zipline.pipeline.Classifier, optional\n A classifier defining partitions over which to perform ranking.\n\n Returns\n -------\n filter : zipline.pipeline.filters.Filter\n \"\"\"\n return self.rank(ascending=False, mask=mask, groupby=groupby) <= N\n\n def bottom(self, N, mask=NotSpecified, groupby=NotSpecified):\n \"\"\"\n Construct a Filter matching the bottom N asset values of self each day.\n\n If ``groupby`` is supplied, returns a Filter matching the bottom N\n asset values for each group.\n\n Parameters\n ----------\n N : int\n Number of assets passing the returned filter each day.\n mask : zipline.pipeline.Filter, optional\n A Filter representing assets to consider when computing ranks.\n If mask is supplied, bottom values are computed ignoring any\n asset/date pairs for which `mask` produces a value of False.\n groupby : zipline.pipeline.Classifier, optional\n A classifier defining partitions over which to perform ranking.\n\n Returns\n -------\n filter : zipline.pipeline.Filter\n \"\"\"\n return self.rank(ascending=True, mask=mask, groupby=groupby) <= N\n\n def percentile_between(self,\n min_percentile,\n max_percentile,\n mask=NotSpecified):\n \"\"\"\n Construct a new Filter representing entries from the output of this\n Factor that fall within the percentile range defined by min_percentile\n and max_percentile.\n\n Parameters\n ----------\n min_percentile : float [0.0, 100.0]\n Return True for assets falling above this percentile in the data.\n max_percentile : float [0.0, 100.0]\n Return True for assets falling below this percentile in the data.\n mask : zipline.pipeline.Filter, optional\n A Filter representing assets to consider when percentile\n calculating thresholds. 
If mask is supplied, percentile cutoffs\n are computed each day using only assets for which ``mask`` returns\n True. Assets for which ``mask`` produces False will produce False\n in the output of this Factor as well.\n\n Returns\n -------\n out : zipline.pipeline.filters.PercentileFilter\n A new filter that will compute the specified percentile-range mask.\n\n See Also\n --------\n zipline.pipeline.filters.filter.PercentileFilter\n \"\"\"\n return PercentileFilter(\n self,\n min_percentile=min_percentile,\n max_percentile=max_percentile,\n mask=mask,\n )\n\n def isnull(self):\n \"\"\"\n A Filter producing True for values where this Factor has missing data.\n\n Equivalent to self.isnan() when ``self.dtype`` is float64.\n Otherwise equivalent to ``self.eq(self.missing_value)``.\n\n Returns\n -------\n filter : zipline.pipeline.filters.Filter\n \"\"\"\n if self.dtype == float64_dtype:\n # Using isnan is more efficient when possible because we can fold\n # the isnan computation with other NumExpr expressions.\n return self.isnan()\n else:\n return NullFilter(self)\n\n def notnull(self):\n \"\"\"\n A Filter producing True for values where this Factor has complete data.\n\n Equivalent to ``~self.isnan()` when ``self.dtype`` is float64.\n Otherwise equivalent to ``(self != self.missing_value)``.\n \"\"\"\n return NotNullFilter(self)\n\n @if_not_float64_tell_caller_to_use_isnull\n def isnan(self):\n \"\"\"\n A Filter producing True for all values where this Factor is NaN.\n\n Returns\n -------\n nanfilter : zipline.pipeline.filters.Filter\n \"\"\"\n return self != self\n\n @if_not_float64_tell_caller_to_use_isnull\n def notnan(self):\n \"\"\"\n A Filter producing True for values where this Factor is not NaN.\n\n Returns\n -------\n nanfilter : zipline.pipeline.filters.Filter\n \"\"\"\n return ~self.isnan()\n\n @if_not_float64_tell_caller_to_use_isnull\n def isfinite(self):\n \"\"\"\n A Filter producing True for values where this Factor is anything but\n NaN, inf, or -inf.\n \"\"\"\n return (-inf < self) & (self < inf)\n\n @classlazyval\n def _downsampled_type(self):\n return DownsampledMixin.make_downsampled_type(Factor)\n\n @classlazyval\n def _aliased_type(self):\n return AliasedMixin.make_aliased_type(Factor)\n\n\nclass NumExprFactor(NumericalExpression, Factor):\n \"\"\"\n Factor computed from a numexpr expression.\n\n Parameters\n ----------\n expr : string\n A string suitable for passing to numexpr. 
All variables in 'expr'\n should be of the form \"x_i\", where i is the index of the corresponding\n factor input in 'binds'.\n binds : tuple\n A tuple of factors to use as inputs.\n\n Notes\n -----\n NumExprFactors are constructed by numerical operators like `+` and `-`.\n Users should rarely need to construct a NumExprFactor directly.\n \"\"\"\n pass\n\n\nclass GroupedRowTransform(Factor):\n \"\"\"\n A Factor that transforms an input factor by applying a row-wise\n shape-preserving transformation on classifier-defined groups of that\n Factor.\n\n This is most often useful for normalization operators like ``zscore`` or\n ``demean`` or for performing ranking using ``rank``.\n\n Parameters\n ----------\n transform : function[ndarray[ndim=1] -> ndarray[ndim=1]]\n Function to apply over each row group.\n factor : zipline.pipeline.Factor\n The factor providing baseline data to transform.\n mask : zipline.pipeline.Filter\n Mask of entries to ignore when calculating transforms.\n groupby : zipline.pipeline.Classifier\n Classifier partitioning ``factor`` into groups to use when calculating\n means.\n transform_args : tuple[hashable]\n Additional positional arguments to forward to ``transform``.\n\n Notes\n -----\n Users should rarely construct instances of this factor directly. Instead,\n they should construct instances via factor normalization methods like\n ``zscore`` and ``demean`` or using ``rank`` with ``groupby``.\n\n See Also\n --------\n zipline.pipeline.factors.Factor.zscore\n zipline.pipeline.factors.Factor.demean\n zipline.pipeline.factors.Factor.rank\n \"\"\"\n window_length = 0\n\n def __new__(cls,\n transform,\n transform_args,\n factor,\n groupby,\n dtype,\n missing_value,\n mask,\n **kwargs):\n\n if mask is NotSpecified:\n mask = factor.mask\n else:\n mask = mask & factor.mask\n\n if groupby is NotSpecified:\n groupby = Everything(mask=mask)\n\n return super(GroupedRowTransform, cls).__new__(\n GroupedRowTransform,\n transform=transform,\n transform_args=transform_args,\n inputs=(factor, groupby),\n missing_value=missing_value,\n mask=mask,\n dtype=dtype,\n **kwargs\n )\n\n def _init(self, transform, transform_args, *args, **kwargs):\n self._transform = transform\n self._transform_args = transform_args\n return super(GroupedRowTransform, self)._init(*args, **kwargs)\n\n @classmethod\n def _static_identity(cls, transform, transform_args, *args, **kwargs):\n return (\n super(GroupedRowTransform, cls)._static_identity(*args, **kwargs),\n transform,\n transform_args,\n )\n\n def _compute(self, arrays, dates, assets, mask):\n data = arrays[0]\n groupby_expr = self.inputs[1]\n if groupby_expr.dtype == int64_dtype:\n group_labels = arrays[1]\n null_label = self.inputs[1].missing_value\n elif groupby_expr.dtype == categorical_dtype:\n # Coerce our LabelArray into an isomorphic array of ints. 
This is\n # necessary because np.where doesn't know about LabelArrays or the\n # void dtype.\n group_labels = arrays[1].as_int_array()\n null_label = arrays[1].missing_value_code\n else:\n raise TypeError(\n \"Unexpected groupby dtype: %s.\" % groupby_expr.dtype\n )\n\n # Make a copy with the null code written to masked locations.\n group_labels = where(mask, group_labels, null_label)\n return where(\n group_labels != null_label,\n naive_grouped_rowwise_apply(\n data=data,\n group_labels=group_labels,\n func=self._transform,\n func_args=self._transform_args,\n out=empty_like(data, dtype=self.dtype),\n ),\n self.missing_value,\n )\n\n @property\n def transform_name(self):\n return self._transform.__name__\n\n def short_repr(self):\n return type(self).__name__ + '(%r)' % self.transform_name\n\n\nclass Rank(SingleInputMixin, Factor):\n \"\"\"\n A Factor representing the row-wise rank data of another Factor.\n\n Parameters\n ----------\n factor : zipline.pipeline.factors.Factor\n The factor on which to compute ranks.\n method : str, {'average', 'min', 'max', 'dense', 'ordinal'}\n The method used to assign ranks to tied elements. See\n `scipy.stats.rankdata` for a full description of the semantics for each\n ranking method.\n\n See Also\n --------\n :func:`scipy.stats.rankdata`\n :class:`Factor.rank`\n\n Notes\n -----\n Most users should call Factor.rank rather than directly construct an\n instance of this class.\n \"\"\"\n window_length = 0\n dtype = float64_dtype\n window_safe = True\n\n def __new__(cls, factor, method, ascending, mask):\n return super(Rank, cls).__new__(\n cls,\n inputs=(factor,),\n method=method,\n ascending=ascending,\n mask=mask,\n )\n\n def _init(self, method, ascending, *args, **kwargs):\n self._method = method\n self._ascending = ascending\n return super(Rank, self)._init(*args, **kwargs)\n\n @classmethod\n def _static_identity(cls, method, ascending, *args, **kwargs):\n return (\n super(Rank, cls)._static_identity(*args, **kwargs),\n method,\n ascending,\n )\n\n def _validate(self):\n \"\"\"\n Verify that the stored rank method is valid.\n \"\"\"\n if self._method not in _RANK_METHODS:\n raise UnknownRankMethod(\n method=self._method,\n choices=set(_RANK_METHODS),\n )\n return super(Rank, self)._validate()\n\n def _compute(self, arrays, dates, assets, mask):\n \"\"\"\n For each row in the input, compute a like-shaped array of per-row\n ranks.\n \"\"\"\n return masked_rankdata_2d(\n arrays[0],\n mask,\n self.inputs[0].missing_value,\n self._method,\n self._ascending,\n )\n\n def __repr__(self):\n return \"{type}({input_}, method='{method}', mask={mask})\".format(\n type=type(self).__name__,\n input_=self.inputs[0],\n method=self._method,\n mask=self.mask,\n )\n\n\nclass CustomFactor(PositiveWindowLengthMixin, CustomTermMixin, Factor):\n '''\n Base class for user-defined Factors.\n\n Parameters\n ----------\n inputs : iterable, optional\n An iterable of `BoundColumn` instances (e.g. USEquityPricing.close),\n describing the data to load and pass to `self.compute`. If this\n argument is not passed to the CustomFactor constructor, we look for a\n class-level attribute named `inputs`.\n outputs : iterable[str], optional\n An iterable of strings which represent the names of each output this\n factor should compute and return. If this argument is not passed to the\n CustomFactor constructor, we look for a class-level attribute named\n `outputs`.\n window_length : int, optional\n Number of rows to pass for each input. 
If this argument is not passed\n        to the CustomFactor constructor, we look for a class-level attribute\n        named `window_length`.\n    mask : zipline.pipeline.Filter, optional\n        A Filter describing the assets on which we should compute each day.\n        Each call to ``CustomFactor.compute`` will only receive assets for\n        which ``mask`` produced True on the day for which compute is being\n        called.\n\n    Notes\n    -----\n    Users implementing their own Factors should subclass CustomFactor and\n    implement a method named `compute` with the following signature:\n\n    .. code-block:: python\n\n        def compute(self, today, assets, out, *inputs):\n            ...\n\n    On each simulation date, ``compute`` will be called with the current date,\n    an array of sids, an output array, and an input array for each expression\n    passed as inputs to the CustomFactor constructor.\n\n    The specific types of the values passed to `compute` are as follows::\n\n        today : np.datetime64[ns]\n            Row label for the last row of all arrays passed as `inputs`.\n        assets : np.array[int64, ndim=1]\n            Column labels for `out` and `inputs`.\n        out : np.array[self.dtype, ndim=1]\n            Output array of the same shape as `assets`. `compute` should write\n            its desired return values into `out`. If multiple outputs are\n            specified, `compute` should write its desired return values into\n            `out.<output_name>` for each output name in `self.outputs`.\n        *inputs : tuple of np.array\n            Raw data arrays corresponding to the values of `self.inputs`.\n\n    ``compute`` functions should expect to be passed NaN values for dates on\n    which no data was available for an asset. This may include dates on which\n    an asset did not yet exist.\n\n    For example, if a CustomFactor requires 10 rows of close price data, and\n    asset A started trading on Monday June 2nd, 2014, then on Tuesday, June\n    3rd, 2014, the column of input data for asset A will have 9 leading NaNs\n    for the preceding days on which data was not yet available.\n\n    Examples\n    --------\n\n    A CustomFactor with pre-declared defaults:\n\n    .. code-block:: python\n\n        class TenDayRange(CustomFactor):\n            \"\"\"\n            Computes the difference between the highest high in the last 10\n            days and the lowest low.\n\n            Pre-declares high and low as default inputs and `window_length` as\n            10.\n            \"\"\"\n\n            inputs = [USEquityPricing.high, USEquityPricing.low]\n            window_length = 10\n\n            def compute(self, today, assets, out, highs, lows):\n                from numpy import nanmin, nanmax\n\n                highest_highs = nanmax(highs, axis=0)\n                lowest_lows = nanmin(lows, axis=0)\n                out[:] = highest_highs - lowest_lows\n\n\n        # Doesn't require passing inputs or window_length because they're\n        # pre-declared as defaults for the TenDayRange class.\n        ten_day_range = TenDayRange()\n\n    A CustomFactor without defaults:\n\n    .. code-block:: python\n\n        class MedianValue(CustomFactor):\n            \"\"\"\n            Computes the median value of an arbitrary single input over an\n            arbitrary window.\n\n            Does not declare any defaults, so values for `window_length` and\n            `inputs` must be passed explicitly on every construction.\n            \"\"\"\n\n            def compute(self, today, assets, out, data):\n                from numpy import nanmedian\n                out[:] = nanmedian(data, axis=0)\n\n        # Values for `inputs` and `window_length` must be passed explicitly to\n        # MedianValue.\n        median_close10 = MedianValue([USEquityPricing.close], window_length=10)\n        median_low15 = MedianValue([USEquityPricing.low], window_length=15)\n\n    A CustomFactor with multiple outputs:\n\n    .. 
code-block:: python\n\n class MultipleOutputs(CustomFactor):\n inputs = [USEquityPricing.close]\n outputs = ['alpha', 'beta']\n window_length = N\n\n def compute(self, today, assets, out, close):\n computed_alpha, computed_beta = some_function(close)\n out.alpha[:] = computed_alpha\n out.beta[:] = computed_beta\n\n # Each output is returned as its own Factor upon instantiation.\n alpha, beta = MultipleOutputs()\n\n # Equivalently, we can create a single factor instance and access each\n # output as an attribute of that instance.\n multiple_outputs = MultipleOutputs()\n alpha = multiple_outputs.alpha\n beta = multiple_outputs.beta\n\n Note: If a CustomFactor has multiple outputs, all outputs must have the\n same dtype. For instance, in the example above, if alpha is a float then\n beta must also be a float.\n '''\n dtype = float64_dtype\n\n def __getattribute__(self, name):\n outputs = object.__getattribute__(self, 'outputs')\n if outputs is NotSpecified:\n return super(CustomFactor, self).__getattribute__(name)\n elif name in outputs:\n return RecarrayField(factor=self, attribute=name)\n else:\n try:\n return super(CustomFactor, self).__getattribute__(name)\n except AttributeError:\n raise AttributeError(\n 'Instance of {factor} has no output named {attr!r}. '\n 'Possible choices are: {choices}.'.format(\n factor=type(self).__name__,\n attr=name,\n choices=self.outputs,\n )\n )\n\n def __iter__(self):\n if self.outputs is NotSpecified:\n raise ValueError(\n '{factor} does not have multiple outputs.'.format(\n factor=type(self).__name__,\n )\n )\n return (RecarrayField(self, attr) for attr in self.outputs)\n\n\nclass RecarrayField(SingleInputMixin, Factor):\n \"\"\"\n A single field from a multi-output factor.\n \"\"\"\n def __new__(cls, factor, attribute):\n return super(RecarrayField, cls).__new__(\n cls,\n attribute=attribute,\n inputs=[factor],\n window_length=0,\n mask=factor.mask,\n dtype=factor.dtype,\n missing_value=factor.missing_value,\n window_safe=factor.window_safe\n )\n\n def _init(self, attribute, *args, **kwargs):\n self._attribute = attribute\n return super(RecarrayField, self)._init(*args, **kwargs)\n\n @classmethod\n def _static_identity(cls, attribute, *args, **kwargs):\n return (\n super(RecarrayField, cls)._static_identity(*args, **kwargs),\n attribute,\n )\n\n def _compute(self, windows, dates, assets, mask):\n return windows[0][self._attribute]\n\n\nclass Latest(LatestMixin, CustomFactor):\n \"\"\"\n Factor producing the most recently-known value of `inputs[0]` on each day.\n\n The `.latest` attribute of DataSet columns returns an instance of this\n Factor.\n \"\"\"\n window_length = 1\n\n def compute(self, today, assets, out, data):\n out[:] = data[-1]\n\n\n# Functions to be passed to GroupedRowTransform. 
These aren't defined inline\n# because the transformation function is part of the instance hash key.\ndef demean(row):\n return row - nanmean(row)\n\n\ndef zscore(row):\n return (row - nanmean(row)) / nanstd(row)\n\n\ndef winsorize(row, min_percentile, max_percentile):\n \"\"\"\n This implementation is based on scipy.stats.mstats.winsorize\n \"\"\"\n a = row.copy()\n num = a.size\n idx = a.argsort()\n if min_percentile > 0:\n lowidx = int(min_percentile * num)\n a[idx[:lowidx]] = a[idx[lowidx]]\n if max_percentile < 1:\n upidx = int(ceil(num * max_percentile))\n # upidx could return as the length of the array, in this case\n # no modification to the right tail is necessary.\n if upidx < num:\n a[idx[upidx:]] = a[idx[upidx - 1]]\n\n return a\n", "\"\"\"\nAn ndarray subclass for working with arrays of strings.\n\"\"\"\nfrom functools import partial, total_ordering\nfrom operator import eq, ne\nimport re\n\nimport numpy as np\nfrom numpy import ndarray\nimport pandas as pd\nfrom toolz import compose\n\nfrom zipline.utils.compat import unicode\nfrom zipline.utils.functional import instance\nfrom zipline.utils.preprocess import preprocess\nfrom zipline.utils.sentinel import sentinel\nfrom zipline.utils.input_validation import (\n coerce,\n expect_kinds,\n expect_types,\n optional,\n)\nfrom zipline.utils.numpy_utils import (\n bool_dtype,\n unsigned_int_dtype_with_size_in_bytes,\n is_object,\n)\nfrom zipline.utils.pandas_utils import ignore_pandas_nan_categorical_warning\n\nfrom ._factorize import (\n factorize_strings,\n factorize_strings_known_categories,\n smallest_uint_that_can_hold,\n)\n\n\ndef compare_arrays(left, right):\n \"Eq check with a short-circuit for identical objects.\"\n return (\n left is right\n or ((left.shape == right.shape) and (left == right).all())\n )\n\n\ndef _make_unsupported_method(name):\n def method(*args, **kwargs):\n raise NotImplementedError(\n \"Method %s is not supported on LabelArrays.\" % name\n )\n method.__name__ = name\n method.__doc__ = \"Unsupported LabelArray Method: %s\" % name\n return method\n\n\nclass MissingValueMismatch(ValueError):\n \"\"\"\n Error raised on attempt to perform operations between LabelArrays with\n mismatched missing_values.\n \"\"\"\n def __init__(self, left, right):\n super(MissingValueMismatch, self).__init__(\n \"LabelArray missing_values don't match:\"\n \" left={}, right={}\".format(left, right)\n )\n\n\nclass CategoryMismatch(ValueError):\n \"\"\"\n Error raised on attempt to perform operations between LabelArrays with\n mismatched category arrays.\n \"\"\"\n def __init__(self, left, right):\n (mismatches,) = np.where(left != right)\n assert len(mismatches), \"Not actually a mismatch!\"\n super(CategoryMismatch, self).__init__(\n \"LabelArray categories don't match:\\n\"\n \"Mismatched Indices: {mismatches}\\n\"\n \"Left: {left}\\n\"\n \"Right: {right}\".format(\n mismatches=mismatches,\n left=left[mismatches],\n right=right[mismatches],\n )\n )\n\n\n_NotPassed = sentinel('_NotPassed')\n\n\nclass LabelArray(ndarray):\n \"\"\"\n An ndarray subclass for working with arrays of strings.\n\n Factorizes the input array into integers, but overloads equality on strings\n to check against the factor label.\n\n Parameters\n ----------\n values : array-like\n Array of values that can be passed to np.asarray with dtype=object.\n missing_value : str\n Scalar value to treat as 'missing' for operations on ``self``.\n categories : list[str], optional\n List of values to use as categories. 
If not supplied, categories will\n be inferred as the unique set of entries in ``values``.\n sort : bool, optional\n Whether to sort categories. If sort is False and categories is\n supplied, they are left in the order provided. If sort is False and\n categories is None, categories will be constructed in a random order.\n\n Attributes\n ----------\n categories : ndarray[str]\n An array containing the unique labels of self.\n reverse_categories : dict[str -> int]\n Reverse lookup table for ``categories``. Stores the index in\n ``categories`` at which each entry each unique entry is found.\n missing_value : str or None\n A sentinel missing value with NaN semantics for comparisons.\n\n Notes\n -----\n Consumers should be cautious when passing instances of LabelArray to numpy\n functions. We attempt to disallow as many meaningless operations as\n possible, but since a LabelArray is just an ndarray of ints with some\n additional metadata, many numpy functions (for example, trigonometric) will\n happily accept a LabelArray and treat its values as though they were\n integers.\n\n In a future change, we may be able to disallow more numerical operations by\n creating a wrapper dtype which doesn't register an implementation for most\n numpy ufuncs. Until that change is made, consumers of LabelArray should\n assume that it is undefined behavior to pass a LabelArray to any numpy\n ufunc that operates on semantically-numerical data.\n\n See Also\n --------\n http://docs.scipy.org/doc/numpy-1.10.0/user/basics.subclassing.html\n \"\"\"\n SUPPORTED_SCALAR_TYPES = (bytes, unicode, type(None))\n SUPPORTED_NON_NONE_SCALAR_TYPES = (bytes, unicode)\n\n @preprocess(\n values=coerce(list, partial(np.asarray, dtype=object)),\n categories=coerce(np.ndarray, list),\n )\n @expect_types(\n values=np.ndarray,\n missing_value=SUPPORTED_SCALAR_TYPES,\n categories=optional(list),\n )\n @expect_kinds(values=(\"O\", \"S\", \"U\"))\n def __new__(cls,\n values,\n missing_value,\n categories=None,\n sort=True):\n\n # Numpy's fixed-width string types aren't very efficient. 
Working with\n # object arrays is faster than bytes or unicode arrays in almost all\n # cases.\n if not is_object(values):\n values = values.astype(object)\n\n if categories is None:\n codes, categories, reverse_categories = factorize_strings(\n values.ravel(),\n missing_value=missing_value,\n sort=sort,\n )\n else:\n codes, categories, reverse_categories = (\n factorize_strings_known_categories(\n values.ravel(),\n categories=categories,\n missing_value=missing_value,\n sort=sort,\n )\n )\n categories.setflags(write=False)\n\n return cls.from_codes_and_metadata(\n codes=codes.reshape(values.shape),\n categories=categories,\n reverse_categories=reverse_categories,\n missing_value=missing_value,\n )\n\n @classmethod\n def from_codes_and_metadata(cls,\n codes,\n categories,\n reverse_categories,\n missing_value):\n \"\"\"\n Rehydrate a LabelArray from the codes and metadata.\n\n Parameters\n ----------\n codes : np.ndarray[integral]\n The codes for the label array.\n categories : np.ndarray[object]\n The unique string categories.\n reverse_categories : dict[str, int]\n The mapping from category to its code-index.\n missing_value : any\n The value used to represent missing data.\n \"\"\"\n ret = codes.view(type=cls, dtype=np.void)\n ret._categories = categories\n ret._reverse_categories = reverse_categories\n ret._missing_value = missing_value\n return ret\n\n @classmethod\n def from_categorical(cls, categorical, missing_value=None):\n \"\"\"\n Create a LabelArray from a pandas categorical.\n\n Parameters\n ----------\n categorical : pd.Categorical\n The categorical object to convert.\n missing_value : bytes, unicode, or None, optional\n The missing value to use for this LabelArray.\n\n Returns\n -------\n la : LabelArray\n The LabelArray representation of this categorical.\n \"\"\"\n return LabelArray(\n categorical,\n missing_value,\n categorical.categories,\n )\n\n @property\n def categories(self):\n # This is a property because it should be immutable.\n return self._categories\n\n @property\n def reverse_categories(self):\n # This is a property because it should be immutable.\n return self._reverse_categories\n\n @property\n def missing_value(self):\n # This is a property because it should be immutable.\n return self._missing_value\n\n @property\n def missing_value_code(self):\n return self.reverse_categories[self.missing_value]\n\n def has_label(self, value):\n return value in self.reverse_categories\n\n def __array_finalize__(self, obj):\n \"\"\"\n Called by Numpy after array construction.\n\n There are three cases where this can happen:\n\n 1. Someone tries to directly construct a new array by doing::\n\n >>> ndarray.__new__(LabelArray, ...) # doctest: +SKIP\n\n In this case, obj will be None. We treat this as an error case and\n fail.\n\n 2. Someone (most likely our own __new__) does::\n\n >>> other_array.view(type=LabelArray) # doctest: +SKIP\n\n In this case, `self` will be the new LabelArray instance, and\n ``obj` will be the array on which ``view`` is being called.\n\n The caller of ``obj.view`` is responsible for setting category\n metadata on ``self`` after we exit.\n\n 3. Someone creates a new LabelArray by slicing an existing one.\n\n In this case, ``obj`` will be the original LabelArray. 
We're\n responsible for copying over the parent array's category metadata.\n \"\"\"\n if obj is None:\n raise TypeError(\n \"Direct construction of LabelArrays is not supported.\"\n )\n\n # See docstring for an explanation of when these will or will not be\n # set.\n self._categories = getattr(obj, 'categories', None)\n self._reverse_categories = getattr(obj, 'reverse_categories', None)\n self._missing_value = getattr(obj, 'missing_value', None)\n\n def as_int_array(self):\n \"\"\"\n Convert self into a regular ndarray of ints.\n\n This is an O(1) operation. It does not copy the underlying data.\n \"\"\"\n return self.view(\n type=ndarray,\n dtype=unsigned_int_dtype_with_size_in_bytes(self.itemsize),\n )\n\n def as_string_array(self):\n \"\"\"\n Convert self back into an array of strings.\n\n This is an O(N) operation.\n \"\"\"\n return self.categories[self.as_int_array()]\n\n def as_categorical(self, name=None):\n \"\"\"\n Coerce self into a pandas categorical.\n\n This is only defined on 1D arrays, since that's all pandas supports.\n \"\"\"\n if len(self.shape) > 1:\n raise ValueError(\"Can't convert a 2D array to a categorical.\")\n\n with ignore_pandas_nan_categorical_warning():\n return pd.Categorical.from_codes(\n self.as_int_array(),\n # We need to make a copy because pandas >= 0.17 fails if this\n # buffer isn't writeable.\n self.categories.copy(),\n ordered=False,\n name=name,\n )\n\n def as_categorical_frame(self, index, columns, name=None):\n \"\"\"\n Coerce self into a pandas DataFrame of Categoricals.\n \"\"\"\n if len(self.shape) != 2:\n raise ValueError(\n \"Can't convert a non-2D LabelArray into a DataFrame.\"\n )\n\n expected_shape = (len(index), len(columns))\n if expected_shape != self.shape:\n raise ValueError(\n \"Can't construct a DataFrame with provided indices:\\n\\n\"\n \"LabelArray shape is {actual}, but index and columns imply \"\n \"that shape should be {expected}.\".format(\n actual=self.shape,\n expected=expected_shape,\n )\n )\n\n return pd.Series(\n index=pd.MultiIndex.from_product([index, columns]),\n data=self.ravel().as_categorical(name=name),\n ).unstack()\n\n def __setitem__(self, indexer, value):\n self_categories = self.categories\n\n if isinstance(value, LabelArray):\n value_categories = value.categories\n if compare_arrays(self_categories, value_categories):\n return super(LabelArray, self).__setitem__(indexer, value)\n else:\n raise CategoryMismatch(self_categories, value_categories)\n\n elif isinstance(value, self.SUPPORTED_SCALAR_TYPES):\n value_code = self.reverse_categories.get(value, -1)\n if value_code < 0:\n raise ValueError(\"%r is not in LabelArray categories.\" % value)\n self.as_int_array()[indexer] = value_code\n else:\n raise NotImplementedError(\n \"Setting into a LabelArray with a value of \"\n \"type {type} is not yet supported.\".format(\n type=type(value).__name__,\n ),\n )\n\n def __setslice__(self, i, j, sequence):\n \"\"\"\n This method was deprecated in Python 2.0. It predates slice objects,\n but Python 2.7.11 still uses it if you implement it, which ndarray\n does. 
In newer Pythons, __setitem__ is always called, but we need to\n manually forward in py2.\n \"\"\"\n self.__setitem__(slice(i, j), sequence)\n\n def __getitem__(self, indexer):\n result = super(LabelArray, self).__getitem__(indexer)\n if result.ndim:\n # Result is still a LabelArray, so we can just return it.\n return result\n\n # Result is a scalar value, which will be an instance of np.void.\n # Map it back to one of our category entries.\n index = result.view(\n unsigned_int_dtype_with_size_in_bytes(self.itemsize),\n )\n return self.categories[index]\n\n def is_missing(self):\n \"\"\"\n Like isnan, but checks for locations where we store missing values.\n \"\"\"\n return (\n self.as_int_array() == self.reverse_categories[self.missing_value]\n )\n\n def not_missing(self):\n \"\"\"\n Like ~isnan, but checks for locations where we store missing values.\n \"\"\"\n return (\n self.as_int_array() != self.reverse_categories[self.missing_value]\n )\n\n def _equality_check(op):\n \"\"\"\n Shared code for __eq__ and __ne__, parameterized on the actual\n comparison operator to use.\n \"\"\"\n def method(self, other):\n\n if isinstance(other, LabelArray):\n self_mv = self.missing_value\n other_mv = other.missing_value\n if self_mv != other_mv:\n raise MissingValueMismatch(self_mv, other_mv)\n\n self_categories = self.categories\n other_categories = other.categories\n if not compare_arrays(self_categories, other_categories):\n raise CategoryMismatch(self_categories, other_categories)\n\n return (\n op(self.as_int_array(), other.as_int_array())\n & self.not_missing()\n & other.not_missing()\n )\n\n elif isinstance(other, ndarray):\n # Compare to ndarrays as though we were an array of strings.\n # This is fairly expensive, and should generally be avoided.\n return op(self.as_string_array(), other) & self.not_missing()\n\n elif isinstance(other, self.SUPPORTED_SCALAR_TYPES):\n i = self._reverse_categories.get(other, -1)\n return op(self.as_int_array(), i) & self.not_missing()\n\n return op(super(LabelArray, self), other)\n return method\n\n __eq__ = _equality_check(eq)\n __ne__ = _equality_check(ne)\n del _equality_check\n\n def view(self, dtype=_NotPassed, type=_NotPassed):\n if type is _NotPassed and dtype not in (_NotPassed, self.dtype):\n raise TypeError(\"Can't view LabelArray as another dtype.\")\n\n # The text signature on ndarray.view makes it look like the default\n # values for dtype and type are `None`, but passing None explicitly has\n # different semantics than not passing an arg at all, so we reconstruct\n # the kwargs dict here to simulate the args not being passed at all.\n kwargs = {}\n if dtype is not _NotPassed:\n kwargs['dtype'] = dtype\n if type is not _NotPassed:\n kwargs['type'] = type\n return super(LabelArray, self).view(**kwargs)\n\n # In general, we support resizing, slicing, and reshaping methods, but not\n # numeric methods.\n SUPPORTED_NDARRAY_METHODS = frozenset([\n 'base',\n 'compress',\n 'copy',\n 'data',\n 'diagonal',\n 'dtype',\n 'flat',\n 'flatten',\n 'item',\n 'itemset',\n 'itemsize',\n 'nbytes',\n 'ndim',\n 'ravel',\n 'repeat',\n 'reshape',\n 'resize',\n 'setflags',\n 'shape',\n 'size',\n 'squeeze',\n 'strides',\n 'swapaxes',\n 'take',\n 'trace',\n 'transpose',\n 'view'\n ])\n PUBLIC_NDARRAY_METHODS = frozenset([\n s for s in dir(ndarray) if not s.startswith('_')\n ])\n\n # Generate failing wrappers for all unsupported methods.\n locals().update(\n {\n method: _make_unsupported_method(method)\n for method in PUBLIC_NDARRAY_METHODS - SUPPORTED_NDARRAY_METHODS\n }\n
)\n\n def __repr__(self):\n repr_lines = repr(self.as_string_array()).splitlines()\n repr_lines[0] = repr_lines[0].replace('array(', 'LabelArray(', 1)\n repr_lines[-1] = repr_lines[-1].rsplit(',', 1)[0] + ')'\n # The extra spaces here account for the difference in length between\n # 'array(' and 'LabelArray('.\n return '\\n '.join(repr_lines)\n\n def empty_like(self, shape):\n \"\"\"\n Make an empty LabelArray with the same categories as ``self``, filled\n with ``self.missing_value``.\n \"\"\"\n return type(self).from_codes_and_metadata(\n codes=np.full(\n shape,\n self.reverse_categories[self.missing_value],\n dtype=unsigned_int_dtype_with_size_in_bytes(self.itemsize),\n ),\n categories=self.categories,\n reverse_categories=self.reverse_categories,\n missing_value=self.missing_value,\n )\n\n def map_predicate(self, f):\n \"\"\"\n Map a function from str -> bool element-wise over ``self``.\n\n ``f`` will be applied exactly once to each non-missing unique value in\n ``self``. Missing values will always return False.\n \"\"\"\n # Functions passed to this are of type str -> bool. Don't ever call\n # them on None, which is the only non-str value we ever store in\n # categories.\n if self.missing_value is None:\n def f_to_use(x):\n return False if x is None else f(x)\n else:\n f_to_use = f\n\n # Call f on each unique value in our categories.\n results = np.vectorize(f_to_use, otypes=[bool_dtype])(self.categories)\n\n # missing_value should produce False no matter what\n results[self.reverse_categories[self.missing_value]] = False\n\n # unpack the results from each unique value into their corresponding\n # locations in our indices.\n return results[self.as_int_array()]\n\n def map(self, f):\n \"\"\"\n Map a function from str -> str element-wise over ``self``.\n\n ``f`` will be applied exactly once to each non-missing unique value in\n ``self``. Missing values will always map to ``self.missing_value``.\n \"\"\"\n # f() should only return None if None is our missing value.\n if self.missing_value is None:\n allowed_outtypes = self.SUPPORTED_SCALAR_TYPES\n else:\n allowed_outtypes = self.SUPPORTED_NON_NONE_SCALAR_TYPES\n\n def f_to_use(x,\n missing_value=self.missing_value,\n otypes=allowed_outtypes):\n\n # Don't call f on the missing value; those locations don't exist\n # semantically. We return _sortable_sentinel rather than None\n # because the np.unique call below sorts the categories array,\n # which raises an error on Python 3 because None and str aren't\n # comparable.\n if x == missing_value:\n return _sortable_sentinel\n\n ret = f(x)\n\n if not isinstance(ret, otypes):\n raise TypeError(\n \"LabelArray.map expected function {f} to return a string\"\n \" or None, but got {type} instead.\\n\"\n \"Value was {value}.\".format(\n f=f.__name__,\n type=type(ret).__name__,\n value=ret,\n )\n )\n\n if ret == missing_value:\n return _sortable_sentinel\n\n return ret\n\n new_categories_with_duplicates = (\n np.vectorize(f_to_use, otypes=[object])(self.categories)\n )\n\n # If f() maps multiple inputs to the same output, then we can end up\n # with the same code duplicated multiple times. Compress the categories\n # by running them through np.unique, and then use the reverse lookup\n # table to compress codes as well.\n new_categories, bloated_inverse_index = np.unique(\n new_categories_with_duplicates,\n return_inverse=True\n )\n\n if new_categories[0] is _sortable_sentinel:\n # f_to_use returns _sortable_sentinel for locations that should be\n # missing values in our output.
Since np.unique returns the uniques\n # in sorted order, and since _sortable_sentinel sorts before any\n # string, we only need to check the first array entry.\n new_categories[0] = self.missing_value\n\n # `reverse_index` will always be a 64 bit integer even if we can hold a\n # smaller array.\n reverse_index = bloated_inverse_index.astype(\n smallest_uint_that_can_hold(len(new_categories))\n )\n new_codes = np.take(reverse_index, self.as_int_array())\n\n return self.from_codes_and_metadata(\n new_codes,\n new_categories,\n dict(zip(new_categories, range(len(new_categories)))),\n missing_value=self.missing_value,\n )\n\n def startswith(self, prefix):\n \"\"\"\n Element-wise startswith.\n\n Parameters\n ----------\n prefix : str\n\n Returns\n -------\n matches : np.ndarray[bool]\n An array with the same shape as self indicating whether each\n element of self started with ``prefix``.\n \"\"\"\n return self.map_predicate(lambda elem: elem.startswith(prefix))\n\n def endswith(self, suffix):\n \"\"\"\n Elementwise endswith.\n\n Parameters\n ----------\n suffix : str\n\n Returns\n -------\n matches : np.ndarray[bool]\n An array with the same shape as self indicating whether each\n element of self ended with ``suffix``.\n \"\"\"\n return self.map_predicate(lambda elem: elem.endswith(suffix))\n\n def has_substring(self, substring):\n \"\"\"\n Elementwise contains.\n\n Parameters\n ----------\n substring : str\n\n Returns\n -------\n matches : np.ndarray[bool]\n An array with the same shape as self indicating whether each\n element of self contained ``substring``.\n \"\"\"\n return self.map_predicate(lambda elem: substring in elem)\n\n @preprocess(pattern=coerce(from_=(bytes, unicode), to=re.compile))\n def matches(self, pattern):\n \"\"\"\n Elementwise regex match.\n\n Parameters\n ----------\n pattern : str or compiled regex\n\n Returns\n -------\n matches : np.ndarray[bool]\n An array with the same shape as self indicating whether each\n element of self was matched by ``pattern``.\n \"\"\"\n return self.map_predicate(compose(bool, pattern.match))\n\n # These types all implement an O(N) __contains__, so pre-emptively\n # coerce to `set`.\n @preprocess(container=coerce((list, tuple, np.ndarray), set))\n def element_of(self, container):\n \"\"\"\n Check if each element of self is an element of ``container``.\n\n Parameters\n ----------\n container : object\n An object implementing a __contains__ to call on each element of\n ``self``.\n\n Returns\n -------\n is_contained : np.ndarray[bool]\n An array with the same shape as self indicating whether each\n element of self was an element of ``container``.\n \"\"\"\n return self.map_predicate(container.__contains__)\n\n\n@instance # This makes _sortable_sentinel a singleton instance.\n@total_ordering\nclass _sortable_sentinel(object):\n \"\"\"Dummy object that sorts before any other python object.\n \"\"\"\n def __eq__(self, other):\n return self is other\n\n def __lt__(self, other):\n return True\n" ]
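A quick, self-contained sketch of the winsorize helper recorded in the code field above may be useful: it pins the lowest min_percentile and highest (1 - max_percentile) fractions of a row to the nearest kept value. This is an illustrative NumPy rewrite, not the packaged zipline function; the name winsorize_sketch and the sample row are invented for the example.

import numpy as np

# Illustrative rewrite of the winsorize logic from the record above:
# values below the min_percentile cut are raised to the lowest kept
# value, and values above the max_percentile cut are lowered to the
# highest kept value.
def winsorize_sketch(row, min_percentile, max_percentile):
    a = np.asarray(row, dtype=float).copy()
    num = a.size
    idx = a.argsort()
    if min_percentile > 0:
        lowidx = int(min_percentile * num)
        a[idx[:lowidx]] = a[idx[lowidx]]
    if max_percentile < 1:
        upidx = int(np.ceil(num * max_percentile))
        if upidx < num:  # upidx == num leaves the right tail untouched
            a[idx[upidx:]] = a[idx[upidx - 1]]
    return a

print(winsorize_sketch([1, 2, 3, 4, 100], 0.0, 0.8))  # -> [1. 2. 3. 4. 4.]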
[ [ "numpy.empty_like", "numpy.where" ], [ "numpy.vectorize", "numpy.where", "pandas.MultiIndex.from_product", "numpy.unique" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
yangkevin2/emnlp2020-stream-beam-semantic
[ "130cc7bac1b9555c4522816daa0a33528250b503", "130cc7bac1b9555c4522816daa0a33528250b503" ]
[ "seq2seq/geoqueries/attention/util.py", "seq2tree/atis/attention/main.py" ]
[ "import random\nimport math\nfrom random import randint\nimport pickle as pkl\nimport numpy as np\nimport torch\nimport tree\nfrom operator import itemgetter\n\nrandom.seed(1)\nclass SymbolsManager():\n def __init__(self, whether_add_special_tags):\n self.symbol2idx = {}\n self.idx2symbol = {}\n self.vocab_size = 0\n self.whether_add_special_tags = whether_add_special_tags\n if whether_add_special_tags:\n # start symbol = 0\n self.add_symbol('<S>')\n # end symbol = 1\n self.add_symbol('<E>')\n # UNK symbol = 2\n self.add_symbol('<U>')\n\n def add_symbol(self,s):\n if s not in self.symbol2idx:\n self.symbol2idx[s] = self.vocab_size\n self.idx2symbol[self.vocab_size] = s\n self.vocab_size = self.vocab_size + 1\n return self.symbol2idx[s]\n\n def get_symbol_idx(self, s):\n if s not in self.symbol2idx:\n if self.whether_add_special_tags:\n return self.symbol2idx['<U>']\n else:\n print(\"this should never be reached (always add <U>\")\n return 0\n return self.symbol2idx[s]\n\n def get_idx_symbol(self, idx):\n if idx not in self.idx2symbol:\n return '<U>'\n return self.idx2symbol[idx]\n\n def init_from_file(self, fn, min_freq, max_vocab_size):\n print(\"loading vocabulary file: {}\\n\".format(fn))\n with open(fn, \"r\") as f:\n for line in f:\n l_list = line.strip().split('\\t')\n c = int(l_list[1])\n if c >= min_freq:\n self.add_symbol(l_list[0])\n if self.vocab_size >= max_vocab_size:\n break\n\n def get_symbol_idx_for_list(self,l):\n r = []\n for i in range(len(l)):\n r.append(self.get_symbol_idx(l[i]))\n return r\n\nclass MinibatchLoader():\n def __init__(self, opt, mode, using_gpu):\n data = pkl.load( open(\"{}/{}.pkl\".format(opt.data_dir, mode), \"rb\" ) )\n if len(data) % opt.batch_size != 0:\n n = len(data)\n for i in range(len(data)%opt.batch_size):\n data.insert(n-i-1, data[n-i-1])\n self.enc_batch_list = []\n self.enc_len_batch_list = []\n self.dec_batch_list = []\n p = 0\n while p + opt.batch_size <= len(data):\n # build encoder matrix\n max_len = len(data[p + opt.batch_size - 1][0])\n m_text = torch.zeros((opt.batch_size, max_len + 2), dtype=torch.long)\n if using_gpu:\n m_text = m_text.cuda()\n enc_len_list = []\n # add <S>\n m_text[:,0] = 0\n for i in range(opt.batch_size):\n w_list = data[p + i][0]\n # reversed order\n for j in range(len(w_list)):\n #print(max_len+2)\n m_text[i][j+1] = w_list[len(w_list) - j -1]\n #m_text[i][j+1] = w_list[j]\n # -- add <E> (for encoder, we need dummy <E> at the end)\n for j in range(len(w_list)+1, max_len+2):\n m_text[i][j] = 1\n enc_len_list.append(len(w_list)+2)\n self.enc_batch_list.append(m_text)\n self.enc_len_batch_list.append(enc_len_list)\n # build decoder matrix\n max_len = -1\n for i in range(opt.batch_size):\n w_list = data[p+i][1]\n if len(w_list) > max_len:\n max_len = len(w_list)\n m_text = torch.zeros((opt.batch_size, max_len + 2), dtype=torch.long)\n if using_gpu:\n m_text = m_text.cuda()\n # add <S>\n m_text[:,0] = 0\n for i in range(opt.batch_size):\n w_list = data[p+i][1]\n for j in range(len(w_list)):\n m_text[i][j+1] = w_list[j]\n # add <E>\n m_text[i][len(w_list)+1] = 1\n self.dec_batch_list.append(m_text)\n p += opt.batch_size\n\n self.num_batch = len(self.enc_batch_list)\n assert(len(self.enc_batch_list) == len(self.dec_batch_list))\n\n def random_batch(self):\n p = randint(0,self.num_batch-1)\n return self.enc_batch_list[p], self.enc_len_batch_list[p], self.dec_batch_list[p]\n\n def all_batch(self):\n r = []\n for p in range(self.num_batch):\n r.append([self.enc_batch_list[p], self.enc_len_batch_list[p], 
self.dec_batch_list[p]])\n return r\n\ndef convert_to_tree(r_list, i_left, i_right, form_manager):\n t = tree.Tree()\n level = 0\n left = -1\n for i in range(i_left, i_right):\n if r_list[i] == form_manager.get_symbol_idx('('):\n if level == 0:\n left = i\n level = level + 1\n elif r_list[i] == form_manager.get_symbol_idx(')'):\n #print(\"closing\")\n level = level -1\n if level == 0:\n if i == left+1:\n c = r_list[i]\n else:\n c = convert_to_tree(r_list, left + 1, i, form_manager)\n #print(\"tree add\")\n t.add_child(c)\n elif level == 0:\n #print(\"child\")\n t.add_child(r_list[i])\n return t\n\ndef norm_tree(r_list, form_manager):\n #print(\"starting norm tree\")\n #print(r_list)\n #test = convert_to_tree(r_list, 0, len(r_list), form_manager)\n #print(\"test\")\n #print(test)\n q = [convert_to_tree(r_list, 0, len(r_list), form_manager)]\n #print(\"after convert\")\n head = 0\n #for t in q:\n while head < len(q):\n #print(\"head; {}, len q: {}\\n\".format(head, len(q)))\n t = q[head]\n #print('string')\n #print(t.to_string())\n #print('num')\n #print(t.num_children)\n #print(form_manager.get_symbol_idx('and')) = 6\n #print(form_manager.get_symbol_idx('or')) =53\n # if this level is \"and/or\" operator\n #print('children')\n #print(t.children)\n if (t.children[0] == form_manager.get_symbol_idx('and')) or (t.children[0] == form_manager.get_symbol_idx('or')):\n # sort the following subchildren\n #k = {}\n k = []\n for i in range(1, len(t.children)):\n if isinstance(t.children[i], tree.Tree):\n #print(\"tree inside and/or if statement\")\n #print(t.children[i].to_string())\n #print('tree child ', t.children[i].to_string())\n #k[t.children[i].to_string()] = i\n k.append((t.children[i].to_string(), i))\n else:\n #print(\"not a tree child\")\n #print('reg child ', str(t.children[i]))\n #k[str(t.children[i])] = i\n k.append((str(t.children[i]), i))\n sorted_t_dict = []\n #print('len k ', len(k))\n k.sort(key=itemgetter(0))\n #for key1 in sorted(k):\n for key1 in k:\n sorted_t_dict.append(t.children[key1[1]])\n #print(len(t.children))\n #print(len(sorted_t_dict))\n #print(\"print sorted\")\n #print(sorted(k))\n #print(sorted_t_dict)\n #print(t.to_string())\n #print(len(t.children))\n #print(t.num_children)\n #print('len ', len(sorted_t_dict))\n #print('dict ', sorted_t_dict)\n #print('num children ', t.num_children)\n for i in range(t.num_children-1):\n #print('i ', i)\n t.children[i+1] = \\\n sorted_t_dict[i]\n # add children to q\n for i in range(len(t.children)):\n if isinstance(t.children[i], tree.Tree):\n #print(\"this is a tree: {}\".format(t.children[i].to_string()))\n q.append(t.children[i])\n\n head = head + 1\n return q[0]\n\n\ndef is_all_same(c1, c2):\n if len(c1) == len(c2):\n all_same = True\n for j in range(len(c1)):\n if c1[j] != c2[j]:\n all_same = False\n break\n return all_same\n else:\n return False\n\ndef compute_accuracy(candidate_list, reference_list):\n if len(candidate_list) != len(reference_list):\n print(\"candidate list has length {}, reference list has length {}\\n\".format(len(candidate_list), len(reference_list)))\n\n len_min = min(len(candidate_list), len(reference_list))\n c = 0\n for i in range(len_min):\n print(candidate_list[i])\n print(reference_list[i])\n if is_all_same(candidate_list[i], reference_list[i]):\n print(\"above was all same\")\n c = c+1\n return c/float(len_min)\n\ndef compute_tree_accuracy(candidate_list_, reference_list_, form_manager):\n candidate_list = []\n for i in range(len(candidate_list_)):\n #print(\"candidate\\n\\n\")\n 
candidate_list.append(norm_tree(candidate_list_[i], form_manager).to_list(form_manager))\n reference_list = []\n for i in range(len(reference_list_)):\n print(\"reference\\n\\n\")\n reference_list.append(norm_tree(reference_list_[i], form_manager).to_list(form_manager))\n return compute_accuracy(candidate_list, reference_list)\n", "import argparse\nimport time\nimport pickle as pkl\nimport util\nimport os\nimport time\nimport numpy as np\nfrom tree import Tree\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport torch.nn.functional as F\nfrom torch import optim\nimport random\n\nclass LSTM(nn.Module):\n def __init__(self, opt):\n super(LSTM, self).__init__()\n self.opt = opt\n self.i2h = nn.Linear(opt.rnn_size, 4 * opt.rnn_size)\n self.h2h = nn.Linear(opt.rnn_size, 4*opt.rnn_size)\n if opt.dropoutrec > 0:\n self.dropout = nn.Dropout(opt.dropoutrec)\n\n def forward(self, x, prev_c, prev_h):\n gates = self.i2h(x) \\\n + self.h2h(prev_h)\n ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)\n ingate = F.sigmoid(ingate)\n forgetgate = F.sigmoid(forgetgate)\n cellgate = F.tanh(cellgate)\n outgate = F.sigmoid(outgate)\n if self.opt.dropoutrec > 0:\n cellgate = self.dropout(cellgate)\n cy = (forgetgate * prev_c) + (ingate * cellgate)\n hy = outgate * F.tanh(cy) # n_b x hidden_dim\n return cy, hy\n\nclass Dec_LSTM(nn.Module):\n def __init__(self, opt):\n super(Dec_LSTM, self).__init__()\n self.opt = opt\n self.i2h = nn.Linear(2*opt.rnn_size, 4 * opt.rnn_size)\n self.h2h = nn.Linear(opt.rnn_size, 4*opt.rnn_size)\n if opt.dropoutrec > 0:\n self.dropout = nn.Dropout(opt.dropoutrec)\n\n def forward(self, x, prev_c, prev_h, parent_h):\n input_cat = torch.cat((x, parent_h), 1)\n gates = self.i2h(input_cat) \\\n + self.h2h(prev_h)\n ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)\n ingate = F.sigmoid(ingate)\n forgetgate = F.sigmoid(forgetgate)\n cellgate = F.tanh(cellgate)\n outgate = F.sigmoid(outgate)\n if self.opt.dropoutrec > 0:\n cellgate = self.dropout(cellgate)\n cy = (forgetgate * prev_c) + (ingate * cellgate)\n hy = outgate * F.tanh(cy) # n_b x hidden_dim\n return cy, hy\n\nclass EncoderRNN(nn.Module):\n def __init__(self, opt, input_size):\n super(EncoderRNN, self).__init__()\n self.opt = opt\n self.hidden_size = opt.rnn_size\n self.embedding = nn.Embedding(input_size, self.hidden_size)\n self.lstm = LSTM(self.opt)\n if opt.dropout > 0:\n self.dropout = nn.Dropout(opt.dropout)\n\n def forward(self, input_src, prev_c, prev_h):\n src_emb = self.embedding(input_src) # batch_size x src_length x emb_size\n if self.opt.dropout > 0:\n src_emb = self.dropout(src_emb)\n prev_cy, prev_hy = self.lstm(src_emb, prev_c, prev_h)\n return prev_cy, prev_hy\n\nclass DecoderRNN(nn.Module):\n def __init__(self, opt, input_size):\n super(DecoderRNN, self).__init__()\n self.opt = opt\n self.hidden_size = opt.rnn_size\n self.embedding = nn.Embedding(input_size, self.hidden_size)\n self.lstm = Dec_LSTM(self.opt)\n if opt.dropout > 0:\n self.dropout = nn.Dropout(opt.dropout)\n\n def forward(self, input_src, prev_c, prev_h, parent_h):\n src_emb = self.embedding(input_src) # batch_size x src_length x emb_size\n if self.opt.dropout > 0:\n src_emb = self.dropout(src_emb)\n prev_cy, prev_hy = self.lstm(src_emb, prev_c, prev_h, parent_h)\n return prev_cy, prev_hy\n\nclass AttnUnit(nn.Module):\n def __init__(self, opt, output_size):\n super(AttnUnit, self).__init__()\n self.opt = opt\n self.hidden_size = opt.rnn_size\n\n self.linear_att = nn.Linear(2*self.hidden_size, 
self.hidden_size)\n self.linear_out = nn.Linear(self.hidden_size, output_size)\n if opt.dropout > 0:\n self.dropout = nn.Dropout(opt.dropout)\n\n self.softmax = nn.Softmax(dim=1)\n self.logsoftmax = nn.LogSoftmax(dim=1)\n\n def forward(self, enc_s_top, dec_s_top):\n # (batch*length*hidden) * (batch * hidden * 1) = (batch*length*1)\n #print(\"enc_s_top: {}\\n\".format(enc_s_top.size()))\n #print(\"dec_s_top: {}\\n\".format(dec_s_top.size()))\n dot = torch.bmm(enc_s_top, dec_s_top.unsqueeze(2))\n #dot = torch.legacy.nn.MM()((enc_s_top, torch.legacy.nn.View(opt.rnn_size,1).setNumInputDims(0)(dec_s_top)))\n #print(\"dot size: {}\\n\".format(dot.size()))\n attention = self.softmax(dot.squeeze(2)).unsqueeze(2)\n #print(\"attention size: {}\\n\".format(attention.size()))\n\n #(batch*length*H)^T * (batch*length*1) = (batch*H*1)\n enc_attention = torch.bmm(enc_s_top.permute(0,2,1), attention)\n hid = F.tanh(self.linear_att(torch.cat((enc_attention.squeeze(2),dec_s_top), 1)))\n h2y_in = hid\n if self.opt.dropout > 0:\n h2y_in = self.dropout(h2y_in)\n h2y = self.linear_out(h2y_in)\n pred = self.logsoftmax(h2y)\n return pred\n\ndef eval_training(opt, train_loader, encoder, decoder, attention_decoder, encoder_optimizer, decoder_optimizer, attention_decoder_optimizer, criterion, using_gpu, word_manager, form_manager):\n encoder_optimizer.zero_grad()\n decoder_optimizer.zero_grad()\n attention_decoder_optimizer.zero_grad()\n enc_batch, enc_len_batch, dec_tree_batch = train_loader.random_batch()\n enc_max_len = enc_batch.size(1)\n\n enc_outputs = torch.zeros((enc_batch.size(0), enc_max_len, encoder.hidden_size), requires_grad=True)\n if using_gpu:\n enc_outputs = enc_outputs.cuda()\n enc_s = {}\n for j in range(opt.enc_seq_length + 1):\n enc_s[j] = {}\n\n dec_s = {}\n for i in range(opt.dec_seq_length + 1):\n dec_s[i] = {}\n for j in range(opt.dec_seq_length + 1):\n dec_s[i][j] = {}\n\n for i in range(1, 3):\n enc_s[0][i] = torch.zeros((opt.batch_size, opt.rnn_size), dtype=torch.float, requires_grad=True)\n if using_gpu:\n enc_s[0][i] = enc_s[0][i].cuda()\n\n for i in range(enc_max_len):\n enc_s[i+1][1], enc_s[i+1][2] = encoder(enc_batch[:,i], enc_s[i][1], enc_s[i][2])\n enc_outputs[:, i, :] = enc_s[i+1][2]\n\n # tree decode\n queue_tree = {}\n for i in range(1, opt.batch_size+1):\n queue_tree[i] = []\n #string1 = dec_tree_batch[i-1].to_string()\n #print(string1)\n queue_tree[i].append({\"tree\" : dec_tree_batch[i-1], \"parent\": 0, \"child_index\": 1})\n loss = 0\n cur_index, max_index = 1,1\n dec_batch = {}\n #print(queue_tree[1][0][\"tree\"].to_string());exit()\n while (cur_index <= max_index):\n #print(cur_index)\n # build dec_batch for cur_index\n max_w_len = -1\n batch_w_list = []\n for i in range(1, opt.batch_size+1):\n w_list = []\n if (cur_index <= len(queue_tree[i])):\n t = queue_tree[i][cur_index - 1][\"tree\"]\n for ic in range (t.num_children):\n #print(\"children \")\n #print(ic +1)\n if isinstance(t.children[ic], Tree):\n #print(\"appending\")\n w_list.append(3)\n queue_tree[i].append({\"tree\" : t.children[ic], \"parent\" : cur_index, \"child_index\": ic + 1})\n else:\n w_list.append(t.children[ic])\n if len(queue_tree[i]) > max_index:\n max_index = len(queue_tree[i])\n if len(w_list) > max_w_len:\n max_w_len = len(w_list)\n batch_w_list.append(w_list)\n dec_batch[cur_index] = torch.zeros((opt.batch_size, max_w_len + 2), dtype=torch.long)\n for i in range(opt.batch_size):\n w_list = batch_w_list[i]\n if len(w_list) > 0:\n for j in range(len(w_list)):\n dec_batch[cur_index][i][j+1] = 
w_list[j]\n # add <S>, <E>\n if cur_index == 1:\n dec_batch[cur_index][i][0] = 0\n else:\n dec_batch[cur_index][i][0] = form_manager.get_symbol_idx('(')\n dec_batch[cur_index][i][len(w_list) + 1] = 1\n #print(dec_batch[cur_index])\n # initialize first decoder unit hidden state (zeros)\n if using_gpu:\n dec_batch[cur_index] = dec_batch[cur_index].cuda()\n # initialize using encoding results\n for j in range(1, 3):\n dec_s[cur_index][0][j] = torch.zeros((opt.batch_size, opt.rnn_size), dtype=torch.float, requires_grad=True)\n if using_gpu:\n dec_s[cur_index][0][j] = dec_s[cur_index][0][j].cuda()\n\n if cur_index == 1:\n for i in range(opt.batch_size):\n dec_s[1][0][1][i, :] = enc_s[enc_len_batch[i]][1][i, :]\n dec_s[1][0][2][i, :] = enc_s[enc_len_batch[i]][2][i, :]\n\n else:\n for i in range(1, opt.batch_size+1):\n if (cur_index <= len(queue_tree[i])):\n par_index = queue_tree[i][cur_index - 1][\"parent\"]\n child_index = queue_tree[i][cur_index - 1][\"child_index\"]\n #print(\"parent child\")\n #print(par_index)\n #print(child_index)\n dec_s[cur_index][0][1][i-1,:] = \\\n dec_s[par_index][child_index][1][i-1,:]\n dec_s[cur_index][0][2][i-1,:] = dec_s[par_index][child_index][2][i-1,:]\n #loss = 0\n #prev_c, prev_h = dec_s[cur_index, 0, 0,:,:], dec_s[cur_index, 0, 1,:,:]\n #pred_matrix = np.ndarray((20, dec_batch[cur_index].size(1)-1), dtype=object)\n gold_string = \" \"\n parent_h = dec_s[cur_index][0][2]\n for i in range(dec_batch[cur_index].size(1) - 1):\n #print(i)\n dec_s[cur_index][i+1][1], dec_s[cur_index][i+1][2] = decoder(dec_batch[cur_index][:,i], dec_s[cur_index][i][1], dec_s[cur_index][i][2], parent_h)\n pred = attention_decoder(enc_outputs, dec_s[cur_index][i+1][2])\n #print(dec_batch[cur_index][:,i+1])\n #pred_max = pred.argmax(1)\n #pred_ints = [int(p) for p in pred_max]\n #gold = dec_batch[cur_index][:,i+1]\n #gold_ints = [int(p) for p in gold]\n ##print(gold_ints)\n ##print(\"prediction:\")\n ##print(pred_max)\n ##print(dec_batch[cur_index][:,i+1])\n ##pred_strings = [form_manager.get_idx_symbol(int(p)) for p in pred_max]\n ##gold_strings = [form_manager.get_idx_symbol(int(p)) for p in dec_batch[cur_index][:,i+1]]\n #gold_string += form_manager.get_idx_symbol(int(dec_batch[cur_index][0,i+1]))\n #gold_string += \" \"\n ##print(\"i: \")\n ##print(i)\n ##print(pred_strings)\n #print(gold_strings)\n #pred_matrix[:,i] = pred_strings\n #pred, prev_c, prev_h = decoder(dec_batch[cur_index][:,i], dec_s[cur_index, i, 0, :,:], dec_s[cur_index, i, 1, :, :]);\n #dec_s[cur_index, i+1, 0,:,:], dec_s[cur_index, i+1, 1,:,:] = prev_c.clone(), prev_h.clone()\n loss += criterion(pred, dec_batch[cur_index][:,i+1])\n #print(\"start\")\n #print(gold_string)\n #print(\"between\")\n #print(dec_batch[cur_index][0, i+1])\n #print(\"end\")\n\n cur_index = cur_index + 1\n #input_string = [form_manager.get_idx_symbol(int(p)) for p in enc_batch[0,:]]\n #print(\"===========\\n\")\n #print(\"input string: {}\\n\".format(input_string))\n #print(\"predicted string: {}\\n\".format(pred_matrix[0,:]))\n #print(\"===========\\n\")\n\n loss = loss / opt.batch_size\n loss.backward()\n torch.nn.utils.clip_grad_value_(encoder.parameters(),opt.grad_clip)\n torch.nn.utils.clip_grad_value_(decoder.parameters(),opt.grad_clip)\n torch.nn.utils.clip_grad_value_(attention_decoder.parameters(),opt.grad_clip)\n encoder_optimizer.step()\n decoder_optimizer.step()\n attention_decoder_optimizer.step()\n #print(\"end eval training \\n \")\n #print(\"=====================\\n\")\n return loss\n\n\ndef main(opt):\n 
random.seed(opt.seed)\n np.random.seed(opt.seed)\n torch.manual_seed(opt.seed)\n managers = pkl.load( open(\"{}/map.pkl\".format(opt.data_dir), \"rb\" ) )\n word_manager, form_manager = managers\n using_gpu = False\n if opt.gpuid > -1:\n using_gpu = True\n encoder = EncoderRNN(opt, word_manager.vocab_size)\n decoder = DecoderRNN(opt, form_manager.vocab_size)\n attention_decoder = AttnUnit(opt, form_manager.vocab_size)\n if using_gpu:\n encoder = encoder.cuda()\n decoder = decoder.cuda()\n attention_decoder = attention_decoder.cuda()\n # init parameters\n for name, param in encoder.named_parameters():\n if param.requires_grad:\n init.uniform_(param, -opt.init_weight, opt.init_weight)\n for name, param in decoder.named_parameters():\n if param.requires_grad:\n init.uniform_(param, -opt.init_weight, opt.init_weight)\n for name, param in attention_decoder.named_parameters():\n if param.requires_grad:\n init.uniform_(param, -opt.init_weight, opt.init_weight)\n \"\"\"model_parameters = filter(lambda p: p.requires_grad, encoder.parameters())\n params_encoder = sum([np.prod(p.size()) for p in model_parameters])\n model_parameters = filter(lambda p: p.requires_grad, decoder.parameters())\n params_decoder = sum([np.prod(p.size()) for p in model_parameters])\n model_parameters = filter(lambda p: p.requires_grad, attention_decoder.parameters())\n params_attention_decoder = sum([np.prod(p.size()) for p in model_parameters])\n print(params_encoder + params_decoder+ params_attention_decoder);exit()\"\"\"\n # 926255 as in DL\n\n ##-- load data\n train_loader = util.MinibatchLoader(opt, 'train', using_gpu)\n\n if not os.path.exists(opt.checkpoint_dir):\n os.makedirs(opt.checkpoint_dir)\n\n ##-- start training\n step = 0\n epoch = 0\n optim_state = {\"learningRate\" : opt.learning_rate, \"alpha\" : opt.decay_rate}\n # default to rmsprop\n if opt.opt_method == 0:\n print(\"using RMSprop\")\n encoder_optimizer = optim.RMSprop(encoder.parameters(), lr=optim_state[\"learningRate\"], alpha=optim_state[\"alpha\"])\n decoder_optimizer = optim.RMSprop(decoder.parameters(), lr=optim_state[\"learningRate\"], alpha=optim_state[\"alpha\"])\n attention_decoder_optimizer = optim.RMSprop(attention_decoder.parameters(), lr=optim_state[\"learningRate\"], alpha=optim_state[\"alpha\"])\n criterion = nn.NLLLoss(size_average=False, ignore_index=0)\n\n print(\"Starting training.\")\n encoder.train()\n decoder.train()\n attention_decoder.train()\n iterations = opt.max_epochs * train_loader.num_batch\n start_time = time.time()\n restarted = False\n # TODO revert back after tests\n #iterations = 2\n for i in range(iterations):\n epoch = i // train_loader.num_batch\n train_loss = eval_training(opt, train_loader, encoder, decoder, attention_decoder, encoder_optimizer, decoder_optimizer, attention_decoder_optimizer, criterion, using_gpu, word_manager, form_manager)\n #exponential learning rate decay\n if opt.opt_method == 0:\n if i % train_loader.num_batch == 0 and opt.learning_rate_decay < 1:\n if epoch >= opt.learning_rate_decay_after:\n decay_factor = opt.learning_rate_decay\n optim_state[\"learningRate\"] = optim_state[\"learningRate\"] * decay_factor #decay it\n for param_group in encoder_optimizer.param_groups:\n param_group['lr'] = optim_state[\"learningRate\"]\n for param_group in decoder_optimizer.param_groups:\n param_group['lr'] = optim_state[\"learningRate\"]\n for param_group in attention_decoder_optimizer.param_groups:\n param_group['lr'] = optim_state[\"learningRate\"]\n if (epoch == opt.restart) and not restarted:\n 
restarted = True\n optim_state[\"learningRate\"] = opt.learning_rate\n for param_group in encoder_optimizer.param_groups:\n param_group['lr'] = optim_state[\"learningRate\"]\n param_group['momentum'] = 0\n for param_group in decoder_optimizer.param_groups:\n param_group['lr'] = optim_state[\"learningRate\"]\n param_group['momentum'] = 0\n for param_group in attention_decoder_optimizer.param_groups:\n param_group['lr'] = optim_state[\"learningRate\"]\n param_group['momentum'] = 0\n\n\n if i % opt.print_every == 0:\n end_time = time.time()\n print(\"{}/{}, train_loss = {}, time since last print = {}\".format( i, iterations, train_loss, (end_time - start_time)/60))\n start_time = time.time()\n\n #on last iteration\n if i == iterations -1:\n checkpoint = {}\n checkpoint[\"encoder\"] = encoder\n checkpoint[\"decoder\"] = decoder\n checkpoint[\"attention_decoder\"] = attention_decoder\n checkpoint[\"opt\"] = opt\n checkpoint[\"i\"] = i\n checkpoint[\"epoch\"] = epoch\n torch.save(checkpoint, \"{}/model_seq2seq\".format(opt.checkpoint_dir))\n\n if train_loss != train_loss:\n print('loss is NaN. This usually indicates a bug.')\n break\n\nif __name__ == \"__main__\":\n start = time.time()\n main_arg_parser = argparse.ArgumentParser(description=\"parser\")\n main_arg_parser.add_argument('-gpuid', type=int, default=0, help='which gpu to use. -1 = use CPU')\n main_arg_parser.add_argument('-data_dir', type=str, default='../data/', help='data path')\n main_arg_parser.add_argument('-seed',type=int,default=123,help='torch manual random number generator seed')\n main_arg_parser.add_argument('-checkpoint_dir',type=str, default= 'checkpoint_dir', help='output directory where checkpoints get written')\n main_arg_parser.add_argument('-savefile',type=str, default='save',help='filename to autosave the checkpont to. Will be inside checkpoint_dir/')\n main_arg_parser.add_argument('-print_every',type=int, default=2000,help='how many steps/minibatches between printing out the loss')\n main_arg_parser.add_argument('-rnn_size', type=int,default=200, help='size of LSTM internal state')\n main_arg_parser.add_argument('-num_layers', type=int, default=1, help='number of layers in the LSTM')\n main_arg_parser.add_argument('-dropout',type=float, default=0.3,help='dropout for regularization, used after each RNN hidden layer. 0 = no dropout')\n main_arg_parser.add_argument('-dropoutrec',type=float,default=0.3,help='dropout for regularization, used after each c_i. 
0 = no dropout')\n main_arg_parser.add_argument('-enc_seq_length',type=int, default=60,help='number of timesteps to unroll for')\n main_arg_parser.add_argument('-dec_seq_length',type=int, default=220,help='number of timesteps to unroll for')\n main_arg_parser.add_argument('-batch_size',type=int, default=20,help='number of sequences to train on in parallel')\n #main_arg_parser.add_argument('-batch_size',type=int, default=2,help='number of sequences to train on in parallel')\n main_arg_parser.add_argument('-max_epochs',type=int, default=130,help='number of full passes through the training data')\n main_arg_parser.add_argument('-opt_method', type=int,default=0,help='optimization method: 0-rmsprop 1-sgd')\n main_arg_parser.add_argument('-learning_rate',type=float, default=0.007,help='learning rate')\n main_arg_parser.add_argument('-init_weight',type=float, default=0.08,help='initialization weight')\n main_arg_parser.add_argument('-learning_rate_decay',type=float, default=0.98,help='learning rate decay')\n main_arg_parser.add_argument('-learning_rate_decay_after',type=int, default=5,help='in number of epochs, when to start decaying the learning rate')\n main_arg_parser.add_argument('-restart',type=int, default=-1,help='in number of epochs, when to restart the optimization')\n main_arg_parser.add_argument('-decay_rate',type=float, default=0.95,help='decay rate for rmsprop')\n main_arg_parser.add_argument('-grad_clip',type=int, default=5,help='clip gradients at this value')\n\n args = main_arg_parser.parse_args()\n main(args)\n end = time.time()\n print(\"total time: {} minutes\\n\".format((end - start)/60))\n" ]
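One detail of the util.py file in the record above that rewards a concrete example: norm_tree sorts the children of commutative and/or nodes by their string form, so logically equivalent orderings produce identical trees before exact-match accuracy is computed. Below is a stand-in using nested lists instead of the repo's tree.Tree class (which this row does not include); the operator and leaf names are invented.

# Nested-list stand-in for norm_tree's canonicalization: children of
# commutative operators are ordered by their string form so that
# equivalent trees compare equal.
def normalize(node):
    if not isinstance(node, list):
        return node
    children = [normalize(child) for child in node[1:]]
    if node[0] in ('and', 'or'):
        children.sort(key=str)
    return [node[0]] + children

a = ['and', ['p', 'x'], ['q', 'y']]
b = ['and', ['q', 'y'], ['p', 'x']]
assert normalize(a) == normalize(b)
print(normalize(b))  # ['and', ['p', 'x'], ['q', 'y']]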
[ [ "torch.zeros" ], [ "torch.nn.Softmax", "torch.nn.NLLLoss", "torch.nn.LogSoftmax", "torch.nn.Dropout", "torch.nn.init.uniform_", "numpy.random.seed", "torch.cat", "torch.zeros", "torch.manual_seed", "torch.nn.Embedding", "torch.nn.Linear", "torch.nn.functional.sigmoid", "torch.nn.functional.tanh" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jejjohnson/2019_rbig_rs
[ "00df5c623d55895e0b43a4130bb6c601fae84890", "00df5c623d55895e0b43a4130bb6c601fae84890", "00df5c623d55895e0b43a4130bb6c601fae84890" ]
[ "src/features/preprocessing.py", "src/experiments/drought/compare_smadi.py", "src/experiments/spatemp/entropy_earth.py" ]
[ "from typing import Tuple, Optional\n\nimport pandas as pd\nimport xarray as xr\nfrom sklearn.preprocessing import StandardScaler\n\nLEVELS = [\"time\", \"lat\", \"lon\"]\n\n# @task # get reference cube\ndef get_reference_cube(data: xr.DataArray) -> pd.DataFrame:\n \"\"\"Wrapper Function to get reference cube\"\"\"\n return data.to_dataframe().dropna().reorder_levels(LEVELS)\n\n\ndef get_common_indices(\n reference_df: pd.DataFrame, density_df: pd.DataFrame\n) -> Tuple[pd.DataFrame, pd.DataFrame]:\n idx = density_df.index.intersection(reference_df.index)\n return reference_df.loc[idx, :], density_df.loc[idx, :]\n\n\ndef standardizer_data(\n X: pd.DataFrame, Y: Optional[pd.DataFrame] = None\n) -> Tuple[pd.DataFrame, pd.DataFrame]:\n \"\"\"Standardize the data\"\"\"\n normalizer = StandardScaler(with_mean=True, with_std=True)\n\n # standardize X values\n X = normalizer.fit_transform(X)\n # X = pd.DataFrame(data=X_values, index=X.index, columns=X.columns)\n\n # standardize Y Values\n Y = normalizer.fit_transform(Y)\n # Y = pd.DataFrame(data=Y_values, index=Y.index, columns=Y.columns)\n\n return X, Y\n\n\n# ----------------------------------------------------------\n# Matching Temporal Resolutions\n# ----------------------------------------------------------\n\n# TODO: Check TommyLee Scripts\n# https://github.com/tommylees112/esowc_notes/blob/master/src/preprocessing_utils.py\n# TODO: Get Union TimeSlice\n", "import sys, os\nfrom pyprojroot import here\nimport logging\nimport pathlib\n\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO)\nlogger = logging.getLogger()\n\nPATH = pathlib.Path(str(here()))\n# root = here(project_files=[\".here\"])\nsys.path.append(str(here()))\n\n\nimport argparse\nimport numpy as np\n\n# drought tools\nfrom src.data.drought.loader import DataLoader\nfrom src.features.drought.build_features import (\n get_cali_geometry,\n mask_datacube,\n remove_climatology,\n get_density_cubes,\n get_common_elements_many,\n)\nimport pandas as pd\nfrom sklearn.preprocessing import StandardScaler\nfrom src.models.train_models import get_similarity_scores\nfrom src.models.similarity import univariate_stats\nfrom tqdm import tqdm\nfrom src.features.utils import subset_indices\nfrom scipy import stats\nfrom src.experiments.utils import dict_product\nimport itertools\n\nRES_PATH = PATH.joinpath(\"data/drought/results/compare\")\n\n\ndef main(args):\n\n # get save name\n SAVE_NAME = RES_PATH.joinpath(\n args.save + f\"_t{args.temporal}_s{args.spatial}_c{args.compare}_smadi.csv\"\n )\n SMOKE_NAME = RES_PATH.joinpath(\n args.save + f\"_t{args.temporal}_s{args.spatial}_c{args.compare}_smadi_sm.csv\"\n )\n\n # Load data\n logger.info(\"Loading datacube...\")\n drought_cube = DataLoader().load_data(args.region, args.sampling)\n\n # get cali geometry\n logger.info(\"Getting shapefile...\")\n if args.region in [\"conus\"]:\n shape_file = get_cali_geometry()\n else:\n raise ValueError(\"Unrecognized region.\")\n\n # subset datacube with cali\n logger.info(f\"Masking dataset with {args.region} shapefile.\")\n drought_cube = mask_datacube(drought_cube, shape_file)\n\n # do interpolation\n logger.info(f\"Interpolating time dims with {args.interp_method} method\")\n drought_cube = drought_cube.interpolate_na(dim=\"time\", method=args.interp_method)\n\n # Remove climatology\n logger.info(f\"Removing climatology\")\n drought_cube, _ = remove_climatology(drought_cube)\n\n # drought_years\n drought_years = {\n \"2010\": False,\n \"2011\": False,\n \"2012\": True,\n \"2013\": False,\n \"2014\": 
True,\n \"2015\": True,\n }\n # # MI elements\n # variables_names = [\"VOD\", \"NDVI\", \"LST\", \"SM\"]\n\n # ========================\n # Experimental Parameters\n # ========================\n parameters = {}\n parameters[\"cubes\"] = list(drought_cube.groupby(\"time.year\"))\n parameters[\"temporal\"] = np.arange(1, args.temporal + 1)\n parameters[\"spatial\"] = np.arange(1, args.spatial + 1)\n\n parameters = list(dict_product(parameters))\n\n results_df_single = pd.DataFrame()\n if args.smoke_test:\n\n iparams = parameters[0]\n # extract density cubes\n vod_df, lst_df, ndvi_df, sm_df = get_density_cubes(\n iparams[\"cubes\"][1], iparams[\"spatial\"], iparams[\"temporal\"]\n )\n\n # get common elements\n dfs = get_common_elements_many([vod_df, lst_df, ndvi_df, sm_df])\n\n variables = {\"VOD\": dfs[0], \"NDVI\": dfs[1], \"SM\": dfs[2], \"LST\": dfs[3]}\n\n # get unique permutations\n res = set(\n tuple(\n frozenset(sub)\n for sub in set(\n list(itertools.permutations(variables.keys(), args.compare))\n )\n )\n )\n var_set1 = pd.concat(\n [variables[\"NDVI\"], variables[\"SM\"], variables[\"LST\"]], axis=1\n )\n var_set2 = pd.concat(\n [variables[\"NDVI\"], variables[\"SM\"], variables[\"LST\"], variables[\"VOD\"],],\n axis=1,\n )\n # print(var_set1.shape, var_set2.shape)\n\n # Univariate statistics (pearson, spearman, kendall's tau)\n # uni_stats = univariate_stats(X_norm, Y_norm)\n logger.info(f\"Subsetting data\")\n idx = subset_indices(var_set1, subsample=1_000)\n\n # standardize data\n logger.info(f\"Standardizing Data...\")\n X_norm = StandardScaler().fit_transform(var_set1.iloc[idx, :])\n Y_norm = StandardScaler().fit_transform(var_set2.iloc[idx, :])\n logger.info(f\"Data inputs: {X_norm.shape},{Y_norm.shape}\")\n\n # entropy, total correlation\n logger.info(f\"Getting similarity scores...\")\n multivar_stats = get_similarity_scores(X_norm, Y_norm, verbose=1)\n\n # get H and TC\n results_df_single = results_df_single.append(\n {\n \"year\": iparams[\"cubes\"][0],\n \"drought\": drought_years[str(iparams[\"cubes\"][0])],\n \"samples\": X_norm.shape[0],\n \"temporal\": iparams[\"temporal\"],\n \"variable1\": \"SMADI\",\n \"variable2\": \"SMADI+\",\n **multivar_stats,\n },\n ignore_index=True,\n )\n\n results_df_single.to_csv(SMOKE_NAME)\n else:\n with tqdm(parameters) as params:\n for iparams in params:\n # Update progress bar\n postfix = dict(\n Year=f\"{iparams['cubes'][0]}\",\n Temporal=f\"{iparams['temporal']}\",\n Spatial=f\"{iparams['spatial']}\",\n )\n params.set_postfix(postfix)\n\n # extract density cubes\n vod_df, lst_df, ndvi_df, sm_df = get_density_cubes(\n iparams[\"cubes\"][1], iparams[\"spatial\"], iparams[\"temporal\"]\n )\n\n # get common elements\n dfs = get_common_elements_many([vod_df, lst_df, ndvi_df, sm_df])\n\n variables = {\"VOD\": dfs[0], \"NDVI\": dfs[1], \"SM\": dfs[2], \"LST\": dfs[3]}\n\n # get unique permutations\n var_set1 = pd.concat(\n [variables[\"NDVI\"], variables[\"SM\"], variables[\"LST\"]], axis=1\n )\n var_set2 = pd.concat(\n [\n variables[\"NDVI\"],\n variables[\"SM\"],\n variables[\"LST\"],\n variables[\"VOD\"],\n ],\n axis=1,\n )\n # print(var_set1.shape, var_set2.shape)\n\n # logger.info(f\"Subsetting data\")\n if args.subsample < var_set1.values.shape[0]:\n idx = subset_indices(var_set1.values, subsample=args.subsample)\n var_set1 = var_set1.iloc[idx, :]\n var_set2 = var_set2.iloc[idx, :]\n # standardize data\n # logger.info(f\"Standardizing Data...\")\n X_norm = StandardScaler().fit_transform(var_set1.values)\n Y_norm = 
StandardScaler().fit_transform(var_set2.values)\n    # logger.info(f\"Data inputs: {X_norm.shape},{Y_norm.shape}\")\n\n    # entropy, total correlation\n    # logger.info(f\"Getting similarity scores...\")\n    multivar_stats = get_similarity_scores(X_norm, Y_norm, verbose=0)\n\n    # get H and TC\n    results_df_single = results_df_single.append(\n        {\n            \"year\": iparams[\"cubes\"][0],\n            \"drought\": drought_years[str(iparams[\"cubes\"][0])],\n            \"samples\": X_norm.shape[0],\n            \"temporal\": iparams[\"temporal\"],\n            \"variable1\": \"SMADI\",\n            \"variable2\": \"SMADI+\",\n            **multivar_stats,\n        },\n        ignore_index=True,\n    )\n\n    results_df_single.to_csv(SAVE_NAME)\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description=\"Arguments for Drought Experiment.\")\n\n    # DataCube Arguments\n    parser.add_argument(\n        \"--region\", default=\"conus\", type=str, help=\"The region for the drought events.\"\n    )\n    parser.add_argument(\n        \"--sampling\",\n        default=\"14D\",\n        type=str,\n        help=\"The sampling scheme for drought events.\",\n    )\n\n    # PreProcessing Arguments\n    parser.add_argument(\n        \"--interp_method\", default=\"linear\", type=str, help=\"Interpolation method.\"\n    )\n\n    # Climatology Arguments\n    parser.add_argument(\n        \"--climatology_window\",\n        default=2,\n        type=int,\n        help=\"Window length for climatology.\",\n    )\n    parser.add_argument(\n        \"--subsample\", type=int, default=10_000, help=\"subset points to take\"\n    )\n    parser.add_argument(\n        \"-c\", \"--compare\", type=int, default=2, help=\"variables to compare\"\n    )\n    parser.add_argument(\n        \"-t\",\n        \"--temporal\",\n        type=int,\n        default=12,\n        help=\"Max number of temporal dimensions\",\n    )\n    parser.add_argument(\n        \"-s\", \"--spatial\", type=int, default=1, help=\"Max number of spatial dimensions\"\n    )\n    # logistics\n    parser.add_argument(\n        \"--save\", default=\"drought_v0\", type=str, help=\"Save Name for data results.\",\n    )\n    parser.add_argument(\"-sm\", \"--smoke_test\", action=\"store_true\")\n    main(parser.parse_args())\n", "import sys, os\nfrom pyprojroot import here\n\nroot = here(project_files=[\".here\"])\nsys.path.append(str(here()))\n\nfrom typing import Dict, Tuple, Optional, Union, Any\nfrom collections import namedtuple\n\nimport pathlib\nimport argparse\nimport pandas as pd\nfrom tqdm import tqdm\nimport numpy as np\nimport time\nimport joblib\nimport xarray as xr\n\n# Experiment Functions\nfrom src.data.esdc import get_dataset\nfrom src.features.temporal import select_period, TimePeriod\nfrom src.features.spatial import (\n    select_region,\n    get_spain,\n    get_europe,\n    get_northern_hemisphere,\n    get_southern_hemisphere,\n)\nfrom sklearn.preprocessing import StandardScaler\nfrom src.features.temporal import remove_climatology\nfrom src.experiments.utils import dict_product, run_parallel_step\nfrom src.features.density import get_density_cubes\nfrom src.features.utils import subset_indices\nfrom src.models.similarity import rbig_h_measures\nfrom src.features.preprocessing import (\n    standardizer_data,\n    get_reference_cube,\n    get_common_indices,\n)\n\nimport logging\n\nlogging.basicConfig(\n    level=logging.INFO,\n    stream=sys.stdout,\n    format=f\"%(asctime)s: %(levelname)s: %(message)s\",\n)\nlogger = logging.getLogger()\n# logger.setLevel(logging.INFO)\n\nSPATEMP = namedtuple(\"SPATEMP\", [\"spatial\", \"temporal\", \"dimensions\"])\n\n\nRES_PATH = pathlib.Path(str(root)).joinpath(\"data/spa_temp/info_earth/entropy\")\n\n\ndef get_parameters(args) -> Dict:\n\n    parameters = {}\n    # ======================\n    # Variable\n    # ======================\n    if args.variable == \"gpp\":\n\n        parameters[\"variable\"] = [\"gross_primary_productivity\"]\n\n    elif args.variable == \"sm\":\n\n        parameters[\"variable\"] = [\"soil_moisture\"]\n\n    elif args.variable == \"lst\":\n\n        parameters[\"variable\"] = [\"land_surface_temperature\"]\n\n    elif args.variable == \"lai\":\n\n        parameters[\"variable\"] = [\"leaf_area_index\"]\n\n    elif args.variable == \"rm\":\n\n        parameters[\"variable\"] = [\"root_moisture\"]\n\n    elif args.variable == \"precip\":\n\n        parameters[\"variable\"] = [\"precipitation\"]\n\n    else:\n        raise ValueError(\"Unrecognized variable\")\n\n    # ======================\n    # Region\n    # ======================\n    if args.region == \"spain\":\n        parameters[\"region\"] = [get_spain()]\n    elif args.region == \"europe\":\n        parameters[\"region\"] = [get_europe()]\n    elif args.region == \"world\":\n        parameters[\"region\"] = [\"world\"]\n    elif args.region == \"north\":\n        parameters[\"region\"] = [get_northern_hemisphere()]\n    elif args.region == \"south\":\n        parameters[\"region\"] = [get_southern_hemisphere()]\n    else:\n        raise ValueError(\"Unrecognized region\")\n\n    # ======================\n    # Period\n    # ======================\n    if args.period == \"2010\":\n\n        parameters[\"period\"] = [\n            TimePeriod(name=\"2010\", start=\"Jan-2010\", end=\"Dec-2010\")\n        ]\n\n    elif args.period == \"2002_2010\":\n        parameters[\"period\"] = [\n            TimePeriod(name=\"2002_2010\", start=\"Jan-2002\", end=\"Dec-2010\")\n        ]\n    if args.resample:\n        spatial_dimensions = [\n            1, # 1 dimension\n            2,\n            1, # 4 Dimensions\n            3,\n            2,\n            2,\n            1, # 9 Dimensions\n            4,\n            3,\n            2,\n            1, # 16 total dimensions\n        ]\n        temporal_dimensions = [\n            1, # 1 dimension\n            1,\n            4, # 4 dimensions\n            1,\n            2,\n            3,\n            9, # 9 dimensions\n            1,\n            2,\n            4,\n            12, # 16 dimensions\n        ]\n        n_dimensions = [\n            1,\n            4,\n            4, # 4 dimensions\n            9,\n            9,\n            9,\n            9, # 9 dimensions\n            16,\n            16,\n            16,\n            16, # 16 dimensions\n        ]\n    else:\n        spatial_dimensions = [\n            1,\n            2,\n            1, # 4 Dimensions\n            3,\n            2,\n            1, # 9 Dimensions\n            4,\n            3,\n            2,\n            1, # 16 total dimensions\n            5,\n            3,\n            2,\n            1, # 25 total dimensions\n            6,\n            4,\n            3,\n            2,\n            1, # 36 total dimensions\n            7,\n            5,\n            4,\n            3,\n            2,\n            1, # 49 total dimensions\n        ]\n        temporal_dimensions = [\n            1,\n            1,\n            4, # 4 dimensions\n            1,\n            2,\n            9, # 9 dimensions\n            1,\n            2,\n            4,\n            16, # 16 dimensions\n            1,\n            3,\n            6,\n            25, # 25 dimensions\n            1,\n            2,\n            4,\n            9,\n            36, # 36 dimensions\n            1,\n            2,\n            3,\n            5,\n            12,\n            46, # 49 dimensions\n        ]\n        n_dimensions = [\n            1,\n            4,\n            4, # 4 dimensions\n            9,\n            9,\n            9, # 9 dimensions\n            16,\n            16,\n            16,\n            16, # 16 dimensions\n            25,\n            25,\n            25,\n            25, # 25 dimensions\n            36,\n            36,\n            36,\n            36,\n            36, # 36 dimensions\n            49,\n            49,\n            49,\n            49,\n            49,\n            49, # 49 dimensions\n        ]\n    parameters[\"dimensions\"] = [\n        SPATEMP(i, j, k)\n        for i, j, k in zip(spatial_dimensions, temporal_dimensions, n_dimensions)\n    ]\n    parameters = list(dict_product(parameters))\n    return parameters\n\n\ndef experiment_step(parameters: Dict, args: argparse.Namespace,) -> pd.DataFrame:\n\n    # ======================\n    # experiment - Data\n    # ======================\n    # Get DataCube\n    datacube = get_dataset([parameters[\"variable\"]])\n\n    # ======================\n    # RESAMPLE\n    # ======================\n    if args.resample:\n        datacube = datacube.resample(time=args.resample).mean()\n\n    # ======================\n    # SPATIAL SUBSET\n    # ======================\n    if parameters[\"region\"] not in [\"world\"]:\n        region_name = parameters[\"region\"].name\n        datacube = select_region(xr_data=datacube, bbox=parameters[\"region\"])[\n            parameters[\"variable\"]\n        ]\n    else:\n        region_name = \"world\"\n\n    # ======================\n    # CLIMATOLOGY (TEMPORAL)\n    # ======================\n    if args.remove_climatology:\n        datacube, _ = remove_climatology(datacube)\n        # print(type(datacube))\n    #\n    # ======================\n    # TEMPORAL SUBSET\n    # ======================\n    datacube = select_period(xr_data=datacube, period=parameters[\"period\"])\n\n    # ======================\n    # DENSITY CUBES\n    # ======================\n    if isinstance(datacube, xr.Dataset):\n        # print(type(datacube))\n        datacube = datacube[parameters[\"variable\"]]\n\n    density_cube_df = get_density_cubes(\n        data=datacube,\n        spatial=parameters[\"dimensions\"].spatial,\n        temporal=parameters[\"dimensions\"].temporal,\n    )\n\n    # ======================\n    # STANDARDIZE DATA\n    # ======================\n    x_transformer = StandardScaler().fit(density_cube_df.values)\n\n    density_cube_df_norm = pd.DataFrame(\n        data=x_transformer.transform(density_cube_df.values),\n        columns=density_cube_df.columns.values,\n        index=density_cube_df.index,\n    )\n    # ======================\n    # SUBSAMPLE DATA\n    # ======================\n    if args.subsample is not None:\n        idx = subset_indices(\n            density_cube_df_norm.values, subsample=args.subsample, random_state=100\n        )\n        if idx is not None:\n            X = density_cube_df_norm.iloc[idx, :].values\n        else:\n            X = density_cube_df_norm.values\n    else:\n        X = density_cube_df_norm.values\n\n    # =========================\n    # Model - Gaussianization\n    # =========================\n    # Gaussianize the data\n    t0 = time.time()\n    rbig_h = rbig_h_measures(X, random_state=123, method=args.method)\n    t1 = time.time() - t0\n\n    # Save Results\n    results_df = pd.DataFrame(\n        {\n            \"region\": region_name,\n            \"period\": parameters[\"period\"].name,\n            \"variable\": parameters[\"variable\"],\n            \"spatial\": parameters[\"dimensions\"].spatial,\n            \"temporal\": parameters[\"dimensions\"].temporal,\n            \"n_dimensions\": parameters[\"dimensions\"].dimensions,\n            \"n_samples\": X.shape[0],\n            \"entropy\": rbig_h,\n            \"time\": t1,\n        },\n        index=[0],\n    )\n    return results_df\n\n\ndef main(args):\n\n    parameters = get_parameters(args)\n\n    save_name = (\n        f\"{args.save}_\" + f\"{args.region}_\" + f\"{args.variable}_\" + f\"{args.period}\"\n    )\n    if args.subsample:\n        save_name += f\"_s{int(args.subsample / 1_000)}k\"\n    if args.resample:\n        save_name += f\"_rs{args.resample}\"\n    if args.remove_climatology:\n        save_name += f\"_rc\"\n\n    header = True\n    mode = \"w\"\n    if args.smoke_test:\n        # print(parameters)\n        iparam = parameters[0]\n        print(iparam)\n        result_df = experiment_step(parameters=iparam, args=args)\n        with open(RES_PATH.joinpath(f\"sm_{save_name}.csv\"), mode) as f:\n            result_df.to_csv(f, header=header)\n    else:\n\n        with tqdm(parameters) as pbar:\n            for iparam in pbar:\n\n                pbar.set_description(\n                    f\"V: {args.variable}, T: {args.period}, \"\n                    f\"R: {args.region}, \"\n                    f\"Spa-Temp: {iparam['dimensions'].temporal}-{iparam['dimensions'].spatial}\"\n                )\n\n                results_df = experiment_step(parameters=iparam, args=args)\n\n                # save results\n                with open(RES_PATH.joinpath(f\"{save_name}.csv\"), mode) as f:\n                    results_df.to_csv(f, header=header)\n\n                header = False\n                mode = \"a\"\n                del results_df\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description=\"Arguments for GP experiment.\")\n\n    parser.add_argument(\n        \"--res\", default=\"low\", type=str, help=\"Resolution for datacube\"\n    )\n\n    parser.add_argument(\n        \"-v\", \"--variable\", default=\"gpp\", type=str, help=\"Variable to use\"\n    )\n\n    parser.add_argument(\n        \"-s\", \"--save\", default=\"v0\", type=str, help=\"Save name for experiment.\"\n    )\n    parser.add_argument(\n        \"--njobs\", type=int, default=-1, help=\"number of processes in parallel\",\n    )\n    parser.add_argument(\n        \"--subsample\", type=int, default=None, help=\"subset points to take\"\n    )\n    parser.add_argument(\n        \"--region\", type=str, default=\"spain\", help=\"Region to be Gaussianized\"\n    )\n    parser.add_argument(\n        \"--period\", type=str, default=\"2010\", help=\"Period to do the Gaussianization\"\n    )\n    parser.add_argument(\n        \"-rs\", \"--resample\", type=str, default=None, help=\"Resample Frequency\"\n    )\n    parser.add_argument(\"-m\", \"--method\", type=str, default=\"old\", help=\"RBIG Method\")\n    parser.add_argument(\"-sm\", \"--smoke-test\", action=\"store_true\")\n    parser.add_argument(\"-tm\", \"--temporal-mean\", action=\"store_true\")\n    parser.add_argument(\"-rc\", \"--remove-climatology\", action=\"store_true\")\n\n    main(parser.parse_args())\n" ]
[ [ "sklearn.preprocessing.StandardScaler" ], [ "sklearn.preprocessing.StandardScaler", "numpy.arange", "pandas.concat", "pandas.DataFrame" ], [ "sklearn.preprocessing.StandardScaler", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
idekany/correct_vvv_zp
[ "cc33bbcf5bb56c8f827c188b66e5377fe522b78d" ]
[ "correct_vvv_zp.py" ]
[ "import numpy as np\nimport utils as ut\nimport os\nimport sys\n\n# Read parameters from a file or from the command line:\nparser = ut.argparser()\n# print(len(sys.argv))\nif len(sys.argv) == 1:\n # use default name for the parameter file\n pars = parser.parse_args([ut.default_parameter_file])\nelse:\n pars = parser.parse_args()\n\npars = ut.process_input_parameters(pars)\n\n# Wired-in parameters:\nsep = '\\s+' # separator character in the light curve files ('\\s+' is any number of whitespaces)\ncomment = '#' # comment character in the light curve files\n\nzpcorr_table = np.load(os.path.join(pars.rootdir, pars.input_table), encoding='bytes')\n\nids = np.genfromtxt(pars.input_list, dtype=None, unpack=False, comments='#', filling_values=np.nan, encoding='latin1')\n\nn_object = len(ids)\n\nfor ilc, objname in enumerate(ids):\n\n filename = os.path.join(pars.rootdir, pars.lcdir, objname + pars.lcsuffix_in)\n lcdatain = ut.read_lc(filename, pars.colnames, usecols=pars.usecols, subset_expr=pars.subset,\n sep=sep, comment=comment, verbose=pars.verbose)\n\n output_list = []\n\n ii = 0\n for iap in pars.apertures:\n\n mjd = lcdatain[pars.colname_mjd].to_numpy()\n tile = lcdatain[pars.colname_tile].to_numpy().astype(bytes)\n obsid = lcdatain[pars.colname_obsid].to_numpy().astype(bytes)\n ichip = lcdatain[pars.colname_chip].to_numpy()\n expnum = lcdatain[pars.colname_expnum].to_numpy()\n\n otime = lcdatain[pars.colname_obstime].to_numpy()\n mag = lcdatain[pars.colname_mag + str(iap)].to_numpy()\n magerr = lcdatain[pars.colname_magerr + str(iap)].to_numpy()\n\n mag, magerr, otime, zperr, tile, expnum, ichip, obsid, ndata, zpcorr_this_obj = \\\n ut.correct_zp_by_obsid(zpcorr_table, obsid, iap, tile, expnum, ichip, mag, magerr, otime)\n\n if ii == 0:\n output_list.append(otime)\n output_list.append(mag)\n output_list.append(magerr)\n output_list.append(zperr)\n\n ii += 1\n\n output_arr = np.rec.fromarrays(output_list)\n fmt = \"%.6f\" + 3 * len(pars.apertures) * \" %.3f\"\n np.savetxt(os.path.join(pars.rootdir, pars.lcdir, objname + pars.lcsuffix_out), output_arr, fmt=fmt)\n" ]
[ [ "numpy.rec.fromarrays", "numpy.genfromtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
xiangzixuebit/Scipy
[ "9f7ca1ed7be17a7d1dd211722c5cc1eca33bcec0" ]
[ "scipy/optimize/_linprog_simplex.py" ]
[ "\"\"\"Simplex method for linear programming\n\nThe *simplex* method uses a traditional, full-tableau implementation of\nDantzig's simplex algorithm [1]_, [2]_ (*not* the Nelder-Mead simplex).\nThis algorithm is included for backwards compatibility and educational\npurposes.\n\n .. versionadded:: 0.15.0\n\nWarnings\n--------\n\nThe simplex method may encounter numerical difficulties when pivot\nvalues are close to the specified tolerance. If encountered try\nremove any redundant constraints, change the pivot strategy to Bland's\nrule or increase the tolerance value.\n\nAlternatively, more robust methods maybe be used. See\n:ref:`'interior-point' <optimize.linprog-interior-point>` and\n:ref:`'revised simplex' <optimize.linprog-revised_simplex>`.\n\nReferences\n----------\n.. [1] Dantzig, George B., Linear programming and extensions. Rand\n Corporation Research Study Princeton Univ. Press, Princeton, NJ,\n 1963\n.. [2] Hillier, S.H. and Lieberman, G.J. (1995), \"Introduction to\n Mathematical Programming\", McGraw-Hill, Chapter 4.\n\"\"\"\n\nimport numpy as np\nfrom warnings import warn\nfrom .optimize import OptimizeResult, OptimizeWarning, _check_unknown_options\nfrom ._linprog_util import _postsolve\n\n\ndef _pivot_col(T, tol=1e-9, bland=False):\n \"\"\"\n Given a linear programming simplex tableau, determine the column\n of the variable to enter the basis.\n\n Parameters\n ----------\n T : 2D array\n A 2D array representing the simplex tableau, T, corresponding to the\n linear programming problem. It should have the form:\n\n [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],\n [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],\n .\n .\n .\n [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],\n [c[0], c[1], ..., c[n_total], 0]]\n\n for a Phase 2 problem, or the form:\n\n [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],\n [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],\n .\n .\n .\n [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],\n [c[0], c[1], ..., c[n_total], 0],\n [c'[0], c'[1], ..., c'[n_total], 0]]\n\n for a Phase 1 problem (a problem in which a basic feasible solution is\n sought prior to maximizing the actual objective. ``T`` is modified in\n place by ``_solve_simplex``.\n tol : float\n Elements in the objective row larger than -tol will not be considered\n for pivoting. Nominally this value is zero, but numerical issues\n cause a tolerance about zero to be necessary.\n bland : bool\n If True, use Bland's rule for selection of the column (select the\n first column with a negative coefficient in the objective row,\n regardless of magnitude).\n\n Returns\n -------\n status: bool\n True if a suitable pivot column was found, otherwise False.\n A return of False indicates that the linear programming simplex\n algorithm is complete.\n col: int\n The index of the column of the pivot element.\n If status is False, col will be returned as nan.\n \"\"\"\n ma = np.ma.masked_where(T[-1, :-1] >= -tol, T[-1, :-1], copy=False)\n if ma.count() == 0:\n return False, np.nan\n if bland:\n # ma.mask is sometimes 0d\n return True, np.nonzero(np.logical_not(np.atleast_1d(ma.mask)))[0][0]\n return True, np.ma.nonzero(ma == ma.min())[0][0]\n\n\ndef _pivot_row(T, basis, pivcol, phase, tol=1e-9, bland=False):\n \"\"\"\n Given a linear programming simplex tableau, determine the row for the\n pivot operation.\n\n Parameters\n ----------\n T : 2D array\n A 2D array representing the simplex tableau, T, corresponding to the\n linear programming problem. 
It should have the form:\n\n [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],\n [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],\n .\n .\n .\n [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],\n [c[0], c[1], ..., c[n_total], 0]]\n\n for a Phase 2 problem, or the form:\n\n [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],\n [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],\n .\n .\n .\n [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],\n [c[0], c[1], ..., c[n_total], 0],\n [c'[0], c'[1], ..., c'[n_total], 0]]\n\n for a Phase 1 problem (a Problem in which a basic feasible solution is\n sought prior to maximizing the actual objective. ``T`` is modified in\n place by ``_solve_simplex``.\n basis : array\n A list of the current basic variables.\n pivcol : int\n The index of the pivot column.\n phase : int\n The phase of the simplex algorithm (1 or 2).\n tol : float\n Elements in the pivot column smaller than tol will not be considered\n for pivoting. Nominally this value is zero, but numerical issues\n cause a tolerance about zero to be necessary.\n bland : bool\n If True, use Bland's rule for selection of the row (if more than one\n row can be used, choose the one with the lowest variable index).\n\n Returns\n -------\n status: bool\n True if a suitable pivot row was found, otherwise False. A return\n of False indicates that the linear programming problem is unbounded.\n row: int\n The index of the row of the pivot element. If status is False, row\n will be returned as nan.\n \"\"\"\n if phase == 1:\n k = 2\n else:\n k = 1\n ma = np.ma.masked_where(T[:-k, pivcol] <= tol, T[:-k, pivcol], copy=False)\n if ma.count() == 0:\n return False, np.nan\n mb = np.ma.masked_where(T[:-k, pivcol] <= tol, T[:-k, -1], copy=False)\n q = mb / ma\n min_rows = np.ma.nonzero(q == q.min())[0]\n if bland:\n return True, min_rows[np.argmin(np.take(basis, min_rows))]\n return True, min_rows[0]\n\n\ndef _apply_pivot(T, basis, pivrow, pivcol, tol=1e-9):\n \"\"\"\n Pivot the simplex tableau inplace on the element given by (pivrow, pivol).\n The entering variable corresponds to the column given by pivcol forcing\n the variable basis[pivrow] to leave the basis.\n\n Parameters\n ----------\n T : 2D array\n A 2D array representing the simplex tableau, T, corresponding to the\n linear programming problem. It should have the form:\n\n [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],\n [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],\n .\n .\n .\n [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],\n [c[0], c[1], ..., c[n_total], 0]]\n\n for a Phase 2 problem, or the form:\n\n [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],\n [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],\n .\n .\n .\n [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],\n [c[0], c[1], ..., c[n_total], 0],\n [c'[0], c'[1], ..., c'[n_total], 0]]\n\n for a Phase 1 problem (a problem in which a basic feasible solution is\n sought prior to maximizing the actual objective. 
``T`` is modified in\n place by ``_solve_simplex``.\n basis : 1D array\n An array of the indices of the basic variables, such that basis[i]\n contains the column corresponding to the basic variable for row i.\n Basis is modified in place by _apply_pivot.\n pivrow : int\n Row index of the pivot.\n pivcol : int\n Column index of the pivot.\n \"\"\"\n basis[pivrow] = pivcol\n pivval = T[pivrow, pivcol]\n T[pivrow] = T[pivrow] / pivval\n for irow in range(T.shape[0]):\n if irow != pivrow:\n T[irow] = T[irow] - T[pivrow] * T[irow, pivcol]\n\n # The selected pivot should never lead to a pivot value less than the tol.\n if np.isclose(pivval, tol, atol=0, rtol=1e4):\n message = (\n \"The pivot operation produces a pivot value of:{0: .1e}, \"\n \"which is only slightly greater than the specified \"\n \"tolerance{1: .1e}. This may lead to issues regarding the \"\n \"numerical stability of the simplex method. \"\n \"Removing redundant constraints, changing the pivot strategy \"\n \"via Bland's rule or increasing the tolerance may \"\n \"help reduce the issue.\".format(pivval, tol))\n warn(message, OptimizeWarning, stacklevel=5)\n\n\ndef _solve_simplex(T, n, basis, callback, postsolve_args,\n maxiter=1000, tol=1e-9, phase=2, bland=False, nit0=0,\n ):\n \"\"\"\n Solve a linear programming problem in \"standard form\" using the Simplex\n Method. Linear Programming is intended to solve the following problem form:\n\n Minimize::\n\n c @ x\n\n Subject to::\n\n A @ x == b\n x >= 0\n\n Parameters\n ----------\n T : 2D array\n A 2D array representing the simplex tableau, T, corresponding to the\n linear programming problem. It should have the form:\n\n [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],\n [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],\n .\n .\n .\n [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],\n [c[0], c[1], ..., c[n_total], 0]]\n\n for a Phase 2 problem, or the form:\n\n [[A[0, 0], A[0, 1], ..., A[0, n_total], b[0]],\n [A[1, 0], A[1, 1], ..., A[1, n_total], b[1]],\n .\n .\n .\n [A[m, 0], A[m, 1], ..., A[m, n_total], b[m]],\n [c[0], c[1], ..., c[n_total], 0],\n [c'[0], c'[1], ..., c'[n_total], 0]]\n\n for a Phase 1 problem (a problem in which a basic feasible solution is\n sought prior to maximizing the actual objective. ``T`` is modified in\n place by ``_solve_simplex``.\n n : int\n The number of true variables in the problem.\n basis : 1D array\n An array of the indices of the basic variables, such that basis[i]\n contains the column corresponding to the basic variable for row i.\n Basis is modified in place by _solve_simplex\n callback : callable, optional\n If a callback function is provided, it will be called within each\n iteration of the algorithm. The callback must accept a\n `scipy.optimize.OptimizeResult` consisting of the following fields:\n\n x : 1D array\n Current solution vector\n fun : float\n Current value of the objective function\n success : bool\n True only when a phase has completed successfully. This\n will be False for most iterations.\n slack : 1D array\n The values of the slack variables. Each slack variable\n corresponds to an inequality constraint. If the slack is zero,\n the corresponding constraint is active.\n con : 1D array\n The (nominally zero) residuals of the equality constraints,\n that is, ``b - A_eq @ x``\n phase : int\n The phase of the optimization being executed. 
In phase 1 a basic\n feasible solution is sought and the T has an additional row\n representing an alternate objective function.\n status : int\n An integer representing the exit status of the optimization::\n\n 0 : Optimization terminated successfully\n 1 : Iteration limit reached\n 2 : Problem appears to be infeasible\n 3 : Problem appears to be unbounded\n 4 : Serious numerical difficulties encountered\n\n nit : int\n The number of iterations performed.\n message : str\n A string descriptor of the exit status of the optimization.\n postsolve_args : tuple\n Data needed by _postsolve to convert the solution to the standard-form\n problem into the solution to the original problem.\n maxiter : int\n The maximum number of iterations to perform before aborting the\n optimization.\n tol : float\n The tolerance which determines when a solution is \"close enough\" to\n zero in Phase 1 to be considered a basic feasible solution or close\n enough to positive to serve as an optimal solution.\n phase : int\n The phase of the optimization being executed. In phase 1 a basic\n feasible solution is sought and the T has an additional row\n representing an alternate objective function.\n bland : bool\n If True, choose pivots using Bland's rule [3]_. In problems which\n fail to converge due to cycling, using Bland's rule can provide\n convergence at the expense of a less optimal path about the simplex.\n nit0 : int\n The initial iteration number used to keep an accurate iteration total\n in a two-phase problem.\n\n Returns\n -------\n nit : int\n The number of iterations. Used to keep an accurate iteration total\n in the two-phase problem.\n status : int\n An integer representing the exit status of the optimization::\n\n 0 : Optimization terminated successfully\n 1 : Iteration limit reached\n 2 : Problem appears to be infeasible\n 3 : Problem appears to be unbounded\n 4 : Serious numerical difficulties encountered\n\n \"\"\"\n nit = nit0\n status = 0\n message = ''\n complete = False\n\n if phase == 1:\n m = T.shape[1]-2\n elif phase == 2:\n m = T.shape[1]-1\n else:\n raise ValueError(\"Argument 'phase' to _solve_simplex must be 1 or 2\")\n\n if phase == 2:\n # Check if any artificial variables are still in the basis.\n # If yes, check if any coefficients from this row and a column\n # corresponding to one of the non-artificial variable is non-zero.\n # If found, pivot at this term. If not, start phase 2.\n # Do this for all artificial variables in the basis.\n # Ref: \"An Introduction to Linear Programming and Game Theory\"\n # by Paul R. Thie, Gerard E. 
Keough, 3rd Ed,\n # Chapter 3.7 Redundant Systems (pag 102)\n for pivrow in [row for row in range(basis.size)\n if basis[row] > T.shape[1] - 2]:\n non_zero_row = [col for col in range(T.shape[1] - 1)\n if abs(T[pivrow, col]) > tol]\n if len(non_zero_row) > 0:\n pivcol = non_zero_row[0]\n _apply_pivot(T, basis, pivrow, pivcol, tol)\n nit += 1\n\n if len(basis[:m]) == 0:\n solution = np.zeros(T.shape[1] - 1, dtype=np.float64)\n else:\n solution = np.zeros(max(T.shape[1] - 1, max(basis[:m]) + 1),\n dtype=np.float64)\n\n while not complete:\n # Find the pivot column\n pivcol_found, pivcol = _pivot_col(T, tol, bland)\n if not pivcol_found:\n pivcol = np.nan\n pivrow = np.nan\n status = 0\n complete = True\n else:\n # Find the pivot row\n pivrow_found, pivrow = _pivot_row(T, basis, pivcol, phase, tol, bland)\n if not pivrow_found:\n status = 3\n complete = True\n\n if callback is not None:\n solution[:] = 0\n solution[basis[:n]] = T[:n, -1]\n x = solution[:m]\n x, fun, slack, con, _, _ = _postsolve(\n x, postsolve_args, tol=tol\n )\n res = OptimizeResult({\n 'x': x,\n 'fun': fun,\n 'slack': slack,\n 'con': con,\n 'status': status,\n 'message': message,\n 'nit': nit,\n 'success': status == 0 and complete,\n 'phase': phase,\n 'complete': complete,\n })\n callback(res)\n\n if not complete:\n if nit >= maxiter:\n # Iteration limit exceeded\n status = 1\n complete = True\n else:\n _apply_pivot(T, basis, pivrow, pivcol, tol)\n nit += 1\n return nit, status\n\n\ndef _linprog_simplex(c, c0, A, b, callback, postsolve_args,\n maxiter=1000, tol=1e-9, disp=False, bland=False,\n **unknown_options):\n \"\"\"\n Minimize a linear objective function subject to linear equality and\n non-negativity constraints using the two phase simplex method.\n Linear programming is intended to solve problems of the following form:\n\n Minimize::\n\n c @ x\n\n Subject to::\n\n A @ x == b\n x >= 0\n\n Parameters\n ----------\n c : 1D array\n Coefficients of the linear objective function to be minimized.\n c0 : float\n Constant term in objective function due to fixed (and eliminated)\n variables. (Purely for display.)\n A : 2D array\n 2D array such that ``A @ x``, gives the values of the equality\n constraints at ``x``.\n b : 1D array\n 1D array of values representing the right hand side of each equality\n constraint (row) in ``A``.\n callback : callable, optional\n If a callback function is provided, it will be called within each\n iteration of the algorithm. The callback function must accept a single\n `scipy.optimize.OptimizeResult` consisting of the following fields:\n\n x : 1D array\n Current solution vector\n fun : float\n Current value of the objective function\n success : bool\n True when an algorithm has completed successfully.\n slack : 1D array\n The values of the slack variables. Each slack variable\n corresponds to an inequality constraint. 
If the slack is zero,\n the corresponding constraint is active.\n con : 1D array\n The (nominally zero) residuals of the equality constraints,\n that is, ``b - A_eq @ x``\n phase : int\n The phase of the algorithm being executed.\n status : int\n An integer representing the status of the optimization::\n\n 0 : Algorithm proceeding nominally\n 1 : Iteration limit reached\n 2 : Problem appears to be infeasible\n 3 : Problem appears to be unbounded\n 4 : Serious numerical difficulties encountered\n nit : int\n The number of iterations performed.\n message : str\n A string descriptor of the exit status of the optimization.\n postsolve_args : tuple\n Data needed by _postsolve to convert the solution to the standard-form\n problem into the solution to the original problem.\n\n Options\n -------\n maxiter : int\n The maximum number of iterations to perform.\n disp : bool\n If True, print exit status message to sys.stdout\n tol : float\n The tolerance which determines when a solution is \"close enough\" to\n zero in Phase 1 to be considered a basic feasible solution or close\n enough to positive to serve as an optimal solution.\n bland : bool\n If True, use Bland's anti-cycling rule [3]_ to choose pivots to\n prevent cycling. If False, choose pivots which should lead to a\n converged solution more quickly. The latter method is subject to\n cycling (non-convergence) in rare instances.\n unkown_options : dict\n Optional arguments not used by this particular solver. If\n `unknown_options` is non-empty a warning is issued listing all\n unused options.\n\n Returns\n -------\n x : 1D array\n Solution vector.\n status : int\n An integer representing the exit status of the optimization::\n\n 0 : Optimization terminated successfully\n 1 : Iteration limit reached\n 2 : Problem appears to be infeasible\n 3 : Problem appears to be unbounded\n 4 : Serious numerical difficulties encountered\n\n message : str\n A string descriptor of the exit status of the optimization.\n iteration : int\n The number of iterations taken to solve the problem.\n\n References\n ----------\n .. [1] Dantzig, George B., Linear programming and extensions. Rand\n Corporation Research Study Princeton Univ. Press, Princeton, NJ,\n 1963\n .. [2] Hillier, S.H. and Lieberman, G.J. (1995), \"Introduction to\n Mathematical Programming\", McGraw-Hill, Chapter 4.\n .. [3] Bland, Robert G. New finite pivoting rules for the simplex method.\n Mathematics of Operations Research (2), 1977: pp. 103-107.\n\n\n Notes\n -----\n The expected problem formulation differs between the top level ``linprog``\n module and the method specific solvers. 
The method specific solvers expect a\n problem in standard form:\n\n Minimize::\n\n c @ x\n\n Subject to::\n\n A @ x == b\n x >= 0\n\n Whereas the top level ``linprog`` module expects a problem of form:\n\n Minimize::\n\n c @ x\n\n Subject to::\n\n A_ub @ x <= b_ub\n A_eq @ x == b_eq\n lb <= x <= ub\n\n where ``lb = 0`` and ``ub = None`` unless set in ``bounds``.\n\n The original problem contains equality, upper-bound and variable constraints\n whereas the method specific solver requires equality constraints and\n variable non-negativity.\n\n ``linprog`` module converts the original problem to standard form by\n converting the simple bounds to upper bound constraints, introducing\n non-negative slack variables for inequality constraints, and expressing\n unbounded variables as the difference between two non-negative variables.\n \"\"\"\n _check_unknown_options(unknown_options)\n\n status = 0\n messages = {0: \"Optimization terminated successfully.\",\n 1: \"Iteration limit reached.\",\n 2: \"Optimization failed. Unable to find a feasible\"\n \" starting point.\",\n 3: \"Optimization failed. The problem appears to be unbounded.\",\n 4: \"Optimization failed. Singular matrix encountered.\"}\n\n n, m = A.shape\n\n # All constraints must have b >= 0.\n is_negative_constraint = np.less(b, 0)\n A[is_negative_constraint] *= -1\n b[is_negative_constraint] *= -1\n\n # As all constraints are equality constraints the artificial variables\n # will also be basic variables.\n av = np.arange(n) + m\n basis = av.copy()\n\n # Format the phase one tableau by adding artificial variables and stacking\n # the constraints, the objective row and pseudo-objective row.\n row_constraints = np.hstack((A, np.eye(n), b[:, np.newaxis]))\n row_objective = np.hstack((c, np.zeros(n), c0))\n row_pseudo_objective = -row_constraints.sum(axis=0)\n row_pseudo_objective[av] = 0\n T = np.vstack((row_constraints, row_objective, row_pseudo_objective))\n\n nit1, status = _solve_simplex(T, n, basis, callback=callback,\n postsolve_args=postsolve_args,\n maxiter=maxiter, tol=tol, phase=1,\n bland=bland\n )\n # if pseudo objective is zero, remove the last row from the tableau and\n # proceed to phase 2\n nit2 = nit1\n if abs(T[-1, -1]) < tol:\n # Remove the pseudo-objective row from the tableau\n T = T[:-1, :]\n # Remove the artificial variable columns from the tableau\n T = np.delete(T, av, 1)\n else:\n # Failure to find a feasible starting point\n status = 2\n messages[status] = (\n \"Phase 1 of the simplex method failed to find a feasible \"\n \"solution. The pseudo-objective function evaluates to {0:.1e} \"\n \"which exceeds the required tolerance of {1} for a solution to be \"\n \"considered 'close enough' to zero to be a basic solution. \"\n \"Consider increasing the tolerance to be greater than {0:.1e}. \"\n \"If this tolerance is unacceptably large the problem may be \"\n \"infeasible.\".format(abs(T[-1, -1]), tol)\n )\n\n if status == 0:\n # Phase 2\n nit2, status = _solve_simplex(T, n, basis, callback=callback,\n postsolve_args=postsolve_args,\n maxiter=maxiter, tol=tol, phase=2,\n bland=bland, nit0=nit1\n )\n\n solution = np.zeros(n + m)\n solution[basis[:n]] = T[:n, -1]\n x = solution[:m]\n\n return x, status, messages[status], int(nit2)\n" ]
[ [ "numpy.take", "numpy.less", "numpy.arange", "numpy.eye", "numpy.atleast_1d", "numpy.delete", "numpy.ma.masked_where", "numpy.zeros", "numpy.vstack", "numpy.isclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
minds-n-company/CDSS-Sinusitis
[ "34794a516a095f5a8f15ec6c8e4fbb3ee8355413" ]
[ "utils.py" ]
[ "# Author : [email protected]\r\n# Date : 2020-12-03\r\n\r\nimport pandas as pd\r\nimport os\r\nimport copy, shutil\r\n\r\nimport torch\r\nimport torch.nn.functional as F\r\n\r\nfrom torch.optim import lr_scheduler\r\nimport torch.optim as optim\r\nimport torch.nn as nn\r\nfrom collections import defaultdict\r\n\r\n# ref : https://github.com/kakaobrain/fast-autoaugment/blob/1848b77cb42765974e572cecd92d8b825b1dcc85/FastAutoAugment/metrics.py\r\nclass Accumulator:\r\n def __init__(self):\r\n self.metrics = defaultdict(lambda: 0.)\r\n\r\n def add(self, key, value):\r\n self.metrics[key] += value\r\n\r\n def add_dict(self, dict):\r\n for key, value in dict.items():\r\n self.add(key, value)\r\n\r\n def __getitem__(self, item):\r\n return self.metrics[item]\r\n\r\n def __setitem__(self, key, value):\r\n self.metrics[key] = value\r\n\r\n def get_dict(self):\r\n return copy.deepcopy(dict(self.metrics))\r\n\r\n def items(self):\r\n return self.metrics.items()\r\n\r\n def __str__(self):\r\n return str(dict(self.metrics))\r\n\r\n def __truediv__(self, other):\r\n newone = Accumulator()\r\n for key, value in self.items():\r\n if isinstance(other, str):\r\n if other != key:\r\n newone[key] = value / self[other]\r\n else:\r\n newone[key] = value\r\n else:\r\n newone[key] = value / other\r\n return newone\r\n\r\n# # ref : https://github.com/CoinCheung/pytorch-loss \r\nclass FocalLossV2(nn.Module):\r\n '''\r\n This use better formula to compute the gradient, which has better numeric stability\r\n '''\r\n def __init__(self,\r\n alpha=0.25,\r\n gamma=2,\r\n reduction='mean'):\r\n super(FocalLossV2, self).__init__()\r\n self.alpha = alpha\r\n self.gamma = gamma\r\n self.reduction = reduction\r\n\r\n def forward(self, logits, label):\r\n loss = FocalSigmoidLossFuncV2.apply(logits, label, self.alpha, self.gamma)\r\n if self.reduction == 'mean':\r\n loss = loss.mean()\r\n if self.reduction == 'sum':\r\n loss = loss.sum()\r\n return loss\r\n\r\nclass FocalSigmoidLossFuncV2(torch.autograd.Function):\r\n '''\r\n compute backward directly for better numeric stability\r\n '''\r\n @staticmethod\r\n def forward(ctx, logits, label, alpha, gamma):\r\n logits = logits.float()\r\n coeff = torch.empty_like(logits).fill_(1 - alpha)\r\n coeff[label == 1] = alpha\r\n\r\n probs = torch.sigmoid(logits)\r\n log_probs = torch.where(logits >= 0,\r\n F.softplus(logits, -1, 50),\r\n logits - F.softplus(logits, 1, 50))\r\n log_1_probs = torch.where(logits >= 0,\r\n -logits + F.softplus(logits, -1, 50),\r\n -F.softplus(logits, 1, 50))\r\n probs_gamma = probs ** gamma\r\n probs_1_gamma = (1. - probs) ** gamma\r\n\r\n ctx.coeff = coeff\r\n ctx.probs = probs\r\n ctx.log_probs = log_probs\r\n ctx.log_1_probs = log_1_probs\r\n ctx.probs_gamma = probs_gamma\r\n ctx.probs_1_gamma = probs_1_gamma\r\n ctx.label = label\r\n ctx.gamma = gamma\r\n\r\n term1 = probs_1_gamma * log_probs\r\n term2 = probs_gamma * log_1_probs\r\n loss = torch.where(label == 1, term1, term2).mul_(coeff).neg_()\r\n return loss\r\n\r\n @staticmethod\r\n def backward(ctx, grad_output):\r\n '''\r\n compute gradient of focal loss\r\n '''\r\n coeff = ctx.coeff\r\n probs = ctx.probs\r\n log_probs = ctx.log_probs\r\n log_1_probs = ctx.log_1_probs\r\n probs_gamma = ctx.probs_gamma\r\n probs_1_gamma = ctx.probs_1_gamma\r\n label = ctx.label\r\n gamma = ctx.gamma\r\n\r\n term1 = (1. - probs - gamma * probs * log_probs).mul_(probs_1_gamma).neg_()\r\n term2 = (probs - gamma * (1. 
- probs) * log_1_probs).mul_(probs_gamma)\r\n\r\n grads = torch.where(label == 1, term1, term2).mul_(coeff).mul_(grad_output)\r\n return grads, None, None, None\r\n\r\n# ref: https://github.com/ShowLo/MobileNetV3/blob/master/CosineLR.py\r\nfrom torch.optim.lr_scheduler import _LRScheduler\r\nfrom math import pi, cos\r\n\r\nclass CosineWarmupLR(_LRScheduler):\r\n '''\r\n Cosine lr decay function with warmup.\r\n Ref: https://github.com/PistonY/torch-toolbox/blob/master/torchtoolbox/optimizer/lr_scheduler.py\r\n https://github.com/Randl/MobileNetV3-pytorch/blob/master/cosine_with_warmup.py\r\n Lr warmup is proposed by\r\n `Accurate, Large Minibatch SGD:Training ImageNet in 1 Hour`\r\n `https://arxiv.org/pdf/1706.02677.pdf`\r\n Cosine decay is proposed by\r\n `Stochastic Gradient Descent with Warm Restarts`\r\n `https://arxiv.org/abs/1608.03983`\r\n Args:\r\n optimizer (Optimizer): optimizer of a model.\r\n iter_in_one_epoch (int): number of iterations in one epoch.\r\n epochs (int): number of epochs to train.\r\n lr_min (float): minimum(final) lr.\r\n warmup_epochs (int): warmup epochs before cosine decay.\r\n last_epoch (int): init iteration. In truth, this is last_iter\r\n Attributes:\r\n niters (int): number of iterations of all epochs.\r\n warmup_iters (int): number of iterations of all warmup epochs.\r\n cosine_iters (int): number of iterations of all cosine epochs.\r\n '''\r\n\r\n def __init__(self, optimizer, epochs, iter_in_one_epoch, lr_min=0, warmup_epochs=0, last_epoch=-1):\r\n self.lr_min = lr_min\r\n self.niters = epochs * iter_in_one_epoch\r\n self.warmup_iters = iter_in_one_epoch * warmup_epochs\r\n self.cosine_iters = iter_in_one_epoch * (epochs - warmup_epochs)\r\n super(CosineWarmupLR, self).__init__(optimizer, last_epoch)\r\n\r\n def get_lr(self):\r\n if self.last_epoch < self.warmup_iters:\r\n return [(self.lr_min + (base_lr - self.lr_min) * self.last_epoch / self.warmup_iters) for base_lr in\r\n self.base_lrs]\r\n else:\r\n return [(self.lr_min + (base_lr - self.lr_min) * (\r\n 1 + cos(pi * (self.last_epoch - self.warmup_iters) / self.cosine_iters)) / 2) for base_lr in\r\n self.base_lrs]\r\n\r\n" ]
[ [ "torch.sigmoid", "torch.empty_like", "torch.nn.functional.softplus", "torch.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
DIvkov575/python-epipack
[ "f1bf4ec943f71ee922825fe3aa5c05263afef22e" ]
[ "main.py" ]
[ "import pandas as pd\nfrom pandas import DataFrame\nimport os\nimport random\nfrom bisect import bisect_left\n\ndf1 = pd.read_csv(\"Tables/song-list-a.csv\")\n\n\ndef get_asset_names(directory=\"Assets\") -> list:\n # pulls all file paths from director\n # removes spaces from all paths\n return os.listdir(directory)\n\n\ndef get_asset_paths(song_name_list, rename_origin_asset=False) -> list:\n # return song names cleaned -> usable paths\n path_list_out = []\n for song_in in song_name_list:\n if \" \" or \"/\" in song_in:\n song_out = song_in.replace(\" \", \"\")\n song_out = song_out.replace(\"/\", \"\")\n path_list_out.append(song_out)\n\n if rename_origin_asset:\n os.rename(\n os.path.join(\"Assets\", song_in), os.path.join(\"Assets\", song_out)\n )\n return path_list_out\n\n\n# does not work\ndef update_archive(current_archive=DataFrame()) -> DataFrame:\n # compare for new files in archive against current-archive (df1)\n # currently unusable -- checks auto-update df1 : updated view of `assets`\n df_output = DataFrame()\n current_path_list = current_archive[\"path\"]\n paths_in_assets = get_asset_names()\n\n for i in range(len(paths_in_assets)):\n if paths_in_assets[i] not in current_path_list:\n df_temp = pd.DataFrame(\n {\n \"name\": [\"NA\"],\n \"path\": [paths_in_assets[i]],\n \"bpm\": [\"NA\"],\n \"scale\": [\"NA\"],\n \"genre\": [\"NA\"],\n \"mood\": [\"NA\"],\n \"energy\": [\"NA\"],\n \"artist\": [\"NA\"],\n \"album\": [\"NA\"],\n \"sub-genre\": [\"NA\"],\n }\n )\n pd.concat([df_output, df_temp])\n\n print(df_output)\n\n\ndef countfrequency(my_list):\n freq = {}\n for item in my_list:\n if (item in freq):\n freq[item] += 1\n else:\n freq[item] = 1\n\n return (freq)\n print(freq)\n\n #-----------------------------------------#\n #-----------------------------------------#\n\ndef sort_b(song_index=df1,\n sort_type='linear-increase',\n sort_by='aggresion',\n parameter=[],\n repeat_songs=True,\n mid_point_threshhold=65,\n\n generic_length=10,\n increase_length=0,\n decay_length=0,\n mid_point='hold',\n hold_length='NA',\n sine_cycle_count='NA',\n sine_cycle_length='NA',\n sine_high_count='NA',\n sine_low_count='NA') -> list:\n list_of_song_names = song_index['names']\n list_of_song_paths = song_index['paths']\n # possible_sort_types = ['linear-increase', 'linear-decrease', 'parabola-fliped', 'sine', 'built-sine']\n possible_sort_by = ['aggression', 'energy', 'ambience']\n collapsed_sort_by_values = countfrequency(song_index[sort_by])\n song_sort_by_length = len(collapsed_sort_by_values.keys)\n queue = []\n increase_queue = DataFrame()\n decrease_queue = DataFrame()\n midpoint_queue = DataFrame()\n songs_upper_thresh = DataFrame()\n songs_lower_thresh = DataFrame()\n\n\n if mid_point.lower() == 'hold':\n sine_cycle_count = 'NA'\n sine_cycle_length = 'NA'\n sine_high_count = 'NA'\n sine_low_count = 'NA'\n if mid_point.lower() == 'sine':\n hold_length = 'NA'\n\n # append row if value in sort by, below threshhold\n for row in song_index:\n if row[sort_by] < mid_point_threshhold:\n songs_lower_thresh.append(row)\n elif row[sort_by] >= mid_point_threshhold:\n song_lower_thresh.append(row)\n else:\n raise \"bad sort by, in row\"\n\n\nif len(songs_lower_thresh) > 0:\n if len(song_lower_threshhold) < increase_length + decrease_length:\n if len(song_lower_threshhold) == increase_length:\n increase_queue = song_lower_threshhold.sort_by(Sort_by, axis=1, kind='merge_sort')\n if len(song_lower_threshhold) > increase_length:\n min_val = min(song_lower_threshhold[sort_by])\n max_val = 
max(song_lower_threshhold[sort_by])\n minmaxavg = (max_val-min_val)//increase_length\n _val_list = [i*minmaxavg for i in range(increase_length)]\n \n\n \n if len(song_lower_threshhold) == decrease_length:\n increase_queue = song_lower_threshhold.sort_by(Sort_by, axis=1, ascending=False, kind='merge_sort')\n" ]
[ [ "pandas.concat", "pandas.read_csv", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
LautaroParada/variance-test
[ "bc88de3dc324f622560b781aae8a0280f4c99b29" ]
[ "code/price_paths.py" ]
[ "import numpy as np\nfrom scipy.stats import halfnorm\n\nclass PricePaths(object):\n \n def __init__(self, n:int, T:int, s0:float):\n \n self.n = n # number of paths to generate\n self.T = T # number of observations to generate\n self.s0 = s0 # initial price\n \n self.h = self.n / self.T # step to move in each step\n self.r0 = self.s0 / 100 # Initial rate, based on the price\n \n\n\t# -------------------------------------------\n\t# The Brownian Motion Stochastic Process (Wiener Process)\n\t# -------------------------------------------\n \n def brownian_prices(self, mu:float, sigma:float, sto_vol:bool=False):\n \n # preallocate the data\n bro_prices = self.__zeros()\n \n # check the size of the output matrix\n if self.n > 1:\n for i in range(self.n):\n # simulate n price paths\n bro_prices[:, i] = self.__brownian_returns(mu, sigma, sto_vol)\n else:\n # case for only 1 simulation\n bro_prices = self.__brownian_returns(mu, sigma, sto_vol)\n \n return bro_prices\n \n\t# -------------------------------------------\n\t# Geometric Brownian motion\n\t# -------------------------------------------\n \n def gbm_prices(self, mu:float, sigma:float, sto_vol:bool=True):\n # preallocate the data\n gbm_prices = self.__zeros()\n\n # check the size of the output matrix\n if self.n > 1:\n for i in range(self.n):\n # simulate n price paths\n gbm_prices[:, i] = self.__brownian_returns(mu, sigma, sto_vol)\n else:\n # case for only 1 simulation\n gbm_prices = self.__brownian_returns(mu, sigma, sto_vol)\n\n return gbm_prices\n \n\t# -------------------------------------------\n\t# Merton Jump Diffusion Stochastic Process\n\t# -------------------------------------------\n \n def merton_prices(self, mu:float, sigma:float, lambda_:int, sto_vol:bool=False):\n # preallocate the data\n mert_prices = self.__zeros()\n \n # check the size of the output matrix\n if self.n > 1:\n for i in range(self.n):\n # simulate n price paths\n mert_prices[:, i] = self.__merton_returns(mu, sigma, lambda_, sto_vol)\n else:\n # case for only 1 simulation\n mert_prices = self.__merton_returns(mu, sigma, lambda_, sto_vol)\n \n return mert_prices\n \n\t# -------------------------------------------\n\t# Vasicek Interest Rate Model\n\t# -------------------------------------------\n \n def vas_rates(self, mu:float, sigma:float, lambda_:float, sto_vol:bool=False):\n \n ou_rates = self.__zeros()\n if self.n > 1:\n for i in range(self.n):\n ou_rates[:, i] = self.__vas_returns(mu, sigma, lambda_, sto_vol)\n \n else:\n ou_rates = self.__vas_returns(mu, sigma, lambda_, sto_vol)\n \n return ou_rates\n \n\t# -------------------------------------------\n\t# Cox Ingersoll Ross (CIR) stochastic proces - RATES\n\t# -------------------------------------------\n \n def cir_rates(self, mu:float, sigma:float, lambda_:float, sto_vol:bool=False):\n cir_rates_ = self.__zeros()\n \n if self.n > 1:\n for i in range(self.n):\n cir_rates_[:, i] = self.__cir_return(mu, sigma, lambda_, sto_vol)\n else:\n cir_rates_ = self.__cir_return(mu, sigma, lambda_, sto_vol)\n \n return cir_rates_\n \n # -------------------------------------------\n\t# Heston Stochastic Volatility Process\n\t# -------------------------------------------\n \n def heston_prices(self, rf:float, k:float, theta:float, sigma:float, sto_vol:bool=False):\n hes_prices = self.__zeros()\n \n if self.n > 1:\n for i in range(self.n):\n hes_prices[:, i] = self.__heston_returns(rf, k, theta, sigma, sto_vol)\n \n else:\n hes_prices = self.__heston_returns(rf, k, theta, sigma, sto_vol)\n \n return hes_prices\n 
\n # -------------------------------------------\n\t# Ornstein–Uhlenbeck Process (Mean reverting)\n\t# -------------------------------------------\n \n def ou_prices(self, mu:float, sigma:float, lambda_:float, sto_vol:bool=False):\n ou_prices = self.__zeros()\n if self.n > 1:\n for i in range(self.n):\n ou_prices[:, i] = self.__ou_returns(mu, sigma, lambda_, sto_vol)\n \n else:\n ou_prices = self.__ou_returns(mu, sigma, lambda_, sto_vol)\n \n return ou_prices\n\n\t# -------------------------------------------\n\t# Helper methods\n\t# -------------------------------------------\n \n # Ornstein–Uhlenbeck Process (Mean reverting)\n \n def __ou_discrete(self, mu:float, sigma:float, lambda_:float, st:float, vol:float):\n return np.exp(-lambda_*self.h)*st + (1-np.exp(-lambda_*self.h))*mu + sigma*( (1-np.exp(-2*lambda_*self.h)) / (2*lambda_)) * vol\n \n def __ou_returns(self, mu:float, sigma:float, lambda_:float, sto_vol:float):\n volatility = self.__random_disturbance(sto_vol, rd_mu=mu, rd_sigma=sigma)\n ou_rets = np.zeros(self.T)\n ou_rets[0] = self.s0\n \n for t in range(1, self.T):\n ou_rets[t] = ou_rets[t] + self.__ou_discrete(mu=mu*100, sigma=sigma*100, lambda_=lambda_, st=ou_rets[t-1], vol=volatility[t])\n \n return ou_rets \n \n # Heston Stochastic Volatility Process\n \n def __heston_dis_vol(self, k:float, theta:float, vt:float, sigma:float, w2:float):\n # heston mean reverting volatility recurrence\n return k * (theta - vt) * self.h + sigma * np.sqrt(np.abs(vt) * self.h) * w2\n \n def __heston_discrete(self, rf:float, st:float, V, w1):\n # Discrete form of the Heston model\n return rf * st *self.h + np.sqrt(np.abs(V) * self.h) * st * w1\n \n def __heston_returns(self, rf:float, k:float, theta:float, sigma:float, sto_vol:bool):\n \n # integrate a random correlation level\n corr_wn1, corr_wn2 = self.__corr_noise()\n \n # integrating the mean reverting volatility\n wn2 = self.__random_disturbance(sto_vol=sto_vol, rd_mu=0, rd_sigma=0)\n dw2 = np.zeros(self.T)\n dw2[0] = corr_wn2[0]\n \n for t in range(1, self.T):\n dw2[t] = self.__heston_dis_vol(k=k, \n theta=theta, \n vt=corr_wn2[t], \n sigma=sigma, \n w2=wn2[t])\n \n # creating the actual data for the process\n heston_ret = np.zeros(self.T)\n heston_ret[0] = self.s0\n \n for t in range(1, self.T):\n heston_ret[t] = heston_ret[t-1] + self.__heston_discrete(rf=rf, \n st=heston_ret[t-1], \n V=dw2[t], \n w1=corr_wn1[t])\n \n return heston_ret.ravel()\n \n # Cox Ingersoll Ross\n \n def __cir_discrete(self, mu:float, sigma:float, lambda_:float, xt:float, vol:float):\n return lambda_ * (mu-xt) * self.h + sigma * np.sqrt(xt*self.h) * vol\n \n def __cir_return(self, mu:float, sigma:float, lambda_:float, sto_vol:bool):\n \n volatility = self.__random_disturbance(sto_vol, mu, sigma)\n cir_ret = np.zeros(self.T)\n \n cir_ret[0] = self.r0\n \n for t in range(1, self.T):\n cir_ret[t] = cir_ret[t-1] + self.__cir_discrete(mu, sigma, lambda_, cir_ret[t-1], volatility[t])\n \n return cir_ret\n \n # Vasicek Interest Rate Model and Ornstein–Uhlenbeck Process - Mean reverting\n \n def __vas_discrete(self, mu:float, sigma:float, lambda_:float, rt:float, vol:float):\n \n return lambda_ * (mu-rt) * self.h + sigma * np.sqrt(self.h) * vol\n \n def __vas_returns(self, mu:float, sigma:float, lambda_:float, sto_vol:float):\n volatility = self.__random_disturbance(sto_vol, rd_mu=mu, rd_sigma=sigma)\n vas_rets = np.zeros((self.T))\n vas_rets[0] = self.r0\n \n for t in range(1, self.T):\n vas_rets[t] = vas_rets[t-1] + self.__vas_discrete(mu=mu, sigma=sigma, 
lambda_=lambda_, rt=vas_rets[t-1], vol=volatility[t])\n \n return vas_rets\n \n # Merton Jump Diffusion Stochastic Process\n \n def __jumps_diffusion(self, lambda_:int):\n t = 0\n jumps = np.zeros((self.T, 1))\n lambda__ = lambda_ / self.T\n small_lambda = -(1.0/lambda__)\n pd = np.random.poisson(lam=lambda__, size=(self.T))\n \n # applying the psudo-code of the algorithm\n for i in range(self.T):\n t += small_lambda * np.log(np.random.uniform())\n if t > self.T:\n jumps[i:] = ( (np.mean(pd) + np.std(pd)) * np.random.uniform() ) * np.random.choice([-1, 1])\n # the t parameter is restituted to the original value\n # for several jumps in the future\n t = small_lambda\n break\n \n return jumps.reshape(-1, 1)\n \n def __merton_returns(self, mu:float, sigma:float, lambda_:int, sto_vol:bool):\n geometric_brownian_motion = self.__brownian_returns(mu, sigma, sto_vol).reshape(-1, 1)\n jump_diffusion = self.__jumps_diffusion(lambda_)\n return (geometric_brownian_motion + jump_diffusion).ravel()\n \n\t# Brownian Motion Stochastic Process (Wiener Process)\n \n def __brownian_discrete(self, mu:float, sigma:float, st:float, vol:float):\n return ( mu * st * self.h ) + ( sigma * st * np.sqrt(self.h) * vol )\n \n def __brownian_returns(self, mu:float, sigma:float, sto_vol:bool):\n # preallocate the volatility\n volatility = self.__random_disturbance(sto_vol, rd_mu=mu, rd_sigma=sigma)\n bro_returns = np.zeros((self.T))\n bro_returns[0] = self.s0\n for t in range(1, self.T):\n bro_returns[t] = bro_returns[t-1] + \\\n self.__brownian_discrete(mu, sigma, st=bro_returns[t-1], vol=volatility[t])\n \n return bro_returns\n \n\t# -------------------------------------------\n\t# General utilities\n\t# -------------------------------------------\n \n def __random_disturbance(self, sto_vol:bool, rd_mu:float, rd_sigma:float):\n \n if not sto_vol:\n return np.random.normal(size=(self.T, 1))\n else:\n # error handling for scale < 0, because negative volatilities \n # doesnt makes sense.\n return np.random.normal(loc=rd_mu * self.h,\n scale=rd_sigma * halfnorm.rvs(1) * np.sqrt(self.h),\n size=(self.T, 1)) * 100\n return\n \n def __zeros(self):\n return np.zeros((self.T, self.n))\n \n def __corr_noise(self):\n \n # generate two uncorrelated Brownian processes\n z1 = self.__random_disturbance(sto_vol=False, rd_mu=0, rd_sigma=1)\n z2 = self.__random_disturbance(sto_vol=False, rd_mu=0, rd_sigma=1)\n \n # randomly create an absolute correlation\n rho = np.random.uniform(low=0.5, high=1)\n \n corr1 = np.sqrt( (1 + rho) / 2 )\n corr2 = np.sqrt( (1 - rho) / 2 )\n \n # correlating the brownian processes\n dw1 = corr1 * z1 + corr2 * z2\n dw2 = corr1 * z1 - corr2 * z2\n \n return dw1, dw2" ]
[ [ "scipy.stats.halfnorm.rvs", "numpy.sqrt", "numpy.abs", "numpy.random.choice", "numpy.random.poisson", "numpy.random.normal", "numpy.std", "numpy.mean", "numpy.random.uniform", "numpy.exp", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
andrewhead/Package-Qualifiers
[ "ac58654ea0463c0986670fdb80fb8d04dd68e2e2" ]
[ "dump/popular_tag_post_stats.py" ]
[ "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\nimport logging\nfrom progressbar import ProgressBar, Percentage, Bar, ETA, Counter, RotatingMarker\nimport numpy as np\n\nfrom dump import dump_json\nfrom models import Post, Tag, PostTag\n\n\nlogger = logging.getLogger('data')\nTAGS = [\n \"javascript\",\n \"java\",\n \"c#\",\n \"php\",\n \"android\",\n \"jquery\",\n \"python\",\n \"html\",\n \"c++\",\n \"ios\",\n \"mysql\",\n \"css\",\n \"sql\",\n \"asp.net\",\n \"objective-c\",\n \"ruby-on-rails\",\n \".net\",\n \"c\",\n \"iphone\",\n \"arrays\",\n \"angularjs\",\n \"sql-server\",\n \"ruby\",\n \"json\",\n \"ajax\",\n \"regex\",\n \"xml\",\n \"r\",\n \"asp.net-mvc\",\n \"linux\",\n \"django\",\n \"wpf\",\n \"node.js\",\n \"database\",\n]\n\n\n@dump_json(__name__)\ndef main(sample_size, show_progress, *args, **kwargs):\n\n # Set up progress bar.\n if show_progress:\n progress_bar = ProgressBar(maxval=len(TAGS), widgets=[\n 'Progress: ', Percentage(),\n ' ', Bar(marker=RotatingMarker()),\n ' ', ETA(),\n ' Fetched posts for ', Counter(), ' / ' + str(len(TAGS)) + ' tags.'\n ])\n progress_bar.start()\n\n # Fetch statistics for posts related to each tag\n for tag_count, tag in enumerate(TAGS, start=1):\n\n # As it turns out, we can make the selection of random posts tractable if we\n # don't do a random ordering in SQL but instead do a random selection\n # in Python. So we start by fetching all of the post IDs for a tag,\n # make the random choice locally, and then query for the posts in this subset.\n post_id_tuples = (\n PostTag.select()\n .join(Tag, on=(PostTag.tag_id == Tag.id))\n .where(Tag.tag_name == tag)\n .select(PostTag.post_id)\n .tuples()\n )\n\n # We convert this from a 2D Nx1 matrix to a 1D N-length matrix by taking its\n # transpose and then getting the \"first row\" of the transpose.\n post_ids = np.array(post_id_tuples).T[0]\n sample_post_ids = np.random.choice(post_ids, sample_size, replace=False).tolist()\n\n post_records = (\n Post.select(Post.title, Post.creation_date, Post.answer_count, Post.comment_count,\n Post.favorite_count, Post.score, Post.view_count)\n .where(Post.id << sample_post_ids)\n .dicts()\n )\n\n # Store which tag this record is associated with\n for record in post_records:\n record['tag_name'] = tag\n\n yield post_records\n\n if show_progress:\n progress_bar.update(tag_count)\n\n if show_progress:\n progress_bar.finish()\n\n raise StopIteration\n\n\ndef configure_parser(parser):\n parser.description = \"Dump count statistics for posts for frequent Stack Overflow tags.\"\n parser.add_argument(\n '--sample-size',\n type=int,\n default=2000,\n help=\"The maximum number of random posts to fetch for a tag.\" +\n \"Performance should be pretty invariant to this number.\"\n )\n parser.add_argument(\n '--show-progress',\n action='store_true',\n help=\"Show progress in loading content from the file.\"\n )\n" ]
[ [ "numpy.array", "numpy.random.choice" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Bingwen-Hu/hackaway
[ "69727d76fd652390d9660e9ea4354ba5cc76dd5c", "69727d76fd652390d9660e9ea4354ba5cc76dd5c", "69727d76fd652390d9660e9ea4354ba5cc76dd5c", "69727d76fd652390d9660e9ea4354ba5cc76dd5c", "69727d76fd652390d9660e9ea4354ba5cc76dd5c", "69727d76fd652390d9660e9ea4354ba5cc76dd5c" ]
[ "memos/python/newsparser.py", "projects/faces/facessh/facessh/model/SSH.py", "memos/opencv/RGB2BGR.py", "projects/imwrap/mls/body.py", "projects/olds/ocr/ocr-tensorflow/preprocess.py", "projects/faces/pcn/pcn/api.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 7 16:45:41 2017\n\n@author: Mory\n\"\"\"\nimport requests\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nfrom webUtil import character2url, date2timestamp\n\ndefaulttimes = [[2010, 1, 1],\n [2011, 1, 1],\n [2012, 1, 1],\n [2013, 1, 1],\n [2014, 1, 1],\n [2015, 1, 1],\n [2016, 1, 1],\n [2017, 1, 1]]\n\n\ndef df_split(df):\n company = df['公司'][0]\n indexfilter = df['新闻'].apply(lambda news: company in news)\n df = df[indexfilter]\n indexStrategy = df['新闻'].apply(lambda news: '战略' in news)\n strategy = df[indexStrategy] \n nonStrategy = df[~indexStrategy]\n return strategy, nonStrategy\n\ndef all_df(times, keyword, company):\n dataframes = pd.DataFrame(columns=['公司', '起始时间', '新闻'])\n for bt, et in zip(times, times[1:]):\n df = get_df(bt, et, keyword, company)\n dataframes = dataframes.append(df, ignore_index=True)\n return dataframes\ndef cleaned_df(first, other):\n all_others = [o for ox in other for o in ox]\n first.extend(all_others)\n return list(set(first))\n\ndef get_df(bt, et, keyword, company):\n url = url_constructor(bt, et, keyword, company)\n soup = preprocess(url)\n first, other = get_all_text(soup)\n text = cleaned_df(first, other)\n df = news_output(text, bt, company)\n return df\n \ndef preprocess(url, params={}):\n news = requests.get(url, params)\n news.encoding = 'UTF-8'\n soup = BeautifulSoup(news.text, \"lxml\")\n return soup\n\n\ndef get_all_text(soup):\n first_text = get_text(soup)\n other_text = next_page(soup)\n return first_text, other_text\n \ndef get_text(soup):\n rs = soup.find_all('h3', {'class': 'c-title'})\n texts = [r.get_text() for r in rs]\n return texts\n\n\ndef get_links(soup):\n tag_a = soup.find(\"p\", id=\"page\")\n links = tag_a.find_all('a')\n links = [link.get('href') for link in links]\n return links\n\n\ndef url_constructor(bt, et, must, either):\n must = '\"'+must+'\"'\n either = '\"'+either+'\"'\n urlbase = \"http://news.baidu.com/ns?from=news&cl=2\"\n urldate = \"&bt={bt}&et={et}\".format(bt=date2timestamp(bt), \n et=date2timestamp(et))\n keywords = \"&q1={must}&submit=%B0%D9%B6%C8%D2%BB%CF%C2&q3={either}&q4=\".format(must=character2url(must), either=character2url(either))\n options = \"&mt=0&lm=&s=2&tn=newstitledy&ct=0&rn=50&q6=\"\n return urlbase+urldate+keywords+options\n\ndef next_page(soup):\n all_text = []\n base = 'http://news.baidu.com'\n for link in get_links(soup):\n url = base + link\n soup = preprocess(url)\n text = get_text(soup)\n all_text.append(text)\n return all_text\n\ndef news_output(text, bt, company):\n df = pd.DataFrame(columns=['公司', '起始时间', '新闻'])\n df[\"新闻\"] = text\n df[\"公司\"] = company\n df[\"起始时间\"] = str(bt[0])\n return df\n\n\nif __name__ == '__main__':\n importants = ['小米', '华为', \n '苹果', '微软', '谷歌', \n '支付宝', '京东', '亚马逊', # 购物\n '美团', '百度外卖', # 查看市场份额\n '滴滴', '摩拜单车']\n \n \n home = \"http://news.baidu.com/advanced_news.html\"\n", "import torch\nfrom . 
import network as net\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom .utils.config import cfg\nimport math\nfrom torch.autograd import Variable\nimport torchvision as tv\nfrom .rpn.anchor_target_layer import _AnchorTargetLayer\nfrom .rpn.proposal_layer import _Proposallayer\nimport numpy as np\n\n\nclass SSH(nn.Module):\n def __init__(self, vgg16_image_net=True):\n super(SSH, self).__init__()\n\n vgg16_features = tv.models.vgg16(pretrained=vgg16_image_net).features\n self.conv4_3 = nn.Sequential(*list(vgg16_features.children())[:23])\n self.conv5_3 = nn.Sequential(*list(vgg16_features.children())[23:30])\n self.m3_module = net.M3(512)\n self.m2_module = net.M2(512)\n self.m1_module = net.M1(128)\n\n self.conv5_128 = net.Conv2D(512, 128, 1, False, 1, True)\n self.conv5_128_up = nn.ConvTranspose2d(128, 128, 4, 2, 1, 1, 128, False)\n self.con4_128 = net.Conv2D(512, 128, 1, False, 1, True)\n self.con4_fuse_final = net.Conv2D(128, 128, 3, True, 1, True)\n\n self.pool6 = nn.MaxPool2d(2, 2)\n\n self.m3_anchor_target_layer = _AnchorTargetLayer(32, np.array([16, 32]), np.array([1, ]), 512, name='m3')\n self.m2_anchor_target_layer = _AnchorTargetLayer(16, np.array([4, 8]), np.array([1, ]), 0, name='m2')\n self.m1_anchor_target_layer = _AnchorTargetLayer(8, np.array([1, 2]), np.array([1, ]), 0, name='m1')\n\n self.m3_proposal_layer = _Proposallayer(32, np.array([16, 32]), np.array([1, ]))\n self.m2_proposal_layer = _Proposallayer(16, np.array([4, 8]), np.array([1, ]))\n self.m1_proposal_layer = _Proposallayer(8, np.array([1, 2]), np.array([1, ]))\n\n self.m3_soft_max = nn.Softmax(1)\n self.m2_soft_max = nn.Softmax(1)\n self.m1_soft_max = nn.Softmax(1)\n\n @staticmethod\n def reshape(x, d):\n input_shape = x.size()\n x = x.view(\n input_shape[0],\n int(d),\n int(float(input_shape[1] * input_shape[2]) / float(d)),\n input_shape[3]\n )\n return x\n\n def forward(self, image_data, im_info, gt_boxes=None):\n batch_size = image_data.size(0)\n\n conv4_3 = self.conv4_3(image_data)\n conv5_3 = self.conv5_3(conv4_3)\n\n m2_ssh_cls_score, m2_ssh_bbox_pred = self.m2_module(conv5_3)\n\n # M3\n pool6 = self.pool6(conv5_3)\n m3_ssh_cls_score, m3_ssh_bbox_pred = self.m3_module(pool6)\n\n # M 1\n conv4_128 = self.con4_128(conv4_3)\n conv5_128 = self.conv5_128(conv5_3)\n conv5_128_up = self.conv5_128_up(conv5_128)\n\n # crop cove5_128_up to match conv4_128's size\n # NCHW\n conv4_128_height = conv4_128.size()[2]\n conv4_128_width = conv4_128.size()[3]\n\n conv5_128_crop = conv5_128_up[:, :,\n 0:conv4_128_height,\n 0:conv4_128_width]\n\n conv4_fuse = conv5_128_crop + conv4_128\n\n con4_fuse_final = self.con4_fuse_final(conv4_fuse)\n m1_ssh_cls_score, m1_ssh_bbox_pred = self.m1_module(con4_fuse_final)\n\n # generating training labels and build the rpn loss\n if self.training:\n assert gt_boxes is not None\n\n m3_ssh_cls_prob_reshape_OHEM = None\n m2_ssh_cls_prob_reshape_OHEM = None\n m1_ssh_cls_prob_reshape_OHEM = None\n\n if cfg.TRAIN.HARD_POSITIVE_MINING or cfg.TRAIN.HARD_NEGATIVE_MINING:\n m3_ssh_cls_score_reshape_OHEM = self.reshape(m3_ssh_cls_score.detach(), 2)\n m2_ssh_cls_score_reshape_OHEM = self.reshape(m2_ssh_cls_score.detach(), 2)\n m1_ssh_cls_score_reshape_OHEM = self.reshape(m1_ssh_cls_score.detach(), 2)\n\n # softmax\n m3_ssh_cls_prob_output_OHEM = self.m3_soft_max(m3_ssh_cls_score_reshape_OHEM)\n m2_ssh_cls_prob_output_OHEM = self.m2_soft_max(m2_ssh_cls_score_reshape_OHEM)\n m1_ssh_cls_prob_output_OHEM = self.m1_soft_max(m1_ssh_cls_score_reshape_OHEM)\n\n # reshape from (batch,2,2*H,W) back 
to (batch,4,h,w)\n m3_ssh_cls_prob_reshape_OHEM = self.reshape(m3_ssh_cls_prob_output_OHEM, 4)\n m2_ssh_cls_prob_reshape_OHEM = self.reshape(m2_ssh_cls_prob_output_OHEM, 4)\n m1_ssh_cls_prob_reshape_OHEM = self.reshape(m1_ssh_cls_prob_output_OHEM, 4)\n\n m3_labels, m3_bbox_targets, m3_bbox_inside_weights, m3_bbox_outside_weights = \\\n self.m3_anchor_target_layer(m3_ssh_cls_score, gt_boxes, im_info, m3_ssh_cls_prob_reshape_OHEM)\n\n m2_labels, m2_bbox_targets, m2_bbox_inside_weights, m2_bbox_outside_weights = \\\n self.m2_anchor_target_layer(m2_ssh_cls_score, gt_boxes, im_info, m2_ssh_cls_prob_reshape_OHEM)\n\n m1_labels, m1_bbox_targets, m1_bbox_inside_weights, m1_bbox_outside_weights = \\\n self.m1_anchor_target_layer(m1_ssh_cls_score, gt_boxes, im_info, m1_ssh_cls_prob_reshape_OHEM)\n\n # reshape from (batch,4,h,w) to (batch,2,2*h,w)\n m3_ssh_cls_score_reshape = self.reshape(m3_ssh_cls_score, 2)\n m2_ssh_cls_score_reshape = self.reshape(m2_ssh_cls_score, 2)\n m1_ssh_cls_score_reshape = self.reshape(m1_ssh_cls_score, 2)\n\n # reshape from (batch, 2, 2*h,w) to (batch, 2*h*w,2)\n m3_ssh_cls_score = m3_ssh_cls_score_reshape.permute(0, 2, 3, 1).contiguous().view(batch_size, -1, 2)\n m2_ssh_cls_score = m2_ssh_cls_score_reshape.permute(0, 2, 3, 1).contiguous().view(batch_size, -1, 2)\n m1_ssh_cls_score = m1_ssh_cls_score_reshape.permute(0, 2, 3, 1).contiguous().view(batch_size, -1, 2)\n\n # reshape from (batch,1,2*H,W) to (batch, 2*h*w)\n m3_target_labels = m3_labels.view(batch_size, -1)\n m2_target_labels = m2_labels.view(batch_size, -1)\n m1_target_labels = m1_labels.view(batch_size, -1)\n\n # reshape to (N,C) for cross_entropy loss\n m3_ssh_cls_score = m3_ssh_cls_score.view(-1, 2)\n m2_ssh_cls_score = m2_ssh_cls_score.view(-1, 2)\n m1_ssh_cls_score = m1_ssh_cls_score.view(-1, 2)\n\n # reshape to (N)\n m3_target_labels = m3_target_labels.view(-1).long()\n m2_target_labels = m2_target_labels.view(-1).long()\n m1_target_labels = m1_target_labels.view(-1).long()\n\n # compute bbox classification loss\n m3_ssh_cls_loss = F.cross_entropy(m3_ssh_cls_score, m3_target_labels, ignore_index=-1)\n m2_ssh_cls_loss = F.cross_entropy(m2_ssh_cls_score, m2_target_labels, ignore_index=-1)\n m1_ssh_cls_loss = F.cross_entropy(m1_ssh_cls_score, m1_target_labels, ignore_index=-1)\n\n # compute bbox regression loss\n m3_bbox_loss = net._smooth_l1_loss(m3_ssh_bbox_pred, m3_bbox_targets,\n m3_bbox_inside_weights, m3_bbox_outside_weights, sigma=3, dim=[1, 2, 3])\n m2_bbox_loss = net._smooth_l1_loss(m2_ssh_bbox_pred, m2_bbox_targets,\n m2_bbox_inside_weights, m2_bbox_outside_weights, sigma=3, dim=[1, 2, 3])\n m1_bbox_loss = net._smooth_l1_loss(m1_ssh_bbox_pred, m1_bbox_targets,\n m1_bbox_inside_weights, m1_bbox_outside_weights, sigma=3, dim=[1, 2, 3])\n\n return m3_ssh_cls_loss, m2_ssh_cls_loss, m1_ssh_cls_loss, m3_bbox_loss, m2_bbox_loss, m1_bbox_loss\n else:\n # reshape from (batch,4,h,w) to (batch,2,-1,w)\n m3_ssh_cls_score_reshape = self.reshape(m3_ssh_cls_score, 2)\n m2_ssh_cls_score_reshape = self.reshape(m2_ssh_cls_score, 2)\n m1_ssh_cls_score_reshape = self.reshape(m1_ssh_cls_score, 2)\n\n # softmax\n m3_ssh_cls_prob_output = self.m3_soft_max(m3_ssh_cls_score_reshape)\n m2_ssh_cls_prob_output = self.m2_soft_max(m2_ssh_cls_score_reshape)\n m1_ssh_cls_prob_output = self.m1_soft_max(m1_ssh_cls_score_reshape)\n\n # reshape from (batch,2,2*H,W) back to (batch,4,h,w)\n m3_ssh_cls_prob_reshape = self.reshape(m3_ssh_cls_prob_output, 4)\n m2_ssh_cls_prob_reshape = self.reshape(m2_ssh_cls_prob_output, 4)\n 
m1_ssh_cls_prob_reshape = self.reshape(m1_ssh_cls_prob_output, 4)\n\n # roi has shape (batch, top_k, 5),\n # where (batch, top_k, 4) is the cls score and\n # (batch, top_k, 0:4) are the bbox coordinates\n m3_ssh_roi = self.m3_proposal_layer(m3_ssh_cls_prob_reshape, m3_ssh_bbox_pred, im_info)\n m2_ssh_roi = self.m2_proposal_layer(m2_ssh_cls_prob_reshape, m2_ssh_bbox_pred, im_info)\n m1_ssh_roi = self.m1_proposal_layer(m1_ssh_cls_prob_reshape, m1_ssh_bbox_pred, im_info)\n\n ssh_roi = torch.cat((m3_ssh_roi, m2_ssh_roi, m1_ssh_roi), dim=1)\n # ssh_roi = torch.cat((m3_ssh_roi,), dim=1)\n return ssh_roi\n",
"# OpenCV prefers BGR channel order while Matplotlib expects RGB\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nimg = cv2.imread('E:/Mory/rose.jpg')\nb, g, r = cv2.split(img)\nimg2 = cv2.merge([r, g, b])\nplt.subplot(121)\nplt.imshow(img)\nplt.subplot(122)\nplt.imshow(img2)\nplt.show()\n\n\ncv2.imshow(\"bgr image\", img)\ncv2.imshow(\"rgb image\", img2)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n",
"# memory file\nimport cv2\nimport rtpose\nimport numpy as np\n\n\n# img = '../imgs/fat.jpg'\n# img = 'wei.jpg'\n# img = '../imgs/bress.jpg'\nimg = '../imgs/girl.jpg'\n\nim = cv2.imread(img)\nif im is None:\n raise ValueError(\"Image is None\")\n\n\njoint = rtpose.rtpose.KeyPointParams.joint\ncanvas, jsonfile = rtpose.estimation(img)\npoints = jsonfile['people'][0]\n\ndef get_XY(points):\n neck = points[joint.Neck * 3], points[joint.Neck * 3 + 1]\n lshoulder = points[joint.LShoulder * 3], points[joint.LShoulder * 3 + 1]\n rshoulder = points[joint.RShoulder * 3], points[joint.RShoulder * 3 + 1]\n lhip = points[joint.LHip * 3], points[joint.LHip * 3 + 1] \n rhip = points[joint.RHip * 3], points[joint.RHip * 3 + 1] \n lknee = points[joint.LKnee * 3], points[joint.LKnee * 3 + 1]\n rknee = points[joint.RKnee * 3], points[joint.RKnee * 3 + 1]\n lankle = points[joint.LAnkle * 3], points[joint.LAnkle * 3 + 1]\n rankle = points[joint.RAnkle * 3], points[joint.RAnkle * 3 + 1]\n return neck, lshoulder, rshoulder, lhip, rhip, lknee, rknee, lankle, rankle\n\nparts = get_XY(points)\n\n# for part in parts:\n# cv2.circle(im, part, 4, (255, 128, 200), thickness=4)\n\n# cv2.imwrite(\"wei.jpg\", im)\n\ndef midpoint(src, dst, ratio):\n \"\"\"x, y in image format\"\"\"\n xspace = np.linspace(src[0], dst[0], 11)\n yspace = np.linspace(src[1], dst[1], 11)\n return [int(xspace[ratio]), int(yspace[ratio])]\n \n\n\ndef slim_Waist(parts, offset):\n neck, lshoulder, rshoulder, lhip, rhip = parts[0:5]\n vd = lhip[0] - rhip[0]\n hd = lshoulder[1] - lhip[1]\n dst_lwaist = midpoint(lshoulder, lhip, 8)\n dst_rwaist = midpoint(rshoulder, rhip, 8)\n src_lwaist = int(dst_lwaist[0] + vd*offset), dst_lwaist[1]+1\n src_rwaist = int(dst_rwaist[0] - vd*offset), dst_rwaist[1]+1\n\n src = [neck,\n src_lwaist, src_rwaist]\n dst = [neck,\n dst_lwaist, dst_rwaist]\n return src, dst\n\npoints = slim_Waist(parts, 0.3)\n\nimport pickle\nwith open(\"points.pkl\", 'wb') as f:\n pickle.dump(points, f)\n\nfor s, d in zip(*points):\n cv2.circle(im, tuple(s), 10, (0, 255, 0), thickness=4)\n cv2.circle(im, tuple(d), 4, (0, 0, 255), thickness=4)\ncv2.imwrite(\"newimg.png\", im)\n\n",
"# -*- coding: utf-8 -*-\nimport os\nimport numpy as np\nfrom PIL import Image\n\nfrom config import FLAGS\n\ndef text2int(text, wordset):\n return wordset.index(text)\n\n\ndef int2text(index, wordset):\n return wordset[index]\n\ndef black2red(img):\n data = np.array(img)\n n_rows, n_columns, _ = data.shape\n for i in range(n_rows):\n for j in range(n_columns):\n R, G, B = data[i][j]\n if R < 255 or G < 255 or B < 255:\n data[i][j] = [255, 0, 0]\n img = Image.fromarray(data)\n return img\n\ndef get_X(path, size, random_red=False):\n \"\"\"Resize, convert to gray, flatten and normalize.\n random_red: randomly change the image to red.\n \"\"\"\n img = Image.open(path)\n\n if random_red:\n img = black2red(img) if np.random.randint(2) else img\n\n img = img.resize(size).convert('L')\n img = np.array(img).flatten() / 255\n return img\n\n\ndef get_Y(path, wordset):\n \"\"\"Assume the captcha text is at the beginning of path.\n \"\"\"\n basename = os.path.basename(path)\n text = basename[0]\n vec = text2int(text, wordset)\n return vec\n\n\ndef data_iterator(data_dir, batch_size, num_epochs):\n \"\"\"Iterate over the data.\n data_dir: data directory; subdirectories are allowed\n \"\"\"\n data = [os.path.join(dir, f) for dir, _, files in\n os.walk(data_dir) for f in files]\n data = np.array(data)\n data_size = len(data)\n num_batches_per_epoch = int((len(data)-1)/batch_size) + 1\n for _ in range(num_epochs):\n indices = np.random.permutation(len(data))\n shuffled_data = data[indices]\n for batch_idx in range(num_batches_per_epoch):\n start_index = batch_idx * batch_size\n end_index = min((batch_idx + 1) * batch_size, data_size)\n yield shuffled_data[start_index:end_index]\n\n# most ugly function\ndef train_data_iterator():\n size = (FLAGS.image_size, FLAGS.image_size)\n data_iter = data_iterator(FLAGS.train_data_dir, FLAGS.batch_size, FLAGS.num_epochs)\n for data in data_iter:\n X = [get_X(datum, size, random_red=True) for datum in data]\n y = [get_Y(datum, FLAGS.wordset) for datum in data]\n yield X, y\n\n# most ugly function\ndef test_data_helper(batch_size=None):\n size = (FLAGS.image_size, FLAGS.image_size)\n data = [os.path.join(dir, f) for dir, _, files in\n os.walk(FLAGS.test_data_dir) for f in files]\n\n if batch_size is not None:\n np.random.shuffle(data)\n data = data[:batch_size]\n X = [get_X(datum, size) for datum in data]\n y = [get_Y(datum, FLAGS.wordset) for datum in data]\n return X, y",
"import cv2\nimport numpy as np\n\nfrom .models import load_model\nfrom .utils import crop_face, draw_face\nfrom .pcn import pcn_detect\n\n\nnets = load_model()\n\ndef detect(img):\n if isinstance(img, str):\n img = cv2.imread(img)\n winlist = pcn_detect(img, nets)\n return winlist\n\ndef crop(img, winlist, size=200):\n \"\"\"\n Returns:\n list of [face, location] pairs\n \"\"\"\n faces = list(map(lambda win: crop_face(img, win, size), winlist))\n return faces\n\ndef draw(img, winlist):\n list(map(lambda win: draw_face(img, win), winlist))\n return img\n\ndef show(img, is_crop=False):\n img = cv2.imread(img)\n winlist = detect(img)\n if is_crop:\n faces = crop(img, winlist)\n faces = [f[0] for f in faces] # ignore location\n img = np.hstack(faces)\n else:\n draw(img, winlist)\n cv2.imshow(\"Show\", img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n" ]
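Editor's note: the last file above wraps PCN face detection in a small API (detect / crop / draw / show). A minimal usage sketch, assuming the module is importable as a package named `pcn` and that `photo.jpg` is a hypothetical local image (both names are assumptions, not part of the record):

import cv2
import pcn  # hypothetical package name for the wrapper module above

img = cv2.imread("photo.jpg")             # hypothetical input image
winlist = pcn.detect(img)                 # detection windows from pcn_detect
faces = pcn.crop(img, winlist, size=200)  # list of [face, location] pairs per the docstring
for face, location in faces:
    cv2.imshow("face", face)              # inspect each cropped face
    cv2.waitKey(0)
cv2.destroyAllWindows()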
[ [ "pandas.DataFrame" ], [ "torch.nn.Softmax", "torch.nn.ConvTranspose2d", "torch.cat", "torch.nn.functional.cross_entropy", "torch.nn.MaxPool2d", "numpy.array" ], [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show" ], [ "numpy.linspace" ], [ "numpy.array", "numpy.random.shuffle" ], [ "numpy.hstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
atmacvit/meronymnet
[ "47e1a7caadc0f770439bb26a93b885f790f62804", "47e1a7caadc0f770439bb26a93b885f790f62804", "47e1a7caadc0f770439bb26a93b885f790f62804", "47e1a7caadc0f770439bb26a93b885f790f62804" ]
[ "Meronymnet/arch/label2obj/util/util.py", "baselines/scripts/segvae/models/networks/normalization.py", "baselines/scripts/layout2im/data/custom_dataloader.py", "baselines/scripts/lostgans/data/custom_loader.py" ]
[ "\"\"\"\nCopyright (C) 2019 NVIDIA Corporation. All rights reserved.\nLicensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).\n\"\"\"\n\nimport re\nimport importlib\nimport torch\nfrom argparse import Namespace\nimport numpy as np\nfrom PIL import Image\nimport os\nimport argparse\nimport dill as pickle\nimport util.coco\n\n\ndef save_obj(obj, name):\n with open(name, 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\n\ndef load_obj(name):\n with open(name, 'rb') as f:\n return pickle.load(f)\n\n# returns a configuration for creating a generator\n# |default_opt| should be the opt of the current experiment\n# |**kwargs|: if any configuration should be overriden, it can be specified here\n\n\ndef copyconf(default_opt, **kwargs):\n conf = argparse.Namespace(**vars(default_opt))\n for key in kwargs:\n print(key, kwargs[key])\n setattr(conf, key, kwargs[key])\n return conf\n\n\ndef tile_images(imgs, picturesPerRow=4):\n \"\"\" Code borrowed from\n https://stackoverflow.com/questions/26521365/cleanly-tile-numpy-array-of-images-stored-in-a-flattened-1d-format/26521997\n \"\"\"\n\n # Padding\n if imgs.shape[0] % picturesPerRow == 0:\n rowPadding = 0\n else:\n rowPadding = picturesPerRow - imgs.shape[0] % picturesPerRow\n if rowPadding > 0:\n imgs = np.concatenate([imgs, np.zeros((rowPadding, *imgs.shape[1:]), dtype=imgs.dtype)], axis=0)\n\n # Tiling Loop (The conditionals are not necessary anymore)\n tiled = []\n for i in range(0, imgs.shape[0], picturesPerRow):\n tiled.append(np.concatenate([imgs[j] for j in range(i, i + picturesPerRow)], axis=1))\n\n tiled = np.concatenate(tiled, axis=0)\n return tiled\n\n\n# Converts a Tensor into a Numpy array\n# |imtype|: the desired type of the converted numpy array\ndef tensor2im(image_tensor, imtype=np.uint8, normalize=True, tile=False):\n if isinstance(image_tensor, list):\n image_numpy = []\n for i in range(len(image_tensor)):\n image_numpy.append(tensor2im(image_tensor[i], imtype, normalize))\n return image_numpy\n\n if image_tensor.dim() == 4:\n # transform each image in the batch\n images_np = []\n for b in range(image_tensor.size(0)):\n one_image = image_tensor[b]\n one_image_np = tensor2im(one_image)\n images_np.append(one_image_np.reshape(1, *one_image_np.shape))\n images_np = np.concatenate(images_np, axis=0)\n if tile:\n images_tiled = tile_images(images_np)\n return images_tiled\n else:\n return images_np\n\n if image_tensor.dim() == 2:\n image_tensor = image_tensor.unsqueeze(0)\n image_numpy = image_tensor.detach().cpu().float().numpy()\n if normalize:\n image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0\n else:\n image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0\n image_numpy = np.clip(image_numpy, 0, 255)\n if image_numpy.shape[2] == 1:\n image_numpy = image_numpy[:, :, 0]\n return image_numpy.astype(imtype)\n\n\n# Converts a one-hot tensor into a colorful label map\ndef tensor2label(label_tensor, n_label, imtype=np.uint8, tile=False):\n if label_tensor.dim() == 4:\n # transform each image in the batch\n images_np = []\n for b in range(label_tensor.size(0)):\n one_image = label_tensor[b]\n one_image_np = tensor2label(one_image, n_label, imtype)\n images_np.append(one_image_np.reshape(1, *one_image_np.shape))\n images_np = np.concatenate(images_np, axis=0)\n if tile:\n images_tiled = tile_images(images_np)\n return images_tiled\n else:\n images_np = images_np[0]\n return images_np\n\n if label_tensor.dim() == 1:\n return np.zeros((64, 64, 3), 
dtype=np.uint8)\n if n_label == 0:\n return tensor2im(label_tensor, imtype)\n label_tensor = label_tensor.cpu().float()\n if label_tensor.size()[0] > 1:\n label_tensor = label_tensor.max(0, keepdim=True)[1]\n label_tensor = Colorize(n_label)(label_tensor)\n label_numpy = np.transpose(label_tensor.numpy(), (1, 2, 0))\n result = label_numpy.astype(imtype)\n return result\n\n\ndef save_image(image_numpy, image_path, create_dir=False):\n if create_dir:\n os.makedirs(os.path.dirname(image_path), exist_ok=True)\n if len(image_numpy.shape) == 2:\n image_numpy = np.expand_dims(image_numpy, axis=2)\n if image_numpy.shape[2] == 1:\n image_numpy = np.repeat(image_numpy, 3, 2)\n image_pil = Image.fromarray(image_numpy)\n\n # save to png\n image_pil.save(image_path.replace('.jpg', '.png'))\n\n\ndef mkdirs(paths):\n if isinstance(paths, list) and not isinstance(paths, str):\n for path in paths:\n mkdir(path)\n else:\n mkdir(paths)\n\n\ndef mkdir(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\n\ndef atoi(text):\n return int(text) if text.isdigit() else text\n\n\ndef natural_keys(text):\n '''\n alist.sort(key=natural_keys) sorts in human order\n http://nedbatchelder.com/blog/200712/human_sorting.html\n (See Toothy's implementation in the comments)\n '''\n return [atoi(c) for c in re.split('(\\d+)', text)]\n\n\ndef natural_sort(items):\n items.sort(key=natural_keys)\n\n\ndef str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\ndef find_class_in_module(target_cls_name, module):\n target_cls_name = target_cls_name.replace('_', '').lower()\n clslib = importlib.import_module(module)\n cls = None\n for name, clsobj in clslib.__dict__.items():\n if name.lower() == target_cls_name:\n cls = clsobj\n\n if cls is None:\n print(\"In %s, there should be a class whose name matches %s in lowercase without underscore(_)\" % (module, target_cls_name))\n exit(0)\n\n return cls\n\n\ndef save_network(net, label, epoch, opt):\n save_filename = '%s_net_%s.pth' % (epoch, label)\n save_path = os.path.join(opt.checkpoints_dir, opt.name, save_filename)\n torch.save(net.cpu().state_dict(), save_path)\n if len(opt.gpu_ids) and torch.cuda.is_available():\n net.cuda()\n\n\ndef load_network(net, label, epoch, opt):\n save_filename = '%s_net_%s.pth' % (epoch, label)\n save_dir = os.path.join(opt.checkpoints_dir, opt.name)\n save_path = os.path.join(save_dir, save_filename)\n weights = torch.load(save_path)\n net.load_state_dict(weights)\n return net\n\n\n###############################################################################\n# Code from\n# https://github.com/ycszen/pytorch-seg/blob/master/transform.py\n# Modified so it complies with the Citscape label map colors\n###############################################################################\ndef uint82bin(n, count=8):\n \"\"\"returns the binary of integer n, count refers to amount of bits\"\"\"\n return ''.join([str((n >> y) & 1) for y in range(count - 1, -1, -1)])\n\n\ndef labelcolormap(N):\n if N == 35: # cityscape\n cmap = np.array([(0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), (111, 74, 0), (81, 0, 81),\n (128, 64, 128), (244, 35, 232), (250, 170, 160), (230, 150, 140), (70, 70, 70), (102, 102, 156), (190, 153, 153),\n (180, 165, 180), (150, 100, 100), (150, 120, 90), (153, 153, 153), (153, 153, 153), (250, 170, 30), (220, 220, 0),\n (107, 142, 35), (152, 251, 152), (70, 130, 180), (220, 
20, 60), (255, 0, 0), (0, 0, 142), (0, 0, 70),\n (0, 60, 100), (0, 0, 90), (0, 0, 110), (0, 80, 100), (0, 0, 230), (119, 11, 32), (0, 0, 142)],\n dtype=np.uint8)\n else:\n cmap = np.zeros((N, 3), dtype=np.uint8)\n for i in range(N):\n r, g, b = 0, 0, 0\n id = i + 1 # let's give 0 a color\n for j in range(7):\n str_id = uint82bin(id)\n r = r ^ (np.uint8(str_id[-1]) << (7 - j))\n g = g ^ (np.uint8(str_id[-2]) << (7 - j))\n b = b ^ (np.uint8(str_id[-3]) << (7 - j))\n id = id >> 3\n cmap[i, 0] = r\n cmap[i, 1] = g\n cmap[i, 2] = b\n\n if N == 182: # COCO\n important_colors = {\n 'sea': (54, 62, 167),\n 'sky-other': (95, 219, 255),\n 'tree': (140, 104, 47),\n 'clouds': (170, 170, 170),\n 'grass': (29, 195, 49)\n }\n for i in range(N):\n name = util.coco.id2label(i)\n if name in important_colors:\n color = important_colors[name]\n cmap[i] = np.array(list(color))\n\n return cmap\n\n\nclass Colorize(object):\n def __init__(self, n=35):\n self.cmap = labelcolormap(n)\n self.cmap = torch.from_numpy(self.cmap[:n])\n\n def __call__(self, gray_image):\n size = gray_image.size()\n color_image = torch.ByteTensor(3, size[1], size[2]).fill_(0)\n\n for label in range(0, len(self.cmap)):\n mask = (label == gray_image[0]).cpu()\n color_image[0][mask] = self.cmap[label][0]\n color_image[1][mask] = self.cmap[label][1]\n color_image[2][mask] = self.cmap[label][2]\n\n return color_image\n\n\n\n", "import re\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom models.networks.sync_batchnorm import SynchronizedBatchNorm2d\nimport torch.nn.utils.spectral_norm as spectral_norm\n\n# Returns a function that creates a normalization function\n# that does not condition on semantic map\ndef get_nonspade_norm_layer(opt, norm_type='instance'):\n # helper function to get # output channels of the previous layer\n def get_out_channel(layer):\n if hasattr(layer, 'out_channels'):\n return getattr(layer, 'out_channels')\n return layer.weight.size(0)\n\n # this function will be returned\n def add_norm_layer(layer):\n nonlocal norm_type\n if norm_type.startswith('spectral'):\n layer = spectral_norm(layer)\n subnorm_type = norm_type[len('spectral'):]\n\n if subnorm_type == 'none' or len(subnorm_type) == 0:\n return layer\n\n # remove bias in the previous layer, which is meaningless\n # since it has no effect after normalization\n if getattr(layer, 'bias', None) is not None:\n delattr(layer, 'bias')\n layer.register_parameter('bias', None)\n\n if subnorm_type == 'batch':\n norm_layer = nn.BatchNorm2d(get_out_channel(layer), affine=True)\n elif subnorm_type == 'sync_batch':\n norm_layer = SynchronizedBatchNorm2d(get_out_channel(layer), affine=True)\n elif subnorm_type == 'instance':\n norm_layer = nn.InstanceNorm2d(get_out_channel(layer), affine=False)\n else:\n raise ValueError('normalization layer %s is not recognized' % subnorm_type)\n\n return nn.Sequential(layer, norm_layer)\n\n return add_norm_layer\n\nclass SPADE(nn.Module):\n def __init__(self, config_text, norm_nc, label_nc):\n super().__init__()\n\n assert config_text.startswith('spade')\n parsed = re.search('spade(\\D+)(\\d)x\\d', config_text)\n param_free_norm_type = str(parsed.group(1))\n ks = int(parsed.group(2))\n\n if param_free_norm_type == 'instance':\n self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)\n elif param_free_norm_type == 'syncbatch':\n self.param_free_norm = SynchronizedBatchNorm2d(norm_nc, affine=False)\n elif param_free_norm_type == 'batch':\n self.param_free_norm = nn.BatchNorm2d(norm_nc, affine=False)\n else:\n 
raise ValueError('%s is not a recognized param-free norm type in SPADE'\n % param_free_norm_type)\n\n # The dimension of the intermediate embedding space. Yes, hardcoded.\n nhidden = 128\n\n pw = ks // 2\n self.mlp_shared = nn.Sequential(\n nn.Conv2d(label_nc, nhidden, kernel_size=ks, padding=pw),\n nn.ReLU()\n )\n self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)\n self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)\n\n def forward(self, x, segmap):\n\n # Part 1. generate parameter-free normalized activations\n normalized = self.param_free_norm(x)\n\n # Part 2. produce scaling and bias conditioned on semantic map\n segmap = F.interpolate(segmap, size=x.size()[2:], mode='nearest')\n actv = self.mlp_shared(segmap)\n gamma = self.mlp_gamma(actv)\n beta = self.mlp_beta(actv)\n\n # apply scale and bias\n out = normalized * (1 + gamma) + beta\n\n return out\n", "#!/usr/bin/python\n#\n# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json, os, random\nfrom collections import defaultdict\n\nimport torch\nfrom torch.utils.data import Dataset\nimport torchvision.transforms as T\nfrom data.preprocess import *\nimport PIL\nfrom PIL import Image\nfrom utils.data import imagenet_preprocess, Resize\nfrom torch.utils.data import DataLoader\n\n\nclass CustomDataset(Dataset):\n def __init__(self, image_dir, instances_json, stuff_json=None,\n stuff_only=False, image_size=(64, 64), mask_size=16,\n normalize_images=False, max_samples=None,\n include_relationships=True, min_object_size=0.02,\n min_objects_per_image=3, max_objects_per_image=8,\n include_other=False, instance_whitelist=None, stuff_whitelist=None, data_mode='train'):\n\n\n \"\"\"\n A PyTorch Dataset for loading Coco and Coco-Stuff annotations and converting\n them to scene graphs on the fly.\n\n Inputs:\n - image_dir: Path to a directory where images are held\n - instances_json: Path to a JSON file giving COCO annotations\n - stuff_json: (optional) Path to a JSON file giving COCO-Stuff annotations\n - stuff_only: (optional, default True) If True then only iterate over\n images which appear in stuff_json; if False then iterate over all images\n in instances_json.\n - image_size: Size (H, W) at which to load images. Default (64, 64).\n - mask_size: Size M for object segmentation masks; default 16.\n - normalize_image: If True then normalize images by subtracting ImageNet\n mean pixel and dividing by ImageNet std pixel.\n - max_samples: If None use all images. Other wise only use images in the\n range [0, max_samples). 
Default None.\n - include_relationships: If True then include spatial relationships; if\n False then only include the trivial __in_image__ relationship.\n - min_object_size: Ignore objects whose bounding box takes up less than\n this fraction of the image.\n - min_objects_per_image: Ignore images which have fewer than this many\n object annotations.\n - max_objects_per_image: Ignore images which have more than this many\n object annotations.\n - include_other: If True, include COCO-Stuff annotations which have category\n \"other\". Default is False, because I found that these were really noisy\n and pretty much impossible for the system to model.\n - instance_whitelist: None means use all instance categories. Otherwise a\n list giving a whitelist of instance category names to use.\n - stuff_whitelist: None means use all stuff categories. Otherwise a list\n giving a whitelist of stuff category names to use.\n \"\"\"\n super(Dataset, self).__init__()\n\n class_names = ['cow', 'person', 'sheep', 'bird', 'cat', 'dog', 'horse', 'aeroplane', 'motorbike', 'bicycle']\n data_dict = preprocess_make_data(class_names, data_mode)\n self.data_dict = data_dict\n\n if stuff_only and stuff_json is None:\n print('WARNING: Got stuff_only=True but stuff_json=None.')\n print('Falling back to stuff_only=False.')\n\n self.image_dir = image_dir\n self.mask_size = mask_size\n self.max_samples = max_samples\n self.normalize_images = normalize_images\n self.include_relationships = include_relationships\n self.set_image_size(image_size)\n\n #with open(instances_json, 'r') as f:\n # instances_data = json.load(f)\n\n #stuff_data = None\n # if stuff_json is not None and stuff_json != '':\n # # with open(stuff_json, 'r') as f:\n # # stuff_data = json.load(f)\n #\n # self.image_ids = []\n # self.image_id_to_filename = {}\n # self.image_id_to_size = {}\n #for image_data in instances_data['images']:\n # image_id = image_data['id']\n # filename = image_data['file_name']\n # width = image_data['width']\n # height = image_data['height']\n # self.image_ids.append(image_id)\n # self.image_id_to_filename[image_id] = filename\n # self.image_id_to_size[image_id] = (width, height)\n\n self.vocab = {\n 'object_name_to_idx': {},\n 'pred_name_to_idx': {},\n }\n # object_idx_to_name = {}\n # all_instance_categories = []\n # for category_data in instances_data['categories']:\n # category_id = category_data['id']\n # category_name = category_data['name']\n # all_instance_categories.append(category_name)\n # object_idx_to_name[category_id] = category_name\n # self.vocab['object_name_to_idx'][category_name] = category_id\n all_stuff_categories = []\n #if stuff_data:\n # for category_data in stuff_data['categories']:\n # category_name = category_data['name']\n # category_id = category_data['id']\n # all_stuff_categories.append(category_name)\n # object_idx_to_name[category_id] = category_name\n # self.vocab['object_name_to_idx'][category_name] = category_id\n\n #if instance_whitelist is None:\n # instance_whitelist = all_instance_categories\n #if stuff_whitelist is None:\n # stuff_whitelist = all_stuff_categories\n #category_whitelist = set(instance_whitelist) | set(stuff_whitelist)\n\n # Add object data from instances\n #self.image_id_to_objects = defaultdict(list)\n # for object_data in instances_data['annotations']:\n # image_id = object_data['image_id']\n # _, _, w, h = object_data['bbox']\n # W, H = self.image_id_to_size[image_id]\n # box_area = (w * h) / (W * H)\n # box_ok = box_area > min_object_size\n # object_name = 
object_idx_to_name[object_data['category_id']]\n # category_ok = object_name in category_whitelist\n # other_ok = object_name != 'other' or include_other\n # if box_ok and category_ok and other_ok:\n # self.image_id_to_objects[image_id].append(object_data)\n\n # Add object data from stuff\n #if stuff_data:\n # image_ids_with_stuff = set()\n # for object_data in stuff_data['annotations']:\n # image_id = object_data['image_id']\n # image_ids_with_stuff.add(image_id)\n # _, _, w, h = object_data['bbox']\n # W, H = self.image_id_to_size[image_id]\n # box_area = (w * h) / (W * H)\n # box_ok = box_area > min_object_size\n # object_name = object_idx_to_name[object_data['category_id']]\n # category_ok = object_name in category_whitelist\n # other_ok = object_name != 'other' or include_other\n # if box_ok and category_ok and other_ok:\n # self.image_id_to_objects[image_id].append(object_data)\n # if stuff_only:\n # new_image_ids = []\n # for image_id in self.image_ids:\n # if image_id in image_ids_with_stuff:\n # new_image_ids.append(image_id)\n # self.image_ids = new_image_ids\n\n # all_image_ids = set(self.image_id_to_filename.keys())\n # image_ids_to_remove = all_image_ids - image_ids_with_stuff\n # for image_id in image_ids_to_remove:\n # self.image_id_to_filename.pop(image_id, None)\n # self.image_id_to_size.pop(image_id, None)\n # self.image_id_to_objects.pop(image_id, None)\n\n # COCO category labels start at 1, so use 0 for __image__\n self.vocab['object_name_to_idx']['__image__'] = 0\n\n # Build object_idx_to_name\n # name_to_idx = self.vocab['object_name_to_idx']\n # assert len(name_to_idx) == len(set(name_to_idx.values()))\n # max_object_idx = max(name_to_idx.values())\n # idx_to_name = ['NONE'] * (1 + max_object_idx)\n # for name, idx in self.vocab['object_name_to_idx'].items():\n # idx_to_name[idx] = name\n # self.vocab['object_idx_to_name'] = idx_to_name\n # self.num_objects = len(self.vocab['object_idx_to_name'])\n\n # Prune images that have too few or too many objects\n # new_image_ids = []\n # total_objs = 0\n # for image_id in self.image_ids:\n # num_objs = len(self.image_id_to_objects[image_id])\n # total_objs += num_objs\n # if min_objects_per_image <= num_objs <= max_objects_per_image:\n # new_image_ids.append(image_id)\n # self.image_ids = new_image_ids\n\n # self.vocab['pred_idx_to_name'] = [\n # '__in_image__',\n # 'left of',\n # 'right of',\n # 'above',\n # 'below',\n # 'inside',\n # 'surrounding',\n # ]\n # self.vocab['pred_name_to_idx'] = {}\n # for idx, name in enumerate(self.vocab['pred_idx_to_name']):\n # self.vocab['pred_name_to_idx'][name] = idx\n\n self.num_objects=25\n def set_image_size(self, image_size):\n print('called set_image_size', image_size)\n transform = [Resize(image_size), T.ToTensor()]\n if self.normalize_images:\n transform.append(imagenet_preprocess())\n self.transform = T.Compose(transform)\n self.image_size = image_size\n\n def total_objects(self):\n total_objs = 0\n for id in self.data_dict.keys():\n total_objs = total_objs + self.data_dict[id]['boxes'].shape[0]\n return total_objs\n # for i, image_id in enumerate(self.image_ids):\n # if self.max_samples and i >= self.max_samples:\n # break\n # num_objs = len(self.image_id_to_objects[image_id])\n # # total_objs += num_objs\n # # return total_objs\n\n def __len__(self):\n # if self.max_samples is None:\n # return len(self.image_ids)\n # return min(len(self.image_ids), self.max_samples)\n return len(self.data_dict.keys())\n\n def __getitem__(self, index):\n \"\"\"\n Get the pixels of an image, and a 
random synthetic scene graph for that\n image constructed on-the-fly from its COCO object annotations. We assume\n that the image will have height H, width W, C channels; there will be O\n object annotations, each of which will have both a bounding box and a\n segmentation mask of shape (M, M). There will be T triples in the scene\n graph.\n\n Returns a tuple of:\n - image: FloatTensor of shape (C, H, W)\n - objs: LongTensor of shape (O,)\n - boxes: FloatTensor of shape (O, 4) giving boxes for objects in\n (x0, y0, x1, y1) format, in a [0, 1] coordinate system\n - masks: LongTensor of shape (O, M, M) giving segmentation masks for\n objects, where 0 is background and 1 is object.\n - triples: LongTensor of shape (T, 3) where triples[t] = [i, p, j]\n means that (objs[i], p, objs[j]) is a triple.\n \"\"\"\n image_id = str(index)\n\n # filename = self.image_id_to_filename[image_id]\n # image_path = os.path.join(self.image_dir, filename)\n #with open(image_path, 'rb') as f:\n # with PIL.Image.open(f) as image:\n # WW, HH = image.size\n # image = self.transform(image.convert('RGB'))\n img = self.data_dict[image_id]['image']\n img = Image.fromarray(np.uint8(img))\n #img.save(image_id + '_.png')\n WW, HH = img.size\n image = self.transform(img)\n bbxs = self.data_dict[image_id]['boxes'].tolist()\n class_name = self.data_dict[image_id]['class']\n H, W = self.image_size\n objs, boxes, masks, obj_to_cls = [], [], [], []\n for bbx in bbxs:\n x0, y0, x1, y1 = bbx\n #x0 = x0 * 550.0/ WW\n #y0 = y0 * 550.0/ HH\n #x1 = x1 * 550.0/ WW\n #y1 = y1 * 550.0/ HH\n boxes.append(torch.FloatTensor([x0, y0, x1, y1]))\n\n # This will give a numpy array of shape (HH, WW)\n mask = torch.zeros(1, H, W)\n # mask = seg_to_mask(object_data['segmentation'], WW, HH)\n mask[:, round(y0 * H):max(round(y0 * H)+1, round(y1 * H)), round(x0 * W):max(round(x0 * W)+1, round(x1 * W))] = 1\n\n # Crop the mask according to the bounding box, being careful to\n # ensure that we don't crop a zero-area region\n # mx0, mx1 = int(round(x)), int(round(x + w))\n # my0, my1 = int(round(y)), int(round(y + h))\n # mx1 = max(mx0 + 1, mx1)\n # my1 = max(my0 + 1, my1)\n # mask = mask[my0:my1, mx0:mx1]\n # mask = imresize(255 * mask, (self.mask_size, self.mask_size),\n # mode='constant')\n # mask = torch.from_numpy((mask > 128).astype(np.int64))\n masks.append(mask)\n obj_to_cls.append(torch.from_numpy(class_name))\n for obj in self.data_dict[image_id]['labels']:\n objs.append(obj)\n\n # Add dummy __image__ object\n # objs.append(self.vocab['object_name_to_idx']['__image__'])\n # boxes.append(torch.FloatTensor([0, 0, 1, 1]))\n # masks.append(torch.ones(self.mask_size, self.mask_size).long())\n print(objs)\n # shuffle objs\n O = len(objs)\n rand_idx = list(range(O))\n random.shuffle(rand_idx)\n\n objs = [objs[i] for i in rand_idx]\n boxes = [boxes[i] for i in rand_idx]\n masks = [masks[i] for i in rand_idx]\n obj_to_cls = [obj_to_cls[i] for i in rand_idx]\n\n objs = torch.LongTensor(objs)\n boxes = torch.stack(boxes, dim=0)\n masks = torch.stack(masks, dim=0)\n obj_to_cls = torch.stack(obj_to_cls, dim=0)\n\n # box_areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])\n\n # Compute centers of all objects\n # obj_centers = []\n # _, MH, MW = masks.size()\n # for i, obj_idx in enumerate(objs):\n # x0, y0, x1, y1 = boxes[i]\n # mask = (masks[i] == 1)\n # xs = torch.linspace(x0, x1, MW).view(1, MW).expand(MH, MW)\n # ys = torch.linspace(y0, y1, MH).view(MH, 1).expand(MH, MW)\n # if mask.sum() == 0:\n # mean_x = 0.5 * (x0 + x1)\n # mean_y = 0.5 
* (y0 + y1)\n # else:\n # mean_x = xs[mask].mean()\n # mean_y = ys[mask].mean()\n # obj_centers.append([mean_x, mean_y])\n # obj_centers = torch.FloatTensor(obj_centers)\n #\n # # Add triples\n # triples = []\n # num_objs = objs.size(0)\n # __image__ = self.vocab['object_name_to_idx']['__image__']\n # real_objs = []\n # if num_objs > 1:\n # real_objs = (objs != __image__).nonzero().squeeze(1)\n # for cur in real_objs:\n # choices = [obj for obj in real_objs if obj != cur]\n # if len(choices) == 0 or not self.include_relationships:\n # break\n # other = random.choice(choices)\n # if random.random() > 0.5:\n # s, o = cur, other\n # else:\n # s, o = other, cur\n #\n # # Check for inside / surrounding\n # sx0, sy0, sx1, sy1 = boxes[s]\n # ox0, oy0, ox1, oy1 = boxes[o]\n # d = obj_centers[s] - obj_centers[o]\n # theta = math.atan2(d[1], d[0])\n #\n # if sx0 < ox0 and sx1 > ox1 and sy0 < oy0 and sy1 > oy1:\n # p = 'surrounding'\n # elif sx0 > ox0 and sx1 < ox1 and sy0 > oy0 and sy1 < oy1:\n # p = 'inside'\n # elif theta >= 3 * math.pi / 4 or theta <= -3 * math.pi / 4:\n # p = 'left of'\n # elif -3 * math.pi / 4 <= theta < -math.pi / 4:\n # p = 'above'\n # elif -math.pi / 4 <= theta < math.pi / 4:\n # p = 'right of'\n # elif math.pi / 4 <= theta < 3 * math.pi / 4:\n # p = 'below'\n # p = self.vocab['pred_name_to_idx'][p]\n # triples.append([s, p, o])\n\n # Add __in_image__ triples\n # in_image = self.vocab['pred_name_to_idx']['__in_image__']\n # for i in range(O - 1):\n # triples.append([i, in_image, O - 1])\n #\n # triples = torch.LongTensor(triples)\n\n return image, objs, boxes, masks, obj_to_cls, torch.from_numpy(class_name)\n\n\n# def seg_to_mask(seg, width=1.0, height=1.0):\n# \"\"\"\n# Tiny utility for decoding segmentation masks using the pycocotools API.\n# \"\"\"\n# if type(seg) == list:\n# rles = mask_utils.frPyObjects(seg, height, width)\n# rle = mask_utils.merge(rles)\n# elif type(seg['counts']) == list:\n# rle = mask_utils.frPyObjects(seg, height, width)\n# else:\n# rle = seg\n# return mask_utils.decode(rle)\n\n\ndef custom_collate_fn(batch):\n \"\"\"\n Collate function to be used when wrapping CocoSceneGraphDataset in a\n DataLoader. 
Returns a tuple of the following:\n\n - imgs: FloatTensor of shape (N, C, H, W)\n - objs: LongTensor of shape (O,) giving object categories\n - boxes: FloatTensor of shape (O, 4)\n - masks: FloatTensor of shape (O, M, M)\n - triples: LongTensor of shape (T, 3) giving triples\n - obj_to_img: LongTensor of shape (O,) mapping objects to images\n - triple_to_img: LongTensor of shape (T,) mapping triples to images\n \"\"\"\n all_imgs, all_classes, all_objs, all_boxes, all_masks, all_obj_to_cls, all_obj_to_img = [], [], [], [], [], [], []\n\n for i, (img, objs, boxes, masks, obj_to_cls, class_name) in enumerate(batch):\n all_imgs.append(img[None])\n O = objs.size(0)\n all_objs.append(objs)\n all_boxes.append(boxes)\n all_masks.append(masks)\n all_classes.append(class_name[None])\n all_obj_to_cls.append(obj_to_cls)\n all_obj_to_img.append(torch.LongTensor(O).fill_(i))\n\n all_imgs = torch.cat(all_imgs)\n all_objs = torch.cat(all_objs)\n all_boxes = torch.cat(all_boxes)\n all_masks = torch.cat(all_masks)\n all_classes = torch.cat(all_classes)\n all_obj_to_cls = torch.cat(all_obj_to_cls)\n all_obj_to_img = torch.cat(all_obj_to_img)\n\n out = (all_imgs, all_objs, all_boxes, all_masks, all_classes, all_obj_to_cls, all_obj_to_img)\n print(\"loader obj to class \" + str(all_obj_to_cls.shape))\n print(\"loader class shape\" + str((all_classes).shape))\n\n return out\n\n\ndef get_dataloader(batch_size=10, CUSTOM_DIR='/home/zhaobo/Data/coco', instance_whitelist=None, stuff_whitelist=None, coco_include_other=False):\n coco_train_image_dir = None#os.path.join(COCO_DIR, 'images/val2017')\n coco_val_image_dir = None#os.path.join(COCO_DIR, 'images/val2017')\n coco_train_instances_json = None#os.path.join(COCO_DIR, 'annotations/instances_val2017.json')\n coco_train_stuff_json = None#os.path.join(COCO_DIR, 'annotations/stuff_train2017.json')\n coco_val_instances_json = None#os.path.join(COCO_DIR, 'annotations/instances_val2017.json')\n coco_val_stuff_json = None#os.path.join(COCO_DIR, 'annotations/stuff_val2017.json')\n min_object_size = 0.02\n min_objects_per_image = 3\n coco_stuff_only = False\n mask_size = 32\n\n image_size = (128, 128)\n num_train_samples = None\n num_val_samples = None\n include_relationships = False\n batch_size = batch_size\n shuffle_val = False\n\n # build datasets\n dset_kwargs = {\n 'image_dir': coco_train_image_dir,\n 'instances_json': coco_train_instances_json,\n 'stuff_json': coco_train_stuff_json,\n 'stuff_only': coco_stuff_only,\n 'image_size': image_size,\n 'mask_size': mask_size,\n 'max_samples': num_train_samples,\n 'min_object_size': min_object_size,\n 'min_objects_per_image': min_objects_per_image,\n 'instance_whitelist': instance_whitelist,\n 'stuff_whitelist': stuff_whitelist,\n 'include_other': coco_include_other,\n 'include_relationships': include_relationships,\n 'data_mode': 'train',\n }\n train_dset = CustomDataset(**dset_kwargs)\n num_objs = train_dset.total_objects()\n num_imgs = len(train_dset)\n print('Training dataset has %d images and %d objects' % (num_imgs, num_objs))\n print('(%.2f objects per image)' % (float(num_objs) / num_imgs))\n\n dset_kwargs['image_dir'] = coco_val_image_dir\n dset_kwargs['instances_json'] = coco_val_instances_json\n dset_kwargs['stuff_json'] = coco_val_stuff_json\n dset_kwargs['max_samples'] = num_val_samples\n dset_kwargs['data_mode'] = 'test'\n val_dset = CustomDataset(**dset_kwargs)\n\n #assert train_dset.vocab == val_dset.vocab\n\n #vocab = json.loads(json.dumps(train_dset.vocab))\n\n # build dataloader\n loader_kwargs = {\n 
'batch_size': batch_size,\n 'num_workers': 1,\n 'shuffle': True,\n 'collate_fn': custom_collate_fn,\n }\n train_loader = DataLoader(train_dset, **loader_kwargs)\n\n loader_kwargs['shuffle'] = shuffle_val\n loader_kwargs['num_workers'] = 1\n val_loader = DataLoader(val_dset, **loader_kwargs)\n\n return train_loader, val_loader\n\n\nif __name__ == '__main__':\n train_loader, val_loader = get_dataloader(batch_size=32)\n\n # test reading data; custom_collate_fn yields 7 fields, including obj_to_cls\n for i, batch in enumerate(train_loader):\n imgs, objs, boxes, masks, classes, obj_to_cls, obj_to_img = batch\n\n print(imgs.shape, objs.shape, boxes.shape, masks.shape, classes, obj_to_cls.shape, obj_to_img.shape)\n\n if i == 20: break\n",
"import json, os, random, math\nfrom collections import defaultdict\n\nimport torch\nfrom torch.utils.data import Dataset\nimport torchvision.transforms as T\n\nimport numpy as np\nimport PIL\nfrom skimage.transform import resize as imresize\nimport pycocotools.mask as mask_utils\nfrom random import shuffle\nfrom data.preprocess import *\nfrom PIL import Image\nclass CustomDataset(Dataset):\n def __init__(self, image_dir, instances_json, stuff_json=None,\n stuff_only=True, image_size=(64, 64), mask_size=16,\n normalize_images=False, max_samples=None,\n include_relationships=True, min_object_size=0.02,\n min_objects_per_image=3, max_objects_per_image=25, left_right_flip=False,\n include_other=False, instance_whitelist=None, stuff_whitelist=None, data_mode='train', classes=None):\n \"\"\"\n A PyTorch Dataset for loading Coco and Coco-Stuff annotations and converting\n them to scene graphs on the fly.\n \n Inputs:\n - image_dir: Path to a directory where images are held\n - instances_json: Path to a JSON file giving COCO annotations\n - stuff_json: (optional) Path to a JSON file giving COCO-Stuff annotations\n - stuff_only: (optional, default True) If True then only iterate over\n images which appear in stuff_json; if False then iterate over all images\n in instances_json.\n - image_size: Size (H, W) at which to load images. Default (64, 64).\n - mask_size: Size M for object segmentation masks; default 16.\n - normalize_images: If True then normalize images by subtracting ImageNet\n mean pixel and dividing by ImageNet std pixel.\n - max_samples: If None use all images. Otherwise only use images in the\n range [0, max_samples). Default None.\n - include_relationships: If True then include spatial relationships; if\n False then only include the trivial __in_image__ relationship.\n - min_object_size: Ignore objects whose bounding box takes up less than\n this fraction of the image.\n - min_objects_per_image: Ignore images which have fewer than this many\n object annotations.\n - max_objects_per_image: Ignore images which have more than this many\n object annotations.\n - include_other: If True, include COCO-Stuff annotations which have category\n \"other\". Default is False, because I found that these were really noisy\n and pretty much impossible for the system to model.\n - instance_whitelist: None means use all instance categories. Otherwise a\n list giving a whitelist of instance category names to use.\n - stuff_whitelist: None means use all stuff categories.
Otherwise a list\n giving a whitelist of stuff category names to use.\n \"\"\"\n super(CustomDataset, self).__init__()\n class_names = classes if classes != ['all'] else ['cow', 'person', 'sheep', 'bird', 'cat', 'dog', 'horse', 'aeroplane', 'motorbike', 'bicycle']\n data_dict = preprocess_make_data(class_names, data_mode)\n self.data_dict = data_dict\n\n if stuff_only and stuff_json is None:\n print('WARNING: Got stuff_only=True but stuff_json=None.')\n print('Falling back to stuff_only=False.')\n\n self.image_dir = image_dir\n self.mask_size = mask_size\n self.max_samples = max_samples\n self.max_objects_per_image = max_objects_per_image\n self.normalize_images = normalize_images\n self.include_relationships = include_relationships\n self.left_right_flip = left_right_flip\n self.set_image_size(image_size)\n\n# with open(instances_json, 'r') as f:\n# instances_data = json.load(f)\n\n stuff_data = None\n# if stuff_json is not None and stuff_json != '':\n# with open(stuff_json, 'r') as f:\n# stuff_data = json.load(f)\n\n# self.image_ids = []\n# self.image_id_to_filename = {}\n# self.image_id_to_size = {}\n# for image_data in instances_data['images']:\n# image_id = image_data['id']\n# filename = image_data['file_name']\n# width = image_data['width']\n# height = image_data['height']\n# self.image_ids.append(image_id)\n# self.image_id_to_filename[image_id] = filename\n# self.image_id_to_size[image_id] = (width, height)\n\n self.vocab = {\n 'object_name_to_idx': {},\n 'pred_name_to_idx': {},\n }\n\n\n # COCO category labels start at 1, so use 0 for __image__\n self.vocab['object_name_to_idx']['__image__'] = 0\n\n # Build object_idx_to_name\n# name_to_idx = self.vocab['object_name_to_idx']\n# assert len(name_to_idx) == len(set(name_to_idx.values()))\n# max_object_idx = max(name_to_idx.values())\n# idx_to_name = ['NONE'] * (1 + max_object_idx)\n# for name, idx in self.vocab['object_name_to_idx'].items():\n# idx_to_name[idx] = name\n# self.vocab['object_idx_to_name'] = idx_to_name\n\n # Prune images that have too few or too many objects\n# new_image_ids = []\n# total_objs = 0\n# for image_id in self.image_ids:\n# num_objs = len(self.image_id_to_objects[image_id])\n# total_objs += num_objs\n# if min_objects_per_image <= num_objs <= max_objects_per_image:\n# new_image_ids.append(image_id)\n# self.image_ids = new_image_ids\n\n# self.vocab['pred_idx_to_name'] = [\n# '__in_image__',\n# 'left of',\n# 'right of',\n# 'above',\n# 'below',\n# 'inside',\n# 'surrounding',\n# ]\n# self.vocab['pred_name_to_idx'] = {}\n# for idx, name in enumerate(self.vocab['pred_idx_to_name']):\n# self.vocab['pred_name_to_idx'][name] = idx\n #self.max_objects_per_image = 25\n\n def set_image_size(self, image_size):\n print('called set_image_size', image_size)\n transform = [Resize(image_size), T.ToTensor()]\n if self.normalize_images:\n transform.append(imagenet_preprocess())\n self.transform = T.Compose(transform)\n self.image_size = image_size\n\n def total_objects(self):\n total_objs = 0\n for id in self.data_dict.keys():\n total_objs = total_objs + self.data_dict[id]['boxes'].shape[0]\n return total_objs\n\n\n def __len__(self):\n return len(self.data_dict.keys())\n\n def __getitem__(self, index):\n \"\"\"\n Get the pixels of an image, and a random synthetic scene graph for that\n image constructed on-the-fly from its COCO object annotations.
We assume\n that the image will have height H, width W, C channels; there will be O\n object annotations, each of which will have both a bounding box and a\n segmentation mask of shape (M, M). There will be T triples in the scene\n graph.\n \n Returns a tuple of:\n - image: FloatTensor of shape (C, H, W)\n - objs: LongTensor of shape (O,)\n - boxes: FloatTensor of shape (O, 4) giving boxes for objects in\n (x0, y0, x1, y1) format, in a [0, 1] coordinate system\n - masks: LongTensor of shape (O, M, M) giving segmentation masks for\n objects, where 0 is background and 1 is object.\n - triples: LongTensor of shape (T, 3) where triples[t] = [i, p, j]\n means that (objs[i], p, objs[j]) is a triple.\n \"\"\"\n image_id = str(index)\n img = self.data_dict[image_id]['image']\n\n #flip = False\n #if index >= len(self.image_ids):\n # index = index - len(self.image_ids)\n # flip = True\n #image_id = self.image_ids[index]\n\n #filename = self.image_id_to_filename[image_id]\n #image_path = os.path.join(self.image_dir, filename)\n #with open(image_path, 'rb') as f:\n # with PIL.Image.open(f) as image:\n # if flip:\n # image = PIL.ImageOps.mirror(image)\n # WW, HH = image.size\n # image = self.transform(image.convert('RGB'))\n img = Image.fromarray(np.uint8(img))\n WW, HH = img.size\n image = self.transform(img) \n\n bbxs = self.data_dict[image_id]['boxes'].tolist()\n \n class_name = self.data_dict[image_id]['class']\n H, W = self.image_size\n\n\n objs, boxes, masks = [], [], []\n for bbx in bbxs:\n x0, y0, x1, y1 = bbx\n #x0 = x0 * 550.0/WW \n #y0 = y0 * 550.0/HH\n #x1 = x1 * 550.0/WW\n #y1 = y1 * 550.0/HH\n x1 = (x1*550.0 - x0*550.0 + 1.0)/550.0\n y1 = (y1*550.0 - y0*550.0 + 1.0)/550.0\n boxes.append(torch.FloatTensor([x0, y0, x1, y1]))\n #boxes.append(torch.FloatTensor([0.0, 0.0, 1.0, 1.0]))\n\n # This will give a numpy array of shape (HH, WW)\n #mask = torch.zeros(1, H, W)\n # mask = seg_to_mask(object_data['segmentation'], WW, HH)\n #mask[:, round(y0 * H):max(round(y0 * H)+1, round(y1 * H)), round(x0 * W):max(round(x0 * W)+1, round(x1 * W))] = 1\n\n # Crop the mask according to the bounding box, being careful to\n # ensure that we don't crop a zero-area region\n # mx0, mx1 = int(round(x)), int(round(x + w))\n # my0, my1 = int(round(y)), int(round(y + h))\n # mx1 = max(mx0 + 1, mx1)\n # my1 = max(my0 + 1, my1)\n # mask = mask[my0:my1, mx0:mx1]\n # mask = imresize(255 * mask, (self.mask_size, self.mask_size),\n # mode='constant')\n #mask = torch.from_numpy((mask > 128).astype(np.int64))\n #masks.append(mask)\n #obj_to_cls.append(torch.from_numpy(class_name))\n for obj in self.data_dict[image_id]['labels']:\n objs.append(obj)\n\n # obj_masks = []\n #for object_data in self.image_id_to_objects[image_id]:\n # objs.append(object_data['category_id'])\n # x, y, w, h = object_data['bbox']\n # x0 = x / WW\n # y0 = y / HH\n # x1 = (w) / WW\n # y1 = (h) / HH\n # if flip:\n # x0 = 1 - (x0 + x1)\n # boxes.append(np.array([x0, y0, x1, y1]))\n\n # # This will give a numpy array of shape (HH, WW)\n # mask = seg_to_mask(object_data['segmentation'], WW, HH)\n # if flip:\n # mask = mask[:, ::-1]\n # x = WW - x - w\n # # Crop the mask according to the bounding box, being careful to\n # # ensure that we don't crop a zero-area region\n # # mx0, mx1 = int(round(x)), int(round(x + w))\n # # my0, my1 = int(round(y)), int(round(y + h))\n # # mx1 = max(mx0 + 1, mx1)\n # # my1 = max(my0 + 1, my1)\n # # obj_mask = mask[my0:my1, mx0:mx1]\n # # obj_mask = imresize(255.0 * obj_mask, (self.mask_size, self.mask_size),\n # # 
mode='constant')\n # # obj_mask = torch.from_numpy((obj_mask > 128).astype(np.int64))\n # # obj_masks.append(obj_mask)\n #\n # mask = imresize(255.0 * mask, (self.image_size[0], self.image_size[1]),\n # mode='constant')\n # mask = torch.from_numpy((mask > 128).astype(np.int64))\n # masks.append(mask)\n\n # shuffle(objs)\n # shuffle(boxes)\n # shuffle(masks)\n # Add dummy __image__ object\n # objs.append(184)\n # boxes.append(torch.FloatTensor([0, 0, 1, 1]))\n # masks.append(torch.ones(self.mask_size, self.mask_size).long())\n\n # add 0 for number of objects\n for _ in range(len(objs), self.max_objects_per_image):\n objs.append(self.vocab['object_name_to_idx']['__image__'])\n boxes.append(torch.FloatTensor([0.0, 0.0, 1.0, 1.0]))\n # masks.append(torch.zeros((self.image_size[0], self.image_size[1])).long())\n # obj_masks.append(torch.zeros((self.mask_size, self.mask_size)).long())\n\n #objs = torch.LongTensor(objs)\n #boxes = np.vstack(boxes)\n #O = len(objs)\n #rand_idx = list(range(O))\n #random.shuffle(rand_idx)\n\n #objs = [objs[i] for i in rand_idx]\n #boxes = [boxes[i] for i in rand_idx]\n\n objs = torch.LongTensor(objs)\n boxes = torch.stack(boxes, dim=0)\n \n\n # masks = torch.stack(masks, dim=0)\n # obj_masks = torch.stack(obj_masks, dim=0)\n # b_map = self.get_bbox_map_p(boxes)\n\n # box_areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])\n\n # Compute centers of all objects\n \"\"\"\n obj_centers = []\n _, MH, MW = masks.size()\n for i, obj_idx in enumerate(objs):\n x0, y0, x1, y1 = boxes[i]\n mask = (masks[i] == 1)\n xs = torch.linspace(x0, x1, MW).view(1, MW).expand(MH, MW)\n ys = torch.linspace(y0, y1, MH).view(MH, 1).expand(MH, MW)\n if mask.sum() == 0:\n mean_x = 0.5 * (x0 + x1)\n mean_y = 0.5 * (y0 + y1)\n else:\n mean_x = xs[mask].mean()\n mean_y = ys[mask].mean()\n obj_centers.append([mean_x, mean_y])\n obj_centers = torch.FloatTensor(obj_centers)\n\n # Add triples\n \n triples = []\n num_objs = objs.size(0)\n __image__ = self.vocab['object_name_to_idx']['__image__']\n real_objs = []\n if num_objs > 1:\n real_objs = (objs != __image__).nonzero().squeeze(1)\n for cur in real_objs:\n choices = [obj for obj in real_objs if obj != cur]\n if len(choices) == 0 or not self.include_relationships:\n break\n other = random.choice(choices)\n if random.random() > 0.5:\n s, o = cur, other\n else:\n s, o = other, cur\n\n # Check for inside / surrounding\n sx0, sy0, sx1, sy1 = boxes[s]\n ox0, oy0, ox1, oy1 = boxes[o]\n d = obj_centers[s] - obj_centers[o]\n theta = math.atan2(d[1], d[0])\n\n if sx0 < ox0 and sx1 > ox1 and sy0 < oy0 and sy1 > oy1:\n p = 'surrounding'\n elif sx0 > ox0 and sx1 < ox1 and sy0 > oy0 and sy1 < oy1:\n p = 'inside'\n elif theta >= 3 * math.pi / 4 or theta <= -3 * math.pi / 4:\n p = 'left of'\n elif -3 * math.pi / 4 <= theta < -math.pi / 4:\n p = 'above'\n elif -math.pi / 4 <= theta < math.pi / 4:\n p = 'right of'\n elif math.pi / 4 <= theta < 3 * math.pi / 4:\n p = 'below'\n p = self.vocab['pred_name_to_idx'][p]\n triples.append([s, p, o])\n\n # Add __in_image__ triples\n O = objs.size(0)\n in_image = self.vocab['pred_name_to_idx']['__in_image__']\n for i in range(O - 1):\n triples.append([i, in_image, O - 1])\n \"\"\"\n cls = torch.from_numpy(class_name)\n print(\"Image {}\".format(image.shape))\n print(\"Labels {}\".format(objs.shape))\n print(\"Bbox {}\".format(boxes.shape))\n print(\"Class {}\".format(cls.shape))\n # triples = torch.LongTensor(triples)\n return image, objs, boxes, cls## , b_map #, None # obj_masks #, obj_masks # , b_map # masks # 
, triples\n\n def get_bbox_map_p(self, bbox):\n mapping = np.zeros((len(bbox), self.image_size[0], self.image_size[0]))\n for idx in range(self.max_objects_per_image):\n if min(bbox[idx]) < 0:\n continue\n line_space = np.linspace(0, self.image_size[0]-1, num=self.image_size[0])\n xv, yv = np.meshgrid(line_space, line_space)\n mapping[idx][(xv < int((bbox[idx][0] + bbox[idx][2]) * self.image_size[0])) * (xv > int(bbox[idx][0] * self.image_size[0])) *\n (yv < int((bbox[idx][1] + bbox[idx][3]) * self.image_size[0])) * (yv > int(bbox[idx][1] * self.image_size[0]))] = 1\n return mapping\n\n\ndef seg_to_mask(seg, width=1.0, height=1.0):\n \"\"\"\n Tiny utility for decoding segmentation masks using the pycocotools API.\n \"\"\"\n if type(seg) == list:\n rles = mask_utils.frPyObjects(seg, height, width)\n rle = mask_utils.merge(rles)\n elif type(seg['counts']) == list:\n rle = mask_utils.frPyObjects(seg, height, width)\n else:\n rle = seg\n return mask_utils.decode(rle)\n\n\ndef coco_collate_fn(batch):\n \"\"\"\n Collate function to be used when wrapping CocoSceneGraphDataset in a\n DataLoader. Returns a tuple of the following:\n \n - imgs: FloatTensor of shape (N, C, H, W)\n - objs: LongTensor of shape (O,) giving object categories\n - boxes: FloatTensor of shape (O, 4)\n - masks: FloatTensor of shape (O, M, M)\n - triples: LongTensor of shape (T, 3) giving triples\n - obj_to_img: LongTensor of shape (O,) mapping objects to images\n - triple_to_img: LongTensor of shape (T,) mapping triples to images\n \"\"\"\n all_imgs, all_objs, all_boxes, all_masks, all_triples = [], [], [], [], []\n all_obj_to_img, all_triple_to_img = [], []\n obj_offset = 0\n for i, (img, objs, boxes, masks, triples) in enumerate(batch):\n all_imgs.append(img[None])\n if objs.dim() == 0 or triples.dim() == 0:\n continue\n O, T = objs.size(0), triples.size(0)\n all_objs.append(objs)\n all_boxes.append(boxes)\n all_masks.append(masks)\n triples = triples.clone()\n triples[:, 0] += obj_offset\n triples[:, 2] += obj_offset\n all_triples.append(triples)\n\n all_obj_to_img.append(torch.LongTensor(O).fill_(i))\n all_triple_to_img.append(torch.LongTensor(T).fill_(i))\n obj_offset += O\n\n all_imgs = torch.cat(all_imgs)\n all_objs = torch.cat(all_objs)\n all_boxes = torch.cat(all_boxes)\n all_masks = torch.cat(all_masks)\n all_triples = torch.cat(all_triples)\n all_obj_to_img = torch.cat(all_obj_to_img)\n all_triple_to_img = torch.cat(all_triple_to_img)\n\n out = (all_imgs, all_objs, all_boxes, all_masks, all_triples,\n all_obj_to_img, all_triple_to_img)\n return out\n\n\n# IMAGENET_MEAN = [0.485, 0.456, 0.406]\n# IMAGENET_STD = [0.229, 0.224, 0.225]\nIMAGENET_MEAN = [0.5, 0.5, 0.5]\nIMAGENET_STD = [0.5, 0.5, 0.5]\n\nINV_IMAGENET_MEAN = [-m for m in IMAGENET_MEAN]\nINV_IMAGENET_STD = [1.0 / s for s in IMAGENET_STD]\n\n\ndef imagenet_preprocess():\n return T.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD)\n\n\ndef rescale(x):\n lo, hi = x.min(), x.max()\n return x.sub(lo).div(hi - lo)\n\n\ndef imagenet_deprocess(rescale_image=True):\n transforms = [\n T.Normalize(mean=[0, 0, 0], std=INV_IMAGENET_STD),\n T.Normalize(mean=INV_IMAGENET_MEAN, std=[1.0, 1.0, 1.0]),\n ]\n if rescale_image:\n transforms.append(rescale)\n return T.Compose(transforms)\n\n\ndef imagenet_deprocess_batch(imgs, rescale=True):\n \"\"\"\n Input:\n - imgs: FloatTensor of shape (N, C, H, W) giving preprocessed images\n \n Output:\n - imgs_de: ByteTensor of shape (N, C, H, W) giving deprocessed images\n in the range [0, 255]\n \"\"\"\n if isinstance(imgs, 
torch.autograd.Variable):\n imgs = imgs.data\n imgs = imgs.cpu().clone()\n deprocess_fn = imagenet_deprocess(rescale_image=rescale)\n imgs_de = []\n for i in range(imgs.size(0)):\n img_de = deprocess_fn(imgs[i])[None]\n img_de = img_de.mul(255).clamp(0, 255).byte()\n imgs_de.append(img_de)\n imgs_de = torch.cat(imgs_de, dim=0)\n return imgs_de\n\n\nclass Resize(object):\n def __init__(self, size, interp=PIL.Image.BILINEAR):\n if isinstance(size, tuple):\n H, W = size\n self.size = (W, H)\n else:\n self.size = (size, size)\n self.interp = interp\n\n def __call__(self, img):\n return img.resize(self.size, self.interp)\n\n\ndef unpack_var(v):\n if isinstance(v, torch.autograd.Variable):\n return v.data\n return v\n\n\ndef split_graph_batch(triples, obj_data, obj_to_img, triple_to_img):\n triples = unpack_var(triples)\n obj_data = [unpack_var(o) for o in obj_data]\n obj_to_img = unpack_var(obj_to_img)\n triple_to_img = unpack_var(triple_to_img)\n\n triples_out = []\n obj_data_out = [[] for _ in obj_data]\n obj_offset = 0\n N = obj_to_img.max() + 1\n for i in range(N):\n o_idxs = (obj_to_img == i).nonzero().view(-1)\n t_idxs = (triple_to_img == i).nonzero().view(-1)\n\n cur_triples = triples[t_idxs].clone()\n cur_triples[:, 0] -= obj_offset\n cur_triples[:, 2] -= obj_offset\n triples_out.append(cur_triples)\n\n for j, o_data in enumerate(obj_data):\n cur_o_data = None\n if o_data is not None:\n cur_o_data = o_data[o_idxs]\n obj_data_out[j].append(cur_o_data)\n\n obj_offset += o_idxs.size(0)\n\n return triples_out, obj_data_out\n\n\n" ]
[ [ "torch.ByteTensor", "numpy.expand_dims", "torch.load", "numpy.clip", "numpy.uint8", "torch.from_numpy", "numpy.concatenate", "torch.cuda.is_available", "numpy.transpose", "numpy.repeat", "numpy.array", "numpy.zeros" ], [ "torch.nn.Sequential", "torch.nn.utils.spectral_norm", "torch.nn.Conv2d", "torch.nn.InstanceNorm2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ], [ "torch.LongTensor", "torch.zeros", "torch.cat", "torch.utils.data.DataLoader", "torch.from_numpy", "torch.FloatTensor", "torch.stack" ], [ "torch.LongTensor", "numpy.linspace", "torch.cat", "numpy.uint8", "torch.from_numpy", "torch.FloatTensor", "torch.stack", "numpy.meshgrid" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
asteroid-team/Libri_VAD
[ "d90f2c7c6e281553290e9171f7cb392c543c4790" ]
[ "scripts/create_VAD_dataset.py" ]
[ "from librosa import resample\nimport tqdm.contrib.concurrent\nimport argparse\nimport os\nimport json\nimport numpy as np\nimport numpy.random as npr\nimport soundfile as sf\nimport random\nimport warnings\nimport pyloudnorm as pyln\nfrom tqdm import tqdm\nfrom itertools import cycle\n\n# eps secures log and division\nEPS = 1e-10\n# Rate of the sources in LibriSpeech\nRATE = 16000\nTARGET_SR = 8000\nMIN_LOUDNESS = -33\nMAX_LOUDNESS = -25\nMAX_AMP = 0.9\nnp.random.seed(22)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--librispeech_dir', type=str, required=True,\n help='Path to librispeech root directory')\nparser.add_argument('--metadata_dir', type=str, required=True,\n help='Path to the LibriMix metadata directory')\nparser.add_argument('--outdir', type=str, default=None,\n help='Path to the desired dataset root directory')\n\n\ndef main(args):\n # Get librispeech root path\n global librispeech_dir\n librispeech_dir = args.librispeech_dir\n # Get Metadata directory\n metadata_dir = args.metadata_dir\n # Get LibriMix root path\n vad_outdir = args.outdir\n\n noise_md = sorted(os.listdir(os.path.join(metadata_dir, 'dns_noise')))\n source_md = sorted(os.listdir(os.path.join(metadata_dir, 'reformated_LibriSpeech')))\n source_md = ['train-clean-360.json']\n noise_md = ['train.json']\n # source_md.remove('train-clean-100.json')\n\n # Get the desired frequencies\n for source_file, noise_file in zip(source_md, cycle(noise_md)):\n with open(os.path.join(metadata_dir, 'dns_noise', noise_file)) as file:\n n_file = json.load(file)\n with open(os.path.join(metadata_dir, 'reformated_LibriSpeech', source_file)) as file:\n s_file = json.load(file)\n dir_name = noise_file.replace('.json', '')\n dir_path = os.path.join(vad_outdir, dir_name)\n os.makedirs(dir_path, exist_ok=True)\n process(s_file, n_file, metadata_dir, dir_path)\n\n\ndef process(source_file, noise_file, metadata_dir, dir_path):\n result_md = []\n\n for s_file, n_file in tqdm(zip(source_file, noise_file), total=len(source_file)):\n max_swap = len(s_file['VAD']['start'])\n n_swap = draw_n_swap(max_swap)\n cuts = fusion_VAD(s_file[f'VAD'], n_swap)\n\n cuts = {'start': [int(cuts['start'][j] * TARGET_SR) for j in range(len(cuts['start']))],\n 'stop': [int(cuts['stop'][j] * TARGET_SR) for j in range(len(cuts['start']))],\n 'word': cuts['word']}\n\n source = read_sources(s_file)\n source, VAD = align_sources(source, cuts)\n noise = read_noise(n_file)\n noise = fit_noise(noise, len(source))\n sources = (source + [noise])\n if len(source) < TARGET_SR * 3:\n print('too short')\n continue\n # sources = pad(sources)\n loudness_list, _, source_norm_list = set_loudness(sources)\n mixture = mix(source_norm_list)\n m_length = len(mixture)\n create_directories(dir_path)\n mixture_path = write_sources_and_mixture(mixture, s_file, dir_path)\n add_to_md(result_md, mixture_path, n_swap, VAD, m_length)\n\n if 'train' in dir_path:\n save_md_path = 'train.json'\n elif 'test' in dir_path:\n save_md_path = 'test.json'\n else:\n save_md_path = 'dev.json'\n os.makedirs((os.path.join(metadata_dir, 'sets')), exist_ok=True)\n with open(os.path.join(metadata_dir, 'sets', save_md_path),\n 'w') as outfile:\n json.dump(result_md, outfile, indent=4)\n\n\ndef draw_n_swap(max_swap):\n # global overlap over the whole mixture\n return int(np.random.default_rng().uniform(1, max_swap, 1)[0])\n\n\ndef fusion_VAD(VAD_dict, n_swap):\n while len(VAD_dict['start']) != n_swap:\n to_fusion = int(np.random.default_rng().uniform(low=1, high=len(VAD_dict['start'])))\n del 
(VAD_dict['start'][to_fusion])\n del (VAD_dict['stop'][to_fusion - 1])\n return VAD_dict\n\n\ndef read_sources(mixture):\n source = sf.read(os.path.join(librispeech_dir, mixture[f'origin_path']), dtype='float32')[0]\n source = resample(source, RATE, TARGET_SR)\n return source\n\n\ndef read_noise(mixture):\n noise, n_sr = sf.read(mixture['path'], dtype='float32')\n noise = resample(noise, n_sr, TARGET_SR)\n return noise\n\n\ndef fit_noise(noise, duration):\n if len(noise) > duration:\n noise = noise[:duration]\n elif len(noise) < duration:\n noise = extend_noise(noise, duration)\n return noise\n\n\ndef extend_noise(noise, max_length):\n \"\"\" Concatenate noise using hanning window\"\"\"\n noise_ex = noise\n window = np.hanning(RATE // 4 + 1)\n # Increasing window\n i_w = window[:len(window) // 2 + 1]\n # Decreasing window\n d_w = window[len(window) // 2::-1]\n # Extend until max_length is reached\n while len(noise_ex) < max_length:\n noise_ex = np.concatenate((noise_ex[:len(noise_ex) - len(d_w)],\n np.multiply(\n noise_ex[len(noise_ex) - len(d_w):],\n d_w) + np.multiply(\n noise[:len(i_w)], i_w),\n noise[len(i_w):]))\n noise_ex = noise_ex[:max_length]\n return noise_ex\n\n\ndef align_sources(sources, cuts):\n source_aligned = np.array([])\n VAD = {'start': [], 'stop': []}\n VAD['start'].append(0)\n for i in range(len(cuts['start'])):\n chunks = get_chunks(sources, cuts, i)\n # chunks = smooth(chunks)\n source_aligned = np.concatenate((source_aligned, chunks))\n VAD['stop'].append(len(source_aligned))\n silence_length = random.randrange(int(TARGET_SR * 0.2), TARGET_SR)\n silence = np.zeros(silence_length)\n source_aligned = np.concatenate((source_aligned, silence))\n VAD['start'].append(len(source_aligned))\n del (VAD['start'][-1])\n return source_aligned, VAD\n\n\ndef get_chunks(source, cuts, i):\n return source[cuts['start'][i]:cuts['stop'][i]]\n\n\ndef set_loudness(sources_list):\n \"\"\" Compute original loudness and normalise them randomly \"\"\"\n # Initialize loudness\n loudness_list = []\n # In LibriSpeech all sources are at 16KHz hence the meter\n meter = pyln.Meter(TARGET_SR)\n # Randomize sources loudness\n target_loudness_list = []\n sources_list_norm = []\n\n # Normalize loudness\n for i in range(len(sources_list)):\n # Compute initial loudness\n loudness_list.append(meter.integrated_loudness(sources_list[i]))\n # Pick a random loudness\n target_loudness = random.uniform(MIN_LOUDNESS, MAX_LOUDNESS)\n # Noise has a different loudness\n if i == len(sources_list) - 1:\n target_loudness = random.uniform(MIN_LOUDNESS - 5,\n MAX_LOUDNESS - 5)\n # Normalize source to target loudness\n with warnings.catch_warnings():\n # We don't want to pollute stdout, but we don't want to ignore\n # other warnings.\n warnings.simplefilter(\"ignore\")\n src = pyln.normalize.loudness(sources_list[i], loudness_list[i],\n target_loudness)\n # If source clips, renormalize\n if np.max(np.abs(src)) >= 1:\n src = sources_list[i] * MAX_AMP / np.max(np.abs(sources_list[i]))\n target_loudness = meter.integrated_loudness(src)\n # Save scaled source and loudness.\n sources_list_norm.append(src)\n target_loudness_list.append(target_loudness)\n return loudness_list, target_loudness_list, sources_list_norm\n\n\ndef mix(sources_list_norm):\n \"\"\" Do the mixture for min mode and max mode \"\"\"\n # Initialize mixture\n mixture_max = np.zeros_like(sources_list_norm[0])\n for i in range(len(sources_list_norm)):\n mixture_max += sources_list_norm[i]\n return mixture_max\n\n\ndef add_to_md(result_md, mixture_path, 
n_swap, VAD, m_length):\n row = {'mixture_path': mixture_path, 'n_swap': n_swap,\n 'VAD': VAD, 'length': m_length}\n result_md.append(row)\n\n\ndef write_sources_and_mixture(mixture, file, dir_path):\n name = (os.path.basename(file[f'origin_path'])).replace('.flac', '.wav')\n mixture_path = os.path.join(dir_path, 'mixture', name)\n sf.write(mixture_path, mixture, TARGET_SR)\n return mixture_path\n\n\ndef create_directories(path):\n os.makedirs(os.path.join(path, 'mixture'), exist_ok=True)\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n main(args)\n" ]
[ [ "numpy.abs", "numpy.random.seed", "numpy.concatenate", "numpy.zeros_like", "numpy.hanning", "numpy.array", "numpy.zeros", "numpy.random.default_rng" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
peterk1198/cycle-gan
[ "aaddb24f976b65dd5c7d93e47f374ff2962580de" ]
[ "count_things.py" ]
[ "import pandas as pd\nimport os\n\ndf = pd.read_csv(\"sample_labels.csv\")\n\nmales = 0\nfemales = 0\n\nfor index, row in df.iterrows():\n\tif row[\"Patient Gender\"] == \"M\":\n\t\tmales += 1\n\telse:\n\t\tfemales += 1\n\nprint (males, \" males\")\nprint (females, \" females\")\n\n\nfor index, row in df.iterrows():\n\tremove = row[\"Image Index\"]\n\tif row[\"Patient Gender\"] == \"M\" and males > 300:\n\t\tos.remove(\"./images/trainM/\" + remove)\n\t\tmales -= 1\n\telif row[\"Patient Gender\"] == \"F\" and females > 300:\n\t\tos.remove(\"./images/trainF/\" + remove)\n\t\tfemales -= 1" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
diiogofernands/transformers
[ "f5cd27694a0c7d0036954c8350f774a5c1181a57" ]
[ "src/transformers/models/visual_bert/modeling_visual_bert.py" ]
[ "# coding=utf-8\n# Copyright 2021 The UCLA NLP Authors and The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch VisualBERT model. \"\"\"\n\n\nimport math\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple\n\nimport torch\nimport torch.utils.checkpoint\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss, KLDivLoss, LogSoftmax\n\nfrom ...activations import ACT2FN\nfrom ...file_utils import (\n ModelOutput,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings,\n)\nfrom ...modeling_outputs import (\n BaseModelOutput,\n BaseModelOutputWithPooling,\n MultipleChoiceModelOutput,\n SequenceClassifierOutput,\n)\nfrom ...modeling_utils import (\n PreTrainedModel,\n apply_chunking_to_forward,\n find_pruneable_heads_and_indices,\n prune_linear_layer,\n)\nfrom ...utils import logging\nfrom .configuration_visual_bert import VisualBertConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"VisualBertConfig\"\n_CHECKPOINT_FOR_DOC = \"uclanlp/visualbert-vqa-coco-pre\"\n\nVISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"uclanlp/visualbert-vqa\",\n \"uclanlp/visualbert-vqa-pre\",\n \"uclanlp/visualbert-vqa-coco-pre\",\n \"uclanlp/visualbert-vcr\",\n \"uclanlp/visualbert-vcr-pre\",\n \"uclanlp/visualbert-vcr-coco-pre\",\n \"uclanlp/visualbert-nlvr2\",\n \"uclanlp/visualbert-nlvr2-pre\",\n \"uclanlp/visualbert-nlvr2-coco-pre\"\n # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert\n]\n\n\nclass VisualBertEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings and visual embeddings.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n # position_ids (1, len position emb) is contiguous in memory and exported when serialized\n self.register_buffer(\"position_ids\", torch.arange(config.max_position_embeddings).expand((1, -1)))\n\n # For Visual Features\n # Token type and position embedding for image features\n self.visual_token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n self.visual_position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n\n if config.special_visual_initialize:\n self.visual_token_type_embeddings.weight.data = nn.Parameter(\n self.token_type_embeddings.weight.data.clone(), requires_grad=True\n )\n self.visual_position_embeddings.weight.data = 
nn.Parameter(\n self.position_embeddings.weight.data.clone(), requires_grad=True\n )\n\n self.visual_projection = nn.Linear(config.visual_embedding_dim, config.hidden_size)\n\n def forward(\n self,\n input_ids=None,\n token_type_ids=None,\n position_ids=None,\n inputs_embeds=None,\n visual_embeds=None,\n visual_token_type_ids=None,\n image_text_alignment=None,\n ):\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n\n seq_length = input_shape[1]\n\n if position_ids is None:\n position_ids = self.position_ids[:, :seq_length]\n\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)\n\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n embeddings = inputs_embeds + token_type_embeddings\n\n # Absolute Position Embeddings\n position_embeddings = self.position_embeddings(position_ids)\n embeddings += position_embeddings\n\n if visual_embeds is not None:\n if visual_token_type_ids is None:\n visual_token_type_ids = torch.ones(\n visual_embeds.size()[:-1], dtype=torch.long, device=self.position_ids.device\n )\n\n visual_embeds = self.visual_projection(visual_embeds)\n visual_token_type_embeddings = self.visual_token_type_embeddings(visual_token_type_ids)\n\n if image_text_alignment is not None:\n # image_text_alignment = Batch x image_length x alignment_number.\n # Each element denotes the position of the word corresponding to the image feature. -1 is the padding value.\n\n dtype = token_type_embeddings.dtype\n image_text_alignment_mask = (image_text_alignment != -1).long()\n # Get rid of the -1.\n image_text_alignment = image_text_alignment_mask * image_text_alignment\n\n # Batch x image_length x alignment length x dim\n visual_position_embeddings = self.position_embeddings(image_text_alignment)\n visual_position_embeddings *= image_text_alignment_mask.to(dtype=dtype).unsqueeze(-1)\n visual_position_embeddings = visual_position_embeddings.sum(2)\n\n # We want to averge along the alignment_number dimension.\n image_text_alignment_mask = image_text_alignment_mask.to(dtype=dtype).sum(2)\n\n if (image_text_alignment_mask == 0).sum() != 0:\n image_text_alignment_mask[image_text_alignment_mask == 0] = 1 # Avoid divide by zero error\n logger.warning(\n \"Found 0 values in `image_text_alignment_mask`. 
Setting them to 1 to avoid divide-by-zero error.\"\n )\n visual_position_embeddings = visual_position_embeddings / image_text_alignment_mask.unsqueeze(-1)\n\n visual_position_ids = torch.zeros(\n *visual_embeds.size()[:-1], dtype=torch.long, device=visual_embeds.device\n )\n\n # When fine-tuning the detector , the image_text_alignment is sometimes padded too long.\n if visual_position_embeddings.size(1) != visual_embeds.size(1):\n if visual_position_embeddings.size(1) < visual_embeds.size(1):\n raise ValueError(\n f\"Visual position embeddings length: {visual_position_embeddings.size(1)}\"\n f\"should be the same as `visual_embeds` length: {visual_embeds.size(1)}\"\n )\n visual_position_embeddings = visual_position_embeddings[:, : visual_embeds.size(1), :]\n\n visual_position_embeddings = visual_position_embeddings + self.visual_position_embeddings(\n visual_position_ids\n )\n else:\n visual_position_ids = torch.zeros(\n *visual_embeds.size()[:-1], dtype=torch.long, device=visual_embeds.device\n )\n visual_position_embeddings = self.visual_position_embeddings(visual_position_ids)\n\n visual_embeddings = visual_embeds + visual_position_embeddings + visual_token_type_embeddings\n\n embeddings = torch.cat((embeddings, visual_embeddings), dim=1)\n\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nclass VisualBertSelfAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, \"embedding_size\"):\n raise ValueError(\n f\"The hidden size ({config.hidden_size}) is not a multiple of the number of attention \"\n f\"heads ({config.num_attention_heads})\"\n )\n\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size)\n self.key = nn.Linear(config.hidden_size, self.all_head_size)\n self.value = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n output_attentions=False,\n ):\n mixed_query_layer = self.query(hidden_states)\n\n key_layer = self.transpose_for_scores(self.key(hidden_states))\n value_layer = self.transpose_for_scores(self.value(hidden_states))\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in VisualBertSelfAttentionModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.Softmax(dim=-1)(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n\n # Mask heads if we want to\n if head_mask is 
not None:\n attention_probs = attention_probs * head_mask\n\n context_layer = torch.matmul(attention_probs, value_layer)\n\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n\n outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\n\n return outputs\n\n\n# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->VisualBert\nclass VisualBertSelfOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass VisualBertAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.self = VisualBertSelfAttention(config)\n self.output = VisualBertSelfOutput(config)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(\n heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads\n )\n\n # Prune linear layers\n self.self.query = prune_linear_layer(self.self.query, index)\n self.self.key = prune_linear_layer(self.self.key, index)\n self.self.value = prune_linear_layer(self.self.value, index)\n self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)\n\n # Update hyper params and store pruned heads\n self.self.num_attention_heads = self.self.num_attention_heads - len(heads)\n self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n output_attentions=False,\n ):\n self_outputs = self.self(\n hidden_states,\n attention_mask,\n head_mask,\n output_attentions,\n )\n attention_output = self.output(self_outputs[0], hidden_states)\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\n return outputs\n\n\n# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->VisualBert\nclass VisualBertIntermediate(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->VisualBert\nclass VisualBertOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = 
self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass VisualBertLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.chunk_size_feed_forward = config.chunk_size_feed_forward\n self.seq_len_dim = 1\n self.attention = VisualBertAttention(config)\n self.intermediate = VisualBertIntermediate(config)\n self.output = VisualBertOutput(config)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n output_attentions=False,\n ):\n self_attention_outputs = self.attention(\n hidden_states,\n attention_mask,\n head_mask,\n output_attentions=output_attentions,\n )\n attention_output = self_attention_outputs[0]\n\n outputs = self_attention_outputs[1:] # add self attentions if we output attention weights\n\n layer_output = apply_chunking_to_forward(\n self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output\n )\n outputs = (layer_output,) + outputs\n\n return outputs\n\n def feed_forward_chunk(self, attention_output):\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n return layer_output\n\n\nclass VisualBertEncoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.layer = nn.ModuleList([VisualBertLayer(config) for _ in range(config.num_hidden_layers)])\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n ):\n all_hidden_states = () if output_hidden_states else None\n all_self_attentions = () if output_attentions else None\n\n for i, layer_module in enumerate(self.layer):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_head_mask = head_mask[i] if head_mask is not None else None\n\n if getattr(self.config, \"gradient_checkpointing\", False) and self.training:\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(layer_module),\n hidden_states,\n attention_mask,\n layer_head_mask,\n )\n else:\n layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, output_attentions)\n\n hidden_states = layer_outputs[0]\n if output_attentions:\n all_self_attentions = all_self_attentions + (layer_outputs[1],)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(\n v\n for v in [\n hidden_states,\n all_hidden_states,\n all_self_attentions,\n ]\n if v is not None\n )\n return BaseModelOutput(\n last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions\n )\n\n\n# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->VisualBert\nclass VisualBertPooler(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.activation = nn.Tanh()\n\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense(first_token_tensor)\n pooled_output = self.activation(pooled_output)\n return pooled_output\n\n\n# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->VisualBert\nclass 
VisualBertPredictionHeadTransform(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n if isinstance(config.hidden_act, str):\n self.transform_act_fn = ACT2FN[config.hidden_act]\n else:\n self.transform_act_fn = config.hidden_act\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.transform_act_fn(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->VisualBert\nclass VisualBertLMPredictionHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.transform = VisualBertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->VisualBert\nclass VisualBertPreTrainingHeads(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.predictions = VisualBertLMPredictionHead(config)\n self.seq_relationship = nn.Linear(config.hidden_size, 2)\n\n def forward(self, sequence_output, pooled_output):\n prediction_scores = self.predictions(sequence_output)\n seq_relationship_score = self.seq_relationship(pooled_output)\n return prediction_scores, seq_relationship_score\n\n\nclass VisualBertPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = VisualBertConfig\n base_model_prefix = \"visual_bert\"\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def _init_weights(self, module):\n \"\"\"Initialize the weights\"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\n\n@dataclass\nclass VisualBertForPreTrainingOutput(ModelOutput):\n \"\"\"\n Output type of :class:`~transformers.VisualBertForPreTraining`.\n\n Args:\n loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):\n Total loss as the sum of the masked language modeling loss and the sentence-image prediction\n (classification) loss.\n prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n seq_relationship_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):\n Prediction scores of the sentence-image prediction 
(classification) head (scores of True/False continuation\n before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,\n sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n prediction_logits: torch.FloatTensor = None\n seq_relationship_logits: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\nVISUAL_BERT_START_DOCSTRING = r\"\"\"\n This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic\n methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,\n pruning heads etc.)\n\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\n general usage and behavior.\n\n Parameters:\n config (:class:`~transformers.VisualBertConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model\n weights.\n\"\"\"\n\nVISUAL_BERT_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`~transformers.BertTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,\n 1]``:\n\n - 0 corresponds to a `sentence A` token,\n - 1 corresponds to a `sentence B` token.\n\n `What are token type IDs? <../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,\n config.max_position_embeddings - 1]``.\n\n `What are position IDs? 
<../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert :obj:`input_ids` indices into associated\n vectors than the model's internal embedding lookup matrix.\n\n visual_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, visual_seq_length, visual_embedding_dim)`, `optional`):\n The embedded representation of the visual inputs, generally derived using using an object detector.\n\n visual_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, visual_seq_length)`, `optional`):\n Mask to avoid performing attention on visual embeddings. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n visual_token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, visual_seq_length)`, `optional`):\n Segment token indices to indicate different portions of the visual embeds.\n\n `What are token type IDs? <../glossary.html#token-type-ids>`_ The authors of VisualBERT set the\n `visual_token_type_ids` to `1` for all tokens.\n\n image_text_alignment (:obj:`torch.LongTensor` of shape :obj:`(batch_size, visual_seq_length, alignment_number)`, `optional`):\n Image-Text alignment uses to decide the position IDs of the visual embeddings.\n\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare VisualBert Model transformer outputting raw hidden-states without any specific head on top.\",\n VISUAL_BERT_START_DOCSTRING,\n)\nclass VisualBertModel(VisualBertPreTrainedModel):\n \"\"\"\n\n The model can behave as an encoder (with only self-attention) following the architecture described in `Attention is\n all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,\n Llion Jones, Aidan N. 
Gomez, Lukasz Kaiser and Illia Polosukhin.\n \"\"\"\n\n def __init__(self, config, add_pooling_layer=True):\n super().__init__(config)\n self.config = config\n\n self.embeddings = VisualBertEmbeddings(config)\n self.encoder = VisualBertEncoder(config)\n\n self.pooler = VisualBertPooler(config) if add_pooling_layer else None\n\n self.bypass_transformer = config.bypass_transformer\n\n if self.bypass_transformer:\n self.additional_layer = VisualBertLayer(config)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n @add_start_docstrings_to_model_forward(VISUAL_BERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n visual_embeds=None,\n visual_attention_mask=None,\n visual_token_type_ids=None,\n image_text_alignment=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n\n Returns:\n\n Example::\n\n >>> # Assumption: `get_visual_embeddings(image)` gets the visual embeddings of the image.\n >>> from transformers import BertTokenizer, VisualBertModel\n >>> import torch\n\n >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n >>> model = VisualBertModel.from_pretrained('uclanlp/visualbert-vqa-coco-pre')\n\n >>> inputs = tokenizer(\"The capital of France is Paris.\", return_tensors=\"pt\")\n >>> visual_embeds = get_visual_embeddings(image).unsqueeze(0)\n >>> visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long) #example\n >>> visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float)\n\n >>> inputs.update({{\n ... \"visual_embeds\": visual_embeds,\n ... \"visual_token_type_ids\": visual_token_type_ids,\n ... \"visual_attention_mask\": visual_attention_mask\n ... 
}})\n\n >>> outputs = model(**inputs)\n\n >>> last_hidden_states = outputs.last_hidden_state\n \"\"\"\n\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n batch_size, seq_length = input_shape\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n batch_size, seq_length = input_shape\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n if visual_embeds is None:\n raise ValueError(\n f\"`visual_embeds` can not be of type {type(visual_embeds)} when using a VisualBert Model.\"\n )\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n visual_input_shape = visual_embeds.size()[:-1]\n\n if attention_mask is None:\n attention_mask = torch.ones(input_shape, device=device)\n\n if visual_attention_mask is None:\n visual_attention_mask = torch.ones(visual_input_shape, device=device)\n\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n\n combined_attention_mask = torch.cat((attention_mask, visual_attention_mask), dim=-1)\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(\n combined_attention_mask, [batch_size, input_shape + visual_input_shape], device\n )\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n embedding_output = self.embeddings(\n input_ids=input_ids,\n position_ids=position_ids,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n visual_embeds=visual_embeds,\n visual_token_type_ids=visual_token_type_ids,\n image_text_alignment=image_text_alignment,\n )\n\n if self.bypass_transformer and visual_embeds is not None:\n text_length = input_ids.size(1)\n text_embedding_output = embedding_output[:, :text_length, :]\n visual_embedding_output = embedding_output[:, text_length:, :]\n\n text_extended_attention_mask = extended_attention_mask[:, :, text_length, :text_length]\n\n encoded_outputs = self.encoder(\n text_embedding_output,\n attention_mask=text_extended_attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = encoded_outputs[0]\n concatenated_input = torch.cat((sequence_output, visual_embedding_output), dim=1)\n sequence_output = self.additional_layer(concatenated_input, extended_attention_mask)\n pooled_output = self.pooler(sequence_output) if self.pooler is not None else None\n\n else:\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n 
sequence_output = encoder_outputs[0]\n\n pooled_output = self.pooler(sequence_output) if self.pooler is not None else None\n\n if not return_dict:\n return (sequence_output, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPooling(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n VisualBert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a\n `sentence-image prediction (classification)` head.\n \"\"\",\n VISUAL_BERT_START_DOCSTRING,\n)\nclass VisualBertForPreTraining(VisualBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.visual_bert = VisualBertModel(config)\n self.cls = VisualBertPreTrainingHeads(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.cls.predictions.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.cls.predictions.decoder = new_embeddings\n\n @add_start_docstrings_to_model_forward(VISUAL_BERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=VisualBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n visual_embeds=None,\n visual_attention_mask=None,\n visual_token_type_ids=None,\n image_text_alignment=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n labels=None,\n sentence_image_labels=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape ``(batch_size, total_sequence_length)``, `optional`):\n Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,\n config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``\n sentence_image_labels (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):\n Labels for computing the sentence-image prediction (classification) loss. Input should be a sequence\n pair (see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``:\n\n - 0 indicates sequence B is a matching pair of sequence A for the given image,\n - 1 indicates sequence B is a random sequence w.r.t A for the given image.\n\n Returns:\n\n Example::\n\n >>> # Assumption: `get_visual_embeddings(image)` gets the visual embeddings of the image in the batch.\n >>> from transformers import BertTokenizer, VisualBertForPreTraining\n\n >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n >>> model = VisualBertForPreTraining.from_pretrained('uclanlp/visualbert-vqa-coco-pre')\n\n >>> inputs = tokenizer(\"The capital of France is {mask}.\", return_tensors=\"pt\")\n >>> visual_embeds = get_visual_embeddings(image).unsqueeze(0)\n >>> visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long) #example\n >>> visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float)\n\n >>> inputs.update({{\n ... \"visual_embeds\": visual_embeds,\n ... \"visual_token_type_ids\": visual_token_type_ids,\n ... \"visual_attention_mask\": visual_attention_mask\n ... 
}})\n >>> max_length = inputs[\"input_ids\"].shape[-1]+visual_embeds.shape[-2]\n >>> labels = tokenizer(\"The capital of France is Paris.\", return_tensors=\"pt\", padding=\"max_length\", max_length=max_length)[\"input_ids\"]\n >>> sentence_image_labels = torch.tensor(1).unsqueeze(0) # Batch_size\n\n\n >>> outputs = model(**inputs, labels=labels, sentence_image_labels=sentence_image_labels)\n >>> loss = outputs.loss\n >>> prediction_logits = outputs.prediction_logits\n >>> seq_relationship_logits = outputs.seq_relationship_logits\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.visual_bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n visual_embeds=visual_embeds,\n visual_attention_mask=visual_attention_mask,\n visual_token_type_ids=visual_token_type_ids,\n image_text_alignment=image_text_alignment,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output, pooled_output = outputs[:2]\n prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)\n\n total_loss = None\n if labels is not None and sentence_image_labels is not None:\n total_size = attention_mask.size(-1) + visual_attention_mask.size(-1)\n if labels.size(-1) != total_size:\n raise ValueError(\n f\"The labels provided should have same sequence length as total attention mask.\"\n f\"Found labels with sequence length {labels.size(-1)}, expected {total_size}.\"\n )\n\n loss_fct = CrossEntropyLoss()\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n sentence_image_loss = loss_fct(seq_relationship_score.view(-1, 2), sentence_image_labels.view(-1))\n total_loss = masked_lm_loss + sentence_image_loss\n\n if labels is not None and sentence_image_labels is None:\n total_size = attention_mask.size(-1) + visual_attention_mask.size(-1)\n if labels.size(-1) != total_size:\n raise ValueError(\n f\"The labels provided should have same sequence length as total attention mask.\"\n f\"Found labels with sequence length {labels.size(-1)}, expected {total_size}.\"\n )\n\n loss_fct = CrossEntropyLoss()\n total_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores, seq_relationship_score) + outputs[2:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return VisualBertForPreTrainingOutput(\n loss=total_loss,\n prediction_logits=prediction_scores,\n seq_relationship_logits=seq_relationship_score,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n VisualBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and\n a softmax) e.g. 
for VCR tasks.\n \"\"\",\n VISUAL_BERT_START_DOCSTRING,\n)\nclass VisualBertForMultipleChoice(VisualBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.visual_bert = VisualBertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.cls = nn.Linear(config.hidden_size, 1)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(\n VISUAL_BERT_INPUTS_DOCSTRING.format(\"batch_size, num_choices, sequence_length\")\n )\n @replace_return_docstrings(output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n visual_embeds=None,\n visual_attention_mask=None,\n visual_token_type_ids=None,\n image_text_alignment=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n labels=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,\n num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors.\n (See :obj:`input_ids` above)\n\n Returns:\n\n Example::\n\n >>> from transformers import BertTokenizer, VisualBertForMultipleChoice\n >>> import torch\n\n >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n >>> model = VisualBertForMultipleChoice.from_pretrained('uclanlp/visualbert-vcr')\n\n >>> prompt = \"In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.\"\n >>> choice0 = \"It is eaten with a fork and a knife.\"\n >>> choice1 = \"It is eaten while held in the hand.\"\n\n >>> visual_embeds = get_visual_embeddings(image)\n >>> # (batch_size, num_choices, visual_seq_length, visual_embedding_dim)\n >>> visual_embeds = visual_embeds.expand(1, 2, *visual_embeds.shape)\n >>> visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long)\n >>> visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float)\n\n >>> labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1\n\n >>> encoding = tokenizer([[prompt, prompt], [choice0, choice1]], return_tensors='pt', padding=True)\n >>> # batch size is 1\n >>> inputs_dict = {{k: v.unsqueeze(0) for k,v in encoding.items()}}\n >>> inputs_dict.update({{\n ... visual_embeds=visual_embeds,\n ... visual_attention_mask=visual_attention_mask,\n ... visual_token_type_ids=visual_token_type_ids,\n ... labels=labels\n ... 
}})\n >>> outputs = model(**inputs_dict)\n\n >>> loss = outputs.loss\n >>> logits = outputs.logits\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]\n\n input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None\n attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None\n token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None\n position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None\n inputs_embeds = (\n inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))\n if inputs_embeds is not None\n else None\n )\n\n visual_embeds = (\n visual_embeds.view(-1, visual_embeds.size(-2), visual_embeds.size(-1))\n if visual_embeds is not None\n else None\n )\n visual_attention_mask = (\n visual_attention_mask.view(-1, visual_attention_mask.size(-1))\n if visual_attention_mask is not None\n else None\n )\n visual_token_type_ids = (\n visual_token_type_ids.view(-1, visual_token_type_ids.size(-1))\n if visual_token_type_ids is not None\n else None\n )\n\n outputs = self.visual_bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n visual_embeds=visual_embeds,\n visual_attention_mask=visual_attention_mask,\n visual_token_type_ids=visual_token_type_ids,\n image_text_alignment=image_text_alignment,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n _, pooled_output = outputs[0], outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.cls(pooled_output)\n reshaped_logits = logits.view(-1, num_choices)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(reshaped_logits, labels)\n\n if not return_dict:\n output = (reshaped_logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return MultipleChoiceModelOutput(\n loss=loss,\n logits=reshaped_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n VisualBert Model with a classification/regression head on top (a dropout and a linear layer on top of the pooled\n output) for VQA.\n \"\"\",\n VISUAL_BERT_START_DOCSTRING,\n)\nclass VisualBertForQuestionAnswering(VisualBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.visual_bert = VisualBertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.cls = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(VISUAL_BERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n visual_embeds=None,\n visual_attention_mask=None,\n visual_token_type_ids=None,\n image_text_alignment=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n labels=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, total_sequence_length)`, 
`optional`):\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,\n config.num_labels - 1]`. A KLDivLoss is computed between the labels and the returned logits.\n\n Returns:\n\n Example::\n\n >>> # Assumption: `get_visual_embeddings(image)` gets the visual embeddings of the image in the batch.\n >>> from transformers import BertTokenizer, VisualBertForQuestionAnswering\n >>> import torch\n\n >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n >>> model = VisualBertForQuestionAnswering.from_pretrained('uclanlp/visualbert-vqa')\n\n >>> text = \"Who is eating the apple?\"\n >>> inputs = tokenizer(text, return_tensors='pt')\n >>> visual_embeds = get_visual_embeddings(image).unsqueeze(0)\n >>> visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long) #example\n >>> visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float)\n\n >>> inputs.update({{\n ... \"visual_embeds\": visual_embeds,\n ... \"visual_token_type_ids\": visual_token_type_ids,\n ... \"visual_attention_mask\": visual_attention_mask\n ... }})\n\n >>> labels = torch.tensor([[0.0,1.0]]).unsqueeze(0) # Batch size 1, Num labels 2\n\n >>> outputs = model(**inputs, labels=labels)\n >>> loss = outputs.loss\n >>> scores = outputs.logits\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # Get the index of the last text token\n index_to_gather = attention_mask.sum(1) - 2 # as in original code\n\n outputs = self.visual_bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n visual_embeds=visual_embeds,\n visual_attention_mask=visual_attention_mask,\n visual_token_type_ids=visual_token_type_ids,\n image_text_alignment=image_text_alignment,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n # TO-CHECK: From the original code\n index_to_gather = (\n index_to_gather.unsqueeze(-1).unsqueeze(-1).expand(index_to_gather.size(0), 1, sequence_output.size(-1))\n )\n pooled_output = torch.gather(sequence_output, 1, index_to_gather)\n\n pooled_output = self.dropout(pooled_output)\n logits = self.cls(pooled_output)\n reshaped_logits = logits.view(-1, self.num_labels)\n\n loss = None\n if labels is not None:\n loss_fct = nn.KLDivLoss(reduction=\"batchmean\")\n log_softmax = nn.LogSoftmax(dim=-1)\n reshaped_logits = log_softmax(reshaped_logits)\n loss = loss_fct(reshaped_logits, labels.contiguous())\n if not return_dict:\n output = (reshaped_logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=reshaped_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n VisualBert Model with a sequence classification head on top (a dropout and a linear layer on top of the pooled\n output) for Visual Reasoning e.g. 
for NLVR task.\n \"\"\",\n VISUAL_BERT_START_DOCSTRING,\n)\nclass VisualBertForVisualReasoning(VisualBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.visual_bert = VisualBertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.cls = nn.Linear(config.hidden_size, config.num_labels) # 2\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(VISUAL_BERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n visual_embeds=None,\n visual_attention_mask=None,\n visual_token_type_ids=None,\n image_text_alignment=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n labels=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,\n config.num_labels - 1]`. A classification loss is computed (Cross-Entropy) against these labels.\n\n Returns:\n\n Example::\n\n >>> # Assumption: `get_visual_embeddings(image)` gets the visual embeddings of the image in the batch.\n >>> from transformers import BertTokenizer, VisualBertForVisualReasoning\n >>> import torch\n\n >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n >>> model = VisualBertForVisualReasoning.from_pretrained('uclanlp/visualbert-nlvr2')\n\n >>> text = \"Who is eating the apple?\"\n >>> inputs = tokenizer(text, return_tensors='pt')\n >>> visual_embeds = get_visual_embeddings(image).unsqueeze(0)\n >>> visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long) #example\n >>> visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float)\n\n >>> inputs.update({{\n ... \"visual_embeds\": visual_embeds,\n ... \"visual_token_type_ids\": visual_token_type_ids,\n ... \"visual_attention_mask\": visual_attention_mask\n ... 
}})\n\n >>> labels = torch.tensor(1).unsqueeze(0) # Batch size 1, Num choices 2\n\n >>> outputs = model(**inputs, labels=labels)\n >>> loss = outputs.loss\n >>> scores = outputs.logits\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.visual_bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n visual_embeds=visual_embeds,\n visual_attention_mask=visual_attention_mask,\n visual_token_type_ids=visual_token_type_ids,\n image_text_alignment=image_text_alignment,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n # sequence_output = outputs[0]\n pooled_output = outputs[1]\n pooled_output = self.dropout(pooled_output)\n logits = self.cls(pooled_output)\n reshaped_logits = logits.contiguous()\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(reshaped_logits, labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=reshaped_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\nclass VisualBertRegionToPhraseAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n f\"The hidden size ({config.hidden_size}) is not a multiple of the number of attention \"\n f\"heads ({config.num_attention_heads})\"\n )\n self.num_attention_heads = 1 # config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size)\n self.key = nn.Linear(config.hidden_size, self.all_head_size)\n self.value = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(self, query, key, attention_mask):\n attention_mask = attention_mask.to(query.dtype)\n attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n attention_mask = (1.0 - attention_mask) * -10000.0\n\n mixed_query_layer = self.query(query)\n mixed_key_layer = self.key(key)\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n key_layer = self.transpose_for_scores(mixed_key_layer)\n\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n\n attention_scores = attention_scores + attention_mask\n\n attention_scores = attention_scores.squeeze(1)\n return attention_scores\n\n\n@add_start_docstrings(\n \"\"\"\n VisualBert Model with a Masked Language Modeling head and an attention layer on top for Region-to-Phrase Alignment\n e.g. 
for Flickr30 Entities task.\n \"\"\",\n VISUAL_BERT_START_DOCSTRING,\n)\nclass VisualBertForRegionToPhraseAlignment(VisualBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.visual_bert = VisualBertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.cls = VisualBertPreTrainingHeads(config)\n self.attention = VisualBertRegionToPhraseAttention(config)\n\n self.init_weights()\n\n @add_start_docstrings_to_model_forward(VISUAL_BERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n visual_embeds=None,\n visual_attention_mask=None,\n visual_token_type_ids=None,\n image_text_alignment=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n region_to_phrase_position=None,\n labels=None,\n ):\n r\"\"\"\n region_to_phrase_position (:obj:`torch.LongTensor` of shape ``(batch_size, total_sequence_length)``, `optional`):\n The positions depicting the position of the image embedding corresponding to the textual tokens.\n\n labels (:obj:`torch.LongTensor` of shape ``(batch_size, total_sequence_length, visual_sequence_length)``, `optional`):\n Labels for computing the masked language modeling loss. KLDivLoss is computed against these labels and\n the outputs from the attention layer.\n\n Returns:\n\n Example::\n\n >>> # Assumption: `get_visual_embeddings(image)` gets the visual embeddings of the image in the batch.\n >>> from transformers import BertTokenizer, VisualBertForRegionToPhraseAlignment\n >>> import torch\n\n >>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n >>> model = VisualBertForRegionToPhraseAlignment.from_pretrained('uclanlp/visualbert-vqa-coco-pre')\n\n >>> text = \"Who is eating the apple?\"\n >>> inputs = tokenizer(text, return_tensors='pt')\n >>> visual_embeds = get_visual_embeddings(image).unsqueeze(0)\n >>> visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long) #example\n >>> visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float)\n >>> region_to_phrase_position = torch.ones((1, inputs[\"input_ids\"].shape[-1]+visual_embeds.shape[-2]))\n\n >>> inputs.update({{\n ... \"region_to_phrase_position\": region_to_phrase_position,\n ... \"visual_embeds\": visual_embeds,\n ... \"visual_token_type_ids\": visual_token_type_ids,\n ... \"visual_attention_mask\": visual_attention_mask\n ... 
}})\n\n >>> labels = torch.ones((1, inputs[\"input_ids\"].shape[-1]+visual_embeds.shape[-2], visual_embeds.shape[-2])) # Batch size 1\n\n >>> outputs = model(**inputs, labels=labels)\n >>> loss = outputs.loss\n >>> scores = outputs.logits\n \"\"\"\n if region_to_phrase_position is None:\n raise ValueError(\"`region_to_phrase_position` should not be None when using Flickr Model.\")\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.visual_bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n visual_embeds=visual_embeds,\n visual_attention_mask=visual_attention_mask,\n visual_token_type_ids=visual_token_type_ids,\n image_text_alignment=image_text_alignment,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n region_to_phrase_position_mask = (region_to_phrase_position != -1).long()\n\n # Make the -1 become 0\n region_to_phrase_position = region_to_phrase_position * region_to_phrase_position_mask\n\n # Selected_positions = batch x selected position x dim\n expanded_region_to_phrase_positions = region_to_phrase_position.unsqueeze(2).expand(\n region_to_phrase_position.size(0), region_to_phrase_position.size(1), sequence_output.size(2)\n )\n selected_positions = sequence_output.gather(1, expanded_region_to_phrase_positions)\n\n # Visual Features = batch x visual_feature_length x dim\n # This will need separate image and visual masks.\n visual_features = sequence_output[:, attention_mask.size(1) :]\n\n if visual_features.size(1) != visual_attention_mask.size(1):\n raise ValueError(\n f\"Visual features length :{visual_features.size(1)} should be the same\"\n f\" as visual attention mask length: {visual_attention_mask.size(1)}.\"\n )\n\n logits = self.attention(selected_positions, visual_features, visual_attention_mask)\n\n loss = None\n\n if labels is not None:\n\n # scores = batch x selected position x visual_feature\n # scores = selected_positions.bmm(visual_features.transpose(1,2))\n # label = batch x selected_postion x needed position\n loss_fct = KLDivLoss(reduction=\"batchmean\")\n log_softmax = LogSoftmax(dim=-1)\n scores = log_softmax(logits)\n labels = labels.contiguous()\n loss = loss_fct(scores, labels)\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n" ]
[ [ "torch.nn.Softmax", "torch.nn.Dropout", "torch.nn.CrossEntropyLoss", "torch.ones", "torch.nn.KLDivLoss", "torch.nn.LogSoftmax", "torch.cat", "torch.zeros", "torch.nn.Embedding", "torch.nn.LayerNorm", "torch.nn.Tanh", "torch.nn.Linear", "torch.matmul", "torch.arange", "torch.gather" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dreadn0ught/mlrose
[ "2a9d604ea464cccc48f30b8fe6b81fe5c4337c80" ]
[ "tests/test_fitness.py" ]
[ "\"\"\" Unit tests for fitness.py\"\"\"\n\n# Author: Genevieve Hayes\n# License: BSD 3 clause\n\nimport unittest\nimport numpy as np\nfrom mlrose import (OneMax, FlipFlop, FourPeaks, SixPeaks, ContinuousPeaks,\n Knapsack, TravellingSales, Queens, MaxKColor,\n CustomFitness)\nfrom mlrose.fitness import head, tail, max_run\n# The above functions are not automatically imported at initialization, so\n# must be imported explicitly from fitness.py.\n\n\nclass TestFitness(unittest.TestCase):\n \"\"\"Tests for fitness.py.\"\"\"\n\n @staticmethod\n def test_onemax():\n \"\"\"Test OneMax fitness function\"\"\"\n state = np.array([0, 1, 0, 1, 1, 1, 1])\n assert OneMax().evaluate(state) == 5\n\n @staticmethod\n def test_flipflop():\n \"\"\"Test FlipFlop fitness function\"\"\"\n state = np.array([0, 1, 0, 1, 1, 1, 1])\n assert FlipFlop().evaluate(state) == 3\n\n @staticmethod\n def test_head():\n \"\"\"Test head function\"\"\"\n state = np.array([1, 1, 1, 1, 0, 1, 0, 2, 1, 1, 1, 1, 1, 4, 6, 1, 1])\n assert head(1, state) == 4\n\n @staticmethod\n def test_tail():\n \"\"\"Test tail function\"\"\"\n state = np.array([1, 1, 1, 1, 0, 1, 0, 2, 1, 1, 1, 1, 1, 4, 6, 1, 1])\n assert tail(1, state) == 2\n\n @staticmethod\n def test_max_run_middle():\n \"\"\"Test max_run function for case where run is in the middle of the\n state\"\"\"\n state = np.array([1, 1, 1, 1, 0, 1, 0, 2, 1, 1, 1, 1, 1, 4, 6, 1, 1])\n assert max_run(1, state) == 5\n\n @staticmethod\n def test_max_run_start():\n \"\"\"Test max_run function for case where run is at the start of the\n state\"\"\"\n state = np.array([1, 1, 1, 1, 1, 1, 0, 2, 1, 1, 1, 1, 1, 4, 6, 1, 1])\n assert max_run(1, state) == 6\n\n @staticmethod\n def test_max_run_end():\n \"\"\"Test max_run function for case where run is at the end of the\n state\"\"\"\n state = np.array([1, 1, 1, 1, 0, 1, 0, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n assert max_run(1, state) == 9\n\n @staticmethod\n def test_fourpeaks_r0():\n \"\"\"Test FourPeaks fitness function for the case where R=0 and max>0\"\"\"\n state = np.array([1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0])\n assert FourPeaks(t_pct=0.30).evaluate(state) == 4\n\n @staticmethod\n def test_fourpeaks_r_gt0():\n \"\"\"Test FourPeaks fitness function for the case where R>0 and max>0\"\"\"\n state = np.array([1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0])\n assert FourPeaks(t_pct=0.15).evaluate(state) == 16\n\n @staticmethod\n def test_fourpeaks_r0_max0():\n \"\"\"Test FourPeaks fitness function for the case where R=0 and max=0\"\"\"\n state = np.array([0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1])\n assert FourPeaks(t_pct=0.30).evaluate(state) == 0\n\n @staticmethod\n def test_sixpeaks_r0():\n \"\"\"Test SixPeaks fitness function for the case where R=0 and max>0\"\"\"\n state = np.array([1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0])\n assert SixPeaks(t_pct=0.30).evaluate(state) == 4\n\n @staticmethod\n def test_sixpeaks_r_gt0():\n \"\"\"Test SixPeaks fitness function for the case where R>0 and max>0\"\"\"\n state = np.array([1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0])\n assert SixPeaks(t_pct=0.15).evaluate(state) == 16\n\n @staticmethod\n def test_sixpeaks_r0_max0():\n \"\"\"Test SixPeaks fitness function for the case where R=0 and max=0\"\"\"\n state = np.array([0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1])\n assert SixPeaks(t_pct=0.30).evaluate(state) == 0\n\n @staticmethod\n def test_sixpeaks_r_gt0_max2():\n \"\"\"Test SixPeaks fitness function for the case where R>0 and max>0\n based on the second condition\"\"\"\n state = np.array([0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1])\n assert SixPeaks(t_pct=0.15).evaluate(state) == 16\n\n @staticmethod\n def test_continuouspeaks_r0():\n \"\"\"Test ContinuousPeaks fitness function for case when R = 0.\"\"\"\n state = np.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1])\n assert ContinuousPeaks(t_pct=0.30).evaluate(state) == 5\n\n @staticmethod\n def test_continuouspeaks_r_gt():\n \"\"\"Test ContinuousPeaks fitness function for case when R > 0.\"\"\"\n state = np.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1])\n\n assert ContinuousPeaks(t_pct=0.15).evaluate(state) == 17\n\n @staticmethod\n def test_knapsack_weight_lt_max():\n \"\"\"Test Knapsack fitness function for case where total weight is less\n than the maximum\"\"\"\n weights = [10, 5, 2, 8, 15]\n values = [1, 2, 3, 4, 5]\n max_weight_pct = 0.6\n\n state = np.array([1, 0, 2, 1, 0])\n assert Knapsack(weights, values, max_weight_pct).evaluate(state) == 11\n\n @staticmethod\n def test_knapsack_weight_gt_max():\n \"\"\"Test Knapsack fitness function for case where total weight is\n greater than the maximum\"\"\"\n weights = [10, 5, 2, 8, 15]\n values = [1, 2, 3, 4, 5]\n max_weight_pct = 0.4\n\n state = np.array([1, 0, 2, 1, 0])\n assert Knapsack(weights, values, max_weight_pct).evaluate(state) == 0\n\n @staticmethod\n def test_travelling_sales_coords():\n \"\"\"Test TravellingSales fitness function for case where city nodes\n coords are specified.\"\"\"\n\n coords = [(0, 0), (3, 0), (3, 2), (2, 4), (1, 3)]\n\n state = np.array([0, 1, 4, 3, 2])\n\n assert (round(TravellingSales(coords=coords).evaluate(state), 4)\n == 13.8614)\n\n @staticmethod\n def test_travelling_sales_dists():\n \"\"\"Test TravellingSales fitness function for case where distances\n between node pairs are specified.\"\"\"\n\n dists = [(0, 1, 3), (0, 2, 5), (0, 3, 1), (0, 4, 7), (1, 3, 6),\n (4, 1, 9), (2, 3, 8), (2, 4, 2), (3, 2, 8), (3, 4, 4)]\n\n state = np.array([0, 1, 4, 3, 2])\n\n assert TravellingSales(distances=dists).evaluate(state) == 29\n\n @staticmethod\n def test_travelling_sales_invalid():\n \"\"\"Test TravellingSales fitness function for invalid tour\"\"\"\n\n dists = [(0, 1, 3), (0, 2, 5), (0, 3, 1), (0, 4, 7), (1, 3, 6),\n (4, 1, 9), (2, 3, 8), (2, 4, 2), (3, 2, 8), (3, 4, 4)]\n\n state = np.array([0, 1, 2, 3, 4])\n\n assert TravellingSales(distances=dists).evaluate(state) == np.inf\n\n @staticmethod\n def test_queens():\n \"\"\"Test Queens fitness function\"\"\"\n state = np.array([1, 4, 1, 3, 5, 5, 2, 7])\n assert Queens().evaluate(state) == 6\n\n @staticmethod\n def test_max_k_color():\n \"\"\"Test MaxKColor fitness function\"\"\"\n edges = [(0, 1), (0, 2), (0, 4), (1, 3), (2, 0), (2, 3), (3, 4)]\n\n state = np.array([0, 1, 0, 1, 1])\n assert MaxKColor(edges).evaluate(state) == 3\n\n @staticmethod\n def test_custom_fitness():\n \"\"\"Test CustomFitness fitness function\"\"\"\n # Define custom fitness function\n def cust_fn(state, c):\n return c*np.sum(state)\n\n state = np.array([1, 2, 3, 4, 5])\n kwargs = {'c': 10}\n assert CustomFitness(cust_fn, **kwargs).evaluate(state) == 150\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.array", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
BPearlstine/colour
[ "40f0281295496774d2a19eee017d50fd0c265bd8", "40f0281295496774d2a19eee017d50fd0c265bd8", "40f0281295496774d2a19eee017d50fd0c265bd8", "40f0281295496774d2a19eee017d50fd0c265bd8", "40f0281295496774d2a19eee017d50fd0c265bd8", "40f0281295496774d2a19eee017d50fd0c265bd8", "40f0281295496774d2a19eee017d50fd0c265bd8" ]
[ "colour/volume/tests/test_spectrum.py", "colour/temperature/tests/test_cie_d.py", "colour/models/rgb/datasets/dji_dgamut.py", "colour/models/rgb/transfer_functions/tests/test_gamma.py", "colour/blindness/machado2009.py", "colour/models/rgb/transfer_functions/tests/test_filmic_pro.py", "colour/models/rgb/transfer_functions/tests/test_dicom_gsdf.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nDefines unit tests for :mod:`colour.volume.spectrum` module.\n\"\"\"\n\nfrom __future__ import division, unicode_literals\n\nimport numpy as np\nimport unittest\nfrom itertools import permutations\n\nfrom colour.volume import (generate_pulse_waves, XYZ_outer_surface,\n is_within_visible_spectrum)\nfrom colour.utilities import ignore_numpy_errors\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n__all__ = [\n 'TestGeneratePulseWaves', 'TestXYZOuterSurface',\n 'TestIsWithinVisibleSpectrum'\n]\n\n\nclass TestGeneratePulseWaves(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.volume.spectrum.generate_pulse_waves`\n definition unit tests methods.\n \"\"\"\n\n def test_generate_pulse_waves(self):\n \"\"\"\n Tests :func:`colour.volume.spectrum.generate_pulse_waves`\n definition.\n \"\"\"\n\n np.testing.assert_array_equal(\n generate_pulse_waves(5),\n np.array([\n [0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000],\n [1.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000],\n [0.00000000, 1.00000000, 0.00000000, 0.00000000, 0.00000000],\n [0.00000000, 0.00000000, 1.00000000, 0.00000000, 0.00000000],\n [0.00000000, 0.00000000, 0.00000000, 1.00000000, 0.00000000],\n [0.00000000, 0.00000000, 0.00000000, 0.00000000, 1.00000000],\n [1.00000000, 1.00000000, 0.00000000, 0.00000000, 0.00000000],\n [0.00000000, 1.00000000, 1.00000000, 0.00000000, 0.00000000],\n [0.00000000, 0.00000000, 1.00000000, 1.00000000, 0.00000000],\n [0.00000000, 0.00000000, 0.00000000, 1.00000000, 1.00000000],\n [1.00000000, 0.00000000, 0.00000000, 0.00000000, 1.00000000],\n [1.00000000, 1.00000000, 1.00000000, 0.00000000, 0.00000000],\n [0.00000000, 1.00000000, 1.00000000, 1.00000000, 0.00000000],\n [0.00000000, 0.00000000, 1.00000000, 1.00000000, 1.00000000],\n [1.00000000, 0.00000000, 0.00000000, 1.00000000, 1.00000000],\n [1.00000000, 1.00000000, 0.00000000, 0.00000000, 1.00000000],\n [1.00000000, 1.00000000, 1.00000000, 1.00000000, 0.00000000],\n [0.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000],\n [1.00000000, 0.00000000, 1.00000000, 1.00000000, 1.00000000],\n [1.00000000, 1.00000000, 0.00000000, 1.00000000, 1.00000000],\n [1.00000000, 1.00000000, 1.00000000, 0.00000000, 1.00000000],\n [1.00000000, 1.00000000, 1.00000000, 1.00000000, 1.00000000],\n ]))\n\n\nclass TestXYZOuterSurface(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.volume.spectrum.XYZ_outer_surface`\n definition unit tests methods.\n \"\"\"\n\n def test_XYZ_outer_surface(self):\n \"\"\"\n Tests :func:`colour.volume.spectrum.XYZ_outer_surface`\n definition.\n \"\"\"\n\n np.testing.assert_array_almost_equal(\n XYZ_outer_surface(84),\n np.array([\n [0.00000000e+00, 0.00000000e+00, 0.00000000e+00],\n [1.47669249e-03, 4.15303476e-05, 6.98843624e-03],\n [1.62812757e-01, 3.71143871e-02, 9.01514713e-01],\n [1.86508941e-01, 5.66174645e-01, 9.13551791e-02],\n [6.15553478e-01, 3.84277759e-01, 4.74220708e-04],\n [3.36220454e-02, 1.23545569e-02, 0.00000000e+00],\n [1.02795008e-04, 3.71211582e-05, 0.00000000e+00],\n [1.64289450e-01, 3.71559174e-02, 9.08503149e-01],\n [3.49321699e-01, 6.03289032e-01, 9.92869892e-01],\n [8.02062419e-01, 9.50452405e-01, 9.18293998e-02],\n [6.49175523e-01, 3.96632316e-01, 4.74220708e-04],\n [3.37248404e-02, 1.23916780e-02, 0.00000000e+00],\n 
[1.57948749e-03, 7.86515058e-05, 6.98843624e-03],\n [3.50798391e-01, 6.03330563e-01, 9.99858328e-01],\n [9.64875177e-01, 9.87566792e-01, 9.93344113e-01],\n [8.35684465e-01, 9.62806961e-01, 9.18293998e-02],\n [6.49278318e-01, 3.96669437e-01, 4.74220708e-04],\n [3.52015329e-02, 1.24332084e-02, 6.98843624e-03],\n [1.64392245e-01, 3.71930386e-02, 9.08503149e-01],\n [9.66351869e-01, 9.87608322e-01, 1.00033255e+00],\n [9.98497222e-01, 9.99921348e-01, 9.93344113e-01],\n [8.35787260e-01, 9.62844083e-01, 9.18293998e-02],\n [6.50755011e-01, 3.96710968e-01, 7.46265695e-03],\n [1.98014290e-01, 4.95475954e-02, 9.08503149e-01],\n [3.50901186e-01, 6.03367684e-01, 9.99858328e-01],\n [9.99973915e-01, 9.99962879e-01, 1.00033255e+00],\n [9.98600017e-01, 9.99958470e-01, 9.93344113e-01],\n [8.37263952e-01, 9.62885613e-01, 9.88178360e-02],\n [8.13567768e-01, 4.33825355e-01, 9.08977370e-01],\n [3.84523232e-01, 6.15722241e-01, 9.99858328e-01],\n [9.66454664e-01, 9.87645443e-01, 1.00033255e+00],\n [1.00007671e+00, 1.00000000e+00, 1.00033255e+00],\n ]),\n decimal=7)\n\n\nclass TestIsWithinVisibleSpectrum(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.volume.spectrum.is_within_visible_spectrum`\n definition unit tests methods.\n \"\"\"\n\n def test_is_within_visible_spectrum(self):\n \"\"\"\n Tests :func:`colour.volume.spectrum.is_within_visible_spectrum`\n definition.\n \"\"\"\n\n self.assertTrue(\n is_within_visible_spectrum(np.array([0.3205, 0.4131, 0.5100])))\n\n self.assertFalse(\n is_within_visible_spectrum(np.array([-0.0005, 0.0031, 0.0010])))\n\n self.assertTrue(\n is_within_visible_spectrum(np.array([0.4325, 0.3788, 0.1034])))\n\n self.assertFalse(\n is_within_visible_spectrum(np.array([0.0025, 0.0088, 0.0340])))\n\n def test_n_dimensional_is_within_visible_spectrum(self):\n \"\"\"\n Tests :func:`colour.volume.spectrum.is_within_visible_spectrum`\n definition n-dimensional arrays support.\n \"\"\"\n\n a = np.array([0.3205, 0.4131, 0.5100])\n b = is_within_visible_spectrum(a)\n\n a = np.tile(a, (6, 1))\n b = np.tile(b, 6)\n np.testing.assert_almost_equal(is_within_visible_spectrum(a), b)\n\n a = np.reshape(a, (2, 3, 3))\n b = np.reshape(b, (2, 3))\n np.testing.assert_almost_equal(is_within_visible_spectrum(a), b)\n\n @ignore_numpy_errors\n def test_nan_is_within_visible_spectrum(self):\n \"\"\"\n Tests :func:`colour.volume.spectrum.is_within_visible_spectrum`\n definition nan support.\n \"\"\"\n\n cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]\n cases = set(permutations(cases * 3, r=3))\n for case in cases:\n is_within_visible_spectrum(case)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "# -*- coding: utf-8 -*-\n\"\"\"\nDefines unit tests for :mod:`colour.temperature.cie_d` module.\n\"\"\"\n\nfrom __future__ import division, unicode_literals\n\nimport numpy as np\nimport unittest\nfrom itertools import permutations\n\nfrom colour.temperature import xy_to_CCT_CIE_D, CCT_to_xy_CIE_D\nfrom colour.utilities import ignore_numpy_errors\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n__all__ = ['TestXy_to_CCT_CIE_D', 'TestCCT_to_xy_CIE_D']\n\n\nclass TestXy_to_CCT_CIE_D(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.temperature.cie_d.xy_to_CCT_CIE_D` definition units\n tests methods.\n \"\"\"\n\n def test_xy_to_CCT_CIE_D(self):\n \"\"\"\n Tests 
:func:`colour.temperature.cie_d.xy_to_CCT_CIE_D` definition.\n \"\"\"\n\n np.testing.assert_allclose(\n xy_to_CCT_CIE_D(\n np.array([0.382343625000000, 0.383766261015578]),\n {'method': 'Nelder-Mead'}),\n 4000,\n rtol=0.0000001,\n atol=0.0000001)\n\n np.testing.assert_allclose(\n xy_to_CCT_CIE_D(\n np.array([0.305357431486880, 0.321646345474552]),\n {'method': 'Nelder-Mead'}),\n 7000,\n rtol=0.0000001,\n atol=0.0000001)\n\n np.testing.assert_allclose(\n xy_to_CCT_CIE_D(\n np.array([0.24985367, 0.254799464210944]),\n {'method': 'Nelder-Mead'}),\n 25000,\n rtol=0.0000001,\n atol=0.0000001)\n\n def test_n_dimensional_xy_to_CCT_CIE_D(self):\n \"\"\"\n Tests :func:`colour.temperature.cie_d.xy_to_CCT_CIE_D` definition\n n-dimensional arrays support.\n \"\"\"\n\n xy = np.array([0.382343625000000, 0.383766261015578])\n CCT = xy_to_CCT_CIE_D(xy)\n\n xy = np.tile(xy, (6, 1))\n CCT = np.tile(CCT, 6)\n np.testing.assert_almost_equal(xy_to_CCT_CIE_D(xy), CCT, decimal=7)\n\n xy = np.reshape(xy, (2, 3, 2))\n CCT = np.reshape(CCT, (2, 3))\n np.testing.assert_almost_equal(xy_to_CCT_CIE_D(xy), CCT, decimal=7)\n\n @ignore_numpy_errors\n def test_nan_xy_to_CCT_CIE_D(self):\n \"\"\"\n Tests :func:`colour.temperature.cie_d.xy_to_CCT_CIE_D` definition nan\n support.\n \"\"\"\n\n cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]\n cases = set(permutations(cases * 3, r=2))\n for case in cases:\n xy_to_CCT_CIE_D(case)\n\n\nclass TestCCT_to_xy_CIE_D(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.temperature.cie_d.CCT_to_xy_CIE_D` definition\n unit tests methods.\n \"\"\"\n\n def test_CCT_to_xy_CIE_D(self):\n \"\"\"\n Tests :func:`colour.temperature.cie_d.CCT_to_xy_CIE_D` definition.\n \"\"\"\n\n np.testing.assert_almost_equal(\n CCT_to_xy_CIE_D(4000),\n np.array([0.382343625000000, 0.383766261015578]),\n decimal=7)\n\n np.testing.assert_almost_equal(\n CCT_to_xy_CIE_D(7000),\n np.array([0.305357431486880, 0.321646345474552]),\n decimal=7)\n\n np.testing.assert_almost_equal(\n CCT_to_xy_CIE_D(25000),\n np.array([0.24985367, 0.254799464210944]),\n decimal=7)\n\n def test_n_dimensional_CCT_to_xy_CIE_D(self):\n \"\"\"\n Tests :func:`colour.temperature.cie_d.CCT_to_xy_CIE_D` definition\n n-dimensional arrays support.\n \"\"\"\n\n CCT = 4000\n xy = CCT_to_xy_CIE_D(CCT)\n\n CCT = np.tile(CCT, 6)\n xy = np.tile(xy, (6, 1))\n np.testing.assert_almost_equal(CCT_to_xy_CIE_D(CCT), xy, decimal=7)\n\n CCT = np.reshape(CCT, (2, 3))\n xy = np.reshape(xy, (2, 3, 2))\n np.testing.assert_almost_equal(CCT_to_xy_CIE_D(CCT), xy, decimal=7)\n\n @ignore_numpy_errors\n def test_nan_CCT_to_xy_CIE_D(self):\n \"\"\"\n Tests :func:`colour.temperature.cie_d.CCT_to_xy_CIE_D` definition\n nan support.\n \"\"\"\n\n cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]\n cases = set(permutations(cases * 3, r=2))\n for case in cases:\n CCT_to_xy_CIE_D(case)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "# -*- coding: utf-8 -*-\n\"\"\"\nDJI D-Gamut Colourspace\n=======================\n\nDefines the *DJI D-Gamut* colourspace:\n\n- :attr:`colour.models.DJI_D_GAMUT_COLOURSPACE`.\n\nSee Also\n--------\n`RGB Colourspaces Jupyter Notebook\n<http://nbviewer.jupyter.org/github/colour-science/colour-notebooks/\\\nblob/master/notebooks/models/rgb.ipynb>`_\n\nReferences\n----------\n- :cite:`DJI2017` : Dji. (2017). White Paper on D-Log and D-Gamut of DJI\n Cinema Color System. 
Retrieved from https://dl.djicdn.com/downloads/\\\nzenmuse+x7/20171010/D-Log_D-Gamut_Whitepaper.pdf\n\"\"\"\n\nfrom __future__ import division, unicode_literals\n\nimport numpy as np\n\nfrom colour.colorimetry import ILLUMINANTS\nfrom colour.models.rgb import (RGB_Colourspace, log_encoding_DJIDLog,\n log_decoding_DJIDLog)\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n__all__ = [\n 'DJI_D_GAMUT_PRIMARIES', 'DJI_D_GAMUT_WHITEPOINT_NAME',\n 'DJI_D_GAMUT_WHITEPOINT', 'DJI_D_GAMUT_TO_XYZ_MATRIX',\n 'XYZ_TO_DJI_D_GAMUT_MATRIX', 'DJI_D_GAMUT_COLOURSPACE'\n]\n\nDJI_D_GAMUT_PRIMARIES = np.array([\n [0.71, 0.31],\n [0.21, 0.88],\n [0.09, -0.08],\n])\n\"\"\"\n*DJI D-Gamut* colourspace primaries.\n\nDJI_D_GAMUT_PRIMARIES : ndarray, (3, 2)\n\"\"\"\n\nDJI_D_GAMUT_WHITEPOINT_NAME = 'D65'\n\"\"\"\n*DJI D-Gamut* colourspace whitepoint name.\n\nDJI_D_GAMUT_WHITEPOINT : unicode\n\"\"\"\n\nDJI_D_GAMUT_WHITEPOINT = (ILLUMINANTS['CIE 1931 2 Degree Standard Observer'][\n DJI_D_GAMUT_WHITEPOINT_NAME])\n\"\"\"\n*DJI D-Gamut* colourspace whitepoint.\n\nDJI_D_GAMUT_WHITEPOINT : ndarray\n\"\"\"\n\nDJI_D_GAMUT_TO_XYZ_MATRIX = np.array([[0.6482, 0.1940,\n 0.1082], [0.2830, 0.8132, -0.0962],\n [-0.0183, -0.0832, 1.1903]])\n\"\"\"\n*DJI D-Gamut* colourspace to *CIE XYZ* tristimulus values matrix.\n\nDJI_D_GAMUT_TO_XYZ_MATRIX : array_like, (3, 3)\n\"\"\"\n\nXYZ_TO_DJI_D_GAMUT_MATRIX = np.array([[1.7257, -0.4314,\n -0.1917], [-0.6025, 1.3906, 0.1671],\n [-0.0156, 0.0905, 0.8489]])\n\"\"\"\n*CIE XYZ* tristimulus values to *DJI D-Gamut* colourspace matrix.\n\nXYZ_TO_DJI_D_GAMUT_MATRIX : array_like, (3, 3)\n\"\"\"\n\nDJI_D_GAMUT_COLOURSPACE = RGB_Colourspace(\n 'DJI D-Gamut',\n DJI_D_GAMUT_PRIMARIES,\n DJI_D_GAMUT_WHITEPOINT,\n DJI_D_GAMUT_WHITEPOINT_NAME,\n DJI_D_GAMUT_TO_XYZ_MATRIX,\n XYZ_TO_DJI_D_GAMUT_MATRIX,\n log_encoding_DJIDLog,\n log_decoding_DJIDLog,\n)\nDJI_D_GAMUT_COLOURSPACE.__doc__ = \"\"\"\n*DJI_D-Gamut* colourspace.\n\n References\n ----------\n :cite:`DJI2017`\n\nDJI_D_GAMUT_COLOURSPACE : RGB_Colourspace\n\"\"\"\n", "# -*- coding: utf-8 -*-\n\"\"\"\nDefines unit tests for :mod:`colour.models.rgb.transfer_functions.gamma`\nmodule.\n\"\"\"\n\nfrom __future__ import division, unicode_literals\n\nimport numpy as np\nimport unittest\n\nfrom colour.models.rgb.transfer_functions import gamma_function\nfrom colour.utilities import ignore_numpy_errors\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n__all__ = ['TestGammaFunction']\n\n\nclass TestGammaFunction(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.models.rgb.transfer_functions.gamma.gamma_function`\n definition unit tests methods.\n \"\"\"\n\n def test_gamma_function(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.gamma.\\\ngamma_function` definition.\n \"\"\"\n\n self.assertAlmostEqual(gamma_function(0.0, 2.2), 0.0, places=7)\n\n self.assertAlmostEqual(\n gamma_function(0.18, 2.2), 0.022993204992707, places=7)\n\n self.assertAlmostEqual(\n gamma_function(0.022993204992707, 1.0 / 2.2), 0.18, places=7)\n\n self.assertAlmostEqual(\n gamma_function(-0.18, 2.0), 0.0323999999999998, 
places=7)\n\n np.testing.assert_array_equal(gamma_function(-0.18, 2.2), np.nan)\n\n self.assertAlmostEqual(\n gamma_function(-0.18, 2.2, 'Mirror'), -0.022993204992707, places=7)\n\n self.assertAlmostEqual(\n gamma_function(-0.18, 2.2, 'Preserve'), -0.18, places=7)\n\n self.assertAlmostEqual(\n gamma_function(-0.18, 2.2, 'Clamp'), 0, places=7)\n\n np.testing.assert_array_equal(gamma_function(-0.18, -2.2), np.nan)\n\n self.assertAlmostEqual(\n gamma_function(0.0, -2.2, 'Mirror'), 0.0, places=7)\n\n self.assertAlmostEqual(\n gamma_function(0.0, 2.2, 'Preserve'), 0.0, places=7)\n\n self.assertAlmostEqual(gamma_function(0.0, 2.2, 'Clamp'), 0, places=7)\n\n def test_n_dimensional_gamma_function(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.gamma.\\\ngamma_function` definition n-dimensional arrays support.\n \"\"\"\n\n a = 0.18\n a_p = gamma_function(a, 2.2)\n\n a = np.tile(a, 6)\n a_p = np.tile(a_p, 6)\n np.testing.assert_almost_equal(gamma_function(a, 2.2), a_p, decimal=7)\n\n a = np.reshape(a, (2, 3))\n a_p = np.reshape(a_p, (2, 3))\n np.testing.assert_almost_equal(gamma_function(a, 2.2), a_p, decimal=7)\n\n a = np.reshape(a, (2, 3, 1))\n a_p = np.reshape(a_p, (2, 3, 1))\n np.testing.assert_almost_equal(gamma_function(a, 2.2), a_p, decimal=7)\n\n a = -0.18\n a_p = -0.022993204992707\n np.testing.assert_almost_equal(\n gamma_function(a, 2.2, 'Mirror'), a_p, decimal=7)\n\n a = np.tile(a, 6)\n a_p = np.tile(a_p, 6)\n np.testing.assert_almost_equal(\n gamma_function(a, 2.2, 'Mirror'), a_p, decimal=7)\n\n a = np.reshape(a, (2, 3))\n a_p = np.reshape(a_p, (2, 3))\n np.testing.assert_almost_equal(\n gamma_function(a, 2.2, 'Mirror'), a_p, decimal=7)\n\n a = np.reshape(a, (2, 3, 1))\n a_p = np.reshape(a_p, (2, 3, 1))\n np.testing.assert_almost_equal(\n gamma_function(a, 2.2, 'Mirror'), a_p, decimal=7)\n\n a = -0.18\n a_p = -0.18\n np.testing.assert_almost_equal(\n gamma_function(a, 2.2, 'Preserve'), a_p, decimal=7)\n\n a = np.tile(a, 6)\n a_p = np.tile(a_p, 6)\n np.testing.assert_almost_equal(\n gamma_function(a, 2.2, 'Preserve'), a_p, decimal=7)\n\n a = np.reshape(a, (2, 3))\n a_p = np.reshape(a_p, (2, 3))\n np.testing.assert_almost_equal(\n gamma_function(a, 2.2, 'Preserve'), a_p, decimal=7)\n\n a = np.reshape(a, (2, 3, 1))\n a_p = np.reshape(a_p, (2, 3, 1))\n np.testing.assert_almost_equal(\n gamma_function(a, 2.2, 'Preserve'), a_p, decimal=7)\n\n a = -0.18\n a_p = 0.0\n np.testing.assert_almost_equal(\n gamma_function(a, 2.2, 'Clamp'), a_p, decimal=7)\n\n a = np.tile(a, 6)\n a_p = np.tile(a_p, 6)\n np.testing.assert_almost_equal(\n gamma_function(a, 2.2, 'Clamp'), a_p, decimal=7)\n\n a = np.reshape(a, (2, 3))\n a_p = np.reshape(a_p, (2, 3))\n np.testing.assert_almost_equal(\n gamma_function(a, 2.2, 'Clamp'), a_p, decimal=7)\n\n a = np.reshape(a, (2, 3, 1))\n a_p = np.reshape(a_p, (2, 3, 1))\n np.testing.assert_almost_equal(\n gamma_function(a, 2.2, 'Clamp'), a_p, decimal=7)\n\n def test_raise_exception_gamma_function(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.gamma.\\\ngamma_function` definition raised exception.\n \"\"\"\n\n self.assertRaises(ValueError, gamma_function, 0.18, 1, 'Undefined')\n\n @ignore_numpy_errors\n def test_nan_gamma_function(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.gamma.\\\ngamma_function` definition nan support.\n \"\"\"\n\n cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]\n\n for case in cases:\n gamma_function(case, case)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "# -*- coding: utf-8 -*-\n\"\"\"\nSimulation of CVD - Machado, Oliveira and Fernandes (2009)\n==========================================================\n\nDefines *Machado et al. (2009)* objects for simulation of colour vision\ndeficiency:\n\n- :func:`colour.anomalous_trichromacy_cmfs_Machado2009`\n- :func:`colour.anomalous_trichromacy_matrix_Machado2009`\n- :func:`colour.cvd_matrix_Machado2009`\n\nSee Also\n--------\n`Machado et al. (2009) - CVD IPython Notebook\n<http://nbviewer.ipython.org/github/colour-science/colour-ipython/\\\nblob/master/notebooks/cvd/Machado2009.ipynb>`_\n\nReferences\n----------\n- :cite:`Colblindorb` : Colblindor. (n.d.). Protanopia - Red-Green Color\n Blindness. Retrieved July 4, 2015, from http://www.color-blindness.com/\\\nprotanopia-red-green-color-blindness/\n- :cite:`Colblindora` : Colblindor. (n.d.). Deuteranopia - Red-Green Color\n Blindness. Retrieved July 4, 2015, from http://www.color-blindness.com/\\\ndeuteranopia-red-green-color-blindness/\n- :cite:`Colblindorc` : Colblindor. (n.d.). Tritanopia - Blue-Yellow Color\n Blindness. Retrieved July 4, 2015, from http://www.color-blindness.com/\\\ntritanopia-blue-yellow-color-blindness/\n- :cite:`Machado2009` : Machado, G. M., Oliveira, M. M., & Fernandes, L.\n (2009). A Physiologically-based Model for Simulation of Color Vision\n Deficiency. IEEE Transactions on Visualization and Computer Graphics,\n 15(6), 1291-1298. doi:10.1109/TVCG.2009.113\n\"\"\"\n\nfrom __future__ import division, unicode_literals\n\nimport numpy as np\n\nfrom colour.blindness import CVD_MATRICES_MACHADO2010\nfrom colour.colorimetry import SpectralShape\nfrom colour.utilities import (dot_matrix, dot_vector, tsplit, tstack,\n usage_warning)\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n__all__ = [\n 'LMS_TO_WSYBRG_MATRIX', 'RGB_to_WSYBRG_matrix',\n 'anomalous_trichromacy_cmfs_Machado2009',\n 'anomalous_trichromacy_matrix_Machado2009', 'cvd_matrix_Machado2009'\n]\n\nLMS_TO_WSYBRG_MATRIX = np.array([\n [0.600, 0.400, 0.000],\n [0.240, 0.105, -0.700],\n [1.200, -1.600, 0.400],\n])\n\"\"\"\nIngling and Tsou (1977) matrix converting from cones responses to\nopponent-colour space.\n\nLMS_TO_WSYBRG_MATRIX : array_like, (3, 3)\n\"\"\"\n\n\ndef RGB_to_WSYBRG_matrix(cmfs, primaries):\n \"\"\"\n Computes the matrix transforming from *RGB* colourspace to opponent-colour\n space using *Machado et al. (2009)* method.\n\n Parameters\n ----------\n cmfs : LMS_ConeFundamentals\n *LMS* cone fundamentals colour matching functions.\n primaries : RGB_DisplayPrimaries\n *RGB* display primaries tri-spectral distributions.\n\n Returns\n -------\n ndarray\n Matrix transforming from *RGB* colourspace to opponent-colour space.\n\n Examples\n --------\n >>> from colour import DISPLAYS_RGB_PRIMARIES, LMS_CMFS\n >>> cmfs = LMS_CMFS['Stockman & Sharpe 2 Degree Cone Fundamentals']\n >>> d_LMS = np.array([15, 0, 0])\n >>> primaries = DISPLAYS_RGB_PRIMARIES['Apple Studio Display']\n >>> RGB_to_WSYBRG_matrix( # doctest: +ELLIPSIS\n ...
cmfs, primaries)\n array([[ 0.2126535..., 0.6704626..., 0.1168838...],\n [ 4.7095295..., 12.4862869..., -16.1958165...],\n [-11.1518474..., 15.2534789..., -3.1016315...]])\n \"\"\"\n\n wavelengths = cmfs.wavelengths\n WSYBRG = dot_vector(LMS_TO_WSYBRG_MATRIX, cmfs.values)\n WS, YB, RG = tsplit(WSYBRG)\n\n extrapolator_args = {'method': 'Constant', 'left': 0, 'right': 0}\n primaries = primaries.copy().align(\n cmfs.shape, extrapolator_args=extrapolator_args)\n\n R, G, B = tsplit(primaries.values)\n\n WS_R = np.trapz(R * WS, wavelengths)\n WS_G = np.trapz(G * WS, wavelengths)\n WS_B = np.trapz(B * WS, wavelengths)\n\n YB_R = np.trapz(R * YB, wavelengths)\n YB_G = np.trapz(G * YB, wavelengths)\n YB_B = np.trapz(B * YB, wavelengths)\n\n RG_R = np.trapz(R * RG, wavelengths)\n RG_G = np.trapz(G * RG, wavelengths)\n RG_B = np.trapz(B * RG, wavelengths)\n\n M_G = np.array([\n [WS_R, WS_G, WS_B],\n [YB_R, YB_G, YB_B],\n [RG_R, RG_G, RG_B],\n ])\n\n PWS = 1 / (WS_R + WS_G + WS_B)\n PYB = 1 / (YB_R + YB_G + YB_B)\n PRG = 1 / (RG_R + RG_G + RG_B)\n\n M_G *= np.array([PWS, PYB, PRG])[:, np.newaxis]\n\n return M_G\n\n\ndef anomalous_trichromacy_cmfs_Machado2009(cmfs, d_LMS):\n \"\"\"\n Shifts given *LMS* cone fundamentals colour matching functions with given\n :math:`\\\\Delta_{LMS}` shift amount in nanometers to simulate anomalous\n trichromacy using *Machado et al. (2009)* method.\n\n Parameters\n ----------\n cmfs : LMS_ConeFundamentals\n *LMS* cone fundamentals colour matching functions.\n d_LMS : array_like\n :math:`\\\\Delta_{LMS}` shift amount in nanometers.\n\n Notes\n -----\n - Input *LMS* cone fundamentals colour matching functions interval is\n expected to be 1 nanometer, incompatible input will be interpolated\n at 1 nanometer interval.\n - Input :math:`\\\\Delta_{LMS}` shift amount is in domain [0, 20].\n\n Returns\n -------\n LMS_ConeFundamentals\n Anomalous trichromacy *LMS* cone fundamentals colour matching\n functions.\n\n Warning\n -------\n *Machado et al. (2009)* simulation of tritanomaly is based on the shift\n paradigm as an approximation to the actual phenomenon and restrain the\n model from trying to model tritanopia.\n The pre-generated matrices are using a shift value in domain [5, 59]\n contrary to the domain [0, 20] used for protanomaly and deuteranomaly\n simulation.\n\n References\n ----------\n :cite:`Colblindorb`, :cite:`Colblindora`, :cite:`Colblindorc`,\n :cite:`Machado2009`\n\n Examples\n --------\n >>> from colour import LMS_CMFS\n >>> cmfs = LMS_CMFS['Stockman & Sharpe 2 Degree Cone Fundamentals']\n >>> cmfs[450]\n array([ 0.0498639, 0.0870524, 0.955393 ])\n >>> anomalous_trichromacy_cmfs_Machado2009(cmfs, np.array([15, 0, 0]))[450]\n ... # doctest: +ELLIPSIS\n array([ 0.0891288..., 0.0870524 , 0.955393 ])\n \"\"\"\n\n cmfs = cmfs.copy()\n if cmfs.shape.interval != 1:\n cmfs.interpolate(SpectralShape(interval=1))\n\n cmfs.extrapolator_args = {'method': 'Constant', 'left': 0, 'right': 0}\n\n L, M, _S = tsplit(cmfs.values)\n d_L, d_M, d_S = tsplit(d_LMS)\n\n if d_S != 0:\n usage_warning(\n '\"Machado et al. 
(2009)\" simulation of tritanomaly is based on '\n 'the shift paradigm as an approximation to the actual phenomenon '\n 'and restrain the model from trying to model tritanopia.\\n'\n 'The pre-generated matrices are using a shift value in domain '\n '[5, 59] contrary to the domain [0, 20] used for protanomaly and '\n 'deuteranomaly simulation.')\n\n area_L = np.trapz(L, cmfs.wavelengths)\n area_M = np.trapz(M, cmfs.wavelengths)\n\n def alpha(x):\n \"\"\"\n Computes :math:`alpha` factor.\n \"\"\"\n\n return (20 - x) / 20\n\n # Corrected equations as per:\n # http://www.inf.ufrgs.br/~oliveira/pubs_files/\n # CVD_Simulation/CVD_Simulation.html#Errata\n L_a = alpha(d_L) * L + 0.96 * area_L / area_M * (1 - alpha(d_L)) * M\n M_a = alpha(d_M) * M + 1 / 0.96 * area_M / area_L * (1 - alpha(d_M)) * L\n S_a = cmfs[cmfs.wavelengths - d_S][:, 2]\n\n LMS_a = tstack([L_a, M_a, S_a])\n cmfs[cmfs.wavelengths] = LMS_a\n\n severity = '{0}, {1}, {2}'.format(d_L, d_M, d_S)\n template = '{0} - Anomalous Trichromacy ({1})'\n cmfs.name = template.format(cmfs.name, severity)\n cmfs.strict_name = template.format(cmfs.strict_name, severity)\n\n return cmfs\n\n\ndef anomalous_trichromacy_matrix_Machado2009(cmfs, primaries, d_LMS):\n \"\"\"\n Computes *Machado et al. (2009)* *CVD* matrix for given *LMS* cone\n fundamentals colour matching functions and display primaries tri-spectral\n distributions with given :math:`\\\\Delta_{LMS}` shift amount in nanometers\n to simulate anomalous trichromacy.\n\n Parameters\n ----------\n cmfs : LMS_ConeFundamentals\n *LMS* cone fundamentals colour matching functions.\n primaries : RGB_DisplayPrimaries\n *RGB* display primaries tri-spectral distributions.\n d_LMS : array_like\n :math:`\\\\Delta_{LMS}` shift amount in nanometers.\n\n Notes\n -----\n - Input *LMS* cone fundamentals colour matching functions interval is\n expected to be 1 nanometer, incompatible input will be interpolated\n at 1 nanometer interval.\n - Input :math:`\\\\Delta_{LMS}` shift amount is in domain [0, 20].\n\n Returns\n -------\n ndarray\n Anomalous trichromacy matrix.\n\n References\n ----------\n :cite:`Colblindorb`, :cite:`Colblindora`, :cite:`Colblindorc`,\n :cite:`Machado2009`\n\n Examples\n --------\n >>> from colour import DISPLAYS_RGB_PRIMARIES, LMS_CMFS\n >>> cmfs = LMS_CMFS['Stockman & Sharpe 2 Degree Cone Fundamentals']\n >>> d_LMS = np.array([15, 0, 0])\n >>> primaries = DISPLAYS_RGB_PRIMARIES['Apple Studio Display']\n >>> anomalous_trichromacy_matrix_Machado2009(cmfs, primaries, d_LMS)\n ... # doctest: +ELLIPSIS\n array([[-0.2777465..., 2.6515008..., -1.3737543...],\n [ 0.2718936..., 0.2004786..., 0.5276276...],\n [ 0.0064404..., 0.2592157..., 0.7343437...]])\n \"\"\"\n\n if cmfs.shape.interval != 1:\n cmfs = cmfs.copy().interpolate(SpectralShape(interval=1))\n\n M_n = RGB_to_WSYBRG_matrix(cmfs, primaries)\n cmfs_a = anomalous_trichromacy_cmfs_Machado2009(cmfs, d_LMS)\n M_a = RGB_to_WSYBRG_matrix(cmfs_a, primaries)\n\n return dot_matrix(np.linalg.inv(M_n), M_a)\n\n\ndef cvd_matrix_Machado2009(deficiency, severity):\n \"\"\"\n Computes *Machado et al. (2009)* *CVD* matrix for given deficiency and\n severity using the pre-computed matrices dataset.\n\n Parameters\n ----------\n deficiency : unicode\n {'Protanomaly', 'Deuteranomaly', 'Tritanomaly'}\n Colour blindness / vision deficiency types :\n - *Protanomaly* : defective long-wavelength cones (L-cones). 
The\n complete absence of L-cones is known as *Protanopia* or\n *red-dichromacy*.\n - *Deuteranomaly* : defective medium-wavelength cones (M-cones) with\n peak of sensitivity moved towards the red sensitive cones. The complete\n absence of M-cones is known as *Deuteranopia*.\n - *Tritanomaly* : defective short-wavelength cones (S-cones), an\n alleviated form of blue-yellow color blindness. The complete absence of\n S-cones is known as *Tritanopia*.\n severity : numeric\n Severity of the colour vision deficiency in domain [0, 1].\n\n Returns\n -------\n ndarray\n *CVD* matrix.\n\n References\n ----------\n :cite:`Colblindorb`, :cite:`Colblindora`, :cite:`Colblindorc`,\n :cite:`Machado2009`\n\n Examples\n --------\n >>> cvd_matrix_Machado2009('Protanomaly', 0.15) # doctest: +ELLIPSIS\n array([[ 0.7869875..., 0.2694875..., -0.0564735...],\n [ 0.0431695..., 0.933774 ..., 0.023058 ...],\n [-0.004238 ..., -0.0024515..., 1.0066895...]])\n \"\"\"\n\n if deficiency.lower() == 'tritanomaly':\n usage_warning(\n '\"Machado et al. (2009)\" simulation of tritanomaly is based on '\n 'the shift paradigm as an approximation to the actual phenomenon '\n 'and restrain the model from trying to model tritanopia.\\n'\n 'The pre-generated matrices are using a shift value in domain '\n '[5, 59] contrary to the domain [0, 20] used for protanomaly and '\n 'deuteranomaly simulation.')\n\n matrices = CVD_MATRICES_MACHADO2010[deficiency]\n samples = np.array(sorted(matrices.keys()))\n index = min(np.searchsorted(samples, severity), len(samples) - 1)\n\n a = samples[index]\n b = samples[min(index + 1, len(samples) - 1)]\n\n m1, m2 = matrices[a], matrices[b]\n\n if a == b:\n # 1.0 severity CVD matrix, returning directly.\n return m1\n else:\n return m1 + (severity - a) * ((m2 - m1) / (b - a))\n", "# -*- coding: utf-8 -*-\n\"\"\"\nDefines unit tests for :mod:`colour.models.rgb.transfer_functions.filmic_pro`\nmodule.\n\"\"\"\n\nfrom __future__ import division, unicode_literals\n\nimport numpy as np\nimport unittest\n\nfrom colour.models.rgb.transfer_functions import (log_encoding_FilmicPro6,\n log_decoding_FilmicPro6)\nfrom colour.utilities import domain_range_scale, ignore_numpy_errors\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n__all__ = ['TestLogEncoding_FilmicPro6', 'TestLogDecoding_FilmicPro6']\n\n\nclass TestLogEncoding_FilmicPro6(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.models.rgb.transfer_functions.filmic_pro.\\\nlog_encoding_FilmicPro6` definition unit tests methods.\n \"\"\"\n\n def test_log_encoding_FilmicPro6(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.filmic_pro.\\\nlog_encoding_FilmicPro6` definition.\n \"\"\"\n\n self.assertAlmostEqual(log_encoding_FilmicPro6(0.0), -np.inf, places=7)\n\n self.assertAlmostEqual(\n log_encoding_FilmicPro6(0.18), 0.606634519924703, places=7)\n\n self.assertAlmostEqual(\n log_encoding_FilmicPro6(1.0), 1.000000819999999, places=7)\n\n def test_n_dimensional_log_encoding_FilmicPro6(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.filmic_pro.\\\nlog_encoding_FilmicPro6` definition n-dimensional arrays support.\n \"\"\"\n\n x = 0.18\n y = log_encoding_FilmicPro6(x)\n\n x = np.tile(x, 6)\n y = np.tile(y, 6)\n np.testing.assert_almost_equal(\n log_encoding_FilmicPro6(x), y, decimal=7)\n\n x = 
np.reshape(x, (2, 3))\n y = np.reshape(y, (2, 3))\n np.testing.assert_almost_equal(\n log_encoding_FilmicPro6(x), y, decimal=7)\n\n x = np.reshape(x, (2, 3, 1))\n y = np.reshape(y, (2, 3, 1))\n np.testing.assert_almost_equal(\n log_encoding_FilmicPro6(x), y, decimal=7)\n\n def test_domain_range_scale_log_encoding_FilmicPro6(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.filmic_pro.\\\nlog_encoding_FilmicPro6` definition domain and range scale support.\n \"\"\"\n\n x = 0.18\n y = log_encoding_FilmicPro6(x)\n\n d_r = (('reference', 1), (1, 1), (100, 100))\n for scale, factor in d_r:\n with domain_range_scale(scale):\n np.testing.assert_almost_equal(\n log_encoding_FilmicPro6(x * factor), y * factor, decimal=7)\n\n @ignore_numpy_errors\n def test_nan_log_encoding_FilmicPro6(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.filmic_pro.\\\nlog_encoding_FilmicPro6` definition nan support.\n \"\"\"\n\n log_encoding_FilmicPro6(\n np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))\n\n\nclass TestLogDecoding_FilmicPro6(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.models.rgb.transfer_functions.filmic_pro.\\\nlog_decoding_FilmicPro6` definition unit tests methods.\n \"\"\"\n\n def test_log_decoding_FilmicPro6(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.filmic_pro.\\\nlog_decoding_FilmicPro6` definition.\n \"\"\"\n\n np.testing.assert_array_equal(log_decoding_FilmicPro6(-np.inf), 0.0)\n\n self.assertAlmostEqual(\n log_decoding_FilmicPro6(0.606634519924703), 0.18, places=7)\n\n self.assertAlmostEqual(\n log_decoding_FilmicPro6(1.000000819999999), 1.0, places=7)\n\n def test_n_dimensional_log_decoding_FilmicPro6(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.filmic_pro.\\\nlog_decoding_FilmicPro6` definition n-dimensional arrays support.\n \"\"\"\n\n y = 0.606634519924703\n x = log_decoding_FilmicPro6(y)\n\n y = np.tile(y, 6)\n x = np.tile(x, 6)\n np.testing.assert_almost_equal(\n log_decoding_FilmicPro6(y), x, decimal=7)\n\n y = np.reshape(y, (2, 3))\n x = np.reshape(x, (2, 3))\n np.testing.assert_almost_equal(\n log_decoding_FilmicPro6(y), x, decimal=7)\n\n y = np.reshape(y, (2, 3, 1))\n x = np.reshape(x, (2, 3, 1))\n np.testing.assert_almost_equal(\n log_decoding_FilmicPro6(y), x, decimal=7)\n\n def test_domain_range_scale_log_decoding_FilmicPro6(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.filmic_pro.\\\nlog_decoding_FilmicPro6` definition domain and range scale support.\n \"\"\"\n\n y = 0.606634519924703\n x = log_decoding_FilmicPro6(y)\n\n d_r = (('reference', 1), (1, 1), (100, 100))\n for scale, factor in d_r:\n with domain_range_scale(scale):\n np.testing.assert_almost_equal(\n log_decoding_FilmicPro6(y * factor), x * factor, decimal=7)\n\n @ignore_numpy_errors\n def test_nan_log_decoding_FilmicPro6(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.filmic_pro.\\\nlog_decoding_FilmicPro6` definition nan support.\n \"\"\"\n\n log_decoding_FilmicPro6(\n np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))\n\n\nif __name__ == '__main__':\n unittest.main()\n", "# -*- coding: utf-8 -*-\n\"\"\"\nDefines unit tests for :mod:`colour.models.rgb.transfer_functions.dicom_gsdf`\nmodule.\n\"\"\"\n\nfrom __future__ import division, unicode_literals\n\nimport numpy as np\nimport unittest\n\nfrom colour.models.rgb.transfer_functions import oetf_DICOMGSDF, eotf_DICOMGSDF\nfrom colour.utilities import domain_range_scale, ignore_numpy_errors\n\n__author__ = 'Colour 
Developers'\n__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n__all__ = ['TestOetf_DICOMGSDF', 'TestEotf_DICOMGSDF']\n\n\nclass TestOetf_DICOMGSDF(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.models.rgb.transfer_functions.dicom_gsdf.\\\noetf_DICOMGSDF` definition unit tests methods.\n \"\"\"\n\n def test_oetf_DICOMGSDF(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.dicom_gsdf.\\\noetf_DICOMGSDF` definition.\n \"\"\"\n\n self.assertAlmostEqual(\n oetf_DICOMGSDF(0.05), 0.001007281350787, places=7)\n\n self.assertAlmostEqual(\n oetf_DICOMGSDF(130.0662), 0.500486263438448, places=7)\n\n self.assertAlmostEqual(\n oetf_DICOMGSDF(4000), 1.000160314715578, places=7)\n\n self.assertAlmostEqual(\n oetf_DICOMGSDF(130.0662, out_int=True), 512, places=7)\n\n def test_n_dimensional_oetf_DICOMGSDF(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.dicom_gsdf.\\\noetf_DICOMGSDF` definition n-dimensional arrays support.\n \"\"\"\n\n L = 130.0662\n J = oetf_DICOMGSDF(L)\n\n L = np.tile(L, 6)\n J = np.tile(J, 6)\n np.testing.assert_almost_equal(oetf_DICOMGSDF(L), J, decimal=7)\n\n L = np.reshape(L, (2, 3))\n J = np.reshape(J, (2, 3))\n np.testing.assert_almost_equal(oetf_DICOMGSDF(L), J, decimal=7)\n\n L = np.reshape(L, (2, 3, 1))\n J = np.reshape(J, (2, 3, 1))\n np.testing.assert_almost_equal(oetf_DICOMGSDF(L), J, decimal=7)\n\n def test_domain_range_scale_oetf_DICOMGSDF(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.dicom_gsdf.\\\noetf_DICOMGSDF` definition domain and range scale support.\n \"\"\"\n\n L = 130.0662\n J = oetf_DICOMGSDF(L)\n\n d_r = (('reference', 1), (1, 1), (100, 100))\n for scale, factor in d_r:\n with domain_range_scale(scale):\n np.testing.assert_almost_equal(\n oetf_DICOMGSDF(L * factor), J * factor, decimal=7)\n\n @ignore_numpy_errors\n def test_nan_oetf_DICOMGSDF(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.dicom_gsdf.\\\noetf_DICOMGSDF` definition nan support.\n \"\"\"\n\n oetf_DICOMGSDF(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))\n\n\nclass TestEotf_DICOMGSDF(unittest.TestCase):\n \"\"\"\n Defines :func:`colour.models.rgb.transfer_functions.dicom_gsdf.\neotf_DICOMGSDF` definition unit tests methods.\n \"\"\"\n\n def test_eotf_DICOMGSDF(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.dicom_gsdf.\\\neotf_DICOMGSDF` definition.\n \"\"\"\n\n self.assertAlmostEqual(\n eotf_DICOMGSDF(0.001007281350787), 0.050143440671692, places=7)\n\n self.assertAlmostEqual(\n eotf_DICOMGSDF(0.500486263438448), 130.062864706476550, places=7)\n\n self.assertAlmostEqual(\n eotf_DICOMGSDF(1.000160314715578), 3997.586161113322300, places=7)\n\n self.assertAlmostEqual(\n eotf_DICOMGSDF(512, in_int=True), 130.065284012159790, places=7)\n\n def test_n_dimensional_eotf_DICOMGSDF(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.dicom_gsdf.\\\neotf_DICOMGSDF` definition n-dimensional arrays support.\n \"\"\"\n\n J = 0.500486263438448\n L = eotf_DICOMGSDF(J)\n\n J = np.tile(J, 6)\n L = np.tile(L, 6)\n np.testing.assert_almost_equal(eotf_DICOMGSDF(J), L, decimal=7)\n\n J = np.reshape(J, (2, 3))\n L = np.reshape(L, (2, 3))\n np.testing.assert_almost_equal(eotf_DICOMGSDF(J), L, decimal=7)\n\n J = np.reshape(J, (2, 3, 1))\n L = np.reshape(L, (2, 3, 1))\n 
np.testing.assert_almost_equal(eotf_DICOMGSDF(J), L, decimal=7)\n\n def test_domain_range_scale_eotf_DICOMGSDF(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.dicom_gsdf.\\\neotf_DICOMGSDF` definition domain and range scale support.\n \"\"\"\n\n J = 0.500486263438448\n L = eotf_DICOMGSDF(J)\n\n d_r = (('reference', 1), (1, 1), (100, 100))\n for scale, factor in d_r:\n with domain_range_scale(scale):\n np.testing.assert_almost_equal(\n eotf_DICOMGSDF(J * factor), L * factor, decimal=7)\n\n @ignore_numpy_errors\n def test_nan_eotf_DICOMGSDF(self):\n \"\"\"\n Tests :func:`colour.models.rgb.transfer_functions.dicom_gsdf.\\\neotf_DICOMGSDF` definition nan support.\n \"\"\"\n\n eotf_DICOMGSDF(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.reshape", "numpy.array", "numpy.tile" ], [ "numpy.reshape", "numpy.array", "numpy.tile" ], [ "numpy.array" ], [ "numpy.reshape", "numpy.tile" ], [ "numpy.linalg.inv", "numpy.array", "numpy.trapz", "numpy.searchsorted" ], [ "numpy.reshape", "numpy.array", "numpy.tile" ], [ "numpy.reshape", "numpy.array", "numpy.tile" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kingreatwill/penter
[ "2d027fd2ae639ac45149659a410042fe76b9dab0", "2d027fd2ae639ac45149659a410042fe76b9dab0" ]
[ "third/opencv/gaussian_mix.py", "ml/sampling/demo.py" ]
[ "#!/usr/bin/env python\n\n# Python 2/3 compatibility\nfrom __future__ import print_function\nimport sys\nPY3 = sys.version_info[0] == 3\n\nif PY3:\n xrange = range\n\nimport numpy as np\nimport cv2 as cv\n\nfrom numpy import random\n\ndef make_gaussians(cluster_n, img_size):\n points = []\n ref_distrs = []\n for _i in range(cluster_n):\n mean = (0.1 + 0.8*random.rand(2)) * img_size\n a = (random.rand(2, 2)-0.5)*img_size*0.1\n cov = np.dot(a.T, a) + img_size*0.05*np.eye(2)\n n = 100 + random.randint(900)\n pts = random.multivariate_normal(mean, cov, n)\n points.append( pts )\n ref_distrs.append( (mean, cov) )\n points = np.float32( np.vstack(points) )\n return points, ref_distrs\n\ndef draw_gaussain(img, mean, cov, color):\n x, y = np.int32(mean)\n w, u, _vt = cv.SVDecomp(cov)\n ang = np.arctan2(u[1, 0], u[0, 0])*(180/np.pi)\n s1, s2 = np.sqrt(w)*3.0\n cv.ellipse(img, (x, y), (s1, s2), ang, 0, 360, color, 1, cv.LINE_AA)\n\n\ndef main():\n cluster_n = 5\n img_size = 512\n\n print('press any key to update distributions, ESC - exit\\n')\n\n while True:\n print('sampling distributions...')\n points, ref_distrs = make_gaussians(cluster_n, img_size)\n\n print('EM (opencv) ...')\n em = cv.ml.EM_create()\n em.setClustersNumber(cluster_n)\n em.setCovarianceMatrixType(cv.ml.EM_COV_MAT_GENERIC)\n em.trainEM(points)\n means = em.getMeans()\n covs = em.getCovs() # Known bug: https://github.com/opencv/opencv/pull/4232\n found_distrs = zip(means, covs)\n print('ready!\\n')\n\n img = np.zeros((img_size, img_size, 3), np.uint8)\n for x, y in np.int32(points):\n cv.circle(img, (x, y), 1, (255, 255, 255), -1)\n for m, cov in ref_distrs:\n draw_gaussain(img, m, cov, (0, 255, 0))\n for m, cov in found_distrs:\n draw_gaussain(img, m, cov, (0, 0, 255))\n\n cv.imshow('gaussian mixture', img)\n ch = cv.waitKey(0)\n if ch == 27:\n break\n\n print('Done')\n\n\nif __name__ == '__main__':\n print(__doc__)\n main()\n cv.destroyAllWindows()\n", "target_x_0 = [-1.0, 1.0]\ntarget_x_1 = [-1.0, 1.0]\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nvertexes = np.array([[target_x_0[0], target_x_1[0]],\n [target_x_0[0], target_x_1[1]],\n [target_x_0[1], target_x_1[1]],\n [target_x_0[1], target_x_1[0]],\n [target_x_0[0], target_x_1[0]]]\n )\ng1 = lambda x: x[0]\ng2 = lambda x: x[1]\nplt.plot(g1(vertexes), g2(vertexes), 'red')\nplt.show()" ]
[ [ "numpy.dot", "numpy.sqrt", "numpy.random.multivariate_normal", "numpy.eye", "numpy.int32", "numpy.arctan2", "numpy.random.rand", "numpy.zeros", "numpy.vstack", "numpy.random.randint" ], [ "numpy.array", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
edfong/npl
[ "9e94287a7c253a33addcafb431c384be8a7dd8df" ]
[ "experiments/Toy_GMM/run_NPL_toygmm.py" ]
[ "\"\"\" \nRunning RR-NPL for Toy GMM (set R_restarts = 0 for FI-NPL)\n\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport time\nimport copy\nfrom npl import bootstrap_gmm as bgmm\nfrom npl.maximise_gmm import init_toy\nfrom npl.maximise_gmm import sampleprior_toy\n\ndef load_data(seed):\n #load data and parameters\n gmm_data = np.load('./sim_data/gmm_data_sep_seed{}.npy'.format(seed),allow_pickle = True).item()\n\n #Extract parameters from data\n N_data = gmm_data['N']\n K_clusters = gmm_data['K']\n D_data = gmm_data['D']\n y = gmm_data['y']\n \n return y,N_data,K_clusters,D_data\n\ndef main(B_postsamples,R_restarts): #B_postsamples is number of bootstrap samples, R_restarts is number of repeats in RR-NPL (set to 0 for FI-NPL)\n for n in range(30):\n seed = 100+n\n\n np.random.seed(seed)\n y,N_data,K_clusters,D_data = load_data(seed)\n #prior settings\n alph_conc=0 #alph_concentration\n T_trunc = 500 #DP truncation\n tol = 1e-7\n max_iter = 6000\n \n\n start = time.time()\n pi_bb,mu_bb,sigma_bb= bgmm.bootstrap_gmm(B_postsamples,alph_conc,T_trunc,y,N_data,D_data,K_clusters,R_restarts,tol,max_iter,init_toy,None)\n end = time.time()\n\n print(end-start)\n\n #save file\n dict_bb = {'pi': pi_bb.tolist(),'sigma': sigma_bb.tolist(), 'mu': mu_bb.tolist(),'time': end-start}\n\n par_bb = pd.Series(data = dict_bb)\n\n if R_restarts ==0: \n par_bb.to_pickle('./parameters/par_bb_sep_fi__B{}_seed{}'.format(B_postsamples,seed)) #uncomment for FI-NPL\n else:\n par_bb.to_pickle('./parameters/par_bb_sep_rr_rep{}_B{}_seed{}'.format(R_restarts,B_postsamples,seed))\n\ndef main_plot(B_postsamples,R_restarts):\n seed = 100 \n np.random.seed(seed)\n\n gmm_data = np.load('./sim_data_plot/gmm_data_sep.npy',allow_pickle = True).item()\n\n #Extract parameters from data\n N_data = gmm_data['N']\n K_clusters = gmm_data['K']\n D_data = gmm_data['D']\n y = gmm_data['y']\n #prior settings\n alph_conc=0 #alph_concentration\n T_trunc = 500 #DP truncation\n tol = 1e-7\n max_iter = 6000\n\n start = time.time()\n pi_bb,mu_bb,sigma_bb= bgmm.bootstrap_gmm(B_postsamples,alph_conc,T_trunc,y,N_data,D_data,K_clusters,R_restarts,tol,max_iter,init_toy,None)\n end = time.time()\n\n print(end-start)\n\n #save file\n dict_bb = {'pi': pi_bb.tolist(),'sigma': sigma_bb.tolist(), 'mu': mu_bb.tolist(),'time': end-start}\n\n par_bb = pd.Series(data = dict_bb)\n\n if R_restarts ==0: \n par_bb.to_pickle('./parameters/par_bb_sep_fi_B{}_plot'.format(B_postsamples))\n else:\n par_bb.to_pickle('./parameters/par_bb_sep_rr_rep{}_B{}_plot_tol'.format(R_restarts,B_postsamples))\n\nif __name__=='__main__': \n #RR-NPL and FI-NPL experiments\n main(2000,10)\n main(2000,0)\n\n #Posterior samples for plots\n main_plot(2000,0)\n main_plot(2000,1)\n main_plot(2000,2)\n main_plot(2000,5)\n main_plot(2000,10)\n" ]
[ [ "numpy.load", "pandas.Series", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
manuelamigotto/Application-of-data-science-and-machine-learning-on-the-one-health-project-combatting-zoonoses
[ "7219feaa44ac1b1fa056be0190fd048af7a89598" ]
[ "geocode_utils.py" ]
[ "import datetime\r\nimport os\r\nimport pyarrow.feather as feather\r\nimport pandas as pd\r\nimport jsonpickle\r\n\r\ndef generate_error_file_name(error_foldername, error_filename):\r\n now = datetime.datetime.now()\r\n year = now.strftime(\"%Y\")\r\n month = now.strftime(\"%m\")\r\n day = now.strftime(\"%d\")\r\n time = now.strftime(\"%H%M%S\")\r\n\r\n '''Check if directory exists, if not, create it'''\r\n CHECK_FOLDER = os.path.isdir(error_foldername)\r\n\r\n # If folder doesn't exist, then create it.\r\n if not CHECK_FOLDER:\r\n os.makedirs(error_foldername)\r\n\r\n file_errors_name = error_filename + year + month + day + time + \".txt\"\r\n return error_foldername + '/' + file_errors_name\r\n\r\ndef set_lat_long(df, row_id, latitude, longitude):\r\n df.iloc[row_id,df.columns.get_loc('latitude')] = latitude\r\n df.iloc[row_id,df.columns.get_loc('longitude')] = longitude\r\n\r\ndef set_notFound(df, row_id):\r\n df.iloc[row_id,df.columns.get_loc('latitude')]=\"NotFound\" \r\n df.iloc[row_id,df.columns.get_loc('longitude')]=\"NotFound\"\r\n\r\ndef add_complete_geocode_address(df1):\r\n #create column for complete address for geocoding \r\n df1['geocode_address'] = df1['INDIRIZZO PRELIEVO'].fillna('-')\r\n df1['CAP PRELIEVO'] = df1['CAP PRELIEVO'].astype(str).str[:5]\r\n df1['geocode_address'] = df1['geocode_address'].astype(str) + ',' + \\\r\n df1['CAP PRELIEVO'].astype(str) + ',' + df1['COMUNE PRELIEVO'] + ',' + df1['PROVINCIA PRELIEVO'] + ',' + 'Italia'\r\n df1['geocode_address'] = df1['geocode_address'].map(lambda x: x.lstrip(',-'))\r\n return df1 \r\n \r\ndef create_complete_address(file_in, file_out, file_type, printOn = True, saveOn = True):\r\n if file_type == 'feather':\r\n df1 = feather.read_feather(file_in) #'dataframe_sigla'\r\n if file_type == 'csv':\r\n df1 = pd.read_csv(file_in)\r\n\r\n df1 = add_complete_geocode_address(df1)\r\n \r\n if printOn:\r\n print(\"item with prov address: \" + str(sum(pd.notnull(df1['INDIRIZZO ATTPROV CORRELATA']))))\r\n print(\"item with prel address: \" + str(sum(pd.notnull(df1['INDIRIZZO ATTPREL CORRELATA']))))\r\n print(\"item with NO prel address: \" + str(sum(pd.isnull(df1['INDIRIZZO ATTPREL CORRELATA']))))\r\n \r\n if saveOn:\r\n if file_type == 'csv':\r\n #save dataframe in csv file format\r\n df1.to_csv(file_out, index=False) #'df_sigla_country.csv'\r\n if file_type == 'feather':\r\n feather.write_feather(df1, file_out)\r\n else:\r\n return df1\r\n \r\ndef create_rag_soc_address(file_in, file_out, file_type):\r\n print(\"Adding geocode addresses...\")\r\n if file_type == 'feather':\r\n df1 = feather.read_feather(file_in) #'dataframe_sigla'\r\n if file_type == 'csv':\r\n df1 = pd.read_csv(file_in)\r\n #create address geocoding column\r\n df1['geocode_address'] = df1['RAGSOCATTPROVCORR']\r\n df1['geocode_address'] = df1['geocode_address'].fillna(df1['RAGIONE SOCATTPRELCORR']) + ',' + \\\r\n df1['CAP PRELIEVO'].astype(str).str[:5] + ',' + \\\r\n df1['COMUNE PRELIEVO'] + ',' + df1['PROVINCIA PRELIEVO'] + ',' + 'Italia'\r\n \r\n #save dataframe in csv file format\r\n df1.to_csv(file_out, index=False)\r\n print(\"File with geocode addresses saved\")\r\n \r\ndef create_centroid_address(file_in, file_out, file_type):\r\n print(\"Adding geocode addresses...\")\r\n if file_type == 'feather':\r\n df1 = feather.read_feather(file_in) #'dataframe_sigla'\r\n if file_type == 'csv':\r\n df1 = pd.read_csv(file_in)\r\n\r\n #create address geocoding column\r\n df1['geocode_address'] = df1['CAP PRELIEVO'].astype(str).str[:5] + ',' + \\\r\n df1['COMUNE 
PRELIEVO'] + ',' + df1['PROVINCIA PRELIEVO'] + ',' + 'Italia'\r\n df1['geocode_address'] = df1['geocode_address'].astype(str).map(lambda x: x.lstrip(',-')) \r\n \r\n #save dataframe in csv file format\r\n df1.to_csv(file_out, index=False)\r\n print(\"File with geocode addresses saved\")\r\n \r\ndef open_addresses_dict_if_exist(addresses_dict_json):\r\n addr_dict = {}\r\n #check if extists json file of address dictionary (key=string address value=DistanceFeature)\r\n file_exists = os.path.exists(addresses_dict_json)\r\n \r\n if file_exists:\r\n with open(addresses_dict_json, \"r\") as json_file:\r\n geo_dict_obj = jsonpickle.decode(json_file.read()) \r\n addr_dict = geo_dict_obj.features_dict\r\n return addr_dict" ]
[ [ "pandas.notnull", "pandas.read_csv", "pandas.isnull" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
Wuyunfan-BUPT/ImageProcess
[ "32c6c12f7f4a7e5493d9791f8be37c9adc6065b3" ]
[ "rawImageProcess/raw_main.py" ]
[ "import imageio\nimport copy as cp\nimport numpy as np\nimport cv2\nfrom PIL import Image\n\n'''\n ###############\n addBoundary(img, kernel)\n convolve1(img, kernel, filter_type, mode='same')\n convolve(img, kernel, filter_type, mode='same')\n wise_element_sum(img, kernel, filter_type)\n 上面四个函数用于构建高斯滤波器,与我写的第二章节的作业中的滤波器一样(我是先做第二章作业再做第一章的)\n'''\n\ndef addBoundary(img, kernel):\n '''\n 给图像添加边界\n :param img: 输入图像\n :param kernel:卷积核\n :return: 加边界后的图像\n '''\n kernel_size = kernel.shape[0]\n addLine = (int)((kernel_size - 1) / 2)\n img_ = cv2.copyMakeBorder(img, addLine, addLine, addLine, addLine, cv2.BORDER_CONSTANT, value=0);\n return img_\n\ndef convolve1(img, kernel, filter_type, mode='same'):\n '''\n 单通道图像与卷积核的卷积,主要用于灰度图\n :param img: 输入单通道图像矩阵\n :param kernel: 卷积核\n :param model: medium,gauss,mean, 即选择中值滤波、高斯滤波、还是均值滤波,其他滤波方式以后添加\n :return: 卷积后的图像\n '''\n if mode == 'same':\n img_ = addBoundary(img, kernel)\n kernel_height = kernel.shape[0]\n kernel_width = kernel.shape[1]\n # 横向卷积、纵向卷积的次数\n conv_height = img_.shape[0] - kernel_height + 1\n conv_width = img_.shape[1] - kernel_width + 1\n # 卷积结果存储在conv中\n conv = np.zeros((conv_height, conv_width), dtype='uint8')\n\n for i in range(conv_height):\n for j in range(conv_width):\n conv[i][j] = wise_element_sum(img_[i:i + kernel_height, j:j + kernel_width], kernel, filter_type)\n return conv\n\ndef convolve(img, kernel, filter_type, mode='same'):\n '''\n 三通道卷积,主要用于彩色图\n :param img: 输入图像矩阵\n :param kernel: 卷积核\n :param mode: medium,gauss,mean, 即选择中值滤波、高斯滤波、还是均值滤波,其他滤波方式以后添加\n :return: 卷积后的图像矩阵\n '''\n\n R = np.mat(img[:, :, 0])\n G = np.mat(img[:, :, 1])\n B = np.mat(img[:, :, 2])\n conv_B = convolve1(img[:, :, 0], kernel, filter_type, mode)\n conv_G = convolve1(img[:, :, 1], kernel, filter_type, mode)\n conv_R = convolve1(img[:, :, 2], kernel, filter_type, mode)\n\n conv_img = np.dstack([conv_B, conv_G, conv_R])\n return conv_img\n\ndef wise_element_sum(img, kernel, filter_type):\n '''\n 对于某一次卷积结果的取值\n :param img: 输入的图片片段矩阵\n :param kernel: 卷积核\n :param modle: medium,gauss,mean, 即选择中值滤波、高斯滤波、还是均值滤波,其他滤波方式以后添加\n :return: 返回该像素值\n '''\n if filter_type == 'medium_Filter':\n temp = img * kernel\n list = []\n for i in range(temp.shape[0]):\n for j in range(temp.shape[1]):\n list.append(temp[i][j])\n list.sort()\n if list[int(len(list) / 2)] > 255:\n return 255\n elif list[int(len(list) / 2)] < 0:\n return 0\n else:\n return list[int(len(list) / 2)]\n # 均值、高斯滤波等\n else:\n result = (img * kernel).sum()\n if result < 0:\n return 0\n elif result > 255:\n return 255\n else:\n return result\n\ndef Gauss_Fileter(img, kernel_size, sigma):\n '''\n 高斯滤波器\n :param img: 输入图像\n :param kernel_size: 卷积核大小\n :param sigma: 高斯函数的标准差\n :return: 高斯滤波后的图片\n '''\n # 避免除0\n if sigma == 0:\n sigma = 6\n kernel = np.zeros([kernel_size, kernel_size])\n kernel_center = kernel_size / 2 # 卷积核中心位置\n sum_val = 0 # 记录卷积核中数字之和\n for i in range(0, kernel_size):\n for j in range(0, kernel_size):\n kernel[i, j] = np.exp((-(i - kernel_center) ** 2 + (j - kernel_center) ** 2) / (2 * (sigma ** 2)))\n sum_val += kernel[i, j]\n # 得到卷积核\n kernel = kernel / sum_val\n img_out = convolve(img, kernel, filter_type='Gauss_Fileter', mode='same')\n # img_out = scipy.signal.convolve2d(img, kernel, mode='same', boundary='symm')\n # 返回图片\n return img_out\n\n\n\n\n\n\ndef white_balance(img):\n '''\n 原始灰度世界算法\n :param img: cv2.imread读取的图片数据\n :return: 返回的白平衡结果图片数据\n '''\n\n B, G, R = np.double(img[:, :, 0]), np.double(img[:, :, 1]), np.double(img[:, :, 2])\n B_ave, G_ave, R_ave = np.mean(B), 
np.mean(G), np.mean(R)\n K = (B_ave + G_ave + R_ave) / 3\n Kb = K / B_ave\n Kg = K / G_ave\n Kr = K / R_ave\n Bnew = B * Kb\n Gnew = G * Kg\n Rnew = R * Kr\n\n for i in range(len(Bnew)):\n for j in range(len(Bnew[0])):\n Bnew[i][j] = 255 if Bnew[i][j] > 255 else Bnew[i][j]\n Gnew[i][j] = 255 if Gnew[i][j] > 255 else Gnew[i][j]\n Rnew[i][j] = 255 if Rnew[i][j] > 255 else Rnew[i][j]\n\n # print(np.mean(Ba), np.mean(Ga), np.mean(Ra))\n\n dst_img = np.uint8(np.zeros_like(img))\n dst_img[:, :, 0] = Bnew\n dst_img[:, :, 1] = Gnew\n dst_img[:, :, 2] = Rnew\n return dst_img\n\ndef deMosaic(raw_image):\n '''\n 对图片插值,转为RGB图片\n :param raw_image: 输入单通道的图片\n :return: RGB图\n '''\n H = raw_image.shape[0]\n W = raw_image.shape[1]\n R = raw_image\n r_image = cp.deepcopy(R)\n g_image = cp.deepcopy(R)\n b_image = cp.deepcopy(R)\n\n for i in range(0, H - 1, 2):\n for j in range(0, W - 1, 2):\n r_image[i + 1][j] = raw_image[i][j]\n r_image[i + 1][j + 1] = raw_image[i][j]\n r_image[i][j + 1] = raw_image[i][j]\n\n for i in range(0, H - 1, 2):\n for j in range(0, W - 1, 2):\n temp = raw_image[i + 1][j] / 2 + raw_image[i][j + 1] / 2\n g_image[i][j] = temp\n g_image[i + 1][j + 1] = temp\n g_image[i + 1][j] = temp\n g_image[i][j + 1] = temp\n\n for i in range(0, H - 1, 2):\n for j in range(0, W - 1, 2):\n b_image[i + 1][j] = raw_image[i + 1][j + 1]\n b_image[i][j] = raw_image[i + 1][j + 1]\n b_image[i][j + 1] = raw_image[i + 1][j + 1]\n\n rgb_image = cv2.merge([b_image, g_image, r_image])\n return rgb_image\n\n\n\ndef deMosaic1(raw_image):\n '''\n 对图片插值,转为RGB图片,主要与deMosaic()函数的效果作对比\n :param raw_image: 输入单通道的图片\n :return: RGB图\n '''\n H = raw_image.shape[0]\n W = raw_image.shape[1]\n R = raw_image\n r_image = cp.deepcopy(R)\n g_image = cp.deepcopy(R)\n b_image = cp.deepcopy(R)\n\n for i in range(1, H - 1, 3):\n for j in range(1, W - 1, 3):\n temp = (raw_image[i-1][j-1]+raw_image[i+1][j-1]+raw_image[i-1][j+1]+raw_image[i+1][j+1])/4\n r_image[i - 1][j] = temp\n r_image[i+1][j] = raw_image[i][j]\n #r_image[i][j + 1] = raw_image[i][j]\n\n for i in range(0, H - 1, 2):\n for j in range(0, W - 1, 2):\n #temp = raw_image[i + 1][j] / 2 + raw_image[i][j + 1] / 2\n g_image[i][j] = raw_image[i][j]\n g_image[i + 1][j + 1] = raw_image[i][j]\n g_image[i + 1][j] = raw_image[i][j]\n g_image[i][j + 1] = raw_image[i][j]\n\n for i in range(0, H - 1, 2):\n for j in range(1, W - 1, 2):\n b_image[i][j-1] = raw_image[i][j]\n b_image[i][j+1] = raw_image[i][j]\n #b_image[i][j + 1] = raw_image[i + 1][j + 1]\n\n rgb_image = cv2.merge([b_image, g_image, r_image])\n return rgb_image\n\n\ndef deMosaic2(raw_image):\n '''\n 对图片插值,转为RGB图片,主要与deMosaic()函数的效果作对比\n :param raw_image: 输入单通道的图片\n :return: RGB图\n '''\n H = raw_image.shape[0]\n W = raw_image.shape[1]\n R = raw_image\n r_image = cp.deepcopy(R)\n g_image = cp.deepcopy(R)\n b_image = cp.deepcopy(R)\n\n for i in range(0, H - 1, 2):\n for j in range(0, W - 1, 2):\n\n r_image[i,j]=raw_image[i][j]\n r_image[i + 1][j] = raw_image[i][j]/2\n r_image[i + 1][j + 1] = raw_image[i][j]/2\n r_image[i][j + 1] = raw_image[i][j]/2\n\n for i in range(0, H - 1, 2):\n for j in range(0, W - 1, 2):\n temp = raw_image[i + 1][j] / 2 + raw_image[i][j + 1] / 2\n g_image[i][j] = temp\n g_image[i + 1][j + 1] = temp/2\n g_image[i + 1][j] = temp/2\n g_image[i][j + 1] = temp/2\n\n for i in range(0, H - 1, 2):\n for j in range(0, W - 1, 2):\n r_image[i][j] = raw_image[i+1][j+1]\n b_image[i + 1][j] = raw_image[i + 1][j + 1]/2\n b_image[i][j] = raw_image[i + 1][j + 1]/2\n b_image[i][j + 1] = raw_image[i + 1][j + 1]/2\n\n 
rgb_image = cv2.merge([b_image, g_image, r_image])\n return rgb_image\n\n\ndef gamma_correct(img,gamma):\n '''\n 用于gamma校正\n :param img: 输入RGB图\n :return: gamma校正后的图\n '''\n img = np.power(img / 255.0, gamma)\n img = img * 255\n return img\n\n\ndef main():\n Image.MAX_IMAGE_PIXELS = None\n\n img = cv2.imread(\"raw-data-BayerpatternEncodedImage.tif\", 1).astype(np.float)\n\n single_img = img[:, :, 0]\n imageio.imsave('单通道图片.jpg', single_img)\n\n #组合一\n deMosaic_img = deMosaic(single_img)\n imageio.imsave('RGB图片1.jpg', deMosaic_img)\n balance_img = white_balance(deMosaic_img)\n imageio.imsave('白平衡1.jpg', balance_img)\n gamma_img = gamma_correct(balance_img, 1.2)\n imageio.imsave('gamma校正1.jpg', gamma_img)\n Filter_img = Gauss_Fileter(gamma_img, 5, 25)\n imageio.imsave('高斯滤波1-sigma=25.jpg', Filter_img)\n '''\n \n #组合二\n deMosaic_img = deMosaic(single_img)\n imageio.imsave('RGB图片2.jpg', deMosaic_img)\n Filter_img = Gauss_Fileter( deMosaic_img, 5, 25)\n imageio.imsave('高斯滤波2-sigma=25.jpg', Filter_img)\n\n balance_img = white_balance(Filter_img)\n imageio.imsave('白平衡2.jpg', balance_img)\n\n gamma_img = gamma_correct(balance_img, 1.2)\n imageio.imsave('gamma校正2.jpg', gamma_img)\n\n '''\n\n\n\n\n\n\n\n '''\n #组合1各步骤不同参数对图片处理的效果\n \n deMosaic_img = deMosaic(single_img)\n imageio.imsave('RGB图片0.jpg', deMosaic_img)\n\n deMosaic_img1 = deMosaic1(single_img)\n imageio.imsave('RGB图片1.jpg', deMosaic_img1)\n\n deMosaic_img2 = deMosaic2(single_img)\n imageio.imsave('RGB图片2.jpg', deMosaic_img2)\n\n\n balance_img = white_balance(deMosaic_img)\n imageio.imsave('白平衡3.jpg', balance_img)\n balance_img1 = white_balance(deMosaic_img1)\n imageio.imsave('白平衡4.jpg', balance_img1)\n balance_img2 = white_balance(deMosaic_img2)\n imageio.imsave('白平衡5.jpg', balance_img2)\n\n\n gamma_img = gamma_correct(balance_img, 1.2)\n imageio.imsave('gamma校正.jpg', gamma_img)\n\n gamma_img1 = gamma_correct(balance_img, 2)\n imageio.imsave('gamma校正1.jpg', gamma_img1)\n\n gamma_img2 = gamma_correct(balance_img, 4)\n imageio.imsave('gamma校正2.jpg', gamma_img2)\n\n gamma_img3 = gamma_correct(balance_img, 0.8)\n imageio.imsave('gamma校正3.jpg', gamma_img3)\n\n gamma_img4 = gamma_correct(balance_img, 0.5)\n imageio.imsave('gamma校正4.jpg', gamma_img4)\n\n\n gamma_img5 = gamma_correct(balance_img, 0.1)\n imageio.imsave('gamma校正5.jpg', gamma_img5)\n\n\n\n Filter_img = Gauss_Fileter(gamma_img, 5, 15)\n imageio.imsave('高斯滤波-sigma=15.jpg', Filter_img)\n Filter_img1 = Gauss_Fileter(gamma_img, 5, 25)\n imageio.imsave('高斯滤波-sigma=25.jpg', Filter_img1)\n Filter_img3 = Gauss_Fileter(gamma_img, 5, 5)\n imageio.imsave('高斯滤波-sigma=5.jpg', Filter_img3)\n '''\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.power", "numpy.dstack", "numpy.mean", "numpy.zeros_like", "numpy.exp", "numpy.zeros", "numpy.double", "numpy.mat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zelunwu/ECCOv4-py
[ "03b6a1b01fcd17b0b88c25bee205c195df52d7fa", "03b6a1b01fcd17b0b88c25bee205c195df52d7fa" ]
[ "ecco_v4_py/resample_to_latlon.py", "ecco_v4_py/test_llc_array_loading_and_conversion.py" ]
[ "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\nfrom __future__ import division,print_function\nimport numpy as np\nimport matplotlib.pylab as plt\nimport xarray as xr\n\n# The Proj class can convert from geographic (longitude,latitude) to native\n# map projection (x,y) coordinates and vice versa, or from one map projection\n# coordinate system directly to another.\n# https://pypi.python.org/pypi/pyproj?\n#\nimport pyresample as pr\n\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\ndef resample_to_latlon(orig_lons, orig_lats, orig_field,\n new_grid_min_lat, new_grid_max_lat, new_grid_delta_lat,\n new_grid_min_lon, new_grid_max_lon, new_grid_delta_lon,\n nprocs_user=1, radius_of_influence = 100000, \n fill_value = None, mapping_method = 'bin_average') :\n\n #%%\n if type(orig_lats) == xr.core.dataarray.DataArray:\n orig_lons_1d = orig_lons.values.ravel()\n orig_lats_1d = orig_lats.values.ravel()\n \n elif type(orig_lats) == np.ndarray:\n orig_lats_1d = orig_lats.ravel()\n orig_lons_1d = orig_lons.ravel()\n else:\n raise TypeError('orig_lons and orig_lats variable either a DataArray or numpy.ndarray. \\n'\n 'Found type(orig_lons) = %s and type(orig_lats) = %s' % \n (type(orig_lons), type(orig_lats)))\n\n if type(orig_field) == xr.core.dataarray.DataArray:\n orig_field = orig_field.values\n elif type(orig_field) != np.ndarray and \\\n type(orig_field) != np.ma.core.MaskedArray :\n raise TypeError('orig_field must be a type of DataArray, ndarray, or MaskedArray. \\n' \n 'Found type(orig_field) = %s' % type(orig_field))\n\n # prepare for the nearest neighbor mapping\n\n # first define the lat lon points of the original data\n orig_grid = pr.geometry.SwathDefinition(lons=orig_lons_1d,\n lats=orig_lats_1d)\n\n # the latitudes to which we will we interpolate\n num_lats = (new_grid_max_lat - new_grid_min_lat) / new_grid_delta_lat + 1\n num_lons = (new_grid_max_lon - new_grid_min_lon) / new_grid_delta_lat + 1\n\n if (num_lats > 0) and (num_lons > 0):\n # linspace is preferred when using floats!\n lat_tmp = np.linspace(new_grid_min_lat, new_grid_max_lat, num=num_lats)\n lon_tmp = np.linspace(new_grid_min_lon, new_grid_max_lon, num=num_lons)\n\n new_grid_lon, new_grid_lat = np.meshgrid(lon_tmp, lat_tmp)\n\n # define the lat lon points of the two parts.\n new_grid = pr.geometry.GridDefinition(lons=new_grid_lon,\n lats=new_grid_lat)\n\n if mapping_method == 'nearest_neighbor':\n data_latlon_projection = \\\n pr.kd_tree.resample_nearest(orig_grid, orig_field, new_grid,\n radius_of_influence=radius_of_influence,\n fill_value=None,\n nprocs=nprocs_user)\n elif mapping_method == 'bin_average':\n wf = lambda r: 1\n \n data_latlon_projection = \\\n pr.kd_tree.resample_custom(orig_grid, orig_field, new_grid,\n radius_of_influence=radius_of_influence,\n weight_funcs = wf,\n fill_value=None,\n nprocs=nprocs_user)\n else:\n raise ValueError('mapping_method must be nearest_neighbor or bin_average. \\n'\n 'Found mapping_method = %s ' % mapping_method)\n\n else:\n raise ValueError('Number of lat and lon points to interpolate to must be > 0. 
\\n'\n 'Found num_lats = %d, num lons = %d' % (num_lats,num_lons))\n\n return new_grid_lon, new_grid_lat, data_latlon_projection\n #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n", "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 23 15:13:33 2019\n\n@author: ifenty\n\"\"\"\nfrom __future__ import division\n\nimport numpy as np\nimport matplotlib.pylab as plt\n\nfrom .llc_array_conversion import llc_compact_to_tiles\nfrom .llc_array_conversion import llc_compact_to_faces\nfrom .llc_array_conversion import llc_faces_to_tiles\nfrom .llc_array_conversion import llc_faces_to_compact\nfrom .llc_array_conversion import llc_tiles_to_faces\nfrom .llc_array_conversion import llc_tiles_to_compact\n\n\nfrom .read_bin_llc import read_llc_to_compact, read_llc_to_faces, read_llc_to_tiles\nfrom .tile_plot import plot_tiles\n\n\n# Tests the read_bin_llc and llc_array_conversion routines\n# %%\n### Load model grid coordinates (longitude, latitude)\n\n\ndef run_read_bin_and_llc_conversion_test(llc_grid_dir, llc_lons_fname='XC.data', \n llc_hfacc_fname='hFacC.data', llc=90, \n llc_grid_filetype = '>f', \n make_plots=False):\n\n \"\"\"\n\n Runs test on the read_bin_llc and llc_conversion routines\n\n\n Parameters\n ----------\n llc_grid_dir : string\n A string with the directory of the binary file to open\n\n llc_lons_fname : string\n A string with the name of the XC grid file [XC.data]\n\n llc_hfacc_fname : string\n A string with the name of the hfacC grid file [hFacC.data]\n\n llc : int\n the size of the llc grid. For ECCO v4, we use the llc90 domain \n so `llc` would be `90`. \n Default: 90\n\n llc_grid_filetype: string\n the file type, default is big endian (>) 32 bit float (f)\n alternatively, ('<d') would be little endian (<) 64 bit float (d)\n Deafult: '>f'\n \n make_plots : boolean\n A boolean specifiying whether or not to make plots\n Deafult: False\n\n Returns\n -------\n 1 : all tests passed\n 0 : at least one test failed\n \n \"\"\"\n \n\n # SET TEST RESULT = 1 TO START\n TEST_RESULT = 1\n\n # %% ----------- TEST 1: 2D field XC FOM GRID FILE\n \n #%% 1a LOAD COMPACT\n tmpXC_c = read_llc_to_compact(llc_grid_dir, llc_lons_fname, llc=llc,\n filetype=llc_grid_filetype)\n tmpXC_f = read_llc_to_faces(llc_grid_dir, llc_lons_fname, llc=llc,\n filetype=llc_grid_filetype)\n tmpXC_t = read_llc_to_tiles(llc_grid_dir, llc_lons_fname, llc=llc,\n filetype=llc_grid_filetype)\n \n if make_plots:\n #plt.close('all')\n\n for f in range(1,6):\n plt.figure()\n plt.imshow(tmpXC_f[f]);plt.colorbar() \n \n plot_tiles(tmpXC_t)\n plt.draw()\n raw_input(\"Press Enter to continue...\")\n\n \n #%% 1b CONVERT COMPACT TO FACES, TILES\n tmpXC_cf = llc_compact_to_faces(tmpXC_c)\n tmpXC_ct = llc_compact_to_tiles(tmpXC_c)\n \n\n for f in range(1,6):\n tmp = np.unique(tmpXC_f[f] - tmpXC_cf[f])\n print ('unique diffs CF ', f, tmp)\n\n if len(tmp) != 1 or tmp[0] != 0:\n TEST_RESULT = 0\n print ('failed on 1b-1')\n return TEST_RESULT\n\n tmp = np.unique(tmpXC_ct - tmpXC_t)\n print ('unique diffs for CT ', tmp)\n\n if len(tmp) != 1 or tmp[0] != 0:\n TEST_RESULT = 0\n print ('failed on 1b-2')\n return TEST_RESULT\n \n \n \n #%% 1c CONVERT FACES TO TILES, COMPACT\n tmpXC_ft = llc_faces_to_tiles(tmpXC_f)\n tmpXC_fc = llc_faces_to_compact(tmpXC_f)\n \n # unique diff tests \n tmp = np.unique(tmpXC_t - tmpXC_ft)\n print ('unique diffs for FT ', tmp)\n\n if len(tmp) != 1 or tmp[0] != 0:\n TEST_RESULT = 0\n print ('failed on 1c-1')\n return TEST_RESULT\n \n \n tmp = np.unique(tmpXC_fc - 
tmpXC_c)\n print ('unique diffs FC', tmp )\n\n if len(tmp) != 1 or tmp[0] != 0:\n TEST_RESULT = 0\n print ('failed on 1c-2')\n return TEST_RESULT\n \n \n #%% 1d CONVERT TILES to FACES, COMPACT \n tmpXC_tf = llc_tiles_to_faces(tmpXC_t)\n tmpXC_tc = llc_tiles_to_compact(tmpXC_t)\n \n # unique diff tests \n for f in range(1,6):\n tmp = np.unique(tmpXC_f[f] - tmpXC_tf[f])\n print ('unique diffs for TF ', f, tmp)\n \n if len(tmp) != 1 or tmp[0] != 0:\n TEST_RESULT = 0\n print ('failed on 1d-1')\n return TEST_RESULT\n \n \n tmp = np.unique(tmpXC_tc - tmpXC_c)\n print ('unique diffs TC', tmp)\n \n if len(tmp) != 1 or tmp[0] != 0:\n TEST_RESULT = 0\n print ('failed on 1d-2')\n return TEST_RESULT\n \n \n #%% 1e CONVERT COMPACT TO FACES TO TILES TO FACES TO COMPACT\n \n tmpXC_cftfc = llc_faces_to_compact(llc_tiles_to_faces(llc_faces_to_tiles(llc_compact_to_faces(tmpXC_c))))\n tmp = np.unique(tmpXC_cftfc - tmpXC_c)\n \n print ('unique diffs CFTFC', tmp)\n if len(tmp) != 1 or tmp[0] != 0:\n TEST_RESULT = 0\n print ('failed on 1e')\n return TEST_RESULT\n \n \n # %% ----------- TEST 2: 3D fields HFACC FOM GRID FILE\n \n #%% 2a LOAD COMPACT\n tmpHF_c = read_llc_to_compact(llc_grid_dir, llc_hfacc_fname, llc=llc,nk=50,\n filetype=llc_grid_filetype)\n tmpHF_f = read_llc_to_faces(llc_grid_dir, llc_hfacc_fname, llc=llc, nk=50,\n filetype=llc_grid_filetype)\n tmpHF_t = read_llc_to_tiles(llc_grid_dir, llc_hfacc_fname, llc=llc, nk=50,\n filetype=llc_grid_filetype)\n \n tmpHF_c.shape\n \n if make_plots:\n #plt.close('all')\n plt.imshow(tmpHF_c[0,:]);plt.colorbar() \n plot_tiles(tmpHF_t[:,0,:])\n plot_tiles(tmpHF_t[:,20,:])\n plt.draw()\n raw_input(\"Press Enter to continue...\")\n \n #%% 2b CONVERT COMPACT TO FACES, TILES\n tmpHF_cf = llc_compact_to_faces(tmpHF_c)\n tmpHF_ct = llc_compact_to_tiles(tmpHF_c)\n \n # unique diff tests \n for f in range(1,6):\n tmp = np.unique(tmpHF_f[f] - tmpHF_cf[f])\n print ('unique diffs CF ', f, tmp)\n if len(tmp) != 1 or tmp[0] != 0:\n TEST_RESULT = 0\n print ('failed on 2b-1')\n return TEST_RESULT\n \n\n tmp = np.unique(tmpHF_ct - tmpHF_t)\n print ('unique diffs CT ', tmp)\n if len(tmp) != 1 or tmp[0] != 0:\n TEST_RESULT = 0\n print ('failed on 2b-2')\n return TEST_RESULT\n\n if make_plots: \n for k in [0, 20]:\n for f in range(1,6):\n plt.figure()\n plt.imshow(tmpHF_cf[f][k,:], origin='lower');plt.colorbar() \n plt.draw()\n raw_input(\"Press Enter to continue...\")\n\n #%% 2c CONVERT FACES TO TILES, COMPACT\n tmpHF_ft = llc_faces_to_tiles(tmpHF_f)\n tmpHF_fc = llc_faces_to_compact(tmpHF_f)\n \n if make_plots:\n #plt.close('all')\n plot_tiles(tmpHF_ft[:,0,:])\n plot_tiles(tmpHF_ft[:,20,:])\n plt.draw()\n raw_input(\"Press Enter to continue...\")\n\n # unique diff tests \n tmp = np.unique(tmpHF_t - tmpHF_ft)\n print ('unique diffs FT ', tmp)\n if len(tmp) != 1 or tmp[0] != 0:\n TEST_RESULT = 0\n print ('failed on 2c-1')\n return TEST_RESULT\n\n tmp = np.unique(tmpHF_fc - tmpHF_c)\n print ('unique diffs FC', tmp)\n if len(tmp) != 1 or tmp[0] != 0:\n TEST_RESULT = 0\n print ('failed on 2c-2')\n return TEST_RESULT\n\n \n #%% 2d CONVERT TILES to FACES, COMPACT \n tmpHF_tf = llc_tiles_to_faces(tmpHF_t)\n tmpHF_tc = llc_tiles_to_compact(tmpHF_t)\n \n if make_plots:\n #plt.close('all')\n for k in [0, 20]:\n for f in range(1,6):\n plt.figure()\n plt.imshow(tmpHF_tf[f][k,:], origin='lower');plt.colorbar() \n plt.draw()\n raw_input(\"Press Enter to continue...\")\n\n\n # unique diff tests \n for f in range(1,6):\n tmp = np.unique(tmpHF_f[f] - tmpHF_tf[f])\n print ('unique diffs TF ', f, 
tmp)\n if len(tmp) != 1 or tmp[0] != 0:\n TEST_RESULT = 0\n print ('failed on 2d-1')\n return TEST_RESULT\n \n tmp = np.unique(tmpHF_tc - tmpHF_c)\n print ('unique diffs TC ', tmp)\n if len(tmp) != 1 or tmp[0] != 0:\n TEST_RESULT = 0\n print ('failed on 2d-1')\n return TEST_RESULT\n \n \n #%% 2e CONVERT COMPACT TO FACES TO TILES TO FACES TO COMPACT\n \n tmpHF_cftfc = llc_faces_to_compact(llc_tiles_to_faces(\n llc_faces_to_tiles(llc_compact_to_faces(tmpHF_c))))\n\n tmp = np.unique(tmpHF_cftfc - tmpHF_c)\n\n print ('unique diffs CFTFC ', tmp)\n if len(tmp) != 1 or tmp[0] != 0:\n TEST_RESULT = 0\n print ('failed on 2e')\n return TEST_RESULT\n \n print ('YOU MADE IT THIS FAR, TESTS PASSED!')\n \n return TEST_RESULT\n\n\n\n\n####################### ###########################\n #%%\nif __name__== \"__main__\":\n\n import sys\n import matplotlib\n sys.path.append('/Users/ifenty/ECCOv4-py/')\n import ecco_v4_py as ecco\n import matplotlib.pylab as plt\n\n llc_grid_dir = '/Volumes/ECCO_BASE/ECCO_v4r3/grid_llc90/'\n llc_lons_fname='XC.data'\n llc_hfacc_fname='hFacC.data', \n llc=90, \n llc_grid_filetype = '>f', \n make_plots=False\n #%%\n TEST_RESULT = ecco.run_read_bin_and_llc_conversion_test(llc_grid_dir, make_plots=True)\n\n print(TEST_RESULT)\n\n\n\n" ]
[ [ "numpy.meshgrid", "numpy.linspace" ], [ "numpy.unique", "matplotlib.pylab.draw", "matplotlib.pylab.figure", "matplotlib.pylab.imshow", "matplotlib.pylab.colorbar" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tianzheng4/Distributionally-Adversarial-Attack
[ "f6d941b33cc50981b46d0ac40aa071bc25cf3a3e" ]
[ "convex_adversarial-master/examples/cifar.py" ]
[ "# import waitGPU\n# import setGPU\n# waitGPU.wait(utilization=20, available_memory=10000, interval=60)\n# waitGPU.wait(gpu_ids=[1,3], utilization=20, available_memory=10000, interval=60)\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torch.backends.cudnn as cudnn\ncudnn.benchmark = True\n\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport random\n \nimport setproctitle\n\nimport problems as pblm\nfrom trainer import *\n\nimport math\nimport numpy\n\ndef select_model(m): \n if m == 'large': \n # raise ValueError\n model = pblm.cifar_model_large().cuda()\n elif m == 'resnet': \n model = pblm.cifar_model_resnet(N=args.resnet_N, factor=args.resnet_factor).cuda()\n else: \n model = pblm.cifar_model().cuda() \n return model\n\nif __name__ == \"__main__\": \n args = pblm.argparser(epsilon = 0.0347, starting_epsilon=0.001, batch_size = 50, \n opt='sgd', lr=0.05)\n\n print(\"saving file to {}\".format(args.prefix))\n setproctitle.setproctitle(args.prefix)\n\n train_log = open(args.prefix + \"_train.log\", \"w\")\n test_log = open(args.prefix + \"_test.log\", \"w\")\n\n train_loader, test_loader = pblm.cifar_loaders(args.batch_size)\n\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed_all(args.seed)\n random.seed(0)\n numpy.random.seed(0)\n\n sampler_indices = []\n model = [select_model(args.model)]\n\n kwargs = pblm.args2kwargs(args)\n best_err = 1\n\n\n for _ in range(0,args.cascade): \n if _ > 0: \n # reduce dataset to just uncertified examples\n print(\"Reducing dataset...\")\n train_loader = sampler_robust_cascade(train_loader, model, args.epsilon, **kwargs)\n if train_loader is None: \n print('No more examples, terminating')\n break\n sampler_indices.append(train_loader.sampler.indices)\n\n print(\"Adding a new model\")\n model.append(select_model(args.model))\n \n if args.opt == 'adam': \n opt = optim.Adam(model[-1].parameters(), lr=args.lr)\n elif args.opt == 'sgd': \n opt = optim.SGD(model[-1].parameters(), lr=args.lr, \n momentum=args.momentum,\n weight_decay=args.weight_decay)\n else: \n raise ValueError(\"Unknown optimizer\")\n lr_scheduler = optim.lr_scheduler.StepLR(opt, step_size=10, gamma=0.5)\n eps_schedule = np.linspace(args.starting_epsilon, \n args.epsilon, \n args.schedule_length)\n\n for t in range(args.epochs):\n lr_scheduler.step(epoch=max(t-len(eps_schedule), 0))\n if t < len(eps_schedule) and args.starting_epsilon is not None: \n epsilon = float(eps_schedule[t])\n else:\n epsilon = args.epsilon\n\n\n # standard training\n if args.method == 'baseline': \n train_baseline(train_loader, model[0], opt, t, train_log,\n args.verbose)\n err = evaluate_baseline(test_loader, model[0], t, test_log,\n args.verbose)\n\n # madry training\n elif args.method=='madry':\n train_madry(train_loader, model[0], args.epsilon, \n opt, t, train_log, args.verbose)\n err = evaluate_madry(test_loader, model[0], args.epsilon, \n t, test_log, args.verbose)\n\n # robust cascade training\n elif args.cascade > 1: \n train_robust(train_loader, model[-1], opt, epsilon, t,\n train_log, args.verbose, args.real_time,\n l1_type=args.l1_train, bounded_input=False,\n clip_grad=1, **kwargs)\n err = evaluate_robust_cascade(test_loader, model,\n args.epsilon, t, test_log, args.verbose,\n l1_type=args.l1_test, bounded_input=False, \n **kwargs)\n\n # robust training\n else:\n train_robust(train_loader, model[0], opt, epsilon, t,\n train_log, args.verbose, 
args.real_time,\n l1_type=args.l1_train, bounded_input=False, clip_grad=1,\n **kwargs)\n err = evaluate_robust(test_loader, model[0], args.epsilon, t,\n test_log, args.verbose, args.real_time,\n l1_type=args.l1_test, bounded_input=False, \n **kwargs)\n \n if err < best_err: \n best_err = err\n torch.save({\n 'state_dict' : [m.state_dict() for m in model], \n 'err' : best_err,\n 'epoch' : t,\n 'sampler_indices' : sampler_indices\n }, args.prefix + \"_best.pth\")\n \n torch.save({ \n 'state_dict': [m.state_dict() for m in model],\n 'err' : err,\n 'epoch' : t,\n 'sampler_indices' : sampler_indices\n }, args.prefix + \"_checkpoint.pth\")\n" ]
[ [ "torch.manual_seed", "numpy.random.seed", "torch.cuda.manual_seed_all", "torch.optim.lr_scheduler.StepLR" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tsadakane/TIGRE
[ "a853cd2d4a6bc9509c01414b85ca75b4448fd700" ]
[ "Python/tigre/utilities/filtering.py" ]
[ "from __future__ import division\r\nfrom __future__ import print_function\r\nfrom numpy.core.arrayprint import dtype_is_implied\r\nfrom tigre.utilities.parkerweight import parkerweight\r\nimport numpy as np\r\nfrom scipy.fft import fft, ifft\r\n\r\nimport warnings\r\n\r\nimport numpy as np\r\nfrom tigre.utilities.parkerweight import parkerweight\r\n\r\n\r\n# TODO: Fix parker\r\ndef filtering(proj, geo, angles, parker, verbose=False):\r\n if parker:\r\n proj=parkerweight(proj.transpose(0,2,1),geo,angles,parker).transpose(0,2,1)\r\n\r\n filt_len=max(64,2**nextpow2(2*max(geo.nDetector)))\r\n ramp_kernel=ramp_flat(filt_len)\r\n\r\n d=1\r\n filt=filter(geo.filter,ramp_kernel[0],filt_len,d,verbose=verbose)\r\n filt=np.kron(np.ones((np.int64(geo.nDetector[0]),1),dtype=np.float32),filt)\r\n\r\n padding = int((filt_len-geo.nDetector[1])//2 )\r\n scale_factor = (geo.DSD[0]/geo.DSO[0]) * (2 * np.pi/ len(angles)) / ( 4 * geo.dDetector[0] ) \r\n\r\n #filter 2 projection at a time packing in to complex container\r\n fproj=np.empty((geo.nDetector[0],filt_len),dtype=np.complex64)\r\n for i in range(0,angles.shape[0]-1,2):\r\n fproj.fill(0)\r\n fproj.real[:,padding:padding+geo.nDetector[1]]=proj[i]\r\n fproj.imag[:,padding:padding+geo.nDetector[1]]=proj[i+1]\r\n\r\n fproj=fft(fproj,axis=1)\r\n fproj=fproj*filt\r\n fproj=ifft(fproj,axis=1)\r\n\r\n proj[i]=fproj.real[:,padding:padding+geo.nDetector[1]] * scale_factor\r\n proj[i+1]=fproj.imag[:,padding:padding+geo.nDetector[1]] * scale_factor\r\n\r\n #if odd number of projections filter last solo\r\n if angles.shape[0] % 2:\r\n fproj.fill(0)\r\n fproj.real[:,padding:padding+geo.nDetector[1]]=proj[angles.shape[0]-1]\r\n\r\n fproj=fft(fproj,axis=1)\r\n fproj=fproj*filt\r\n fproj=np.real(ifft(fproj,axis=1)) \r\n proj[angles.shape[0]-1]=fproj[:,padding:padding+geo.nDetector[1]] * scale_factor\r\n\r\n return proj\r\n\r\n\r\ndef ramp_flat(n, verbose=False):\r\n nn = np.arange(-n / 2, n / 2)\r\n h = np.zeros(nn.shape, dtype=np.float32)\r\n h[int(n / 2)] = 1 / 4\r\n odd = nn % 2 == 1\r\n h[odd] = -1 / (np.pi * nn[odd]) ** 2\r\n return h, nn\r\n\r\n\r\ndef filter(filter, kernel, order, d, verbose=False):\r\n f_kernel = abs(np.fft.fft(kernel)) * 2\r\n\r\n filt = f_kernel[: int((order / 2) + 1)]\r\n w = 2 * np.pi * np.arange(len(filt)) / order\r\n\r\n if filter in {\"ram_lak\", None}:\r\n if filter is None and verbose:\r\n warnings.warn(\"no filter selected, using default ram_lak\")\r\n elif filter == \"shepp_logan\":\r\n filt[1:] *= np.sin(w[1:] / (2 * d)) / (w[1:] / (2 * d))\r\n elif filter == \"cosine\":\r\n filt[1:] *= np.cos(w[1:] / (2 * d))\r\n elif filter == \"hamming\":\r\n filt[1:] *= 0.54 + 0.46 * np.cos(w[1:] / d)\r\n elif filter == \"hann\":\r\n filt[1:] *= (1 + np.cos(w[1:]) / d) / 2\r\n else:\r\n raise ValueError(\"filter not recognised: \" + str(filter))\r\n\r\n filt[w > np.pi * d] = 0\r\n filt = np.hstack((filt, filt[1:-1][::-1]))\r\n return filt.astype(np.float32)\r\n\r\n\r\ndef nextpow2(n):\r\n i = 1\r\n while (2 ** i) < n:\r\n i += 1\r\n return i\r\n" ]
[ [ "numpy.hstack", "scipy.fft.ifft", "numpy.fft.fft", "numpy.arange", "numpy.cos", "numpy.empty", "numpy.sin", "numpy.int64", "numpy.zeros", "scipy.fft.fft" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.4", "1.9", "1.5", "1.7", "1.8" ], "tensorflow": [] } ]
BrookInternSOMA/UNIT_tensorflow
[ "4d7430a6f0bd3bea72d821e14db6e6442c02ed32" ]
[ "ops.py" ]
[ "import tensorflow as tf\nimport tensorflow.contrib as tf_contrib\nfrom tensorflow.contrib.layers import variance_scaling_initializer as he_init\n\ndef conv(x, channels, kernel=3, stride=2, pad=0, normal_weight_init=False, activation_fn='leaky', scope='conv_0') :\n with tf.variable_scope(scope) :\n x = tf.pad(x, [[0,0], [pad, pad], [pad, pad], [0,0]])\n\n if normal_weight_init :\n x = tf.layers.conv2d(inputs=x, filters=channels, kernel_size=kernel, kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),\n strides=stride, kernel_regularizer=tf_contrib.layers.l2_regularizer(scale=0.0001))\n\n else :\n if activation_fn == 'relu' :\n x = tf.layers.conv2d(inputs=x, filters=channels, kernel_size=kernel, kernel_initializer=he_init(), strides=stride,\n kernel_regularizer=tf_contrib.layers.l2_regularizer(scale=0.0001))\n else :\n x = tf.layers.conv2d(inputs=x, filters=channels, kernel_size=kernel, strides=stride,\n kernel_regularizer=tf_contrib.layers.l2_regularizer(scale=0.0001))\n\n\n x = activation(x, activation_fn)\n\n return x\n\ndef deconv(x, channels, kernel=3, stride=2, normal_weight_init=False, activation_fn='leaky', scope='deconv_0') :\n with tf.variable_scope(scope):\n if normal_weight_init:\n x = tf.layers.conv2d_transpose(inputs=x, filters=channels, kernel_size=kernel,\n kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),\n strides=stride, padding='SAME', kernel_regularizer=tf_contrib.layers.l2_regularizer(scale=0.0001))\n\n else:\n if activation_fn == 'relu' :\n x = tf.layers.conv2d_transpose(inputs=x, filters=channels, kernel_size=kernel, kernel_initializer=he_init(), strides=stride, padding='SAME',\n kernel_regularizer=tf_contrib.layers.l2_regularizer(scale=0.0001))\n else :\n x = tf.layers.conv2d_transpose(inputs=x, filters=channels, kernel_size=kernel, strides=stride, padding='SAME',\n kernel_regularizer=tf_contrib.layers.l2_regularizer(scale=0.0001))\n\n x = activation(x, activation_fn)\n\n return x\n\ndef resblock(x_init, channels, kernel=3, stride=1, pad=1, dropout_ratio=0.0, normal_weight_init=False, is_training=True, norm_fn='instance', scope='resblock_0') :\n assert norm_fn in ['instance', 'batch', 'weight', 'spectral', None]\n with tf.variable_scope(scope) :\n with tf.variable_scope('res1') :\n x = tf.pad(x_init, [[0, 0], [pad, pad], [pad, pad], [0, 0]])\n\n if normal_weight_init :\n x = tf.layers.conv2d(inputs=x, filters=channels, kernel_size=kernel,\n kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),\n strides=stride, kernel_regularizer=tf_contrib.layers.l2_regularizer(scale=0.0001))\n else :\n x = tf.layers.conv2d(inputs=x, filters=channels, kernel_size=kernel, kernel_initializer=he_init(),\n strides=stride, kernel_regularizer=tf_contrib.layers.l2_regularizer(scale=0.0001))\n\n if norm_fn == 'instance' :\n x = instance_norm(x, 'res1_instance')\n if norm_fn == 'batch' :\n x = batch_norm(x, is_training, 'res1_batch')\n\n x = relu(x)\n with tf.variable_scope('res2') :\n x = tf.pad(x, [[0, 0], [pad, pad], [pad, pad], [0, 0]])\n\n if normal_weight_init :\n x = tf.layers.conv2d(inputs=x, filters=channels, kernel_size=kernel,\n kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),\n strides=stride, kernel_regularizer=tf_contrib.layers.l2_regularizer(scale=0.0001))\n else :\n x = tf.layers.conv2d(inputs=x, filters=channels, kernel_size=kernel, strides=stride,\n kernel_regularizer=tf_contrib.layers.l2_regularizer(scale=0.0001))\n\n if norm_fn == 'instance' :\n x = instance_norm(x, 'res2_instance')\n if norm_fn == 'batch' :\n x = 
batch_norm(x, is_training, 'res2_batch')\n\n if dropout_ratio > 0.0 :\n x = tf.layers.dropout(x, rate=dropout_ratio, training=is_training)\n\n return x + x_init\n\ndef activation(x, activation_fn='leaky') :\n assert activation_fn in ['relu', 'leaky', 'tanh', 'sigmoid', 'swish', None]\n if activation_fn == 'leaky':\n x = lrelu(x)\n\n if activation_fn == 'relu':\n x = relu(x)\n\n if activation_fn == 'sigmoid':\n x = sigmoid(x)\n\n if activation_fn == 'tanh' :\n x = tanh(x)\n\n if activation_fn == 'swish' :\n x = swish(x)\n\n return x\n\ndef lrelu(x, alpha=0.01) :\n # pytorch alpha is 0.01\n return tf.nn.leaky_relu(x, alpha)\n\ndef relu(x) :\n return tf.nn.relu(x)\n\ndef sigmoid(x) :\n return tf.sigmoid(x)\n\ndef tanh(x) :\n return tf.tanh(x)\n\ndef swish(x) :\n return x * sigmoid(x)\n\ndef batch_norm(x, is_training=False, scope='batch_nom') :\n return tf_contrib.layers.batch_norm(x,\n decay=0.9, epsilon=1e-05,\n center=True, scale=True, updates_collections=None,\n is_training=is_training, scope=scope)\n\ndef instance_norm(x, scope='instance') :\n return tf_contrib.layers.instance_norm(x,\n epsilon=1e-05,\n center=True, scale=True,\n scope=scope)\n\ndef gaussian_noise_layer(mu):\n sigma = 1.0\n gaussian_random_vector = tf.random_normal(shape=tf.shape(mu), mean=0.0, stddev=1.0, dtype=tf.float32)\n return mu + sigma * gaussian_random_vector\n\ndef KL_divergence(mu) :\n # KL_divergence = 0.5 * tf.reduce_sum(tf.square(mu) + tf.square(sigma) - tf.log(1e-8 + tf.square(sigma)) - 1, axis = -1)\n # loss = tf.reduce_mean(KL_divergence)\n mu_2 = tf.square(mu)\n loss = tf.reduce_mean(mu_2)\n\n return loss\n\ndef L1_loss(x, y) :\n loss = tf.reduce_mean(tf.abs(x - y))\n return loss\n\ndef discriminator_loss(real, fake, smoothing=False, use_lasgan=False) :\n if use_lasgan :\n if smoothing :\n real_loss = tf.reduce_mean(tf.squared_difference(real, 0.9)) * 0.5\n else :\n real_loss = tf.reduce_mean(tf.squared_difference(real, 1.0)) * 0.5\n\n fake_loss = tf.reduce_mean(tf.square(fake)) * 0.5\n else :\n if smoothing :\n real_labels = tf.fill(tf.shape(real), 0.9)\n else :\n real_labels = tf.ones_like(real)\n\n fake_labels = tf.zeros_like(fake)\n\n real_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=real_labels, logits=real))\n fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=fake_labels, logits=fake))\n\n loss = real_loss + fake_loss\n\n return loss\n\ndef generator_loss(fake, smoothing=False, use_lsgan=False) :\n if use_lsgan :\n if smoothing :\n loss = tf.reduce_mean(tf.squared_difference(fake, 0.9)) * 0.5\n else :\n loss = tf.reduce_mean(tf.squared_difference(fake, 1.0)) * 0.5\n else :\n if smoothing :\n fake_labels = tf.fill(tf.shape(fake), 0.9)\n else :\n fake_labels = tf.ones_like(fake)\n\n loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=fake_labels, logits=fake))\n\n return loss\n\n" ]
[ [ "tensorflow.layers.dropout", "tensorflow.nn.sigmoid_cross_entropy_with_logits", "tensorflow.tanh", "tensorflow.pad", "tensorflow.contrib.layers.variance_scaling_initializer", "tensorflow.truncated_normal_initializer", "tensorflow.square", "tensorflow.shape", "tensorflow.zeros_like", "tensorflow.contrib.layers.batch_norm", "tensorflow.contrib.layers.instance_norm", "tensorflow.nn.leaky_relu", "tensorflow.nn.relu", "tensorflow.reduce_mean", "tensorflow.sigmoid", "tensorflow.ones_like", "tensorflow.contrib.layers.l2_regularizer", "tensorflow.variable_scope", "tensorflow.squared_difference", "tensorflow.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
danielkelshaw/PySwallow
[ "262acd899937715059876e059e1edf2eabae15c0" ]
[ "tests/swallows/test_mo_swallow.py" ]
[ "import numpy as np\nimport pytest\n\nimport pyswallow as ps\nimport pyswallow.handlers.boundary_handler as psbh\n\n\nclass TestMOSwallow:\n\n @pytest.fixture\n def swallow(self):\n bounds = {\n 'x0': [-50.0, 50.0],\n 'x1': [-50.0, 50.0]\n }\n\n swallow = ps.MOSwallow(bounds, n_obj=2)\n return swallow\n\n @pytest.fixture\n def opp_swallow(self):\n bounds = {\n 'x0': [-50.0, 50.0],\n 'x1': [-50.0, 50.0]\n }\n\n opp_swallow = ps.MOSwallow(bounds, n_obj=2)\n return opp_swallow\n\n def test_move(self, swallow):\n swallow.position = np.array([0.0, 0.0])\n swallow.velocity = np.array([10.0, 10.0])\n\n bh = psbh.StandardBH()\n swallow.move(bh)\n\n assert np.array_equal(swallow.position, swallow.velocity)\n\n def test_dominate(self, swallow, opp_swallow):\n opp_swallow.fitness = [50.0, 50.0]\n swallow.fitness = [5.0, 5.0]\n\n ret_bool = swallow.dominate(opp_swallow)\n\n assert ret_bool\n\n def test_self_dominate(self, swallow):\n swallow.fitness = [5.0, 5.0]\n swallow.pbest_fitness = [50.0, 50.0]\n\n ret_bool = swallow.self_dominate()\n\n assert ret_bool\n" ]
[ [ "numpy.array", "numpy.array_equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
alexgaskell10/encoded_kge
[ "2959c058125515a3e0e0b811ffe8086d6699006c", "2959c058125515a3e0e0b811ffe8086d6699006c" ]
[ "kge/model/complex.py", "kge/util/dump.py" ]
[ "import torch\nfrom kge import Config, Dataset\nfrom kge.model.kge_model import RelationalScorer, KgeModel\n\n\nclass ComplExScorer(RelationalScorer):\n r\"\"\"Implementation of the ComplEx KGE scorer.\n\n Reference: Théo Trouillon, Johannes Welbl, Sebastian Riedel, Éric Gaussier and\n Guillaume Bouchard: Complex Embeddings for Simple Link Prediction. ICML 2016.\n `<http://proceedings.mlr.press/v48/trouillon16.pdf>`_\n\n \"\"\"\n\n def __init__(self, config: Config, dataset: Dataset, configuration_key=None):\n super().__init__(config, dataset, configuration_key)\n\n def score_emb(self, s_emb, p_emb, o_emb, combine: str):\n n = p_emb.size(0)\n\n # Here we use a fast implementation of computing the ComplEx scores using\n # Hadamard products, as in Eq. (11) of paper.\n #\n # Split the relation and object embeddings into real part (first half) and\n # imaginary part (second half).\n p_emb_re, p_emb_im = (t.contiguous() for t in p_emb.chunk(2, dim=1))\n o_emb_re, o_emb_im = (t.contiguous() for t in o_emb.chunk(2, dim=1))\n\n # combine them again to create a column block for each required combination\n s_all = torch.cat((s_emb, s_emb), dim=1) # re, im, re, im\n r_all = torch.cat((p_emb_re, p_emb, -p_emb_im), dim=1) # re, re, im, -im\n o_all = torch.cat((o_emb, o_emb_im, o_emb_re), dim=1) # re, im, im, re\n\n if combine == \"spo\":\n out = (s_all * o_all * r_all).sum(dim=1)\n elif combine == \"sp_\":\n out = (s_all * r_all).mm(o_all.transpose(0, 1))\n elif combine == \"_po\":\n out = (r_all * o_all).mm(s_all.transpose(0, 1))\n else:\n return super().score_emb(s_emb, p_emb, o_emb, combine)\n\n return out.view(n, -1)\n\n\nclass ComplEx(KgeModel):\n r\"\"\"Implementation of the ComplEx KGE model.\"\"\"\n\n def __init__(\n self,\n config: Config,\n dataset: Dataset,\n configuration_key=None,\n init_for_load_only=False,\n ):\n super().__init__(\n config=config,\n dataset=dataset,\n scorer=ComplExScorer,\n configuration_key=configuration_key,\n init_for_load_only=init_for_load_only,\n )\n", "import os\nfrom collections import OrderedDict\nimport sys\nimport torch\nimport csv\nimport yaml\nimport socket\nimport copy\n\nfrom kge.job import Trace\nfrom kge import Config\n\n\n## EXPORTED METHODS #####################################################################\n\n\ndef add_dump_parsers(subparsers):\n # 'kge dump' can have associated sub-commands which can have different args\n parser_dump = subparsers.add_parser(\"dump\", help=\"Dump objects to stdout\")\n subparsers_dump = parser_dump.add_subparsers(\n title=\"dump_command\", dest=\"dump_command\"\n )\n subparsers_dump.required = True\n _add_dump_trace_parser(subparsers_dump)\n _add_dump_checkpoint_parser(subparsers_dump)\n _add_dump_config_parser(subparsers_dump)\n\n\ndef dump(args):\n \"\"\"Execute the 'kge dump' commands. 
\"\"\"\n if args.dump_command == \"trace\":\n _dump_trace(args)\n elif args.dump_command == \"checkpoint\":\n _dump_checkpoint(args)\n elif args.dump_command == \"config\":\n _dump_config(args)\n else:\n raise ValueError()\n\n\ndef get_config_for_job_id(job_id, folder_path):\n config = Config(load_default=True)\n if job_id:\n config_path = os.path.join(\n folder_path, \"config\", job_id.split(\"-\")[0] + \".yaml\"\n )\n else:\n config_path = os.path.join(folder_path, \"config.yaml\")\n if os.path.isfile(config_path):\n config.load(config_path, create=True)\n else:\n raise Exception(\"Could not find config file for {}\".format(job_id))\n return config\n\n\n### DUMP CHECKPOINT #####################################################################\n\n\ndef _add_dump_checkpoint_parser(subparsers_dump):\n parser_dump_checkpoint = subparsers_dump.add_parser(\n \"checkpoint\", help=(\"Dump information stored in a checkpoint\")\n )\n parser_dump_checkpoint.add_argument(\n \"source\",\n help=\"A path to either a checkpoint or a job folder (then uses best or, \"\n \"if not present, last checkpoint).\",\n nargs=\"?\",\n default=\".\",\n )\n parser_dump_checkpoint.add_argument(\n \"--keys\",\n \"-k\",\n type=str,\n nargs=\"*\",\n help=\"List of keys to include (separated by space)\",\n )\n\n\ndef _dump_checkpoint(args):\n \"\"\"Execute the 'dump checkpoint' command.\"\"\"\n\n # Determine checkpoint to use\n if os.path.isfile(args.source):\n checkpoint_file = args.source\n else:\n checkpoint_file = Config.best_or_last_checkpoint_file(args.source)\n\n # Load the checkpoint and strip some fieleds\n checkpoint = torch.load(checkpoint_file, map_location=\"cpu\")\n\n # Dump it\n print(f\"# Dump of checkpoint: {checkpoint_file}\")\n print(f\"parameter_names: {list(checkpoint['model'][0].keys())}\")\n excluded_keys = {\"model\", \"optimizer_state_dict\"}\n if args.keys is not None:\n excluded_keys = {key for key in excluded_keys if key not in args.keys}\n excluded_keys = excluded_keys.union(\n {key for key in checkpoint if key not in args.keys}\n )\n excluded_keys = {key for key in excluded_keys if key in checkpoint}\n for key in excluded_keys:\n del checkpoint[key]\n if excluded_keys:\n print(f\"# Excluded keys: {excluded_keys}\")\n yaml.dump(checkpoint, sys.stdout)\n\n\n### DUMP TRACE ##########################################################################\n\n\ndef _add_dump_trace_parser(subparsers_dump):\n parser_dump_trace = subparsers_dump.add_parser(\n \"trace\",\n help=(\n \"Dump the trace of a job to stdout as CSV (default) or YAML. The tracefile\"\n \" is processed backwards starting from the last entry. 
Further options\"\n \" allow to start processing from a particular checkpoint, job_id, or\"\n \" epoch number.\"\n ),\n )\n parser_dump_trace.add_argument(\n \"source\",\n help=\"A path to either a checkpoint or a job folder.\",\n nargs=\"?\",\n default=\".\",\n )\n parser_dump_trace.add_argument(\n \"--train\",\n action=\"store_const\",\n const=True,\n default=False,\n help=(\n \"Include entries from training jobs (enabled when none of --train, --valid,\"\n \" or --test is specified).\"\n ),\n )\n parser_dump_trace.add_argument(\n \"--valid\",\n action=\"store_const\",\n const=True,\n default=False,\n help=(\n \"Include entries from validation or evaluation jobs on the valid split\"\n \" (enabled when none of --train, --valid, or --test is specified).\"\n ),\n )\n parser_dump_trace.add_argument(\n \"--test\",\n action=\"store_const\",\n const=True,\n default=False,\n help=(\n \"Include entries from evaluation on the test data split (enabled when \"\n \" none of --train, --valid, or --test is specified).\"\n ),\n )\n parser_dump_trace.add_argument(\n \"--search\",\n action=\"store_const\",\n const=True,\n default=False,\n help=(\n \"Dump the tracefile of a search job. The best result of every \"\n \" search trial is dumped. The options --train, --valid, --test,\"\n \" --truncate, --job_id, --checkpoint, --batch, and --example are not\"\n \" applicable.\"\n ),\n )\n parser_dump_trace.add_argument(\n \"--keysfile\",\n default=False,\n help=(\n \"A path to a file which contains lines in the format\"\n \" 'new_key_name'='key_name'. For every line in the keys file, the command\"\n \" searches the value of 'key_name' in the trace entries (first) and\"\n \" config (second) and adds a respective column in the CSV file with name\"\n \" 'new_key_name'. Additionally, for 'key_name' the special keys '$folder',\"\n \" '$machine' '$checkpoint' and '$base_model' can be used.\"\n ),\n )\n parser_dump_trace.add_argument(\n \"--keys\",\n \"-k\",\n nargs=\"*\",\n type=str,\n help=(\n \"A list of 'key' entries (separated by space). Each 'key' has form\"\n \" 'new_key_name=key_name' or 'key_name'. This adds a column as in the\"\n \" --keysfile option. When only 'key_name' is provided, it is also used as\"\n \" the column name in the CSV file.\"\n ),\n )\n parser_dump_trace.add_argument(\n \"--checkpoint\",\n default=False,\n action=\"store_const\",\n const=True,\n help=(\n \"If source is a path to a job folder and --checkpoint is set, the best\"\n \" (if present) or last checkpoint is used to determine the job_id from\"\n \" where the tracefile is processed backwards.\"\n ),\n )\n parser_dump_trace.add_argument(\n \"--job_id\",\n default=False,\n help=(\n \"Specifies the training job_id in the tracefile from where to start\"\n \" processing backwards when no checkpoint is specified. If not provided,\"\n \" the job_id of the last training job entry in the tracefile is used.\"\n ),\n )\n parser_dump_trace.add_argument(\n \"--truncate\",\n action=\"store\",\n default=False,\n const=True,\n nargs=\"?\",\n help=(\n \"Takes an integer argument which defines the maximum epoch number from\"\n \" where the tracefile is processed backwards. If not provided, all epochs\"\n \" are included (the epoch number can still be bounded by a specified\"\n \" job_id or checkpoint). 
When a checkpoint is specified, (by providing one\"\n \" explicitly as source or by using --checkpoint), --truncate can\"\n \" additionally be enabled without an argument which sets the maximum epoch\"\n \" number to the epoch provided by the checkpoint.\"\n ),\n )\n parser_dump_trace.add_argument(\n \"--yaml\",\n action=\"store_const\",\n const=True,\n default=False,\n help=\"Dump YAML instead of CSV.\",\n )\n parser_dump_trace.add_argument(\n \"--batch\",\n action=\"store_const\",\n const=True,\n default=False,\n help=\"Include entries on batch level.\",\n )\n parser_dump_trace.add_argument(\n \"--example\",\n action=\"store_const\",\n const=True,\n default=False,\n help=\"Include entries on example level.\",\n )\n parser_dump_trace.add_argument(\n \"--no-header\",\n action=\"store_const\",\n const=True,\n default=False,\n help=\"Exclude column names (header) from the CSV file.\",\n )\n parser_dump_trace.add_argument(\n \"--no-default-keys\",\n \"-K\",\n action=\"store_const\",\n const=True,\n default=False,\n help=\"Exclude default keys from the CSV file.\",\n )\n parser_dump_trace.add_argument(\n \"--list-keys\",\n action=\"store\",\n const=True,\n default=False,\n nargs=\"?\",\n help=\"Output the CSV default keys and all usable keys for --keysfile and --keys\"\n \" for the given configuration of options. Takes an optional string argument\"\n \" which separates the listed keys (default comma), e.g. use $'\\\\n' to display\"\n \" every key on a new line.\",\n )\n\n\ndef _dump_trace(args):\n \"\"\"Execute the 'dump trace' command.\"\"\"\n if (\n args.train\n or args.valid\n or args.test\n or args.truncate\n or args.job_id\n or args.checkpoint\n or args.batch\n or args.example\n ) and args.search:\n sys.exit(\n \"--search and any of --train, --valid, --test, --truncate, --job_id,\"\n \" --checkpoint, --batch, --example are mutually exclusive\"\n )\n\n entry_type_specified = True\n if not (args.train or args.valid or args.test or args.search):\n entry_type_specified = False\n args.train = True\n args.valid = True\n args.test = True\n\n truncate_flag = False\n truncate_epoch = None\n if isinstance(args.truncate, bool) and args.truncate:\n truncate_flag = True\n elif not isinstance(args.truncate, bool):\n if not args.truncate.isdigit():\n sys.exit(\"Integer argument or no argument for --truncate must be used\")\n truncate_epoch = int(args.truncate)\n\n checkpoint_path = None\n if \".pt\" in os.path.split(args.source)[-1]:\n checkpoint_path = args.source\n folder_path = os.path.split(args.source)[0]\n else:\n # determine job_id and epoch from last/best checkpoint automatically\n if args.checkpoint:\n checkpoint_path = Config.best_or_last_checkpoint_file(args.source)\n folder_path = args.source\n if not checkpoint_path and truncate_flag:\n sys.exit(\n \"--truncate can only be used as a flag when a checkpoint is specified.\"\n \" Consider specifying a checkpoint or use an integer argument for the\"\n \" --truncate option\"\n )\n if checkpoint_path and args.job_id:\n sys.exit(\n \"--job_id cannot be used together with a checkpoint as the checkpoint\"\n \" already specifies the job_id\"\n )\n trace = os.path.join(folder_path, \"trace.yaml\")\n if not os.path.isfile(trace):\n sys.exit(f\"No file 'trace.yaml' found at {os.path.abspath(folder_path)}\")\n\n # process additional keys from --keys and --keysfile\n keymap = OrderedDict()\n additional_keys = []\n if args.keysfile:\n with open(args.keysfile, \"r\") as keyfile:\n additional_keys = keyfile.readlines()\n if args.keys:\n additional_keys += 
args.keys\n for line in additional_keys:\n line = line.rstrip(\"\\n\").replace(\" \", \"\")\n name_key = line.split(\"=\")\n if len(name_key) == 1:\n name_key += name_key\n keymap[name_key[0]] = name_key[1]\n\n job_id = None\n # use job_id and truncate_epoch from checkpoint\n if checkpoint_path and truncate_flag:\n checkpoint = torch.load(f=checkpoint_path, map_location=\"cpu\")\n job_id = checkpoint[\"job_id\"]\n truncate_epoch = checkpoint[\"epoch\"]\n # only use job_id from checkpoint\n elif checkpoint_path:\n checkpoint = torch.load(f=checkpoint_path, map_location=\"cpu\")\n job_id = checkpoint[\"job_id\"]\n # no checkpoint specified job_id might have been set manually\n elif args.job_id:\n job_id = args.job_id\n # don't restrict epoch number in case it has not been specified yet\n if not truncate_epoch:\n truncate_epoch = float(\"inf\")\n\n entries, job_epochs = [], {}\n if not args.search:\n entries, job_epochs = Trace.grep_training_trace_entries(\n tracefile=trace,\n train=args.train,\n test=args.test,\n valid=args.valid,\n example=args.example,\n batch=args.batch,\n job_id=job_id,\n epoch_of_last=truncate_epoch,\n )\n if not entries and (args.search or not entry_type_specified):\n entries = Trace.grep_entries(tracefile=trace, conjunctions=[f\"scope: train\"])\n truncate_epoch = None\n if entries:\n args.search = True\n if not entries and entry_type_specified:\n sys.exit(\n \"No relevant trace entries found. If this was a trace from a search\"\n \" job, dont use any of --train --valid --test.\"\n )\n elif not entries:\n sys.exit(\"No relevant trace entries found.\")\n\n if args.list_keys:\n all_trace_keys = set()\n\n if not args.yaml:\n csv_writer = csv.writer(sys.stdout)\n # dict[new_name] = (lookup_name, where)\n # if where==\"config\"/\"trace\" it will be looked up automatically\n # if where==\"sep\" it must be added in in the write loop separately\n if args.no_default_keys:\n default_attributes = OrderedDict()\n else:\n default_attributes = OrderedDict(\n [\n (\"job_id\", (\"job_id\", \"sep\")),\n (\"dataset\", (\"dataset.name\", \"config\")),\n (\"model\", (\"model\", \"sep\")),\n (\"reciprocal\", (\"reciprocal\", \"sep\")),\n (\"job\", (\"job\", \"sep\")),\n (\"job_type\", (\"type\", \"trace\")),\n (\"split\", (\"split\", \"sep\")),\n (\"epoch\", (\"epoch\", \"trace\")),\n (\"avg_loss\", (\"avg_loss\", \"trace\")),\n (\"avg_penalty\", (\"avg_penalty\", \"trace\")),\n (\"avg_cost\", (\"avg_cost\", \"trace\")),\n (\"metric_name\", (\"valid.metric\", \"config\")),\n (\"metric\", (\"metric\", \"sep\")),\n ]\n )\n if args.search:\n default_attributes[\"child_folder\"] = (\"folder\", \"trace\")\n default_attributes[\"child_job_id\"] = (\"child_job_id\", \"sep\")\n\n if not (args.no_header or args.list_keys):\n csv_writer.writerow(\n list(default_attributes.keys()) + [key for key in keymap.keys()]\n )\n # store configs for job_id's s.t. 
they need to be loaded only once\n configs = {}\n warning_shown = False\n for entry in entries:\n current_epoch = entry.get(\"epoch\")\n job_type = entry.get(\"job\")\n job_id = entry.get(\"job_id\")\n if truncate_epoch and not current_epoch <= float(truncate_epoch):\n continue\n # filter out entries not relevant to the unique training sequence determined\n # by the options; not relevant for search\n if job_type == \"train\":\n if current_epoch > job_epochs[job_id]:\n continue\n elif job_type == \"eval\":\n if \"resumed_from_job_id\" in entry:\n if current_epoch > job_epochs[entry.get(\"resumed_from_job_id\")]:\n continue\n elif \"parent_job_id\" in entry:\n if current_epoch > job_epochs[entry.get(\"parent_job_id\")]:\n continue\n # find relevant config file\n child_job_id = entry.get(\"child_job_id\") if \"child_job_id\" in entry else None\n config_key = (\n entry.get(\"folder\") + \"/\" + str(child_job_id) if args.search else job_id\n )\n if config_key in configs.keys():\n config = configs[config_key]\n else:\n if args.search:\n if not child_job_id and not warning_shown:\n # This warning is from Dec 19, 2019. TODO remove\n print(\n \"Warning: You are dumping the trace of an older search job. \"\n \"This is fine only if \"\n \"the config.yaml files in each subfolder have not been modified \"\n \"after running the corresponding training job.\",\n file=sys.stderr,\n )\n warning_shown = True\n config = get_config_for_job_id(\n child_job_id, os.path.join(folder_path, entry.get(\"folder\"))\n )\n entry[\"type\"] = config.get(\"train.type\")\n else:\n config = get_config_for_job_id(job_id, folder_path)\n configs[config_key] = config\n if args.list_keys:\n all_trace_keys.update(entry.keys())\n continue\n new_attributes = OrderedDict()\n # when training was reciprocal, use the base_model as model\n if config.get_default(\"model\") == \"reciprocal_relations_model\":\n model = config.get_default(\"reciprocal_relations_model.base_model.type\")\n # the string that substitutes $base_model in keymap if it exists\n subs_model = \"reciprocal_relations_model.base_model\"\n reciprocal = 1\n else:\n model = config.get_default(\"model\")\n subs_model = model\n reciprocal = 0\n # search for the additional keys from --keys and --keysfile\n for new_key in keymap.keys():\n lookup = keymap[new_key]\n # search for special keys\n value = None\n if lookup == \"$folder\":\n value = os.path.abspath(folder_path)\n elif lookup == \"$checkpoint\" and checkpoint_path:\n value = os.path.abspath(checkpoint_path)\n elif lookup == \"$machine\":\n value = socket.gethostname()\n if \"$base_model\" in lookup:\n lookup = lookup.replace(\"$base_model\", subs_model)\n # search for ordinary keys; start searching in trace entry then config\n if not value:\n value = entry.get(lookup)\n if not value:\n try:\n value = config.get_default(lookup)\n except:\n pass # value stays None; creates empty field in csv\n if value and isinstance(value, bool):\n value = 1\n elif not value and isinstance(value, bool):\n value = 0\n new_attributes[new_key] = value\n if not args.yaml:\n # find the actual values for the default attributes\n actual_default = default_attributes.copy()\n for new_key in default_attributes.keys():\n lookup, where = default_attributes[new_key]\n if where == \"config\":\n actual_default[new_key] = config.get(lookup)\n elif where == \"trace\":\n actual_default[new_key] = entry.get(lookup)\n # keys with separate treatment\n # \"split\" in {train,test,valid} for the datatype\n # \"job\" in {train,eval,valid,search}\n if job_type 
== \"train\":\n if \"split\" in entry:\n actual_default[\"split\"] = entry.get(\"split\")\n else:\n actual_default[\"split\"] = \"train\"\n actual_default[\"job\"] = \"train\"\n elif job_type == \"eval\":\n if \"split\" in entry:\n actual_default[\"split\"] = entry.get(\"split\") # test or valid\n else:\n # deprecated\n actual_default[\"split\"] = entry.get(\"data\") # test or valid\n if entry.get(\"resumed_from_job_id\"):\n actual_default[\"job\"] = \"eval\" # from \"kge eval\"\n else:\n actual_default[\"job\"] = \"valid\" # child of training job\n else:\n actual_default[\"job\"] = job_type\n if \"split\" in entry:\n actual_default[\"split\"] = entry.get(\"split\")\n else:\n # deprecated\n actual_default[\"split\"] = entry.get(\"data\") # test or valid\n actual_default[\"job_id\"] = job_id.split(\"-\")[0]\n actual_default[\"model\"] = model\n actual_default[\"reciprocal\"] = reciprocal\n # lookup name is in config value is in trace\n actual_default[\"metric\"] = entry.get(config.get_default(\"valid.metric\"))\n if args.search:\n actual_default[\"child_job_id\"] = entry.get(\"child_job_id\").split(\"-\")[0]\n for key in list(actual_default.keys()):\n if key not in default_attributes:\n del actual_default[key]\n csv_writer.writerow(\n [actual_default[new_key] for new_key in actual_default.keys()]\n + [new_attributes[new_key] for new_key in new_attributes.keys()]\n )\n else:\n entry.update({\"reciprocal\": reciprocal, \"model\": model})\n if keymap:\n entry.update(new_attributes)\n print(entry)\n if args.list_keys:\n # only one config needed\n config = configs[list(configs.keys())[0]]\n options = Config.flatten(config.options)\n options = sorted(\n filter(lambda opt: \"+++\" not in opt, options), key=lambda opt: opt.lower()\n )\n if isinstance(args.list_keys, bool):\n sep = \", \"\n else:\n sep = args.list_keys\n print(\"Default keys for CSV: \")\n print(*default_attributes.keys(), sep=sep)\n print(\"\")\n print(\"Special keys: \")\n print(*[\"$folder\", \"$checkpoint\", \"$machine\", \"$base_model\"], sep=sep)\n print(\"\")\n print(\"Keys found in trace: \")\n print(*sorted(all_trace_keys), sep=sep)\n print(\"\")\n print(\"Keys found in config: \")\n print(*options, sep=sep)\n\n\n### DUMP CONFIG ########################################################################\n\n\ndef _add_dump_config_parser(subparsers_dump):\n parser_dump_config = subparsers_dump.add_parser(\n \"config\", help=(\"Dump a configuration\")\n )\n parser_dump_config.add_argument(\n \"source\",\n help=\"A path to either a checkpoint, a config file, or a job folder.\",\n nargs=\"?\",\n default=\".\",\n )\n\n parser_dump_config.add_argument(\n \"--minimal\",\n \"-m\",\n default=False,\n action=\"store_const\",\n const=True,\n help=\"Only dump configuration options different from the default configuration (default)\",\n )\n parser_dump_config.add_argument(\n \"--raw\",\n \"-r\",\n default=False,\n action=\"store_const\",\n const=True,\n help=\"Dump the config as is\",\n )\n parser_dump_config.add_argument(\n \"--full\",\n \"-f\",\n default=False,\n action=\"store_const\",\n const=True,\n help=\"Add all values from the default configuration before dumping the config\",\n )\n\n parser_dump_config.add_argument(\n \"--include\",\n \"-i\",\n type=str,\n nargs=\"*\",\n help=\"List of keys to include (separated by space). \"\n \"All subkeys are also included. 
Cannot be used with --raw.\",\n )\n\n parser_dump_config.add_argument(\n \"--exclude\",\n \"-e\",\n type=str,\n nargs=\"*\",\n help=\"List of keys to exclude (separated by space). \"\n \"All subkeys are also exluded. Applied after --include. \"\n \"Cannot be used with --raw.\",\n )\n\n\ndef _dump_config(args):\n \"\"\"Execute the 'dump config' command.\"\"\"\n if not (args.raw or args.full or args.minimal):\n args.minimal = True\n\n if args.raw + args.full + args.minimal != 1:\n raise ValueError(\"Exactly one of --raw, --full, or --minimal must be set\")\n\n if args.raw and (args.include or args.exclude):\n raise ValueError(\n \"--include and --exclude cannot be used with --raw \"\n \"(use --full or --minimal instead).\"\n )\n\n config = Config()\n config_file = None\n if os.path.isdir(args.source):\n config_file = os.path.join(args.source, \"config.yaml\")\n config.load(config_file)\n elif \".yaml\" in os.path.split(args.source)[-1]:\n config_file = args.source\n config.load(config_file)\n else: # a checkpoint\n checkpoint = torch.load(args.source, map_location=\"cpu\")\n if args.raw:\n config = checkpoint[\"config\"]\n else:\n config.load_config(checkpoint[\"config\"])\n\n def print_options(options):\n # drop all arguments that are not included\n if args.include:\n args.include = set(args.include)\n options_copy = copy.deepcopy(options)\n for key in options_copy.keys():\n prefix = key\n keep = False\n while True:\n if prefix in args.include:\n keep = True\n break\n else:\n last_dot_index = prefix.rfind(\".\")\n if last_dot_index < 0:\n break\n else:\n prefix = prefix[:last_dot_index]\n if not keep:\n del options[key]\n\n # remove all arguments that are excluded\n if args.exclude:\n args.exclude = set(args.exclude)\n options_copy = copy.deepcopy(options)\n for key in options_copy.keys():\n prefix = key\n while True:\n if prefix in args.exclude:\n del options[key]\n break\n else:\n last_dot_index = prefix.rfind(\".\")\n if last_dot_index < 0:\n break\n else:\n prefix = prefix[:last_dot_index]\n\n # convert the remaining options to a Config and print it\n config = Config(load_default=False)\n config.set_all(options, create=True)\n print(yaml.dump(config.options))\n\n if args.raw:\n if config_file:\n with open(config_file, \"r\") as f:\n print(f.read())\n else:\n print_options(config.options)\n elif args.full:\n print_options(config.options)\n else: # minimal\n default_config = Config()\n imports = config.get(\"import\")\n if imports is not None:\n if not isinstance(imports, list):\n imports = [imports]\n for module_name in imports:\n default_config._import(module_name)\n default_options = Config.flatten(default_config.options)\n new_options = Config.flatten(config.options)\n minimal_options = {}\n\n for option, value in new_options.items():\n if option not in default_options or default_options[option] != value:\n minimal_options[option] = value\n\n # always retain all imports\n if imports is not None:\n minimal_options[\"import\"] = list(set(imports))\n\n print_options(minimal_options)\n" ]
[ [ "torch.cat" ], [ "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
giacomo-montibeller/ml-workflow-model-layer
[ "1f875dbf3dc053c2a593c0c6f58ca3630b3e8aa9" ]
[ "main.py" ]
[ "import os\nimport pickle\nimport pandas as pd\nimport numpy as np\nfrom scipy import stats\nfrom sklearn import linear_model\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.metrics import mean_squared_error\n\nclass Main:\n def __init__(self, dataset_path):\n if not os.path.exists(\"data\"):\n os.makedirs(\"data\")\n\n self.dataset_path = dataset_path\n\n def execute(self):\n dataset = load_dataset_from(self.dataset_path)\n remove_outliers(dataset)\n\n input_train, input_test, label_train, label_test = extract_inputs_and_labels_from(dataset)\n\n model = train(input_train, label_train)\n\n evaluate(model, input_test, label_test)\n\n save(model)\n\ndef load_dataset_from(path):\n return pd.read_csv(path, usecols=[\"time\", \"value\"])\n\ndef remove_outliers(dataset):\n z = np.abs(stats.zscore(dataset))\n threshold = 5\n outliers = np.where(z > threshold)[0]\n dataset.drop(outliers, axis=0, inplace=True)\n\ndef extract_inputs_and_labels_from(dataset):\n inputs = dataset[\"time\"]\n labels = dataset[\"value\"]\n return split_train_and_test(inputs, labels)\n\ndef split_train_and_test(inputs, labels):\n input_train, input_test, label_train, label_test = train_test_split(inputs, labels, test_size=0.1)\n input_train = polynomialize(input_train)\n input_test = polynomialize(input_test)\n return input_train, input_test, label_train, label_test\n\ndef polynomialize(input):\n transformer = PolynomialFeatures(degree=10)\n input = input.values.reshape(-1, 1)\n input = transformer.fit_transform(input)\n return input\n\ndef evaluate(model, input_test, label_test):\n predictions = model.predict(input_test)\n print(\"R2 Score: {}\".format(model.score(input_test, label_test)))\n print(\"MSE: {}\".format(mean_squared_error(label_test, predictions)))\n\ndef train(input_train, label_train):\n model = linear_model.LinearRegression()\n return model.fit(input_train, label_train)\n\ndef save(model):\n file_path = \"./data/model.sav\"\n pickle.dump(model, open(file_path, \"wb\"))\n\nif __name__ == '__main__':\n app = Main(\"../ml-workflow-data-layer/data/dataset.csv\")\n\n app.execute()\n" ]
[ [ "pandas.read_csv", "scipy.stats.zscore", "sklearn.preprocessing.PolynomialFeatures", "sklearn.model_selection.train_test_split", "sklearn.metrics.mean_squared_error", "sklearn.linear_model.LinearRegression", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
rroliver/gdal
[ "319c1ea20b10d7501e95ad2dcbb4b6a25fa15fa7" ]
[ "autotest/gcore/tiff_write.py" ]
[ "#!/usr/bin/env pytest\n# -*- coding: utf-8 -*-\n###############################################################################\n# $Id$\n#\n# Project: GDAL/OGR Test Suite\n# Purpose: Test read/write functionality for GeoTIFF format.\n# Author: Frank Warmerdam <[email protected]>\n#\n###############################################################################\n# Copyright (c) 2003, Frank Warmerdam <[email protected]>\n# Copyright (c) 2008-2013, Even Rouault <even dot rouault at mines-paris dot org>\n#\n# This library is free software; you can redistribute it and/or\n# modify it under the terms of the GNU Library General Public\n# License as published by the Free Software Foundation; either\n# version 2 of the License, or (at your option) any later version.\n#\n# This library is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# Library General Public License for more details.\n#\n# You should have received a copy of the GNU Library General Public\n# License along with this library; if not, write to the\n# Free Software Foundation, Inc., 59 Temple Place - Suite 330,\n# Boston, MA 02111-1307, USA.\n###############################################################################\n\nimport copy\nimport os\nimport sys\nimport array\nimport shutil\nfrom osgeo import gdal\nfrom osgeo import osr\nimport pytest\n\nsys.path.append('../../gdal/swig/python/samples')\n\nimport gdaltest\n\nrun_tiff_write_api_proxy = True\n\n###############################################################################\n# Get the GeoTIFF driver, and verify a few things about it.\n\n\ndef test_tiff_write_1():\n\n gdaltest.tiff_drv = gdal.GetDriverByName('GTiff')\n assert gdaltest.tiff_drv is not None, 'GTiff driver not found!'\n\n drv_md = gdaltest.tiff_drv.GetMetadata()\n assert drv_md['DMD_MIMETYPE'] == 'image/tiff', 'mime type is wrong'\n\n###############################################################################\n# Create a simple file by copying from an existing one.\n\n\ndef test_tiff_write_2():\n\n src_ds = gdal.Open('data/cfloat64.tif')\n\n new_ds = gdaltest.tiff_drv.CreateCopy('tmp/test_2.tif', src_ds)\n\n bnd = new_ds.GetRasterBand(1)\n assert bnd.Checksum() == 5028, 'Didnt get expected checksum on still-open file'\n\n bnd = None\n new_ds = None\n\n # hopefully it's closed now!\n\n new_ds = gdal.Open('tmp/test_2.tif')\n bnd = new_ds.GetRasterBand(1)\n assert bnd.Checksum() == 5028, 'Didnt get expected checksum on reopened file'\n\n assert bnd.ComputeRasterMinMax() == (74.0, 255.0), \\\n 'ComputeRasterMinMax() returned wrong value'\n\n bnd = None\n new_ds = None\n\n gdaltest.tiff_drv.Delete('tmp/test_2.tif')\n\n###############################################################################\n# Create a simple file by copying from an existing one.\n\n\ndef test_tiff_write_3():\n\n src_ds = gdal.Open('data/utmsmall.tif')\n\n options = ['TILED=YES', 'BLOCKXSIZE=32', 'BLOCKYSIZE=32']\n\n new_ds = gdaltest.tiff_drv.CreateCopy('tmp/test_3.tif', src_ds,\n options=options)\n\n bnd = new_ds.GetRasterBand(1)\n assert bnd.Checksum() == 50054, 'Didnt get expected checksum on still-open file'\n\n bnd = None\n new_ds = None\n\n gdaltest.tiff_drv.Delete('tmp/test_3.tif')\n\n###############################################################################\n# Create a tiled file.\n\n\ndef test_tiff_write_4():\n\n try:\n from osgeo import gdalnumeric\n except ImportError:\n pytest.skip()\n\n 
options = ['TILED=YES', 'BLOCKXSIZE=32', 'BLOCKYSIZE=32']\n\n new_ds = gdaltest.tiff_drv.Create('tmp/test_4.tif', 40, 50, 3,\n gdal.GDT_Byte, options)\n\n try:\n data_red = gdalnumeric.zeros((50, 40))\n data_green = gdalnumeric.zeros((50, 40))\n data_blue = gdalnumeric.zeros((50, 40))\n except AttributeError:\n import numpy\n data_red = numpy.zeros((50, 40))\n data_green = numpy.zeros((50, 40))\n data_blue = numpy.zeros((50, 40))\n\n for y in range(50):\n for x in range(40):\n data_red[y][x] = x\n data_green[y][x] = y\n data_blue[y][x] = x + y\n\n try:\n data_red = data_red.astype(gdalnumeric.UnsignedInt8)\n data_green = data_green.astype(gdalnumeric.UnsignedInt8)\n data_blue = data_blue.astype(gdalnumeric.UnsignedInt8)\n except AttributeError:\n try:\n data_red = data_red.astype(gdalnumeric.uint8)\n data_green = data_green.astype(gdalnumeric.uint8)\n data_blue = data_blue.astype(gdalnumeric.uint8)\n except AttributeError:\n pass\n\n new_ds.GetRasterBand(1).WriteArray(data_red)\n new_ds.GetRasterBand(2).WriteArray(data_green)\n new_ds.GetRasterBand(3).WriteArray(data_blue)\n\n gt = (0.0, 1.0, 0.0, 50.0, 0.0, -1.0)\n new_ds.SetGeoTransform(gt)\n\n assert new_ds.GetRasterBand(1).Checksum() == 21577 and new_ds.GetRasterBand(2).Checksum() == 20950 and new_ds.GetRasterBand(3).Checksum() == 23730, \\\n 'Wrong checksum.'\n\n assert gt == new_ds.GetGeoTransform(), 'Wrong geotransform.'\n\n new_ds.SetMetadata({'TEST_KEY': 'TestValue'})\n\n new_ds = None\n\n new_ds = gdal.Open('tmp/test_4.tif')\n\n assert new_ds.GetRasterBand(1).Checksum() == 21577 and new_ds.GetRasterBand(2).Checksum() == 20950 and new_ds.GetRasterBand(3).Checksum() == 23730, \\\n 'Wrong checksum (2).'\n\n assert gt == new_ds.GetGeoTransform(), 'Wrong geotransform(2).'\n\n nd = new_ds.GetRasterBand(1).GetNoDataValue()\n assert nd is None, 'Got unexpected nodata value.'\n\n md_dict = new_ds.GetMetadata()\n assert md_dict['TEST_KEY'] == 'TestValue', 'Missing metadata'\n\n new_ds = None\n\n gdaltest.tiff_drv.Delete('tmp/test_4.tif')\n\n###############################################################################\n# Write a file with GCPs.\n\n\ndef test_tiff_write_5():\n\n src_ds = gdal.Open('data/gcps.vrt')\n\n new_ds = gdaltest.tiff_drv.CreateCopy('tmp/test_5.tif', src_ds)\n\n assert (new_ds.GetGCPProjection().find(\n 'AUTHORITY[\"EPSG\",\"26711\"]') != -1), 'GCP Projection not set properly.'\n\n gcps = new_ds.GetGCPs()\n assert len(gcps) == 4, 'GCP count wrong.'\n\n new_ds = None\n\n gdaltest.tiff_drv.Delete('tmp/test_5.tif')\n\n # Test SetGCPs on a new GTiff\n new_ds = gdaltest.tiff_drv.Create('tmp/test_5.tif', 10, 10, 1)\n new_ds.SetGCPs(gcps, src_ds.GetGCPProjection())\n new_ds = None\n\n new_ds = gdal.Open('tmp/test_5.tif')\n gcps = new_ds.GetGCPs()\n assert len(gcps) == 4, 'GCP count wrong.'\n new_ds = None\n\n gdaltest.tiff_drv.Delete('tmp/test_5.tif')\n\n###############################################################################\n# Test a mixture of reading and writing on a DEFLATE compressed file.\n# May crash with libtiff <= 3.8.2, so skip it if BigTIFF is not supported\n# (this is a sign of an older libtiff...)\n\n\ndef test_tiff_write_6():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('BigTIFF') == -1:\n pytest.skip()\n\n options = ['TILED=YES', 'BLOCKXSIZE=32', 'BLOCKYSIZE=32',\n 'COMPRESS=DEFLATE', 'PREDICTOR=2']\n ds = gdaltest.tiff_drv.Create('tmp/test_6.tif', 200, 200, 1,\n gdal.GDT_Byte, options)\n\n # make a 32x32 byte buffer\n buf = array.array('B', 
list(range(32))).tostring() * 32\n\n ds.WriteRaster(0, 0, 32, 32, buf, buf_type=gdal.GDT_Byte)\n ds.FlushCache()\n ds.WriteRaster(32, 0, 32, 32, buf, buf_type=gdal.GDT_Byte)\n ds.FlushCache()\n buf_read = ds.ReadRaster(0, 0, 32, 32, buf_type=gdal.GDT_Byte)\n\n if buf_read != buf:\n gdaltest.tiff_write_6_failed = True\n pytest.fail('did not get back expected data.')\n\n ds = None\n\n gdaltest.tiff_write_6_failed = False\n gdaltest.tiff_drv.Delete('tmp/test_6.tif')\n\n###############################################################################\n# Test a mixture of reading and writing on a LZW compressed file.\n# Will cause older libtiff versions (<=3.8.2 for sure) to crash, so skip it\n# if BigTIFF is not supported (this is a sign of an older libtiff...)\n\n\ndef test_tiff_write_7():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('BigTIFF') == -1:\n pytest.skip()\n\n options = ['TILED=YES', 'COMPRESS=LZW', 'PREDICTOR=2']\n ds = gdaltest.tiff_drv.Create('tmp/test_7.tif', 200, 200, 1,\n gdal.GDT_Byte, options)\n\n # make a 32x32 byte buffer\n buf = array.array('B', list(range(32))).tostring() * 32\n\n ds.WriteRaster(0, 0, 32, 32, buf, buf_type=gdal.GDT_Byte)\n ds.FlushCache()\n ds.WriteRaster(32, 0, 32, 32, buf, buf_type=gdal.GDT_Byte)\n ds.FlushCache()\n buf_read = ds.ReadRaster(0, 0, 32, 32, buf_type=gdal.GDT_Byte)\n\n assert buf_read == buf, 'did not get back expected data.'\n\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/test_7.tif')\n\n###############################################################################\n# Test a mixture of reading and writing on a PACKBITS compressed file.\n\n\ndef test_tiff_write_8():\n\n options = ['TILED=YES', 'BLOCKXSIZE=32', 'BLOCKYSIZE=32', 'COMPRESS=PACKBITS']\n ds = gdaltest.tiff_drv.Create('tmp/test_8.tif', 200, 200, 1,\n gdal.GDT_Byte, options)\n\n # make a 32x32 byte buffer\n buf = array.array('B', list(range(32))).tostring() * 32\n\n ds.WriteRaster(0, 0, 32, 32, buf, buf_type=gdal.GDT_Byte)\n ds.FlushCache()\n ds.WriteRaster(32, 0, 32, 32, buf, buf_type=gdal.GDT_Byte)\n ds.FlushCache()\n\n buf_read = ds.ReadRaster(0, 0, 32, 32, buf_type=gdal.GDT_Byte)\n\n assert buf_read == buf, 'did not get back expected data.'\n\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/test_8.tif')\n\n###############################################################################\n# Create a simple file by copying from an existing one.\n\n\ndef test_tiff_write_9():\n\n src_ds = gdal.Open('data/byte.tif')\n new_ds = gdaltest.tiff_drv.CreateCopy('tmp/test_9.tif', src_ds,\n options=['NBITS=5'])\n with gdaltest.error_handler():\n new_ds = None\n\n new_ds = gdal.Open('tmp/test_9.tif')\n bnd = new_ds.GetRasterBand(1)\n assert bnd.Checksum() == 5287, 'Didnt get expected checksum on reopened file'\n\n bnd = None\n new_ds = None\n\n gdaltest.tiff_drv.Delete('tmp/test_9.tif')\n\n###############################################################################\n# 1bit file but with band interleaving, and odd size (not multiple of 8) #1957\n\n\ndef test_tiff_write_10():\n\n ut = gdaltest.GDALTest('GTiff', 'oddsize_1bit2b.tif', 2, 5918,\n options=['NBITS=1', 'INTERLEAVE=BAND'])\n return ut.testCreate(out_bands=2)\n\n###############################################################################\n# Simple 1 bit file, treated through the GTiffBitmapBand class.\n\n\ndef test_tiff_write_11():\n\n ut = gdaltest.GDALTest('GTiff', 'oddsize1bit.tif', 1, 5918,\n options=['NBITS=1', 'COMPRESS=CCITTFAX4'])\n return 
ut.testCreateCopy()\n\n###############################################################################\n# Read JPEG Compressed YCbCr subsampled image.\n\n\ndef test_tiff_write_12():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('JPEG') == -1:\n pytest.skip()\n\n ds = gdal.Open('data/sasha.tif')\n cs = ds.GetRasterBand(3).Checksum()\n assert cs == 31952 or cs == 30145\n\n###############################################################################\n# Write JPEG Compressed YCbCr subsampled image.\n\n\ndef test_tiff_write_13():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('JPEG') == -1:\n pytest.skip()\n\n src_ds = gdal.Open('data/sasha.tif')\n ds = gdaltest.tiff_drv.CreateCopy('tmp/sasha.tif', src_ds, options=['PROFILE=BASELINE',\n 'TILED=YES',\n 'COMPRESS=JPEG',\n 'PHOTOMETRIC=YCBCR',\n 'JPEG_QUALITY=31'])\n ds = None\n\n ds = gdal.Open('tmp/sasha.tif')\n cs = ds.GetRasterBand(3).Checksum()\n ds = None\n\n size = os.stat('tmp/sasha.tif').st_size\n\n gdaltest.tiff_drv.Delete('tmp/sasha.tif')\n assert cs == 17347 or cs == 14445, 'fail: bad checksum'\n\n if md['LIBTIFF'] == 'INTERNAL':\n assert size <= 22816, 'fail: bad size'\n\n \n###############################################################################\n# Test creating an in memory copy.\n\n\ndef test_tiff_write_14():\n\n tst = gdaltest.GDALTest('GTiff', 'byte.tif', 1, 4672)\n\n return tst.testCreateCopy(vsimem=1)\n\n\n###############################################################################\n# Test that we can restrict metadata and georeferencing in the output\n# file using the PROFILE creation option with CreateCopy()\n\ndef test_tiff_write_15():\n\n ds_in = gdal.Open('data/byte.vrt')\n\n ds = gdaltest.tiff_drv.CreateCopy('tmp/tw_15.tif', ds_in, options=['PROFILE=BASELINE'])\n\n ds_in = None\n ds = None\n\n ds = gdal.Open('tmp/tw_15.tif')\n\n md = ds.GetMetadata()\n assert 'test' in md, 'Metadata absent from .aux.xml file.'\n\n md = ds.GetRasterBand(1).GetMetadata()\n assert 'testBand' in md, 'Metadata absent from .aux.xml file.'\n\n ds = None\n\n gdal.Unlink('tmp/tw_15.tif.aux.xml')\n\n ds = gdal.Open('tmp/tw_15.tif')\n\n assert ds.GetGeoTransform() == (0.0, 1.0, 0.0, 0.0, 0.0, 1.0), \\\n 'Got wrong geotransform, profile ignored?'\n\n md = ds.GetMetadata()\n assert 'test' not in md, 'Metadata written to BASELINE file.'\n\n md = ds.GetRasterBand(1).GetMetadata()\n assert 'testBand' not in md, 'Metadata written to BASELINE file.'\n\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tw_15.tif')\n\n###############################################################################\n# Test that we can restrict metadata and georeferencing in the output\n# file using the PROFILE creation option with Create()\n\n\ndef test_tiff_write_16():\n\n ds_in = gdal.Open('data/byte.vrt')\n\n ds = gdaltest.tiff_drv.Create('tmp/tw_16.tif', 20, 20, gdal.GDT_Byte,\n options=['PROFILE=BASELINE'])\n\n ds.SetMetadata({'test': 'testvalue'})\n ds.GetRasterBand(1).SetMetadata({'testBand': 'testvalueBand'})\n\n ds.SetGeoTransform((10, 5, 0, 30, 0, -5))\n\n data = ds_in.ReadRaster(0, 0, 20, 20)\n ds.WriteRaster(0, 0, 20, 20, data)\n\n ds_in = None\n ds = None\n\n ds = gdal.Open('tmp/tw_16.tif')\n assert ds.GetGeoTransform() == (0.0, 1.0, 0.0, 0.0, 0.0, 1.0), \\\n 'Got wrong geotransform, profile ignored?'\n\n md = ds.GetMetadata()\n assert 'test' in md, 'Metadata absent from .aux.xml file.'\n\n md = ds.GetRasterBand(1).GetMetadata()\n assert 'testBand' in md, 'Metadata absent from .aux.xml 
file.'\n\n ds = None\n\n try:\n os.remove('tmp/tw_16.tif.aux.xml')\n except OSError:\n try:\n os.stat('tmp/tw_16.tif.aux.xml')\n except OSError:\n pytest.fail('No .aux.xml file.')\n\n ds = gdal.Open('tmp/tw_16.tif')\n\n md = ds.GetMetadata()\n assert 'test' not in md, 'Metadata written to BASELINE file.'\n\n md = ds.GetRasterBand(1).GetMetadata()\n assert 'testBand' not in md, 'Metadata written to BASELINE file.'\n\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tw_16.tif')\n\n###############################################################################\n# Test writing a TIFF with an RPC tag.\n\n\ndef test_tiff_write_17():\n\n # Translate RPC controlled data to GeoTIFF.\n\n ds_in = gdal.Open('data/rpc.vrt')\n rpc_md = ds_in.GetMetadata('RPC')\n\n tmpfilename = '/vsimem/tiff_write_17.tif'\n ds = gdaltest.tiff_drv.CreateCopy(tmpfilename, ds_in)\n\n ds_in = None\n ds = None\n\n # Ensure there is no .aux.xml file which might hold the RPC.\n assert not gdal.VSIStatL(tmpfilename + '.aux.xml'), \\\n 'unexpectedly found.aux.xml file'\n\n # confirm there is no .rpb file created by default.\n assert not gdal.VSIStatL(tmpfilename + '.RPB'), 'unexpectedly found .RPB file'\n\n # confirm there is no _rpc.txt file created by default.\n assert not gdal.VSIStatL(tmpfilename + '_RPC.TXT'), \\\n 'unexpectedly found _RPC.TXT file'\n\n # Open the dataset, and confirm the RPC data is still intact.\n ds = gdal.Open(tmpfilename)\n assert gdaltest.rpcs_equal(ds.GetMetadata('RPC'), rpc_md)\n ds = None\n\n # Modify the RPC\n modified_rpc = copy.copy(rpc_md)\n modified_rpc['LINE_OFF'] = '123456'\n\n ds = gdal.Open(tmpfilename, gdal.GA_Update)\n ds.SetMetadata(modified_rpc, 'RPC')\n ds = None\n\n ds = gdal.Open(tmpfilename)\n assert gdaltest.rpcs_equal(ds.GetMetadata('RPC'), modified_rpc)\n ds = None\n\n # Unset the RPC\n ds = gdal.Open(tmpfilename, gdal.GA_Update)\n ds.SetMetadata(None, 'RPC')\n ds = None\n\n ds = gdal.Open(tmpfilename)\n assert not ds.GetMetadata('RPC'), 'got RPC, but was not expected'\n ds = None\n\n gdaltest.tiff_drv.Delete(tmpfilename)\n\n###############################################################################\n# Test that above test still work with the optimization in the GDAL_DISABLE_READDIR_ON_OPEN\n# case (#3996)\n\n\ndef test_tiff_write_17_disable_readdir():\n oldval = gdal.GetConfigOption('GDAL_DISABLE_READDIR_ON_OPEN')\n gdal.SetConfigOption('GDAL_DISABLE_READDIR_ON_OPEN', 'TRUE')\n ret = test_tiff_write_17()\n gdal.SetConfigOption('GDAL_DISABLE_READDIR_ON_OPEN', oldval)\n return ret\n\n###############################################################################\n# Test writing a TIFF with an RPB file and IMD file.\n\n\ndef test_tiff_write_18():\n\n # Translate RPC controlled data to GeoTIFF.\n\n ds_in = gdal.Open('data/rpc.vrt')\n rpc_md = ds_in.GetMetadata('RPC')\n\n gdaltest.tiff_drv.CreateCopy('tmp/tw_18.tif', ds_in,\n options=['PROFILE=BASELINE'])\n\n # Ensure there is no .aux.xml file which might hold the RPC.\n assert not gdal.VSIStatL('tmp/tm_18.tif.aux.xml'), \\\n 'unexpectedly found tm_18.tif.aux.xml file'\n\n # confirm there is an .rpb and .imd file.\n assert gdal.VSIStatL('tmp/tw_18.RPB') is not None, 'missing .RPB file.'\n assert gdal.VSIStatL('tmp/tw_18.IMD') is not None, 'missing .IMD file.'\n\n # confirm there is no _rpc.txt file created by default.\n assert not gdal.VSIStatL('tmp/tw_18_RPC.TXT'), 'unexpectedly found _RPC.TXT file'\n\n # Open the dataset, and confirm the RPC/IMD data is still intact.\n ds = gdal.Open('tmp/tw_18.tif')\n\n assert 
gdaltest.rpcs_equal(ds.GetMetadata('RPC'), rpc_md)\n\n imd_md = ds.GetMetadata('IMD')\n assert imd_md['version'] == '\"R\"' and imd_md['numColumns'] == '30324' and imd_md['IMAGE_1.sunEl'] == '39.7', \\\n 'IMD contents wrong?'\n\n ds = None\n\n # Test deferred loading with GetMetadataItem()\n ds = gdal.Open('tmp/tw_18.tif')\n assert ds.GetMetadataItem('LINE_OFF', 'RPC') == '16201', \\\n \"wrong value for GetMetadataItem('LINE_OFF', 'RPC')\"\n assert ds.GetMetadataItem('version', 'IMD') == '\"R\"', \\\n \"wrong value for GetMetadataItem('version', 'IMD')\"\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tw_18.tif')\n\n # Confirm IMD and RPC files are cleaned up. If not likely the\n # file list functionality is not working properly.\n assert not gdal.VSIStatL('tmp/tw_18.RPB'), 'RPB did not get cleaned up.'\n\n assert not gdal.VSIStatL('tmp/tw_18.IMD'), 'IMD did not get cleaned up.'\n\n # Remove the RPC\n gdaltest.tiff_drv.CreateCopy('tmp/tw_18.tif', ds_in,\n options=['PROFILE=BASELINE'])\n ds = gdal.Open('tmp/tw_18.tif', gdal.GA_Update)\n ds.SetMetadata(None, 'RPC')\n ds = None\n assert not os.path.exists('tmp/tw_18.RPB'), 'RPB did not get removed'\n\n gdaltest.tiff_drv.Delete('tmp/tw_18.tif')\n\n###############################################################################\n# Test that above test still work with the optimization in the GDAL_DISABLE_READDIR_ON_OPEN\n# case (#3996)\n\n\ndef test_tiff_write_18_disable_readdir():\n oldval = gdal.GetConfigOption('GDAL_DISABLE_READDIR_ON_OPEN')\n gdal.SetConfigOption('GDAL_DISABLE_READDIR_ON_OPEN', 'TRUE')\n ret = test_tiff_write_18()\n gdal.SetConfigOption('GDAL_DISABLE_READDIR_ON_OPEN', oldval)\n return ret\n\n###############################################################################\n# Test writing a TIFF with an _RPC.TXT\n\n\ndef test_tiff_write_rpc_txt():\n\n # Translate RPC controlled data to GeoTIFF.\n\n ds_in = gdal.Open('data/rpc.vrt')\n\n # Remove IMD before creating the TIFF to avoid creating an .IMD\n # since .IMD + _RPC.TXT is an odd combination\n # If the .IMD is found, we don't try reading _RPC.TXT\n ds_in_without_imd = gdal.GetDriverByName('VRT').CreateCopy('', ds_in)\n ds_in_without_imd.SetMetadata(None, 'IMD')\n\n rpc_md = ds_in.GetMetadata('RPC')\n\n ds = gdaltest.tiff_drv.CreateCopy('tmp/tiff_write_rpc_txt.tif', ds_in_without_imd,\n options=['PROFILE=BASELINE', 'RPCTXT=YES'])\n\n ds_in = None\n ds = None\n\n # Ensure there is no .aux.xml file which might hold the RPC.\n try:\n os.remove('tmp/tiff_write_rpc_txt.tif.aux.xml')\n except OSError:\n pass\n\n # confirm there is no .RPB file created by default.\n with pytest.raises(IOError, message='unexpectedly found .RPB file'):\n open('tmp/tiff_write_rpc_txt.RPB').read()\n \n\n try:\n open('tmp/tiff_write_rpc_txt_RPC.TXT').read()\n except IOError:\n pytest.fail('missing _RPC.TXT file.')\n\n # Open the dataset, and confirm the RPC data is still intact.\n ds = gdal.Open('tmp/tiff_write_rpc_txt.tif')\n\n assert gdaltest.rpcs_equal(ds.GetMetadata('RPC'), rpc_md)\n\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_rpc_txt.tif')\n\n # Confirm _RPC.TXT file is cleaned up. 
If not likely the\n # file list functionality is not working properly.\n with pytest.raises(IOError, message='_RPC.TXT did not get cleaned up.'):\n open('tmp/tiff_write_rpc_txt_RPC.TXT').read()\n \n\n \n###############################################################################\n# Test writing a TIFF with an RPC in .aux.xml\n\n\ndef test_tiff_write_rpc_in_pam():\n\n ds_in = gdal.Open('data/rpc.vrt')\n rpc_md = ds_in.GetMetadata('RPC')\n\n ds = gdaltest.tiff_drv.CreateCopy('tmp/tiff_write_rpc_in_pam.tif', ds_in,\n options=['PROFILE=BASELINE', 'RPB=NO'])\n\n ds_in = None\n ds = None\n\n # Ensure there is a .aux.xml file which might hold the RPC.\n try:\n os.stat('tmp/tiff_write_rpc_in_pam.tif.aux.xml')\n except OSError:\n pytest.fail('missing .aux.xml file.')\n\n # confirm there is no .RPB file created.\n with pytest.raises(IOError, message='unexpectedly found .RPB file'):\n open('tmp/tiff_write_rpc_txt.RPB').read()\n \n\n # Open the dataset, and confirm the RPC data is still intact.\n ds = gdal.Open('tmp/tiff_write_rpc_in_pam.tif')\n\n assert gdaltest.rpcs_equal(ds.GetMetadata('RPC'), rpc_md)\n\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_rpc_in_pam.tif')\n###############################################################################\n# Test the write of a pixel-interleaved image with NBITS = 7\n\n\ndef test_tiff_write_19():\n\n src_ds = gdal.Open('data/contig_strip.tif')\n\n new_ds = gdaltest.tiff_drv.CreateCopy('tmp/contig_strip_7.tif', src_ds,\n options=['NBITS=7', 'INTERLEAVE=PIXEL'])\n\n new_ds = None\n\n # hopefully it's closed now!\n\n new_ds = gdal.Open('tmp/contig_strip_7.tif')\n assert (new_ds.GetRasterBand(1).Checksum() == src_ds.GetRasterBand(1).Checksum() and \\\n new_ds.GetRasterBand(2).Checksum() == src_ds.GetRasterBand(2).Checksum() and \\\n new_ds.GetRasterBand(3).Checksum() == src_ds.GetRasterBand(3).Checksum()), \\\n 'Didnt get expected checksum on reopened file'\n\n new_ds = None\n src_ds = None\n\n gdaltest.tiff_drv.Delete('tmp/contig_strip_7.tif')\n\n###############################################################################\n# Test write and read of some TIFF tags\n# Expected to fail (properly) with older libtiff versions (<=3.8.2 for sure)\n# Also test unsetting those tags (#5619)\n\n\ndef test_tiff_write_20():\n\n new_ds = gdaltest.tiff_drv.Create('tmp/tags.tif', 1, 1, 1)\n\n values = [('TIFFTAG_DOCUMENTNAME', 'document_name'),\n ('TIFFTAG_IMAGEDESCRIPTION', 'image_description'),\n ('TIFFTAG_SOFTWARE', 'software'),\n ('TIFFTAG_DATETIME', '2009/01/01 13:01:08'),\n # TODO: artitst?\n ('TIFFTAG_ARTIST', 'artitst'),\n ('TIFFTAG_HOSTCOMPUTER', 'host_computer'),\n ('TIFFTAG_COPYRIGHT', 'copyright'),\n ('TIFFTAG_XRESOLUTION', '100'),\n ('TIFFTAG_YRESOLUTION', '101'),\n ('TIFFTAG_RESOLUTIONUNIT', '2 (pixels/inch)'),\n ('TIFFTAG_MINSAMPLEVALUE', '1'),\n ('TIFFTAG_MAXSAMPLEVALUE', '2'),\n ]\n\n new_ds.SetMetadata(dict(values))\n\n new_ds = None\n\n # hopefully it's closed now!\n\n with pytest.raises(OSError, message='did not expected .aux.xml file'):\n os.stat('tmp/tags.tif.aux.xml')\n \n\n new_ds = gdal.Open('tmp/tags.tif')\n md = new_ds.GetMetadata()\n for item in values:\n assert item[0] in md, ('Could not find tag %s' % (item[0]))\n\n assert md[item[0]] == item[1], \\\n ('For tag %s, got %s, expected %s' % (item[0], md[item[0]], item[1]))\n\n new_ds = None\n\n # Test just unsetting once, but leaving other unchanged\n ds = gdal.Open('tmp/tags.tif', gdal.GA_Update)\n ds.SetMetadataItem('TIFFTAG_SOFTWARE', None)\n ds = None\n\n with 
pytest.raises(OSError, message='did not expected .aux.xml file'):\n os.stat('tmp/tags.tif.aux.xml')\n \n\n ds = gdal.Open('tmp/tags.tif')\n assert ds.GetMetadataItem('TIFFTAG_SOFTWARE') is None, \\\n ('expected unset TIFFTAG_SOFTWARE but got %s' % ds.GetMetadataItem('TIFFTAG_SOFTWARE'))\n assert ds.GetMetadataItem('TIFFTAG_DOCUMENTNAME') is not None, \\\n 'expected set TIFFTAG_DOCUMENTNAME but got None'\n ds = None\n\n # Test unsetting all the remaining items\n ds = gdal.Open('tmp/tags.tif', gdal.GA_Update)\n ds.SetMetadata({})\n ds = None\n\n ds = gdal.Open('tmp/tags.tif')\n got_md = ds.GetMetadata()\n ds = None\n\n assert got_md == {}, 'expected empty metadata list, but got some'\n\n gdaltest.tiff_drv.Delete('tmp/tags.tif')\n\n###############################################################################\n# Test RGBA images with TIFFTAG_EXTRASAMPLES=EXTRASAMPLE_ASSOCALPHA\n\n\ndef test_tiff_write_21():\n\n src_ds = gdal.Open('data/stefan_full_rgba.tif')\n\n new_ds = gdaltest.tiff_drv.CreateCopy('tmp/stefan_full_rgba.tif', src_ds)\n\n new_ds = None\n\n new_ds = gdal.Open('tmp/stefan_full_rgba.tif')\n assert new_ds.RasterCount == 4\n for i in range(4):\n assert new_ds.GetRasterBand(i + 1).GetRasterColorInterpretation() == src_ds.GetRasterBand(i + 1).GetRasterColorInterpretation()\n assert new_ds.GetRasterBand(i + 1).Checksum() == src_ds.GetRasterBand(i + 1).Checksum()\n\n new_ds = None\n src_ds = None\n\n gdaltest.tiff_drv.Delete('tmp/stefan_full_rgba.tif')\n\n###############################################################################\n# Test RGBA images with TIFFTAG_EXTRASAMPLES=EXTRASAMPLE_UNSPECIFIED\n\n\ndef test_tiff_write_22():\n\n src_ds = gdal.Open('data/stefan_full_rgba_photometric_rgb.tif')\n\n new_ds = gdaltest.tiff_drv.CreateCopy('tmp/stefan_full_rgba_photometric_rgb.tif', src_ds, options=['PHOTOMETRIC=RGB'])\n\n new_ds = None\n\n new_ds = gdal.Open('tmp/stefan_full_rgba_photometric_rgb.tif')\n assert new_ds.RasterCount == 4\n for i in range(4):\n assert new_ds.GetRasterBand(i + 1).GetRasterColorInterpretation() == src_ds.GetRasterBand(i + 1).GetRasterColorInterpretation()\n assert new_ds.GetRasterBand(i + 1).Checksum() == src_ds.GetRasterBand(i + 1).Checksum()\n\n new_ds = None\n src_ds = None\n\n gdaltest.tiff_drv.Delete('tmp/stefan_full_rgba_photometric_rgb.tif')\n\n###############################################################################\n# Test grey+alpha images with ALPHA=YES\n\n\ndef test_tiff_write_23():\n\n src_ds = gdal.Open('data/stefan_full_greyalpha.tif')\n\n new_ds = gdaltest.tiff_drv.CreateCopy('tmp/stefan_full_greyalpha.tif', src_ds, options=['ALPHA=YES'])\n\n new_ds = None\n\n new_ds = gdal.Open('tmp/stefan_full_greyalpha.tif')\n assert new_ds.RasterCount == 2\n for i in range(2):\n assert new_ds.GetRasterBand(i + 1).GetRasterColorInterpretation() == src_ds.GetRasterBand(i + 1).GetRasterColorInterpretation()\n assert new_ds.GetRasterBand(i + 1).Checksum() == src_ds.GetRasterBand(i + 1).Checksum()\n\n new_ds = None\n src_ds = None\n\n gdaltest.tiff_drv.Delete('tmp/stefan_full_greyalpha.tif')\n\n###############################################################################\n# Test grey+alpha images without ALPHA=YES\n\n\ndef test_tiff_write_24():\n\n src_ds = gdal.Open('data/stefan_full_greyalpha.tif')\n\n new_ds = gdaltest.tiff_drv.CreateCopy('tmp/stefan_full_greyunspecified.tif', src_ds)\n\n new_ds = None\n\n new_ds = gdal.Open('tmp/stefan_full_greyunspecified.tif')\n assert new_ds.RasterCount == 2\n for i in range(2):\n assert 
new_ds.GetRasterBand(i + 1).GetRasterColorInterpretation() == src_ds.GetRasterBand(i + 1).GetRasterColorInterpretation()\n assert new_ds.GetRasterBand(i + 1).Checksum() == src_ds.GetRasterBand(i + 1).Checksum()\n\n new_ds = None\n src_ds = None\n\n gdaltest.tiff_drv.Delete('tmp/stefan_full_greyunspecified.tif')\n\n###############################################################################\n# Read a CIELAB image to test the RGBA image TIFF interface\n\n\ndef test_tiff_write_25():\n\n src_ds = gdal.Open('data/cielab.tif')\n assert src_ds.RasterCount == 4\n assert src_ds.GetRasterBand(1).Checksum() == 6\n assert src_ds.GetRasterBand(2).Checksum() == 3\n assert src_ds.GetRasterBand(3).Checksum() == 0\n assert src_ds.GetRasterBand(4).Checksum() == 3\n assert src_ds.GetRasterBand(1).GetRasterColorInterpretation() == gdal.GCI_RedBand\n assert src_ds.GetRasterBand(2).GetRasterColorInterpretation() == gdal.GCI_GreenBand\n assert src_ds.GetRasterBand(3).GetRasterColorInterpretation() == gdal.GCI_BlueBand\n assert src_ds.GetRasterBand(4).GetRasterColorInterpretation() == gdal.GCI_AlphaBand\n src_ds = None\n\n\n###############################################################################\n# Test color table in a 8 bit image\n\ndef test_tiff_write_26():\n\n ds = gdaltest.tiff_drv.Create('tmp/ct8.tif', 1, 1, 1, gdal.GDT_Byte)\n\n ct = gdal.ColorTable()\n ct.SetColorEntry(0, (255, 255, 255, 255))\n ct.SetColorEntry(1, (255, 255, 0, 255))\n ct.SetColorEntry(2, (255, 0, 255, 255))\n ct.SetColorEntry(3, (0, 255, 255, 255))\n\n ds.GetRasterBand(1).SetRasterColorTable(ct)\n\n ct = None\n ds = None\n\n ds = gdal.Open('tmp/ct8.tif')\n\n ct = ds.GetRasterBand(1).GetRasterColorTable()\n assert (ct.GetCount() == 256 and \\\n ct.GetColorEntry(0) == (255, 255, 255, 255) and \\\n ct.GetColorEntry(1) == (255, 255, 0, 255) and \\\n ct.GetColorEntry(2) == (255, 0, 255, 255) and \\\n ct.GetColorEntry(3) == (0, 255, 255, 255)), 'Wrong color table entry.'\n\n ct = None\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/ct8.tif')\n\n###############################################################################\n# Test color table in a 16 bit image\n\n\ndef test_tiff_write_27():\n\n ds = gdaltest.tiff_drv.Create('tmp/ct16.tif', 1, 1, 1, gdal.GDT_UInt16)\n\n ct = gdal.ColorTable()\n ct.SetColorEntry(0, (255, 255, 255, 255))\n ct.SetColorEntry(1, (255, 255, 0, 255))\n ct.SetColorEntry(2, (255, 0, 255, 255))\n ct.SetColorEntry(3, (0, 255, 255, 255))\n\n ds.GetRasterBand(1).SetRasterColorTable(ct)\n\n ct = None\n ds = None\n\n ds = gdal.Open('tmp/ct16.tif')\n new_ds = gdaltest.tiff_drv.CreateCopy('tmp/ct16_copy.tif', ds)\n del new_ds\n ds = None\n\n ds = gdal.Open('tmp/ct16_copy.tif')\n\n ct = ds.GetRasterBand(1).GetRasterColorTable()\n assert (ct.GetCount() == 65536 and \\\n ct.GetColorEntry(0) == (255, 255, 255, 255) and \\\n ct.GetColorEntry(1) == (255, 255, 0, 255) and \\\n ct.GetColorEntry(2) == (255, 0, 255, 255) and \\\n ct.GetColorEntry(3) == (0, 255, 255, 255)), 'Wrong color table entry.'\n\n ct = None\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/ct16.tif')\n gdaltest.tiff_drv.Delete('tmp/ct16_copy.tif')\n\n###############################################################################\n# Test SetRasterColorInterpretation on a 2 channel image\n\n\ndef test_tiff_write_28():\n\n ds = gdaltest.tiff_drv.Create('tmp/greyalpha.tif', 1, 1, 2)\n\n assert ds.GetRasterBand(2).GetRasterColorInterpretation() == gdal.GCI_Undefined\n\n ds.GetRasterBand(2).SetRasterColorInterpretation(gdal.GCI_AlphaBand)\n\n assert 
ds.GetRasterBand(2).GetRasterColorInterpretation() == gdal.GCI_AlphaBand\n\n ds = None\n\n ds = gdal.Open('tmp/greyalpha.tif')\n\n assert ds.GetRasterBand(2).GetRasterColorInterpretation() == gdal.GCI_AlphaBand\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/greyalpha.tif')\n\n###############################################################################\n# Test SetRasterColorInterpretation on a 4 channel image\n\n\ndef test_tiff_write_29():\n\n # When creating a 4 channel image with PHOTOMETRIC=RGB,\n # TIFFTAG_EXTRASAMPLES=EXTRASAMPLE_UNSPECIFIED\n ds = gdaltest.tiff_drv.Create('/vsimem/rgba.tif', 1, 1, 4, options=['PHOTOMETRIC=RGB'])\n assert ds.GetMetadataItem('TIFFTAG_EXTRASAMPLES', '_DEBUG_') == '0'\n assert ds.GetRasterBand(4).GetRasterColorInterpretation() == gdal.GCI_Undefined\n\n # Now turn on alpha\n ds.GetRasterBand(4).SetRasterColorInterpretation(gdal.GCI_AlphaBand)\n\n assert ds.GetRasterBand(4).GetRasterColorInterpretation() == gdal.GCI_AlphaBand\n assert ds.GetMetadataItem('TIFFTAG_EXTRASAMPLES', '_DEBUG_') == '2'\n ds = None\n\n assert gdal.VSIStatL('/vsimem/rgba.tif.aux.xml') is None\n\n ds = gdal.Open('/vsimem/rgba.tif')\n assert ds.GetMetadataItem('TIFFTAG_EXTRASAMPLES', '_DEBUG_') == '2'\n assert ds.GetRasterBand(4).GetRasterColorInterpretation() == gdal.GCI_AlphaBand\n\n # Test cancelling alpha\n gdaltest.tiff_drv.CreateCopy('/vsimem/rgb_no_alpha.tif', ds, options=['ALPHA=NO'])\n ds = None\n\n assert gdal.VSIStatL('/vsimem/rgb_no_alpha.tif.aux.xml') is None\n\n ds = gdal.Open('/vsimem/rgb_no_alpha.tif')\n assert ds.GetMetadataItem('TIFFTAG_EXTRASAMPLES', '_DEBUG_') == '0'\n assert ds.GetRasterBand(4).GetRasterColorInterpretation() == gdal.GCI_Undefined\n\n # Test re-adding alpha\n gdaltest.tiff_drv.CreateCopy('/vsimem/rgb_added_alpha.tif', ds, options=['ALPHA=YES'])\n ds = None\n\n assert gdal.VSIStatL('/vsimem/rgb_added_alpha.tif.aux.xml') is None\n\n ds = gdal.Open('/vsimem/rgb_added_alpha.tif')\n assert ds.GetMetadataItem('TIFFTAG_EXTRASAMPLES', '_DEBUG_') == '2'\n assert ds.GetRasterBand(4).GetRasterColorInterpretation() == gdal.GCI_AlphaBand\n ds = None\n\n gdaltest.tiff_drv.Delete('/vsimem/rgba.tif')\n gdaltest.tiff_drv.Delete('/vsimem/rgb_no_alpha.tif')\n gdaltest.tiff_drv.Delete('/vsimem/rgb_added_alpha.tif')\n\n\n###############################################################################\n# Create a BigTIFF image with BigTIFF=YES\n\ndef test_tiff_write_30():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('BigTIFF') == -1:\n pytest.skip()\n\n ds = gdaltest.tiff_drv.Create('tmp/bigtiff.tif', 1, 1, 1, options=['BigTIFF=YES'])\n ds = None\n\n ds = gdal.Open('tmp/bigtiff.tif')\n assert ds is not None\n ds = None\n\n fileobj = open('tmp/bigtiff.tif', mode='rb')\n binvalues = array.array('b')\n binvalues.fromfile(fileobj, 4)\n fileobj.close()\n\n gdaltest.tiff_drv.Delete('tmp/bigtiff.tif')\n\n # Check BigTIFF signature\n assert (not ((binvalues[2] != 0x2B or binvalues[3] != 0) and\n (binvalues[3] != 0x2B or binvalues[2] != 0)))\n\n###############################################################################\n# Create a BigTIFF image implicitly (more than 4Gb).\n\n\ndef test_tiff_write_31():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('BigTIFF') == -1:\n pytest.skip()\n\n ds = gdaltest.tiff_drv.Create('tmp/bigtiff.tif', 100000, 100000, 1,\n options=['SPARSE_OK=TRUE'])\n ds = None\n\n ds = gdal.Open('tmp/bigtiff.tif')\n assert ds is not None\n ds = None\n\n fileobj = open('tmp/bigtiff.tif', 
mode='rb')\n binvalues = array.array('b')\n binvalues.fromfile(fileobj, 4)\n fileobj.close()\n\n gdaltest.tiff_drv.Delete('tmp/bigtiff.tif')\n\n # Check BigTIFF signature\n assert (not ((binvalues[2] != 0x2B or binvalues[3] != 0) and\n (binvalues[3] != 0x2B or binvalues[2] != 0)))\n\n###############################################################################\n# Create a rotated image\n\n\ndef test_tiff_write_32():\n\n ds_in = gdal.Open('data/byte.vrt')\n\n # Test creation\n ds = gdaltest.tiff_drv.Create('tmp/byte_rotated.tif', 20, 20, gdal.GDT_Byte)\n\n gt = (10, 3.53553390593, 3.53553390593, 30, 3.53553390593, -3.53553390593)\n ds.SetGeoTransform(gt)\n\n data = ds_in.ReadRaster(0, 0, 20, 20)\n ds.WriteRaster(0, 0, 20, 20, data)\n\n ds_in = None\n\n # Test copy\n new_ds = gdaltest.tiff_drv.CreateCopy('tmp/byte_rotated_copy.tif', ds)\n del new_ds\n\n # Check copy\n ds = gdal.Open('tmp/byte_rotated_copy.tif')\n new_gt = ds.GetGeoTransform()\n for i in range(6):\n if abs(new_gt[i] - gt[i]) > 1e-5:\n print('')\n print(('old = ', gt))\n print(('new = ', new_gt))\n pytest.fail('Geotransform differs.')\n\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/byte_rotated.tif')\n gdaltest.tiff_drv.Delete('tmp/byte_rotated_copy.tif')\n\n###############################################################################\n# Test that metadata is written in .aux.xml file in GeoTIFF profile with CreateCopy\n# (BASELINE is tested by tiff_write_15)\n\n\ndef test_tiff_write_33():\n\n ds_in = gdal.Open('data/byte.vrt')\n\n ds = gdaltest.tiff_drv.CreateCopy('tmp/tw_33.tif', ds_in, options=['PROFILE=GeoTIFF'])\n\n ds_in = None\n\n ds = None\n\n ds = gdal.Open('tmp/tw_33.tif')\n\n md = ds.GetMetadata()\n assert 'test' in md, 'Metadata absent from .aux.xml file.'\n\n md = ds.GetRasterBand(1).GetMetadata()\n assert 'testBand' in md, 'Metadata absent from .aux.xml file.'\n\n ds = None\n\n try:\n os.remove('tmp/tw_33.tif.aux.xml')\n except OSError:\n try:\n os.stat('tmp/tw_33.tif.aux.xml')\n except OSError:\n pytest.fail('No .aux.xml file.')\n\n ds = gdal.Open('tmp/tw_33.tif')\n\n md = ds.GetMetadata()\n assert 'test' not in md, 'Metadata written to GeoTIFF file.'\n\n md = ds.GetRasterBand(1).GetMetadata()\n assert 'testBand' not in md, 'Metadata written to GeoTIFF file.'\n\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tw_33.tif')\n\n###############################################################################\n# Test that metadata is written in .aux.xml file in GeoTIFF profile with Create\n# (BASELINE is tested by tiff_write_16)\n\n\ndef test_tiff_write_34():\n\n ds = gdaltest.tiff_drv.Create('tmp/tw_34.tif', 1, 1, gdal.GDT_Byte,\n options=['PROFILE=GeoTIFF'])\n ds.SetMetadata({'test': 'testvalue'})\n ds.GetRasterBand(1).SetMetadata({'testBand': 'testvalueBand'})\n\n ds = None\n\n ds = gdal.Open('tmp/tw_34.tif')\n\n md = ds.GetMetadata()\n assert 'test' in md, 'Metadata absent from .aux.xml file.'\n\n md = ds.GetRasterBand(1).GetMetadata()\n assert 'testBand' in md, 'Metadata absent from .aux.xml file.'\n\n ds = None\n\n try:\n os.remove('tmp/tw_34.tif.aux.xml')\n except OSError:\n try:\n os.stat('tmp/tw_34.tif.aux.xml')\n except OSError:\n pytest.fail('No .aux.xml file.')\n\n ds = gdal.Open('tmp/tw_34.tif')\n\n md = ds.GetMetadata()\n assert 'test' not in md, 'Metadata written to GeoTIFF file.'\n\n md = ds.GetRasterBand(1).GetMetadata()\n assert 'testBand' not in md, 'Metadata written to GeoTIFF file.'\n\n ds = None\n\n 
gdaltest.tiff_drv.Delete('tmp/tw_34.tif')\n\n###############################################################################\n# Test fallback from internal storage of Geotiff metadata to PAM storage\n# when metadata is too big to fit into the GDALGeotiff tag\n\n\ndef test_tiff_write_35():\n\n    # Build a 32768-character (2**15) string, big enough to overflow the tag\n    big_string = 'a' * 32768\n\n    ds = gdaltest.tiff_drv.Create('tmp/tw_35.tif', 1, 1, gdal.GDT_Byte)\n\n    md = {}\n    md['test'] = big_string\n    ds.SetMetadata(md)\n\n    md = ds.GetMetadata()\n\n    gdal.PushErrorHandler('CPLQuietErrorHandler')\n    ds = None\n    gdal.PopErrorHandler()\n\n    try:\n        os.stat('tmp/tw_35.tif.aux.xml')\n    except OSError:\n        pytest.fail('No .aux.xml file.')\n\n    gdal.PushErrorHandler('CPLQuietErrorHandler')\n    ds = gdal.Open('tmp/tw_35.tif')\n    gdal.PopErrorHandler()\n\n    md = ds.GetMetadata()\n    assert 'test' in md and len(md['test']) == 32768, 'Did not get expected metadata.'\n\n    ds = None\n\n    gdaltest.tiff_drv.Delete('tmp/tw_35.tif')\n\n###############################################################################\n# Generic function for the 8 following tests\n\n\ndef tiff_write_big_odd_bits(vrtfilename, tmpfilename, nbits, interleaving):\n    ds_in = gdal.Open(vrtfilename)\n\n    ds = gdaltest.tiff_drv.CreateCopy(tmpfilename, ds_in, options=['NBITS=' + str(nbits), 'INTERLEAVE=' + interleaving])\n\n    ds_in = None\n\n    ds = None\n\n    ds = gdal.Open(tmpfilename)\n    bnd = ds.GetRasterBand(1)\n    cs = bnd.Checksum()\n    assert cs == 4672, 'Did not get expected checksum on band 1'\n    md = bnd.GetMetadata('IMAGE_STRUCTURE')\n    assert md['NBITS'] == str(nbits), 'Did not get expected NBITS value'\n\n    bnd = ds.GetRasterBand(2)\n    assert bnd.Checksum() == 4672, 'Did not get expected checksum on band 2'\n    bnd = ds.GetRasterBand(3)\n    assert bnd.Checksum() == 4672, 'Did not get expected checksum on band 3'\n    bnd = None\n\n    md = ds.GetMetadata('IMAGE_STRUCTURE')\n    assert md['INTERLEAVE'] == interleaving, 'Did not get expected interleaving'\n\n    ds = None\n\n    gdaltest.tiff_drv.Delete(tmpfilename)\n\n\n###############################################################################\n# Test copy with NBITS=9, INTERLEAVE=PIXEL\n\ndef test_tiff_write_36():\n    return tiff_write_big_odd_bits('data/uint16_3band.vrt', 'tmp/tw_36.tif', 9, 'PIXEL')\n\n\n###############################################################################\n# Test copy with NBITS=9, INTERLEAVE=BAND\n\ndef test_tiff_write_37():\n    return tiff_write_big_odd_bits('data/uint16_3band.vrt', 'tmp/tw_37.tif', 9, 'BAND')\n\n###############################################################################\n# Test copy with NBITS=12, INTERLEAVE=PIXEL\n\n\ndef test_tiff_write_38():\n    return tiff_write_big_odd_bits('data/uint16_3band.vrt', 'tmp/tw_38.tif', 12, 'PIXEL')\n\n###############################################################################\n# Test copy with NBITS=12, INTERLEAVE=BAND\n\n\ndef 
test_tiff_write_39():\n    return tiff_write_big_odd_bits('data/uint16_3band.vrt', 'tmp/tw_39.tif', 12, 'BAND')\n\n###############################################################################\n# Test copy with NBITS=17, INTERLEAVE=PIXEL\n\n\ndef test_tiff_write_40():\n    return tiff_write_big_odd_bits('data/uint32_3band.vrt', 'tmp/tw_40.tif', 17, 'PIXEL')\n\n###############################################################################\n# Test copy with NBITS=17, INTERLEAVE=BAND\n\n\ndef test_tiff_write_41():\n    return tiff_write_big_odd_bits('data/uint32_3band.vrt', 'tmp/tw_41.tif', 17, 'BAND')\n\n###############################################################################\n# Test copy with NBITS=24, INTERLEAVE=PIXEL\n\n\ndef test_tiff_write_42():\n    return tiff_write_big_odd_bits('data/uint32_3band.vrt', 'tmp/tw_42.tif', 24, 'PIXEL')\n\n###############################################################################\n# Test copy with NBITS=24, INTERLEAVE=BAND\n\n\ndef test_tiff_write_43():\n    return tiff_write_big_odd_bits('data/uint32_3band.vrt', 'tmp/tw_43.tif', 24, 'BAND')\n\n\n###############################################################################\n# Test create with NBITS=9 and preservation through CreateCopy of NBITS\n\ndef test_tiff_write_44():\n\n    ds = gdaltest.tiff_drv.Create('tmp/tw_44.tif', 1, 1, 1, gdal.GDT_UInt16, options=['NBITS=9'])\n    ds = None\n    ds = gdal.Open('tmp/tw_44.tif')\n    bnd = ds.GetRasterBand(1)\n    md = bnd.GetMetadata('IMAGE_STRUCTURE')\n    bnd = None\n    assert md['NBITS'] == '9', 'Did not get expected NBITS value'\n\n    ds2 = gdaltest.tiff_drv.CreateCopy('tmp/tw_44_copy.tif', ds)\n    ds2 = None\n\n    ds2 = gdal.Open('tmp/tw_44_copy.tif')\n    bnd = ds2.GetRasterBand(1)\n    md = bnd.GetMetadata('IMAGE_STRUCTURE')\n    bnd = None\n    assert md['NBITS'] == '9', 'Did not get expected NBITS value'\n\n    ds = None\n    ds2 = None\n\n    gdaltest.tiff_drv.Delete('tmp/tw_44.tif')\n    gdaltest.tiff_drv.Delete('tmp/tw_44_copy.tif')\n\n###############################################################################\n# Test create with NBITS=17 and preservation through CreateCopy of NBITS\n\n\ndef test_tiff_write_45():\n\n    ds = gdaltest.tiff_drv.Create('tmp/tw_45.tif', 1, 1, 1, gdal.GDT_UInt32, options=['NBITS=17'])\n    ds = None\n    ds = gdal.Open('tmp/tw_45.tif')\n    bnd = ds.GetRasterBand(1)\n    md = bnd.GetMetadata('IMAGE_STRUCTURE')\n    bnd = None\n    assert md['NBITS'] == '17', 'Did not get expected NBITS value'\n\n    ds2 = gdaltest.tiff_drv.CreateCopy('tmp/tw_45_copy.tif', ds)\n    ds2 = None\n\n    ds2 = gdal.Open('tmp/tw_45_copy.tif')\n    bnd = ds2.GetRasterBand(1)\n    md = bnd.GetMetadata('IMAGE_STRUCTURE')\n    bnd = None\n    assert md['NBITS'] == '17', 'Did not get expected NBITS value'\n\n    ds = None\n    ds2 = None\n\n    gdaltest.tiff_drv.Delete('tmp/tw_45.tif')\n    gdaltest.tiff_drv.Delete('tmp/tw_45_copy.tif')\n\n\n###############################################################################\n# Test correct round-tripping of ReadBlock/WriteBlock\n\ndef test_tiff_write_46():\n    import struct\n\n    with gdaltest.SetCacheMax(0):\n\n        ds = gdaltest.tiff_drv.Create(\"tmp/tiff_write_46_1.tif\", 10, 10, 1, options=['NBITS=1'])\n        ds.GetRasterBand(1).Fill(0)\n\n        ds2 = gdaltest.tiff_drv.Create(\"tmp/tiff_write_46_2.tif\", 10, 10, 1, options=['NBITS=1'])\n        ds2.GetRasterBand(1).Fill(1)\n        ones = ds2.ReadRaster(0, 0, 10, 1)\n\n        # Load the working block\n        data = ds.ReadRaster(0, 0, 10, 1)\n\n        # Write the working block\n        ds.WriteRaster(0, 0, 10, 1, ones)\n\n        # This will discard the cached block for ds\n
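        # (the cache size was forced to 0 by SetCacheMax(0) above, so creating a\n        # new dataset evicts ds's dirty 1-bit block and flushes it to disk)\n        ds3 = 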
gdaltest.tiff_drv.Create(\"tmp/tiff_write_46_3.tif\", 10, 10, 1)\n ds3.GetRasterBand(1).Fill(1)\n\n # Load the working block again\n data = ds.ReadRaster(0, 0, 10, 1)\n\n # We expect (1, 1, 1, 1, 1, 1, 1, 1, 1, 1)\n got = struct.unpack('B' * 10, data)\n for g in got:\n assert g == 1, got\n\n ds = None\n ds2 = None\n ds3 = None\n gdaltest.tiff_drv.Delete('tmp/tiff_write_46_1.tif')\n gdaltest.tiff_drv.Delete('tmp/tiff_write_46_2.tif')\n gdaltest.tiff_drv.Delete('tmp/tiff_write_46_3.tif')\n\n###############################################################################\n# Test #2457\n\n\ndef test_tiff_write_47():\n\n with gdaltest.SetCacheMax(0):\n ret = test_tiff_write_3()\n return ret\n\n\n###############################################################################\n# Test #2457 with nYOff of RasterIO not aligned on the block height\n\ndef test_tiff_write_48():\n\n with gdaltest.SetCacheMax(0):\n\n src_ds = gdal.Open('data/utmsmall.tif')\n new_ds = gdal.GetDriverByName(\"GTiff\").Create('tmp/tiff_write_48.tif', 100, 100, 1, options=['TILED=YES', 'BLOCKXSIZE=96', 'BLOCKYSIZE=96'])\n data = src_ds.ReadRaster(0, 0, 100, 1)\n data2 = src_ds.ReadRaster(0, 1, 100, 99)\n new_ds.WriteRaster(0, 1, 100, 99, data2)\n new_ds.WriteRaster(0, 0, 100, 1, data)\n new_ds = None\n\n new_ds = None\n new_ds = gdal.Open('tmp/tiff_write_48.tif')\n assert new_ds.GetRasterBand(1).Checksum() == 50054, 'Didnt get expected checksum '\n\n new_ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_48.tif')\n\n\n###############################################################################\n# Test copying a CMYK TIFF into another CMYK TIFF\n\ndef test_tiff_write_49():\n\n # We open the source as RAW to get the CMYK bands\n src_ds = gdal.Open('GTIFF_RAW:data/rgbsmall_cmyk.tif')\n\n new_ds = gdal.GetDriverByName(\"GTiff\").CreateCopy('tmp/tiff_write_49.tif', src_ds, options=['PHOTOMETRIC=CMYK'])\n\n # At this point, for the purpose of the copy, the dataset will have been opened as RAW\n assert new_ds.GetRasterBand(1).GetRasterColorInterpretation() == gdal.GCI_CyanBand, \\\n 'Wrong color interpretation.'\n\n new_ds = None\n\n new_ds = gdal.Open('GTIFF_RAW:tmp/tiff_write_49.tif')\n\n for i in range(4):\n assert new_ds.GetRasterBand(i + 1).Checksum() == src_ds.GetRasterBand(i + 1).Checksum(), \\\n 'Didnt get expected checksum '\n\n src_ds = None\n new_ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_49.tif')\n\n\n###############################################################################\n# Test creating a CMYK TIFF from another CMYK TIFF\n\ndef test_tiff_write_50():\n\n # We open the source as RAW to get the CMYK bands\n src_ds = gdal.Open('GTIFF_RAW:data/rgbsmall_cmyk.tif')\n\n new_ds = gdal.GetDriverByName(\"GTiff\").Create('tmp/tiff_write_50.tif', src_ds.RasterXSize, src_ds.RasterYSize, 4, options=['PHOTOMETRIC=CMYK'])\n for i in range(4):\n data = src_ds.GetRasterBand(i + 1).ReadRaster(0, 0, src_ds.RasterXSize, src_ds.RasterYSize)\n new_ds.GetRasterBand(i + 1).WriteRaster(0, 0, src_ds.RasterXSize, src_ds.RasterYSize, data)\n\n assert new_ds.GetRasterBand(1).GetRasterColorInterpretation() == gdal.GCI_CyanBand, \\\n 'Wrong color interpretation.'\n\n new_ds = None\n\n new_ds = gdal.Open('GTIFF_RAW:tmp/tiff_write_50.tif')\n\n for i in range(4):\n assert new_ds.GetRasterBand(i + 1).Checksum() == src_ds.GetRasterBand(i + 1).Checksum(), \\\n 'Didnt get expected checksum '\n\n src_ds = None\n new_ds = None\n\n 
gdaltest.tiff_drv.Delete('tmp/tiff_write_50.tif')\n\n\n###############################################################################\n# Test proper clearing of existing GeoTIFF tags when updating the projection.\n# http://trac.osgeo.org/gdal/ticket/2546\n\ndef test_tiff_write_51():\n shutil.copyfile('data/utmsmall.tif', 'tmp/tiff_write_51.tif')\n\n ds = gdal.Open('tmp/tiff_write_51.tif', gdal.GA_Update)\n\n srs = osr.SpatialReference()\n srs.SetFromUserInput('EPSG:32601')\n ds.SetProjection(srs.ExportToWkt())\n ds = None\n\n ds = gdal.Open('tmp/tiff_write_51.tif')\n wkt = ds.GetProjection()\n ds = None\n\n # Create a new GeoTIFF file with same projection\n ds = gdaltest.tiff_drv.Create('tmp/tiff_write_51_ref.tif', 1, 1, 1)\n ds.SetProjection(srs.ExportToWkt())\n ds = None\n\n # Read it back as the reference WKT\n ds = gdal.Open('tmp/tiff_write_51_ref.tif')\n expected_wkt = ds.GetProjection()\n ds = None\n\n assert wkt.find('NAD') == -1 and wkt.find('North Am') == -1, \\\n 'It appears the NAD27 datum was not properly cleared.'\n\n assert wkt == expected_wkt and wkt.find('WGS 84 / UTM zone 1N') != -1, \\\n 'coordinate system does not exactly match.'\n\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_51.tif')\n gdaltest.tiff_drv.Delete('tmp/tiff_write_51_ref.tif')\n\n###############################################################################\n# Test the ability to update a paletted TIFF files color table.\n\n\ndef test_tiff_write_52():\n shutil.copyfile('data/test_average_palette.tif', 'tmp/tiff_write_52.tif')\n\n test_ct_data = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 255, 0)]\n\n test_ct = gdal.ColorTable()\n for i, data in enumerate(test_ct_data):\n test_ct.SetColorEntry(i, data)\n\n ds = gdal.Open('tmp/tiff_write_52.tif', gdal.GA_Update)\n ds.GetRasterBand(1).SetRasterColorTable(test_ct)\n ds = None\n\n ds = gdal.Open('tmp/tiff_write_52.tif')\n ct = ds.GetRasterBand(1).GetRasterColorTable()\n\n assert ct.GetColorEntry(0) == (255, 0, 0, 255), 'Did not get expected color 0.'\n\n ct = None\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_52.tif')\n\n###############################################################################\n# Test the ability to create a paletted image and then update later.\n\n\ndef test_tiff_write_53():\n test_ct_data = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 255, 0)]\n\n test_ct = gdal.ColorTable()\n for i, data in enumerate(test_ct_data):\n test_ct.SetColorEntry(i, data)\n\n ds = gdaltest.tiff_drv.Create('tmp/tiff_write_53.tif',\n 30, 50, 1,\n options=['PHOTOMETRIC=PALETTE'])\n ds.GetRasterBand(1).Fill(10)\n ds = None\n\n ds = gdal.Open('tmp/tiff_write_53.tif', gdal.GA_Update)\n ds.GetRasterBand(1).SetRasterColorTable(test_ct)\n ds = None\n\n ds = gdal.Open('tmp/tiff_write_53.tif')\n ct = ds.GetRasterBand(1).GetRasterColorTable()\n\n assert ct.GetColorEntry(0) == (255, 0, 0, 255), 'Did not get expected color 0.'\n\n ct = None\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_53.tif')\n\n\n###############################################################################\n# Same as before except we create an overview before reopening the file and\n# adding the color table\n\ndef test_tiff_write_53_bis():\n test_ct_data = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 255, 0)]\n\n test_ct = gdal.ColorTable()\n for i, data in enumerate(test_ct_data):\n test_ct.SetColorEntry(i, data)\n\n ds = gdaltest.tiff_drv.Create('tmp/tiff_write_53_bis.tif',\n 30, 50, 1,\n options=['PHOTOMETRIC=PALETTE'])\n 
ds.GetRasterBand(1).Fill(10)\n ds.BuildOverviews('NONE', overviewlist=[2])\n ds = None\n\n ds = gdal.Open('tmp/tiff_write_53_bis.tif', gdal.GA_Update)\n ds.GetRasterBand(1).SetRasterColorTable(test_ct)\n ds = None\n\n ds = gdal.Open('tmp/tiff_write_53_bis.tif')\n ct = ds.GetRasterBand(1).GetRasterColorTable()\n\n assert ct.GetColorEntry(0) == (255, 0, 0, 255), 'Did not get expected color 0.'\n\n ct = None\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_53_bis.tif')\n\n###############################################################################\n# Test the ability to create a JPEG compressed TIFF, with PHOTOMETRIC=YCBCR\n# and write data into it without closing it and re-opening it (#2645)\n\n\ndef test_tiff_write_54():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('JPEG') == -1:\n pytest.skip()\n\n ds = gdaltest.tiff_drv.Create('tmp/tiff_write_54.tif',\n 256, 256, 3,\n options=['TILED=YES', 'COMPRESS=JPEG', 'PHOTOMETRIC=YCBCR'])\n ds.GetRasterBand(1).Fill(255)\n ds.FlushCache()\n ds = None\n\n ds = gdal.Open('tmp/tiff_write_54.tif')\n cs = ds.GetRasterBand(1).Checksum()\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_54.tif')\n\n assert cs != 0, 'did not get expected checksum'\n\n\n###############################################################################\n# Test creating and reading an equirectangular file with all parameters (#2706)\n\ndef test_tiff_write_55():\n\n ds = gdaltest.tiff_drv.Create('tmp/tiff_write_55.tif',\n 256, 256, 1)\n srs_expected = 'PROJCS[\"Equirectangular Mars\",GEOGCS[\"GCS_Mars\",DATUM[\"unknown\",SPHEROID[\"unnamed\",3394813.857975945,0]],PRIMEM[\"Greenwich\",0],UNIT[\"degree\",0.0174532925199433]],PROJECTION[\"Equirectangular\"],PARAMETER[\"latitude_of_origin\",-2],PARAMETER[\"central_meridian\",184.4129943847656],PARAMETER[\"standard_parallel_1\",-15],PARAMETER[\"false_easting\",0],PARAMETER[\"false_northing\",0],UNIT[\"metre\",1,AUTHORITY[\"EPSG\",\"9001\"]]]'\n\n ds.SetProjection(srs_expected)\n\n ds.SetGeoTransform((100, 1, 0, 200, 0, -1))\n ds = None\n\n ds = gdal.Open('tmp/tiff_write_55.tif')\n srs = ds.GetProjectionRef()\n ds = None\n\n assert srs == srs_expected, \\\n 'failed to preserve Equirectangular projection as expected, old libgeotiff?'\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_55.tif')\n\n###############################################################################\n# Test clearing the colormap from an existing paletted TIFF file.\n\n\ndef test_tiff_write_56():\n\n md = gdaltest.tiff_drv.GetMetadata()\n # Expected to fail with libtiff < 4.0 as it needs TIFFUnsetField, so skip it\n if md['DMD_CREATIONOPTIONLIST'].find('BigTIFF') == -1:\n pytest.skip()\n\n test_ct_data = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 255, 0)]\n\n test_ct = gdal.ColorTable()\n for i, data in enumerate(test_ct_data):\n test_ct.SetColorEntry(i, data)\n\n ds = gdaltest.tiff_drv.Create('tmp/tiff_write_56.tif',\n 30, 50, 1,\n options=['PHOTOMETRIC=PALETTE'])\n ds.GetRasterBand(1).Fill(10)\n ds = None\n\n test_ct = gdal.ColorTable()\n\n ds = gdal.Open('tmp/tiff_write_56.tif', gdal.GA_Update)\n ds.GetRasterBand(1).SetRasterColorTable(test_ct)\n ds = None\n\n ds = gdal.Open('tmp/tiff_write_56.tif')\n ct = ds.GetRasterBand(1).GetRasterColorTable()\n\n assert ct is None, 'color table seemingly not cleared.'\n\n ct = None\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_56.tif')\n\n###############################################################################\n# Test replacing normal norm up georef 
with rotated georef (#2625)\n\n\ndef test_tiff_write_57():\n\n    md = gdaltest.tiff_drv.GetMetadata()\n    # Expected to fail with libtiff < 4.0 as it needs TIFFUnsetField, so skip it\n    if md['DMD_CREATIONOPTIONLIST'].find('BigTIFF') == -1:\n        pytest.skip()\n\n    # copy a file to tmp dir to modify.\n    open('tmp/tiff57.tif', 'wb').write(open('data/byte.tif', 'rb').read())\n\n    # open and set a non-northup geotransform.\n\n    ds = gdal.Open('tmp/tiff57.tif', gdal.GA_Update)\n    ds.SetGeoTransform([100, 1, 3, 200, 3, 1])\n    ds = None\n\n    ds = gdal.Open('tmp/tiff57.tif')\n    gt = ds.GetGeoTransform()\n    ds = None\n\n    assert gt == (100, 1, 3, 200, 3, 1), \\\n        'did not get expected geotransform, perhaps unset is not working?'\n\n    gdaltest.tiff_drv.Delete('tmp/tiff57.tif')\n\n###############################################################################\n# Test writing partial end strips (#2748)\n\n\ndef test_tiff_write_58():\n\n    md = gdaltest.tiff_drv.GetMetadata()\n\n    for compression in ('NONE', 'JPEG', 'LZW', 'DEFLATE', 'PACKBITS'):\n\n        if md['DMD_CREATIONOPTIONLIST'].find(compression) != -1:\n            ds = gdaltest.tiff_drv.Create('tmp/tiff_write_58.tif', 4, 4000, 1, options=['COMPRESS=' + compression])\n            ds.GetRasterBand(1).Fill(255)\n            ds = None\n\n            ds = gdal.Open('tmp/tiff_write_58.tif')\n            assert ds.GetRasterBand(1).Checksum() == 65241, 'wrong checksum'\n            ds = None\n\n            gdaltest.tiff_drv.Delete('tmp/tiff_write_58.tif')\n        else:\n            print(('Skipping compression method %s' % compression))\n\n\n###############################################################################\n# Test fix for #2759\n\n\ndef test_tiff_write_59():\n    import struct\n\n    for nbands in (1, 2):\n        for nbits in (1, 8, 9, 12, 16, 17, 24, 32):\n\n            if nbits <= 8:\n                gdal_type = gdal.GDT_Byte\n                ctype = 'B'\n            elif nbits <= 16:\n                gdal_type = gdal.GDT_UInt16\n                ctype = 'h'\n            else:\n                gdal_type = gdal.GDT_UInt32\n                ctype = 'i'\n\n            ds = gdaltest.tiff_drv.Create(\"tmp/tiff_write_59.tif\", 10, 10, nbands, gdal_type, options=['NBITS=%d' % nbits])\n            ds.GetRasterBand(1).Fill(1)\n\n            ds = None\n            ds = gdal.Open(\"tmp/tiff_write_59.tif\", gdal.GA_Update)\n\n            data = struct.pack(ctype * 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)\n            ds.GetRasterBand(1).WriteRaster(0, 0, 10, 1, data)\n\n            ds = None\n            ds = gdal.Open(\"tmp/tiff_write_59.tif\")\n\n            data = ds.GetRasterBand(1).ReadRaster(0, 0, 10, 1)\n\n            # We expect zeros\n            got = struct.unpack(ctype * 10, data)\n            for g in got:\n                if g != 0:\n                    print(('nbands=%d, NBITS=%d' % (nbands, nbits)))\n                    print(got)\n                    pytest.fail('did not get expected all-zero scanline back')\n\n            ds = None\n            gdaltest.tiff_drv.Delete('tmp/tiff_write_59.tif')\n\n###############################################################################\n# Test fix for #2760\n\n\ndef test_tiff_write_60():\n\n    tuples = [('TFW=YES', 'tmp/tiff_write_60.tfw'),\n              ('WORLDFILE=YES', 'tmp/tiff_write_60.wld')]\n\n    for options_tuple in tuples:\n        # Create case\n        with gdaltest.error_handler():\n            ds = gdaltest.tiff_drv.Create('tmp/tiff_write_60.tif', 10, 10, options=[options_tuple[0], 'PROFILE=BASELINE'])\n        gt = (0.0, 1.0, 0.0, 50.0, 0.0, -1.0)\n        ds.SetGeoTransform(gt)\n        ds = None\n\n        with gdaltest.error_handler():\n            ds = gdal.Open('tmp/tiff_write_60.tif')\n        assert ds.GetGeoTransform() == gt, ('case1: %s != %s' % (ds.GetGeoTransform(), gt))\n\n        ds = None\n        gdaltest.tiff_drv.Delete('tmp/tiff_write_60.tif')\n\n        with pytest.raises(OSError, message='%s should have been deleted' % options_tuple[1]):\n            os.stat(options_tuple[1])\n\n        # CreateCopy case\n        src_ds = gdal.Open('data/byte.tif')\n
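        # (the quiet handler presumably swallows the PROFILE=BASELINE warning about\n        # georeferencing going to the world file rather than into the TIFF itself)\n        with 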
gdaltest.error_handler():\n ds = gdaltest.tiff_drv.CreateCopy('tmp/tiff_write_60.tif', src_ds, options=[options_tuple[0], 'PROFILE=BASELINE'])\n gt = (0.0, 1.0, 0.0, 50.0, 0.0, -1.0)\n ds.SetGeoTransform(gt)\n ds = None\n gdal.Unlink('tmp/tiff_write_60.tif.aux.xml')\n\n ds = gdal.Open('tmp/tiff_write_60.tif')\n assert ds.GetGeoTransform() == gt, \\\n ('case2: %s != %s' % (ds.GetGeoTransform(), gt))\n\n ds = None\n gdaltest.tiff_drv.Delete('tmp/tiff_write_60.tif')\n\n with pytest.raises(OSError, message='%s should have been deleted' % options_tuple[1]):\n os.stat(options_tuple[1])\n \n\n \n###############################################################################\n# Test BigTIFF=IF_NEEDED creation option\n\n\ndef test_tiff_write_61():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('BigTIFF') == -1:\n pytest.skip()\n\n ds = gdaltest.tiff_drv.Create('tmp/bigtiff.tif', 50000, 50000, 1,\n options=['BIGTIFF=IF_NEEDED', 'SPARSE_OK=TRUE'])\n ds = None\n\n ds = gdal.Open('tmp/bigtiff.tif')\n assert ds is not None\n ds = None\n\n fileobj = open('tmp/bigtiff.tif', mode='rb')\n binvalues = array.array('b')\n binvalues.fromfile(fileobj, 4)\n fileobj.close()\n\n gdaltest.tiff_drv.Delete('tmp/bigtiff.tif')\n\n # Check classical TIFF signature\n assert (not ((binvalues[2] != 0x2A or binvalues[3] != 0) and\n (binvalues[3] != 0x2A or binvalues[2] != 0)))\n\n###############################################################################\n# Test BigTIFF=IF_SAFER creation option\n\n\ndef test_tiff_write_62():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('BigTIFF') == -1:\n pytest.skip()\n\n ds = gdaltest.tiff_drv.Create('tmp/bigtiff.tif', 50000, 50000, 1,\n options=['BIGTIFF=IF_SAFER', 'SPARSE_OK=TRUE'])\n ds = None\n\n ds = gdal.Open('tmp/bigtiff.tif')\n assert ds is not None\n ds = None\n\n fileobj = open('tmp/bigtiff.tif', mode='rb')\n binvalues = array.array('b')\n binvalues.fromfile(fileobj, 4)\n fileobj.close()\n\n gdaltest.tiff_drv.Delete('tmp/bigtiff.tif')\n\n # Check BigTIFF signature\n assert (not ((binvalues[2] != 0x2B or binvalues[3] != 0) and\n (binvalues[3] != 0x2B or binvalues[2] != 0)))\n\n###############################################################################\n# Test BigTIFF=NO creation option when creating a BigTIFF file would be required\n\n\ndef test_tiff_write_63():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('BigTIFF') == -1:\n pytest.skip()\n if int(gdal.VersionInfo('VERSION_NUM')) < 1700:\n pytest.skip()\n\n gdal.PushErrorHandler('CPLQuietErrorHandler')\n ds = gdaltest.tiff_drv.Create('tmp/bigtiff.tif', 150000, 150000, 1,\n options=['BIGTIFF=NO'])\n gdal.PopErrorHandler()\n\n if ds is None:\n return\n\n pytest.fail()\n\n###############################################################################\n# Test returned projection in WKT format for a WGS84 GeoTIFF (#2787)\n\n\ndef test_tiff_write_64():\n\n ds = gdaltest.tiff_drv.Create('tmp/tiff_write_64.tif', 1, 1, 1)\n srs = osr.SpatialReference()\n srs.SetFromUserInput('WGS84')\n ds.SetProjection(srs.ExportToWkt())\n ds = None\n\n ds = gdal.Open('tmp/tiff_write_64.tif')\n wkt = ds.GetProjection()\n ds = None\n\n expected_wkt = \"\"\"GEOGCS[\"WGS 84\",DATUM[\"WGS_1984\",SPHEROID[\"WGS 84\",6378137,298.257223563,AUTHORITY[\"EPSG\",\"7030\"]],AUTHORITY[\"EPSG\",\"6326\"]],PRIMEM[\"Greenwich\",0],UNIT[\"degree\",0.0174532925199433],AUTHORITY[\"EPSG\",\"4326\"]]\"\"\"\n\n assert wkt == expected_wkt, 'coordinate system does not 
exactly match.'\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_64.tif')\n\n###############################################################################\n# Verify that we can write XML metadata.\n\n\ndef test_tiff_write_65():\n\n ds = gdaltest.tiff_drv.Create('tmp/tiff_write_65.tif', 10, 10)\n\n doc = '<doc><test xml:attr=\"abc\"/></doc>'\n ds.SetMetadata([doc], 'xml:test')\n\n ds = None\n\n ds = gdal.Open('tmp/tiff_write_65.tif')\n md = ds.GetMetadata('xml:test')\n ds = None\n\n assert len(md) == 1 and md[0] == doc, 'did not get xml back clean'\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_65.tif')\n\n\n###############################################################################\n# Verify that we can write and read a band-interleaved GeoTIFF with 65535 bands (#2838)\n\ndef test_tiff_write_66():\n\n if gdal.GetConfigOption('SKIP_MEM_INTENSIVE_TEST') is not None:\n pytest.skip()\n\n ds = gdaltest.tiff_drv.Create('tmp/tiff_write_66.tif', 1, 1, 65535, options=['INTERLEAVE=BAND'])\n ds = None\n\n ds = gdal.Open('tmp/tiff_write_66.tif')\n assert ds.RasterCount == 65535\n\n assert ds.GetRasterBand(1).Checksum() == 0\n\n assert ds.GetRasterBand(65535).Checksum() == 0\n\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_66.tif')\n\n\n###############################################################################\n# Verify that we can write and read a pixel-interleaved GeoTIFF with 65535 bands (#2838)\n\ndef test_tiff_write_67():\n\n if gdal.GetConfigOption('SKIP_MEM_INTENSIVE_TEST') is not None:\n pytest.skip()\n\n ds = gdaltest.tiff_drv.Create('tmp/tiff_write_67.tif', 1, 1, 65535, options=['INTERLEAVE=PIXEL'])\n ds = None\n\n ds = gdal.Open('tmp/tiff_write_67.tif')\n assert ds.RasterCount == 65535\n\n assert ds.GetRasterBand(1).Checksum() == 0\n\n assert ds.GetRasterBand(65535).Checksum() == 0\n\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_67.tif')\n\n###############################################################################\n# Verify that we can set the color table after a Create() (scenario hit by map.tif in #2820)\n\n\ndef test_tiff_write_68():\n\n ds = gdaltest.tiff_drv.Create('tmp/tiff_write_68.tif', 151, 161, options=['COMPRESS=LZW'])\n ct = gdal.ColorTable()\n ct.SetColorEntry(0, (255, 255, 255, 255))\n ct.SetColorEntry(1, (255, 255, 0, 255))\n ct.SetColorEntry(2, (255, 0, 255, 255))\n ct.SetColorEntry(3, (0, 255, 255, 255))\n ds.GetRasterBand(1).SetRasterColorTable(ct)\n ds.GetRasterBand(1).Fill(255)\n ds = None\n\n ds = gdal.Open('tmp/tiff_write_68.tif')\n assert ds.GetRasterBand(1).Checksum() != 0\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_68.tif')\n\n###############################################################################\n# Verify GTiffRasterBand::NullBlock() when reading empty block without any nodata value set\n\n\ndef test_tiff_write_69():\n\n ds = gdaltest.tiff_drv.Create('tmp/tiff_write_69.tif', 32, 32, 1, gdal.GDT_Int16, options=['SPARSE_OK=YES'])\n ds = None\n\n ds = gdal.Open('tmp/tiff_write_69.tif')\n assert ds.GetRasterBand(1).Checksum() == 0\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_69.tif')\n\n###############################################################################\n# Verify GTiffRasterBand::NullBlock() when reading empty block with nodata value set\n\n\ndef test_tiff_write_70():\n\n ref_ds = gdaltest.tiff_drv.Create('tmp/tiff_write_70_ref.tif', 32, 32, 1, gdal.GDT_Int16)\n ref_ds.GetRasterBand(1).Fill(-32768)\n ref_ds = None\n\n ref_ds = gdal.Open('tmp/tiff_write_70_ref.tif')\n expected_cs = 
ref_ds.GetRasterBand(1).Checksum()\n ref_ds = None\n\n ds = gdaltest.tiff_drv.Create('tmp/tiff_write_70.tif', 32, 32, 1, gdal.GDT_Int16, options=['SPARSE_OK=YES'])\n ds.GetRasterBand(1).SetNoDataValue(0)\n assert os.stat('tmp/tiff_write_70.tif').st_size <= 8, \\\n 'directory should not be crystallized'\n ds = None\n\n ds = gdal.Open('tmp/tiff_write_70.tif', gdal.GA_Update)\n ds.GetRasterBand(1).SetNoDataValue(-32768)\n ds = None\n\n ds = gdal.Open('tmp/tiff_write_70.tif')\n assert ds.GetRasterBand(1).Checksum() == expected_cs, 'wrong checksum'\n ds = None\n\n ds = gdal.Open('tmp/tiff_write_70.tif', gdal.GA_Update)\n assert ds.GetRasterBand(1).DeleteNoDataValue() == 0\n assert ds.GetRasterBand(1).GetNoDataValue() is None\n ds = None\n\n with pytest.raises(OSError):\n os.stat('tmp/tiff_write_70.tif.aux.xml')\n \n\n ds = gdal.Open('tmp/tiff_write_70.tif')\n assert ds.GetRasterBand(1).GetNoDataValue() is None\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_70.tif')\n gdaltest.tiff_drv.Delete('tmp/tiff_write_70_ref.tif')\n\n\n###############################################################################\n# Test reading in a real BigTIFF file (on filesystems supporting sparse files)\n\ndef test_tiff_write_71():\n\n import struct\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('BigTIFF') == -1:\n pytest.skip()\n\n # Determine if the filesystem supports sparse files (we don't want to create a real 10 GB\n # file !\n if not gdaltest.filesystem_supports_sparse_files('tmp'):\n pytest.skip()\n\n header = open('data/bigtiff_header_extract.tif', 'rb').read()\n\n f = open('tmp/tiff_write_71.tif', 'wb')\n f.write(header)\n\n # Write StripByteCounts tag\n # 100,000 in little endian\n for _ in range(100000):\n f.write(b'\\xa0\\x86\\x01\\x00\\x00\\x00\\x00\\x00')\n\n # Write StripOffsets tag\n offset = 1600252\n for _ in range(100000):\n f.write(struct.pack('<Q', offset))\n offset = offset + 100000\n\n # Write 0x78 as value of pixel (99999, 99999)\n f.seek(10001600252 - 1, 0)\n f.write(b'\\x78')\n f.close()\n\n ds = gdal.Open('tmp/tiff_write_71.tif')\n data = ds.GetRasterBand(1).ReadRaster(99999, 99999, 1, 1)\n assert struct.unpack('b', data)[0] == 0x78\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_71.tif')\n\n###############################################################################\n# With CreateCopy(), check that TIFF directory is in the first bytes of the file\n# and has not been rewritten later (#3021)\n\n\ndef test_tiff_write_72():\n\n shutil.copyfile('data/byte.tif', 'tmp/byte.tif')\n ds = gdal.Open('tmp/byte.tif', gdal.GA_Update)\n ds.SetMetadata({'TEST_KEY': 'TestValue'})\n ds = None\n\n for profile in ('GDALGeotiff', 'GEOTIFF', 'BASELINE'):\n src_ds = gdal.Open('tmp/byte.tif')\n out_ds = gdaltest.tiff_drv.CreateCopy('tmp/tiff_write_72.tif', src_ds, options=['ENDIANNESS=LITTLE', 'PROFILE=' + profile])\n del out_ds\n src_ds = None\n\n fileobj = open('tmp/tiff_write_72.tif', mode='rb')\n binvalues = array.array('b')\n fileobj.seek(4)\n try:\n binvalues.fromfile(fileobj, 4)\n except:\n binvalues.fromfile(fileobj, 4)\n fileobj.close()\n\n # Directory should be at offset 8 of the file\n assert (binvalues[0] == 0x08 and binvalues[1] == 0x00 and binvalues[2] == 0x00 and binvalues[3] == 0x00), \\\n ('Failed with profile %s' % profile)\n\n gdaltest.tiff_drv.Delete('tmp/byte.tif')\n gdaltest.tiff_drv.Delete('tmp/tiff_write_72.tif')\n\n###############################################################################\n# With Create(), check that TIFF 
directory is in the first bytes of the file\n# and has not been rewritten later (#3021)\n\n\ndef test_tiff_write_73():\n\n out_ds = gdaltest.tiff_drv.Create('tmp/tiff_write_73.tif', 10, 10, options=['ENDIANNESS=LITTLE'])\n out_ds.SetGeoTransform([1, 0.01, 0, 1, 0, -0.01])\n srs = osr.SpatialReference()\n srs.SetFromUserInput('EPSG:32601')\n out_ds.SetProjection(srs.ExportToWkt())\n out_ds.SetMetadata({'TEST_KEY': 'TestValue'})\n out_ds.BuildOverviews('NONE', [2])\n out_ds.GetRasterBand(1).Fill(255)\n out_ds = None\n\n fileobj = open('tmp/tiff_write_73.tif', mode='rb')\n binvalues = array.array('b')\n fileobj.seek(4)\n try:\n binvalues.fromfile(fileobj, 4)\n except:\n binvalues.fromfile(fileobj, 4)\n fileobj.close()\n\n # Directory should be at offset 8 of the file\n assert (binvalues[0] == 0x08 and binvalues[1] == 0x00 and binvalues[2] == 0x00 and binvalues[3] == 0x00)\n\n # Re-open the file and modify the pixel content\n out_ds = gdal.Open('tmp/tiff_write_73.tif', gdal.GA_Update)\n out_ds.GetRasterBand(1).Fill(0)\n out_ds = None\n\n fileobj = open('tmp/tiff_write_73.tif', mode='rb')\n binvalues = array.array('b')\n fileobj.seek(4)\n try:\n binvalues.fromfile(fileobj, 4)\n except:\n binvalues.fromfile(fileobj, 4)\n fileobj.close()\n\n # Directory should be at offset 8 of the file\n assert (binvalues[0] == 0x08 and binvalues[1] == 0x00 and binvalues[2] == 0x00 and binvalues[3] == 0x00)\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_73.tif')\n\n###############################################################################\n# Verify we can write 12bit jpeg encoded tiff.\n\n\ndef test_tiff_write_74():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('JPEG') == -1:\n pytest.skip()\n\n old_accum = gdal.GetConfigOption('CPL_ACCUM_ERROR_MSG', 'OFF')\n gdal.SetConfigOption('CPL_ACCUM_ERROR_MSG', 'ON')\n gdal.ErrorReset()\n gdal.PushErrorHandler('CPLQuietErrorHandler')\n\n try:\n ds = gdal.Open('data/mandrilmini_12bitjpeg.tif')\n ds.GetRasterBand(1).ReadRaster(0, 0, 1, 1)\n except:\n ds = None\n\n gdal.PopErrorHandler()\n gdal.SetConfigOption('CPL_ACCUM_ERROR_MSG', old_accum)\n\n if gdal.GetLastErrorMsg().find(\n 'Unsupported JPEG data precision 12') != -1:\n sys.stdout.write('(12bit jpeg not available) ... 
')\n pytest.skip()\n\n for photometric in ('YCBCR', 'RGB'):\n\n drv = gdal.GetDriverByName('GTiff')\n dst_ds = drv.CreateCopy('tmp/test_74.tif', ds,\n options=['COMPRESS=JPEG', 'NBITS=12',\n 'JPEG_QUALITY=95',\n 'PHOTOMETRIC=' + photometric])\n dst_ds = None\n\n dst_ds = gdal.Open('tmp/test_74.tif')\n stats = dst_ds.GetRasterBand(1).GetStatistics(0, 1)\n\n if stats[2] < 2150 or stats[2] > 2180:\n print(photometric)\n pytest.fail('did not get expected mean for band1.')\n\n try:\n compression = dst_ds.GetMetadataItem('COMPRESSION', 'IMAGE_STRUCTURE')\n except:\n md = dst_ds.GetMetadata('IMAGE_STRUCTURE')\n compression = md['COMPRESSION']\n\n if (photometric == 'YCBCR' and compression != 'YCbCr JPEG') or \\\n (photometric == 'RGB' and compression != 'JPEG'):\n print(('COMPRESSION=\"%s\"' % compression))\n pytest.fail('did not get expected COMPRESSION value')\n\n try:\n nbits = dst_ds.GetRasterBand(3).GetMetadataItem('NBITS', 'IMAGE_STRUCTURE')\n except:\n md = dst_ds.GetRasterBand(3).GetMetadata('IMAGE_STRUCTURE')\n nbits = md['NBITS']\n\n if nbits != '12':\n print(photometric)\n pytest.fail('did not get expected NBITS value')\n\n dst_ds = None\n\n gdaltest.tiff_drv.Delete('tmp/test_74.tif')\n\n \n###############################################################################\n# Verify that FlushCache() alone doesn't cause crash (#3067 )\n\n\ndef test_tiff_write_75():\n\n ds = gdaltest.tiff_drv.Create('tmp/tiff_write_75.tif', 1, 1, 1)\n ds.FlushCache()\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_75.tif')\n\n###############################################################################\n# Test generating a G4 band to use the TIFFWriteScanline()\n\n\ndef test_tiff_write_76():\n\n src_ds = gdal.Open('data/slim_g4.tif')\n compression = src_ds.GetMetadata('IMAGE_STRUCTURE')['COMPRESSION']\n new_ds = gdaltest.tiff_drv.CreateCopy('tmp/tiff_write_76.tif', src_ds, options=['BLOCKYSIZE=%d' % src_ds.RasterYSize, 'COMPRESS=' + compression])\n new_ds = None\n new_ds = gdal.Open('tmp/tiff_write_76.tif')\n\n cs = new_ds.GetRasterBand(1).Checksum()\n assert cs == 3322, 'Got wrong checksum'\n\n src_ds = None\n new_ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_76.tif')\n\n###############################################################################\n# Test generating & reading a 8bit all-in-one-strip multiband TIFF (#3904)\n\n\ndef test_tiff_write_77():\n\n src_ds = gdaltest.tiff_drv.Create('tmp/tiff_write_77_src.tif', 1, 5000, 3)\n src_ds.GetRasterBand(2).Fill(255)\n\n for interleaving in ('PIXEL', 'BAND'):\n new_ds = gdaltest.tiff_drv.CreateCopy('tmp/tiff_write_77.tif', src_ds,\n options=['BLOCKYSIZE=%d' % src_ds.RasterYSize,\n 'COMPRESS=LZW',\n 'INTERLEAVE=' + interleaving])\n\n for attempt in range(2):\n\n # Test reading a few samples to check that random reading works\n band_lines = [(1, 0), (1, 5), (1, 3), (2, 10), (1, 100), (2, 1000), (2, 500),\n (1, 500), (2, 500), (2, 4999), (2, 4999), (3, 4999), (1, 4999)]\n for band_line in band_lines:\n cs = new_ds.GetRasterBand(band_line[0]).Checksum(0, band_line[1], 1, 1)\n if band_line[0] == 2:\n expected_cs = 255 % 7\n else:\n expected_cs = 0 % 7\n assert cs == expected_cs, 'Got wrong checksum'\n\n # Test whole bands\n for i in range(3):\n cs = new_ds.GetRasterBand(i + 1).Checksum()\n expected_cs = src_ds.GetRasterBand(i + 1).Checksum()\n assert cs == expected_cs, 'Got wrong checksum'\n\n if attempt == 0:\n new_ds = None\n new_ds = gdal.Open('tmp/tiff_write_77.tif')\n\n new_ds = None\n\n 
gdaltest.tiff_drv.Delete('tmp/tiff_write_77.tif')\n\n src_ds = None\n gdaltest.tiff_drv.Delete('tmp/tiff_write_77_src.tif')\n\n###############################################################################\n# Test generating & reading a YCbCr JPEG all-in-one-strip multiband TIFF (#3259)\n\n\ndef test_tiff_write_78():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('JPEG') == -1:\n pytest.skip()\n\n src_ds = gdaltest.tiff_drv.Create('tmp/tiff_write_78_src.tif', 16, 2048, 3)\n src_ds.GetRasterBand(2).Fill(255)\n\n new_ds = gdaltest.tiff_drv.CreateCopy('tmp/tiff_write_78.tif', src_ds,\n options=['BLOCKYSIZE=%d' % src_ds.RasterYSize,\n 'COMPRESS=JPEG',\n 'PHOTOMETRIC=YCBCR'])\n\n # Make sure the file is flushed so that we re-read from it rather from cached blocks\n new_ds.FlushCache()\n # new_ds = None\n # new_ds = gdal.Open('tmp/tiff_write_78.tif')\n\n if 'GetBlockSize' in dir(gdal.Band):\n (_, blocky) = new_ds.GetRasterBand(1).GetBlockSize()\n if blocky != 1:\n print('')\n print('using regular band (libtiff <= 3.9.2 or <= 4.0.0beta5, or SplitBand disabled by config option)')\n\n # Test reading a few samples to check that random reading works\n band_lines = [(1, 0), (1, 5), (1, 3), (2, 10), (1, 100), (2, 1000), (2, 500),\n (1, 500), (2, 500), (2, 2047), (2, 2047), (3, 2047), (1, 2047)]\n for band_line in band_lines:\n cs = new_ds.GetRasterBand(band_line[0]).Checksum(0, band_line[1], 1, 1)\n if band_line[0] == 1:\n expected_cs = 0 % 7\n elif band_line[0] == 2:\n expected_cs = 255 % 7\n else:\n # We should expect 0, but due to JPEG YCbCr compression & decompression,\n # this ends up being 1\n expected_cs = 1 % 7\n if cs != expected_cs:\n print(band_line)\n pytest.fail('Got wrong checksum')\n\n # Test whole bands\n for i in range(3):\n cs = new_ds.GetRasterBand(i + 1).Checksum()\n expected_cs = src_ds.GetRasterBand(i + 1).Checksum()\n if i == 2:\n # We should expect 0, but due to JPEG YCbCr compression & decompression,\n # this ends up being 32768\n expected_cs = 32768\n assert cs == expected_cs, 'Got wrong checksum'\n\n new_ds = None\n gdaltest.tiff_drv.Delete('tmp/tiff_write_78.tif')\n\n src_ds = None\n gdaltest.tiff_drv.Delete('tmp/tiff_write_78_src.tif')\n\n###############################################################################\n# Test reading & updating GDALMD_AREA_OR_POINT (#3522)\n\n\ndef test_tiff_write_79():\n\n ds = gdaltest.tiff_drv.Create('tmp/tiff_write_79.tif', 1, 1)\n srs = osr.SpatialReference()\n srs.SetFromUserInput('EPSG:32601')\n ds.SetProjection(srs.ExportToWkt())\n ds = None\n\n for do_projection_ref in [False, True]:\n for check_just_after in [False, True]:\n\n ds = gdal.Open('tmp/tiff_write_79.tif')\n if do_projection_ref:\n ds.GetProjectionRef()\n mdi = ds.GetMetadataItem('AREA_OR_POINT')\n assert mdi == 'Area', \\\n ('(1) did not get expected value. do_projection_ref = %d, check_just_after = %d' % (do_projection_ref, check_just_after))\n ds = None\n\n # Still read-only.\n ds = gdal.Open('tmp/tiff_write_79.tif')\n if do_projection_ref:\n ds.GetProjectionRef()\n ds.SetMetadataItem('AREA_OR_POINT', 'Point')\n ds = None\n with pytest.raises(OSError, message='got to PAM'):\n # check that it doesn't go to PAM\n os.stat('tmp/tiff_write_79.tif.aux.xml')\n \n\n # So should get 'Area'\n ds = gdal.Open('tmp/tiff_write_79.tif')\n if do_projection_ref:\n ds.GetProjectionRef()\n mdi = ds.GetMetadataItem('AREA_OR_POINT')\n assert mdi == 'Area', \\\n ('(2) did not get expected value. 
do_projection_ref = %d, check_just_after = %d' % (do_projection_ref, check_just_after))\n ds = None\n\n # Now update to 'Point'\n ds = gdal.Open('tmp/tiff_write_79.tif', gdal.GA_Update)\n if do_projection_ref:\n ds.GetProjectionRef()\n ds.SetMetadataItem('AREA_OR_POINT', 'Point')\n if check_just_after:\n mdi = ds.GetMetadataItem('AREA_OR_POINT')\n assert mdi == 'Point', \\\n ('(3) did not get expected value. do_projection_ref = %d, check_just_after = %d' % (do_projection_ref, check_just_after))\n ds = None\n with pytest.raises(OSError, message='got to PAM'):\n # check that it doesn't go to PAM\n os.stat('tmp/tiff_write_79.tif.aux.xml')\n \n\n # Now should get 'Point'\n ds = gdal.Open('tmp/tiff_write_79.tif')\n if do_projection_ref:\n ds.GetProjectionRef()\n mdi = ds.GetMetadataItem('AREA_OR_POINT')\n assert mdi == 'Point', \\\n ('(4) did not get expected value. do_projection_ref = %d, check_just_after = %d' % (do_projection_ref, check_just_after))\n ds = None\n\n # Now update back to 'Area' through SetMetadata()\n ds = gdal.Open('tmp/tiff_write_79.tif', gdal.GA_Update)\n if do_projection_ref:\n ds.GetProjectionRef()\n md = {}\n md['AREA_OR_POINT'] = 'Area'\n ds.SetMetadata(md)\n if check_just_after:\n mdi = ds.GetMetadataItem('AREA_OR_POINT')\n assert mdi == 'Area', \\\n ('(5) did not get expected value. do_projection_ref = %d, check_just_after = %d' % (do_projection_ref, check_just_after))\n ds = None\n\n # Now should get 'Area'\n ds = gdal.Open('tmp/tiff_write_79.tif')\n if do_projection_ref:\n ds.GetProjectionRef()\n mdi = ds.GetMetadataItem('AREA_OR_POINT')\n assert mdi == 'Area', '(6) did not get expected value'\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_79.tif')\n\n###############################################################################\n# Test SetOffset() & SetScale()\n\n\ndef test_tiff_write_80():\n\n # First part : test storing and retrieving scale & offsets from internal metadata\n ds = gdaltest.tiff_drv.Create('tmp/tiff_write_80.tif', 1, 1)\n ds.GetRasterBand(1).SetScale(100)\n ds.GetRasterBand(1).SetOffset(1000)\n ds = None\n\n with pytest.raises(OSError, message='got to PAM, but not expected...'):\n # check that it doesn't go to PAM\n os.stat('tmp/tiff_write_80.tif.aux.xml')\n \n\n ds = gdal.Open('tmp/tiff_write_80.tif')\n scale = ds.GetRasterBand(1).GetScale()\n offset = ds.GetRasterBand(1).GetOffset()\n assert scale == 100 and offset == 1000, \\\n 'did not get expected values in internal case (1)'\n ds = None\n\n # Test CreateCopy()\n src_ds = gdal.Open('tmp/tiff_write_80.tif')\n ds = gdaltest.tiff_drv.CreateCopy('tmp/tiff_write_80_copy.tif', src_ds)\n src_ds = None\n ds = None\n ds = gdal.Open('tmp/tiff_write_80_copy.tif')\n scale = ds.GetRasterBand(1).GetScale()\n offset = ds.GetRasterBand(1).GetOffset()\n assert scale == 100 and offset == 1000, 'did not get expected values in copy'\n ds = None\n gdaltest.tiff_drv.Delete('tmp/tiff_write_80_copy.tif')\n\n # Second part : test unsetting scale & offsets from internal metadata\n ds = gdal.Open('tmp/tiff_write_80.tif', gdal.GA_Update)\n ds.GetRasterBand(1).SetScale(1)\n ds.GetRasterBand(1).SetOffset(0)\n ds = None\n\n ds = gdal.Open('tmp/tiff_write_80.tif')\n scale = ds.GetRasterBand(1).GetScale()\n offset = ds.GetRasterBand(1).GetOffset()\n assert scale == 1 and offset == 0, \\\n 'did not get expected values in internal case (2)'\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_80.tif')\n\n # Third part : test storing and retrieving scale & offsets from PAM metadata\n ds = 
gdaltest.tiff_drv.Create('tmp/tiff_write_80_bis.tif', 1, 1)\n assert ds.GetRasterBand(1).GetScale() is None and ds.GetRasterBand(1).GetOffset() is None, \\\n 'expected None values'\n ds = None\n\n ds = gdal.Open('tmp/tiff_write_80_bis.tif')\n ds.GetRasterBand(1).SetScale(-100)\n ds.GetRasterBand(1).SetOffset(-1000)\n ds = None\n\n try:\n # check that it *goes* to PAM\n os.stat('tmp/tiff_write_80_bis.tif.aux.xml')\n except OSError:\n pytest.fail('did not go to PAM as expected')\n\n ds = gdal.Open('tmp/tiff_write_80_bis.tif')\n scale = ds.GetRasterBand(1).GetScale()\n offset = ds.GetRasterBand(1).GetOffset()\n assert scale == -100 and offset == -1000, \\\n 'did not get expected values in PAM case (1)'\n ds = None\n\n # Fourth part : test unsetting scale & offsets from PAM metadata\n ds = gdal.Open('tmp/tiff_write_80_bis.tif')\n ds.GetRasterBand(1).SetScale(1)\n ds.GetRasterBand(1).SetOffset(0)\n ds = None\n\n with pytest.raises(OSError, message='PAM file should be deleted'):\n # check that there is no more any PAM file\n os.stat('tmp/tiff_write_80_bis.tif.aux.xml')\n \n\n ds = gdal.Open('tmp/tiff_write_80_bis.tif')\n scale = ds.GetRasterBand(1).GetScale()\n offset = ds.GetRasterBand(1).GetOffset()\n assert scale == 1 and offset == 0, 'did not get expected values in PAM case (2)'\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_80_bis.tif')\n\n###############################################################################\n# Test retrieving GCP from PAM\n\n\ndef test_tiff_write_81():\n\n shutil.copyfile('data/byte.tif', 'tmp/tiff_write_81.tif')\n f = open('tmp/tiff_write_81.tif.aux.xml', 'wt')\n f.write(\"\"\"\n<PAMDataset>\n <GCPList Projection=\"PROJCS[&quot;NAD27 / UTM zone 11N&quot;,GEOGCS[&quot;NAD27&quot;,DATUM[&quot;North_American_Datum_1927&quot;,SPHEROID[&quot;Clarke 1866&quot;,6378206.4,294.9786982139006,AUTHORITY[&quot;EPSG&quot;,&quot;7008&quot;]],AUTHORITY[&quot;EPSG&quot;,&quot;6267&quot;]],PRIMEM[&quot;Greenwich&quot;,0],UNIT[&quot;degree&quot;,0.0174532925199433],AUTHORITY[&quot;EPSG&quot;,&quot;4267&quot;]],PROJECTION[&quot;Transverse_Mercator&quot;],PARAMETER[&quot;latitude_of_origin&quot;,0],PARAMETER[&quot;central_meridian&quot;,-117],PARAMETER[&quot;scale_factor&quot;,0.9996],PARAMETER[&quot;false_easting&quot;,500000],PARAMETER[&quot;false_northing&quot;,0],UNIT[&quot;metre&quot;,1,AUTHORITY[&quot;EPSG&quot;,&quot;9001&quot;]],AUTHORITY[&quot;EPSG&quot;,&quot;26711&quot;]]\">\n <GCP Id=\"\" Pixel=\"0.0000\" Line=\"0.0000\" X=\"4.407200000000E+05\" Y=\"3.751320000000E+06\"/>\n <GCP Id=\"\" Pixel=\"100.0000\" Line=\"0.0000\" X=\"4.467200000000E+05\" Y=\"3.751320000000E+06\"/>\n <GCP Id=\"\" Pixel=\"0.0000\" Line=\"100.0000\" X=\"4.407200000000E+05\" Y=\"3.745320000000E+06\"/>\n <GCP Id=\"\" Pixel=\"100.0000\" Line=\"100.0000\" X=\"4.467200000000E+05\" Y=\"3.745320000000E+06\"/>\n </GCPList>\n</PAMDataset>\"\"\")\n f.close()\n\n ds = gdal.Open('tmp/tiff_write_81.tif')\n\n assert (ds.GetGCPProjection().find(\n 'AUTHORITY[\"EPSG\",\"26711\"]') != -1), 'GCP Projection not set properly.'\n\n gcps = ds.GetGCPs()\n assert len(gcps) == 4, 'GCP count wrong.'\n\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_81.tif')\n\n###############################################################################\n# Test writing & reading a signedbyte 8 bit geotiff\n\n\ndef test_tiff_write_82():\n\n src_ds = gdal.Open('data/byte.tif')\n ds = gdaltest.tiff_drv.CreateCopy('tmp/tiff_write_82.tif', src_ds, options=['PIXELTYPE=SIGNEDBYTE'])\n src_ds = None\n ds = None\n\n ds = 
gdal.Open('tmp/tiff_write_82.tif')\n md = ds.GetRasterBand(1).GetMetadata('IMAGE_STRUCTURE')\n assert md['PIXELTYPE'] == 'SIGNEDBYTE', 'did not get SIGNEDBYTE'\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_82.tif')\n\n\n###############################################################################\n# Test writing & reading an indexed GeoTIFF with an extra transparency band (#3547)\n\ndef test_tiff_write_83():\n\n # Test Create() method\n ds = gdaltest.tiff_drv.Create('tmp/tiff_write_83.tif', 1, 1, 2)\n ct = gdal.ColorTable()\n ct.SetColorEntry(127, (255, 255, 255, 255))\n ds.GetRasterBand(1).SetRasterColorTable(ct)\n ds.GetRasterBand(1).Fill(127)\n ds.GetRasterBand(2).Fill(255)\n ds = None\n\n # Test CreateCopy() method\n src_ds = gdal.Open('tmp/tiff_write_83.tif')\n ds = gdaltest.tiff_drv.CreateCopy('tmp/tiff_write_83_2.tif', src_ds)\n src_ds = None\n ds = None\n\n ds = gdal.Open('tmp/tiff_write_83_2.tif')\n ct2 = ds.GetRasterBand(1).GetRasterColorTable()\n assert ct2.GetColorEntry(127) == (255, 255, 255, 255), \\\n 'did not get expected color table'\n ct2 = None\n cs1 = ds.GetRasterBand(1).Checksum()\n assert cs1 == 127 % 7, 'did not get expected checksum for band 1'\n cs2 = ds.GetRasterBand(2).Checksum()\n assert cs2 == 255 % 7, 'did not get expected checksum for band 2'\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_83.tif')\n gdaltest.tiff_drv.Delete('tmp/tiff_write_83_2.tif')\n\n###############################################################################\n# Test propagation of non-standard JPEG quality when the current directory\n# changes in the midst of encoding of tiles (#3539)\n\n\ndef test_tiff_write_84():\n\n md = gdaltest.tiff_drv.GetMetadata()\n\n # Crashes with libtiff < 4.0\n if md['DMD_CREATIONOPTIONLIST'].find('BigTIFF') == -1:\n pytest.skip()\n\n if md['DMD_CREATIONOPTIONLIST'].find('JPEG') == -1:\n pytest.skip()\n\n with gdaltest.SetCacheMax(0):\n ds = gdal.GetDriverByName('GTiff').Create('tmp/tiff_write_84.tif', 128, 128, 3)\n ds = None\n\n try:\n os.remove('tmp/tiff_write_84.tif.ovr')\n except OSError:\n pass\n\n ds = gdal.Open('tmp/tiff_write_84.tif')\n gdal.SetConfigOption('COMPRESS_OVERVIEW', 'JPEG')\n gdal.SetConfigOption('JPEG_QUALITY_OVERVIEW', '90')\n ds.BuildOverviews('NEAREST', overviewlist=[2])\n cs = ds.GetRasterBand(2).GetOverview(0).Checksum()\n ds = None\n gdal.SetConfigOption('COMPRESS_OVERVIEW', None)\n gdal.SetConfigOption('JPEG_QUALITY_OVERVIEW', None)\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_84.tif')\n\n assert cs == 0, 'did not get expected checksum'\n\n###############################################################################\n# Test SetUnitType()\n\n\ndef test_tiff_write_85():\n\n # First part : test storing and retrieving unittype from internal metadata\n ds = gdaltest.tiff_drv.Create('tmp/tiff_write_85.tif', 1, 1)\n ds.GetRasterBand(1).SetUnitType('ft')\n ds = None\n\n with pytest.raises(OSError, message='got to PAM, but not expected...'):\n # check that it doesn't go to PAM\n os.stat('tmp/tiff_write_85.tif.aux.xml')\n \n\n ds = gdal.Open('tmp/tiff_write_85.tif')\n unittype = ds.GetRasterBand(1).GetUnitType()\n assert unittype == 'ft', 'did not get expected values in internal case (1)'\n ds = None\n\n # Test CreateCopy()\n src_ds = gdal.Open('tmp/tiff_write_85.tif')\n ds = gdaltest.tiff_drv.CreateCopy('tmp/tiff_write_85_copy.tif', src_ds)\n src_ds = None\n ds = None\n ds = gdal.Open('tmp/tiff_write_85_copy.tif')\n unittype = ds.GetRasterBand(1).GetUnitType()\n assert unittype == 'ft', 'did not get expected 
values in copy'\n ds = None\n gdaltest.tiff_drv.Delete('tmp/tiff_write_85_copy.tif')\n\n # Second part : test unsetting unittype from internal metadata\n ds = gdal.Open('tmp/tiff_write_85.tif', gdal.GA_Update)\n ds.GetRasterBand(1).SetUnitType(None)\n ds = None\n\n ds = gdal.Open('tmp/tiff_write_85.tif')\n unittype = ds.GetRasterBand(1).GetUnitType()\n assert unittype == '', 'did not get expected values in internal case (2)'\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_85.tif')\n\n # Third part : test storing and retrieving unittype from PAM metadata\n ds = gdaltest.tiff_drv.Create('tmp/tiff_write_85_bis.tif', 1, 1)\n assert not ds.GetRasterBand(1).GetUnitType(), 'expected None values'\n ds = None\n\n ds = gdal.Open('tmp/tiff_write_85_bis.tif')\n ds.GetRasterBand(1).SetUnitType('ft')\n ds = None\n\n try:\n # check that it *goes* to PAM\n os.stat('tmp/tiff_write_85_bis.tif.aux.xml')\n except OSError:\n pytest.fail('did not go to PAM as expected')\n\n ds = gdal.Open('tmp/tiff_write_85_bis.tif')\n unittype = ds.GetRasterBand(1).GetUnitType()\n assert unittype == 'ft', 'did not get expected values in PAM case (1)'\n ds = None\n\n # Fourth part : test unsetting unittype from PAM metadata\n ds = gdal.Open('tmp/tiff_write_85_bis.tif')\n ds.GetRasterBand(1).SetUnitType(None)\n ds = None\n\n with pytest.raises(OSError, message='PAM file should be deleted'):\n # check that there is no more any PAM file\n os.stat('tmp/tiff_write_85_bis.tif.aux.xml')\n \n\n ds = gdal.Open('tmp/tiff_write_85_bis.tif')\n unittype = ds.GetRasterBand(1).GetUnitType()\n assert unittype == '', 'did not get expected values in PAM case (2)'\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_85_bis.tif')\n\n###############################################################################\n# Test special handling of xml:ESRI domain. 
When the ESRI_XML_PAM config\n# option is set we want to write this to PAM, not into the geotiff itself.\n# This is a special option so that ArcGIS 10 written geotiffs will still work\n# properly with earlier versions of ArcGIS, requested by ESRI.\n\n\ndef test_tiff_write_86():\n\n    gdal.SetConfigOption('ESRI_XML_PAM', 'YES')\n\n    ds = gdaltest.tiff_drv.Create('tmp/tiff_write_86.tif', 100, 100,\n                                  1, gdal.GDT_Byte)\n    ds.SetMetadata(['<abc></abc>'], 'xml:ESRI')\n    ds.SetMetadataItem('BaseTest', 'Value')\n    ds = None\n\n    # Is the xml:ESRI data available?\n    ds = gdal.Open('tmp/tiff_write_86.tif')\n    assert ds.GetMetadata('xml:ESRI') == ['<abc />\\n'], \\\n        'did not get expected xml:ESRI metadata.'\n\n    if ds.GetMetadataItem('BaseTest') != 'Value':\n        pytest.fail('missing metadata(1)')\n    ds = None\n\n    # After removing the PAM file, is the xml:ESRI metadata gone while the\n    # conventional metadata is still available?\n\n    os.rename('tmp/tiff_write_86.tif.aux.xml',\n              'tmp/tiff_write_86.tif.aux.xml.hidden')\n\n    ds = gdal.Open('tmp/tiff_write_86.tif')\n    assert ds.GetMetadata('xml:ESRI') is None, 'unexpectedly got xml:ESRI metadata'\n\n    if ds.GetMetadataItem('BaseTest') != 'Value':\n        pytest.fail('missing metadata(2)')\n\n    ds = None\n\n    # now confirm that CreateCopy also preserves things similarly.\n\n    os.rename('tmp/tiff_write_86.tif.aux.xml.hidden',\n              'tmp/tiff_write_86.tif.aux.xml')\n\n    ds_src = gdal.Open('tmp/tiff_write_86.tif')\n    ds = gdaltest.tiff_drv.CreateCopy('tmp/tiff_write_86_cc.tif', ds_src)\n    ds_src = None\n    ds = None\n\n    # Is the xml:ESRI data available?\n    ds = gdal.Open('tmp/tiff_write_86_cc.tif')\n    assert ds.GetMetadata('xml:ESRI') == ['<abc />\\n'], \\\n        'did not get expected xml:ESRI metadata (cc).'\n\n    if ds.GetMetadataItem('BaseTest') != 'Value':\n        pytest.fail('missing metadata(1cc)')\n    ds = None\n\n    # After removing the PAM file, is the xml:ESRI metadata gone while the\n    # conventional metadata is still available?\n\n    os.remove('tmp/tiff_write_86_cc.tif.aux.xml')\n\n    ds = gdal.Open('tmp/tiff_write_86_cc.tif')\n    assert ds.GetMetadata('xml:ESRI') is None, 'unexpectedly got xml:ESRI metadata(2)'\n\n    if ds.GetMetadataItem('BaseTest') != 'Value':\n        pytest.fail('missing metadata(2cc)')\n\n    ds = None\n\n    # Cleanup\n\n    gdal.SetConfigOption('ESRI_XML_PAM', 'NO')\n\n    gdaltest.tiff_drv.Delete('tmp/tiff_write_86.tif')\n    gdaltest.tiff_drv.Delete('tmp/tiff_write_86_cc.tif')\n\n\n###############################################################################\n# Test COPY_SRC_OVERVIEWS creation option\n\ndef test_tiff_write_87():\n\n    gdal.Translate('tmp/tiff_write_87_src.tif', 'data/utmsmall.tif', options='-a_nodata 0')\n\n    src_ds = gdal.Open('tmp/tiff_write_87_src.tif', gdal.GA_Update)\n    src_ds.BuildOverviews('NEAR', overviewlist=[2, 4])\n    expected_cs1 = src_ds.GetRasterBand(1).GetOverview(0).Checksum()\n    expected_cs2 = src_ds.GetRasterBand(1).GetOverview(1).Checksum()\n    ds = gdaltest.tiff_drv.CreateCopy('tmp/tiff_write_87_dst.tif', src_ds, options=['COPY_SRC_OVERVIEWS=YES', 'ENDIANNESS=LITTLE'])\n    ds = None\n    src_ds = None\n\n    ds = gdal.Open('tmp/tiff_write_87_dst.tif')\n    cs1 = ds.GetRasterBand(1).GetOverview(0).Checksum()\n    cs2 = ds.GetRasterBand(1).GetOverview(1).Checksum()\n    nodata_ovr_0 = ds.GetRasterBand(1).GetOverview(0).GetNoDataValue()\n    nodata_ovr_1 = ds.GetRasterBand(1).GetOverview(1).GetNoDataValue()\n    ifd_main = int(ds.GetRasterBand(1).GetMetadataItem('IFD_OFFSET', 'TIFF'))\n
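    # Cloud-optimized layout: all IFDs at the head of the file (main IFD at\n    # offset 8) and the full-resolution imagery last -- checked by the assert below\n    ifd_ovr_0 = 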
int(ds.GetRasterBand(1).GetOverview(0).GetMetadataItem('IFD_OFFSET', 'TIFF'))\n ifd_ovr_1 = int(ds.GetRasterBand(1).GetOverview(1).GetMetadataItem('IFD_OFFSET', 'TIFF'))\n data_ovr_1 = int(ds.GetRasterBand(1).GetOverview(1).GetMetadataItem('BLOCK_OFFSET_0_0', 'TIFF'))\n data_ovr_0 = int(ds.GetRasterBand(1).GetOverview(0).GetMetadataItem('BLOCK_OFFSET_0_0', 'TIFF'))\n data_main = int(ds.GetRasterBand(1).GetMetadataItem('BLOCK_OFFSET_0_0', 'TIFF'))\n\n ds = None\n\n import validate_cloud_optimized_geotiff\n try:\n _, errors, _ = validate_cloud_optimized_geotiff.validate('tmp/tiff_write_87_dst.tif', check_tiled=False)\n assert not errors, 'validate_cloud_optimized_geotiff failed'\n except OSError:\n pytest.fail('validate_cloud_optimized_geotiff failed')\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_87_src.tif')\n gdaltest.tiff_drv.Delete('tmp/tiff_write_87_dst.tif')\n\n # Check checksums\n assert cs1 == expected_cs1 and cs2 == expected_cs2, 'did not get expected checksums'\n\n assert nodata_ovr_0 == 0 and nodata_ovr_1 == 0, 'did not get expected nodata values'\n\n assert ifd_main == 8 or (ifd_main < ifd_ovr_0 < ifd_ovr_1 < data_ovr_1 < data_ovr_0 < data_main)\n\n###############################################################################\n# Test that COPY_SRC_OVERVIEWS creation option has an influence\n# on BIGTIFF creation\n\n\ndef test_tiff_write_88():\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('BigTIFF') == -1:\n pytest.skip()\n\n # The file would be > 4.2 GB without SPARSE_OK\n src_ds = gdaltest.tiff_drv.Create('tmp/tiff_write_88_src.tif', 60000, 60000, 1,\n options=['TILED=YES', 'SPARSE_OK=YES'])\n src_ds.BuildOverviews('NONE', overviewlist=[2, 4])\n # Just write one data block so that we can truncate it\n data = src_ds.GetRasterBand(1).GetOverview(1).ReadRaster(0, 0, 128, 128)\n src_ds.GetRasterBand(1).GetOverview(1).WriteRaster(0, 0, 128, 128, data)\n src_ds = None\n\n # Truncate the file to cause an I/O error on reading\n # so that the CreateCopy() aborts quickly\n f = open('tmp/tiff_write_88_src.tif', 'rb')\n f.seek(0, 2)\n length = f.tell()\n f.seek(0, 0)\n data = f.read(length - 1)\n f.close()\n f = open('tmp/tiff_write_88_src.tif', 'wb')\n f.write(data)\n f.close()\n\n src_ds = gdal.Open('tmp/tiff_write_88_src.tif')\n # for testing only. 
We need to keep the file to check it was a bigtiff\n gdal.SetConfigOption('GTIFF_DELETE_ON_ERROR', 'NO')\n gdal.SetConfigOption('CHECK_DISK_FREE_SPACE', 'NO') # we don't want free space to be an issue here\n gdal.PushErrorHandler('CPLQuietErrorHandler')\n ds = gdaltest.tiff_drv.CreateCopy('tmp/tiff_write_88_dst.tif', src_ds,\n options=['TILED=YES', 'COPY_SRC_OVERVIEWS=YES', 'ENDIANNESS=LITTLE'])\n gdal.PopErrorHandler()\n gdal.SetConfigOption('GTIFF_DELETE_ON_ERROR', None)\n gdal.SetConfigOption('CHECK_DISK_FREE_SPACE', None)\n del ds\n src_ds = None\n\n f = open('tmp/tiff_write_88_dst.tif', 'rb')\n data = f.read(8)\n f.close()\n\n os.remove('tmp/tiff_write_88_src.tif')\n os.remove('tmp/tiff_write_88_dst.tif')\n\n import struct\n ar = struct.unpack('B' * 8, data)\n assert ar[2] == 43, 'not a BIGTIFF file'\n assert ar[4] == 8 and ar[5] == 0 and ar[6] == 0 and ar[7] == 0, \\\n 'first IFD is not at offset 8'\n\n###############################################################################\n# Test JPEG_QUALITY propagation while creating a (default compressed) mask band\n\n\ndef test_tiff_write_89():\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('BigTIFF') == -1:\n pytest.skip()\n\n if md['DMD_CREATIONOPTIONLIST'].find('JPEG') == -1:\n pytest.skip()\n\n last_size = 0\n for quality in [90, 75, 30]:\n src_ds = gdal.Open('../gdrivers/data/utm.tif')\n\n ds = gdal.GetDriverByName('GTiff').Create('tmp/tiff_write_89.tif', 1024, 1024, 3,\n options=['COMPRESS=JPEG', 'PHOTOMETRIC=YCBCR', 'JPEG_QUALITY=%d' % quality])\n\n gdal.SetConfigOption('GDAL_TIFF_INTERNAL_MASK', 'YES')\n ds.CreateMaskBand(gdal.GMF_PER_DATASET)\n gdal.SetConfigOption('GDAL_TIFF_INTERNAL_MASK', None)\n\n data = src_ds.GetRasterBand(1).ReadRaster(0, 0, 512, 512, 1024, 1024)\n ds.GetRasterBand(1).WriteRaster(0, 0, 1024, 1024, data)\n ds.GetRasterBand(2).WriteRaster(0, 0, 1024, 1024, data)\n ds.GetRasterBand(3).WriteRaster(0, 0, 1024, 1024, data)\n ds.GetRasterBand(1).GetMaskBand().Fill(255)\n\n src_ds = None\n ds = None\n\n # older versions of python don't have SEEK_END, add if missing.\n try:\n os.SEEK_END\n except AttributeError:\n os.SEEK_END = 2\n\n f = open('tmp/tiff_write_89.tif', 'rb')\n f.seek(0, os.SEEK_END)\n size = f.tell()\n f.close()\n\n # print('quality = %d, size = %d' % (quality, size))\n\n if quality != 90:\n assert size < last_size, 'did not get decreasing file sizes'\n\n last_size = size\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_89.tif')\n\n###############################################################################\n# Test JPEG_QUALITY propagation while creating (internal) overviews\n\n\ndef test_tiff_write_90():\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('BigTIFF') == -1:\n pytest.skip()\n\n if md['DMD_CREATIONOPTIONLIST'].find('JPEG') == -1:\n pytest.skip()\n\n last_size = 0\n for quality in [90, 75, 30]:\n src_ds = gdal.Open('../gdrivers/data/utm.tif')\n\n ds = gdal.GetDriverByName('GTiff').Create('tmp/tiff_write_90.tif', 1024, 1024, 3,\n options=['COMPRESS=JPEG', 'PHOTOMETRIC=YCBCR', 'JPEG_QUALITY=%d' % quality])\n\n data = src_ds.GetRasterBand(1).ReadRaster(0, 0, 512, 512, 1024, 1024)\n ds.GetRasterBand(1).WriteRaster(0, 0, 1024, 1024, data)\n ds.GetRasterBand(2).WriteRaster(0, 0, 1024, 1024, data)\n ds.GetRasterBand(3).WriteRaster(0, 0, 1024, 1024, data)\n ds.BuildOverviews('NEAR', overviewlist=[2, 4])\n\n src_ds = None\n ds = None\n\n f = open('tmp/tiff_write_90.tif', 'rb')\n f.seek(0, os.SEEK_END)\n size = f.tell()\n f.close()\n\n # 
print('quality = %d, size = %d' % (quality, size))\n\n if quality != 90:\n assert size < last_size, 'did not get decreasing file sizes'\n\n last_size = size\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_90.tif')\n\n\n###############################################################################\n# Test JPEG_QUALITY propagation while creating (internal) overviews after re-opening\n\ndef test_tiff_write_91():\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('BigTIFF') == -1:\n pytest.skip()\n\n if md['DMD_CREATIONOPTIONLIST'].find('JPEG') == -1:\n pytest.skip()\n\n last_size = 0\n for quality in [90, 75, 30]:\n src_ds = gdal.Open('../gdrivers/data/utm.tif')\n\n ds = gdal.GetDriverByName('GTiff').Create('tmp/tiff_write_91.tif', 1024, 1024, 3,\n options=['COMPRESS=JPEG', 'PHOTOMETRIC=YCBCR', 'JPEG_QUALITY=%d' % quality])\n\n data = src_ds.GetRasterBand(1).ReadRaster(0, 0, 512, 512, 1024, 1024)\n ds.GetRasterBand(1).WriteRaster(0, 0, 1024, 1024, data)\n ds.GetRasterBand(2).WriteRaster(0, 0, 1024, 1024, data)\n ds.GetRasterBand(3).WriteRaster(0, 0, 1024, 1024, data)\n ds = None\n\n ds = gdal.Open('tmp/tiff_write_91.tif', gdal.GA_Update)\n gdal.SetConfigOption('JPEG_QUALITY_OVERVIEW', '%d' % quality)\n ds.BuildOverviews('NEAR', overviewlist=[2, 4])\n gdal.SetConfigOption('JPEG_QUALITY_OVERVIEW', None)\n\n src_ds = None\n ds = None\n\n f = open('tmp/tiff_write_91.tif', 'rb')\n f.seek(0, os.SEEK_END)\n size = f.tell()\n f.close()\n\n # print('quality = %d, size = %d' % (quality, size))\n\n if quality != 90:\n assert size < last_size, 'did not get decreasing file sizes'\n\n last_size = size\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_91.tif')\n\n\n###############################################################################\n# Test the effect of JPEG_QUALITY_OVERVIEW while creating (internal) overviews after re-opening\n# This will test that we correctly guess the quality of the main dataset\n\ndef test_tiff_write_92():\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('BigTIFF') == -1:\n pytest.skip()\n\n if md['DMD_CREATIONOPTIONLIST'].find('JPEG') == -1:\n pytest.skip()\n\n last_size = 0\n quality = 30\n for jpeg_quality_overview in [False, 30, 40]:\n src_ds = gdal.Open('../gdrivers/data/utm.tif')\n\n ds = gdal.GetDriverByName('GTiff').Create('tmp/tiff_write_92.tif', 1024, 1024, 3,\n options=['COMPRESS=JPEG', 'PHOTOMETRIC=YCBCR', 'JPEG_QUALITY=%d' % quality])\n\n data = src_ds.GetRasterBand(1).ReadRaster(0, 0, 512, 512, 1024, 1024)\n ds.GetRasterBand(1).WriteRaster(0, 0, 1024, 1024, data)\n ds.GetRasterBand(2).WriteRaster(0, 0, 1024, 1024, data)\n ds.GetRasterBand(3).WriteRaster(0, 0, 1024, 1024, data)\n ds = None\n\n ds = gdal.Open('tmp/tiff_write_92.tif', gdal.GA_Update)\n if jpeg_quality_overview is not False:\n gdal.SetConfigOption('JPEG_QUALITY_OVERVIEW', '%d' % jpeg_quality_overview)\n ds.BuildOverviews('NEAR', overviewlist=[2, 4])\n gdal.SetConfigOption('JPEG_QUALITY_OVERVIEW', None)\n\n src_ds = None\n ds = None\n\n f = open('tmp/tiff_write_92.tif', 'rb')\n f.seek(0, os.SEEK_END)\n size = f.tell()\n f.close()\n\n # print('quality = %d, size = %d' % (quality, size))\n\n if jpeg_quality_overview == 30:\n assert size == last_size, 'did not get equal file sizes'\n elif jpeg_quality_overview == 40:\n assert size > last_size, 'did not get growing file sizes'\n\n last_size = size\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_92.tif')\n\n###############################################################################\n# Test 
JPEG_QUALITY_OVERVIEW propagation while creating external overviews\n\n\ndef test_tiff_write_93():\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('BigTIFF') == -1:\n pytest.skip()\n\n if md['DMD_CREATIONOPTIONLIST'].find('JPEG') == -1:\n pytest.skip()\n\n src_ds = gdal.Open('../gdrivers/data/utm.tif')\n ds = gdal.GetDriverByName('GTiff').Create('tmp/tiff_write_93.tif', 1024, 1024, 3,\n options=['COMPRESS=JPEG', 'PHOTOMETRIC=YCBCR'])\n\n data = src_ds.GetRasterBand(1).ReadRaster(0, 0, 512, 512, 1024, 1024)\n ds.GetRasterBand(1).WriteRaster(0, 0, 1024, 1024, data)\n ds.GetRasterBand(2).WriteRaster(0, 0, 1024, 1024, data)\n ds.GetRasterBand(3).WriteRaster(0, 0, 1024, 1024, data)\n ds = None\n\n src_ds = None\n\n last_size = 0\n for quality in [90, 75, 30]:\n\n try:\n os.remove('tmp/tiff_write_93.tif.ovr')\n except OSError:\n pass\n\n ds = gdal.Open('tmp/tiff_write_93.tif')\n gdal.SetConfigOption('COMPRESS_OVERVIEW', 'JPEG')\n gdal.SetConfigOption('JPEG_QUALITY_OVERVIEW', '%d' % quality)\n gdal.SetConfigOption('PHOTOMETRIC_OVERVIEW', 'YCBCR')\n ds.BuildOverviews('NEAR', overviewlist=[2, 4])\n gdal.SetConfigOption('COMPRESS_OVERVIEW', None)\n gdal.SetConfigOption('JPEG_QUALITY_OVERVIEW', None)\n gdal.SetConfigOption('PHOTOMETRIC_OVERVIEW', None)\n ds = None\n\n f = open('tmp/tiff_write_93.tif.ovr', 'rb')\n f.seek(0, os.SEEK_END)\n size = f.tell()\n f.close()\n\n # print('quality = %d, size = %d' % (quality, size))\n\n if quality != 90:\n assert size < last_size, 'did not get decreasing file sizes'\n\n assert not (quality == 30 and size >= 83000), \\\n 'file larger than expected. should be about 69100. perhaps jpeg quality is not well propagated'\n\n last_size = size\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_93.tif')\n\n\n###############################################################################\n# Test CreateCopy() of a dataset with a mask into a JPEG compressed dataset\n# and check JPEG_QUALITY propagation without warning\n\ndef test_tiff_write_94():\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('BigTIFF') == -1:\n pytest.skip()\n\n if md['DMD_CREATIONOPTIONLIST'].find('JPEG') == -1:\n pytest.skip()\n\n src_ds = gdal.GetDriverByName('GTiff').Create('tmp/tiff_write_94_src.tif', 1024, 1024, 3)\n gdal.SetConfigOption('GDAL_TIFF_INTERNAL_MASK', 'YES')\n src_ds.CreateMaskBand(gdal.GMF_PER_DATASET)\n gdal.SetConfigOption('GDAL_TIFF_INTERNAL_MASK', None)\n src_ds.GetRasterBand(1).GetMaskBand().WriteRaster(0, 0, 1, 1, '\\xff', 1, 1)\n\n gdal.SetConfigOption('GDAL_TIFF_INTERNAL_MASK', 'YES')\n ds = gdal.GetDriverByName('GTiff').CreateCopy('tmp/tiff_write_94_dst.tif', src_ds,\n options=['COMPRESS=JPEG', 'PHOTOMETRIC=YCBCR', 'JPEG_QUALITY=30'])\n gdal.SetConfigOption('GDAL_TIFF_INTERNAL_MASK', None)\n\n src_ds = None\n ds = None\n\n ds = gdal.Open('tmp/tiff_write_94_dst.tif')\n cs = ds.GetRasterBand(1).GetMaskBand().Checksum()\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_94_src.tif')\n gdaltest.tiff_drv.Delete('tmp/tiff_write_94_dst.tif')\n\n assert cs == 3, 'wrong checksum'\n\n###############################################################################\n# Test that COPY_SRC_OVERVIEWS deal well with rounding issues when computing\n# overview levels from the overview size\n\n\ndef test_tiff_write_95():\n\n src_ds = gdaltest.tiff_drv.Create('tmp/tiff_write_95_src.tif', 7171, 6083, options=['SPARSE_OK=YES'])\n src_ds.BuildOverviews('NONE', overviewlist=[2, 4, 8, 16, 32, 64])\n 
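# (Added illustrative check, not part of the original test.) Assuming GDAL\n # computes each overview level with ceil division, (size + factor - 1) // factor,\n # the 7171 x 6083 source exercises the rounding that COPY_SRC_OVERVIEWS has to\n # undo when it guesses the overview factor back from the overview size:\n ovr_sizes = [((7171 + f - 1) // f, (6083 + f - 1) // f) for f in (2, 4, 8, 16, 32, 64)]\n assert ovr_sizes[0] == (3586, 3042) and ovr_sizes[-1] == (113, 96)\n 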
gdal.SetConfigOption('GTIFF_DONT_WRITE_BLOCKS', 'YES')\n ds = gdaltest.tiff_drv.CreateCopy('tmp/tiff_write_95_dst.tif', src_ds, options=['COPY_SRC_OVERVIEWS=YES'])\n gdal.SetConfigOption('GTIFF_DONT_WRITE_BLOCKS', None)\n ok = ds is not None\n ds = None\n src_ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_95_src.tif')\n gdaltest.tiff_drv.Delete('tmp/tiff_write_95_dst.tif')\n\n assert ok\n\n###############################################################################\n# Test that COPY_SRC_OVERVIEWS combined with GDAL_TIFF_INTERNAL_MASK=YES work well\n\n\ndef test_tiff_write_96():\n\n gdal.SetConfigOption('GDAL_TIFF_INTERNAL_MASK', 'YES')\n src_ds = gdaltest.tiff_drv.Create('tmp/tiff_write_96_src.tif', 100, 100)\n src_ds.GetRasterBand(1).Fill(255)\n src_ds.CreateMaskBand(gdal.GMF_PER_DATASET)\n from sys import version_info\n if version_info >= (3, 0, 0):\n exec(\"src_ds.GetRasterBand(1).GetMaskBand().WriteRaster(25,25,50,50,b'\\\\xff',1,1)\")\n else:\n src_ds.GetRasterBand(1).GetMaskBand().WriteRaster(25, 25, 50, 50, '\\xff', 1, 1)\n src_ds.BuildOverviews('NEAR', overviewlist=[2, 4])\n expected_cs = src_ds.GetRasterBand(1).Checksum()\n expected_cs_mask = src_ds.GetRasterBand(1).GetMaskBand().Checksum()\n expected_cs_ovr_1 = src_ds.GetRasterBand(1).GetOverview(0).Checksum()\n expected_cs_ovr_mask_1 = src_ds.GetRasterBand(1).GetOverview(0).GetMaskBand().Checksum()\n expected_cs_ovr_2 = src_ds.GetRasterBand(1).GetOverview(1).Checksum()\n expected_cs_ovr_mask_2 = src_ds.GetRasterBand(1).GetOverview(1).GetMaskBand().Checksum()\n\n ds = gdaltest.tiff_drv.CreateCopy('tmp/tiff_write_96_dst.tif', src_ds, options=['COPY_SRC_OVERVIEWS=YES'])\n ds = None\n gdal.SetConfigOption('GDAL_TIFF_INTERNAL_MASK', None)\n\n ds = gdal.Open('tmp/tiff_write_96_dst.tif')\n cs = ds.GetRasterBand(1).Checksum()\n cs_mask = ds.GetRasterBand(1).GetMaskBand().Checksum()\n cs_ovr_1 = ds.GetRasterBand(1).GetOverview(0).Checksum()\n cs_ovr_mask_1 = ds.GetRasterBand(1).GetOverview(0).GetMaskBand().Checksum()\n cs_ovr_2 = ds.GetRasterBand(1).GetOverview(1).Checksum()\n cs_ovr_mask_2 = ds.GetRasterBand(1).GetOverview(1).GetMaskBand().Checksum()\n\n ds = None\n src_ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_96_src.tif')\n gdaltest.tiff_drv.Delete('tmp/tiff_write_96_dst.tif')\n\n assert [expected_cs, expected_cs_mask, expected_cs_ovr_1, expected_cs_ovr_mask_1, expected_cs_ovr_2, expected_cs_ovr_mask_2] == \\\n [cs, cs_mask, cs_ovr_1, cs_ovr_mask_1, cs_ovr_2, cs_ovr_mask_2], \\\n 'did not get expected checksums'\n\n###############################################################################\n# Create a simple file by copying from an existing one - PixelIsPoint\n\n\ndef test_tiff_write_97():\n\n gdal.SetConfigOption('GTIFF_POINT_GEO_IGNORE', 'FALSE')\n\n src_ds = gdal.Open('data/byte_point.tif')\n\n new_ds = gdaltest.tiff_drv.CreateCopy('tmp/test_97.tif', src_ds)\n\n gt = new_ds.GetGeoTransform()\n md = new_ds.GetMetadataItem('AREA_OR_POINT')\n new_ds = None\n\n gt_expected = (440690.0, 60.0, 0.0, 3751350.0, 0.0, -60.0)\n\n assert gt == gt_expected, 'did not get expected geotransform'\n\n assert md == 'Point', 'did not get expected AREA_OR_POINT value'\n\n gdaltest.tiff_drv.Delete('tmp/test_97.tif')\n\n # Again, but ignoring PixelIsPoint\n\n gdal.SetConfigOption('GTIFF_POINT_GEO_IGNORE', 'TRUE')\n\n new_ds = gdaltest.tiff_drv.CreateCopy('tmp/test_97_2.tif', src_ds)\n\n gt = new_ds.GetGeoTransform()\n md = new_ds.GetMetadataItem('AREA_OR_POINT')\n new_ds = None\n src_ds = None\n\n gt_expected = (440690.0, 
60.0, 0.0, 3751350.0, 0.0, -60.0)\n\n assert gt == gt_expected, \\\n 'did not get expected geotransform when ignoring PixelIsPoint'\n\n assert md == 'Point', 'did not get expected AREA_OR_POINT value'\n\n gdal.SetConfigOption('GTIFF_POINT_GEO_IGNORE', None)\n\n # read back this file with pixelispoint behavior enabled.\n\n new_ds = gdal.Open('tmp/test_97_2.tif')\n\n gt = new_ds.GetGeoTransform()\n md = new_ds.GetMetadataItem('AREA_OR_POINT')\n new_ds = None\n\n gt_expected = (440660.0, 60.0, 0.0, 3751380.0, 0.0, -60.0)\n\n assert gt == gt_expected, \\\n 'did not get expected geotransform when ignoring PixelIsPoint (2)'\n\n assert md == 'Point', 'did not get expected AREA_OR_POINT value'\n\n gdaltest.tiff_drv.Delete('tmp/test_97_2.tif')\n\n###############################################################################\n# Create a rotated geotiff file (uses a geomatrix) with - PixelIsPoint\n\n\ndef test_tiff_write_98():\n\n gdal.SetConfigOption('GTIFF_POINT_GEO_IGNORE', 'FALSE')\n\n src_ds = gdal.Open('data/geomatrix.tif')\n\n gdal.SetConfigOption('GTIFF_POINT_GEO_IGNORE', 'TRUE')\n\n new_ds = gdaltest.tiff_drv.CreateCopy('tmp/test_98.tif', src_ds)\n\n gt = new_ds.GetGeoTransform()\n md = new_ds.GetMetadataItem('AREA_OR_POINT')\n new_ds = None\n src_ds = None\n\n gt_expected = (1841001.75, 1.5, -5.0, 1144003.25, -5.0, -1.5)\n\n assert gt == gt_expected, 'did not get expected geotransform'\n\n assert md == 'Point', 'did not get expected AREA_OR_POINT value'\n\n gdal.SetConfigOption('GTIFF_POINT_GEO_IGNORE', 'FALSE')\n\n new_ds = gdal.Open('tmp/test_98.tif')\n\n gt = new_ds.GetGeoTransform()\n md = new_ds.GetMetadataItem('AREA_OR_POINT')\n new_ds = None\n src_ds = None\n\n gt_expected = (1841003.5, 1.5, -5.0, 1144006.5, -5.0, -1.5)\n\n assert gt == gt_expected, 'did not get expected geotransform (2)'\n\n assert md == 'Point', 'did not get expected AREA_OR_POINT value'\n\n gdaltest.tiff_drv.Delete('tmp/test_98.tif')\n\n###############################################################################\n# Create copy into a RGB JPEG-IN-TIFF (#3887)\n\n\ndef test_tiff_write_99():\n\n src_ds = gdal.Open('data/rgbsmall.tif')\n new_ds = gdaltest.tiff_drv.CreateCopy('tmp/test_99.tif', src_ds, options=['COMPRESS=JPEG'])\n del new_ds\n src_ds = None\n\n ds = gdal.Open('tmp/test_99.tif')\n cs1 = ds.GetRasterBand(1).Checksum()\n cs2 = ds.GetRasterBand(2).Checksum()\n cs3 = ds.GetRasterBand(3).Checksum()\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/test_99.tif')\n\n assert (cs1, cs2, cs3) == (21629, 21651, 21371), ('%d,%d,%d' % (cs1, cs2, cs3))\n\n###############################################################################\n# Create copy into a 2 band JPEG-IN-TIFF (#3887)\n\n\ndef test_tiff_write_100():\n\n src_ds = gdaltest.tiff_drv.Create('/vsimem/test_100_src.tif', 16, 16, 2)\n src_ds.GetRasterBand(1).Fill(255)\n new_ds = gdaltest.tiff_drv.CreateCopy('/vsimem/test_100_dst.tif', src_ds, options=['COMPRESS=JPEG'])\n del new_ds\n src_ds = None\n\n ds = gdal.Open('/vsimem/test_100_dst.tif')\n cs1 = ds.GetRasterBand(1).Checksum()\n cs2 = ds.GetRasterBand(2).Checksum()\n ds = None\n\n gdaltest.tiff_drv.Delete('/vsimem/test_100_src.tif')\n gdaltest.tiff_drv.Delete('/vsimem/test_100_dst.tif')\n\n assert (cs1, cs2) == (3118, 0), ('%d,%d' % (cs1, cs2))\n\n###############################################################################\n# Test CHUNKY_STRIP_READ_SUPPORT (#3894)\n# We use random data so the compressed files are big enough to need partial\n# reloading. 
tiff_write_78 doesn't produce big enough data to trigger this...\n\n\ndef test_tiff_write_101():\n\n if not gdaltest.run_slow_tests():\n pytest.skip()\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('BigTIFF') == -1:\n pytest.skip()\n\n if sys.platform.startswith('linux'):\n # Much faster to use /dev/urandom than the Python random generator!\n f = open('/dev/urandom', 'rb')\n rand_array = f.read(10 * 1024 * 1024)\n f.close()\n else:\n import random\n rand_array = array.array('B')\n for _ in range(10 * 1024 * 1024):\n rand_array.append(random.randint(0, 255))\n\n f = open('tmp/tiff_write_101.bin', 'wb')\n f.write(rand_array)\n f.close()\n\n f = open('tmp/tiff_write_101.hdr', 'wb')\n f.write(\"\"\"ENVI\nsamples = 2500\nlines = 4000\nbands = 1\nheader offset = 0\nfile type = ENVI Standard\ndata type = 1\ninterleave = bsq\nbyte order = 0\nmap info = {UTM, 1, 1, 440720.000000, 3751320.000000, 60.000000, 60.000000, 11, North}\nband names = {\nBand 1}\"\"\".encode('ascii'))\n f.close()\n\n src_ds = gdal.Open('tmp/tiff_write_101.bin')\n expected_cs = src_ds.GetRasterBand(1).Checksum()\n\n for compression_method in ['DEFLATE', 'LZW', 'JPEG', 'PACKBITS', 'LZMA']:\n if md['DMD_CREATIONOPTIONLIST'].find(compression_method) == -1:\n continue\n\n ds = gdaltest.tiff_drv.CreateCopy('tmp/tiff_write_101.tif', src_ds,\n options=['COMPRESS=' + compression_method, 'BLOCKXSIZE=2500', 'BLOCKYSIZE=4000'])\n ds = None\n\n ds = gdal.Open('tmp/tiff_write_101.tif')\n gdal.ErrorReset()\n cs = ds.GetRasterBand(1).Checksum()\n error_msg = gdal.GetLastErrorMsg()\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_101.tif')\n\n if error_msg != '':\n src_ds = None\n gdaltest.tiff_drv.Delete('tmp/tiff_write_101.bin')\n pytest.fail()\n\n if compression_method != 'JPEG' and cs != expected_cs:\n src_ds = None\n gdaltest.tiff_drv.Delete('tmp/tiff_write_101.bin')\n pytest.fail('for compression method %s, got %d instead of %d' % (compression_method, cs, expected_cs))\n\n src_ds = None\n gdaltest.tiff_drv.Delete('tmp/tiff_write_101.bin')\n\n###############################################################################\n# Test writing and reading back COMPD_CS\n\n\ndef test_tiff_write_102():\n\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_102.tif', 1, 1)\n sr = osr.SpatialReference()\n sr.ImportFromEPSG(7401)\n name = sr.GetAttrValue('COMPD_CS')\n wkt = sr.ExportToWkt()\n ds.SetProjection(wkt)\n ds = None\n\n gdal.SetConfigOption('GTIFF_REPORT_COMPD_CS', 'YES')\n ds = gdal.Open('/vsimem/tiff_write_102.tif')\n wkt1 = ds.GetProjectionRef()\n ds = None\n\n gdal.SetConfigOption('GTIFF_REPORT_COMPD_CS', 'NO')\n ds = gdal.Open('/vsimem/tiff_write_102.tif')\n wkt2 = ds.GetProjectionRef()\n ds = None\n\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_102.tif')\n\n assert wkt1.startswith('COMPD_CS'), 'expected COMPD_CS, but got something else'\n\n assert not wkt2.startswith('COMPD_CS'), 'got COMPD_CS, but did not expect it'\n\n sr2 = osr.SpatialReference()\n sr2.SetFromUserInput(wkt1)\n got_name = sr2.GetAttrValue('COMPD_CS')\n if got_name != name:\n print(wkt2)\n pytest.fail('did not get expected COMPD_CS name')\n\n###############################################################################\n# Test -co COPY_SRC_OVERVIEWS=YES on a multiband source with external overviews (#3938)\n\n\ndef test_tiff_write_103():\n import test_cli_utilities\n if test_cli_utilities.get_gdaladdo_path() is None:\n pytest.skip()\n\n gdal.Translate('tmp/tiff_write_103_src.tif', 'data/rgbsmall.tif', options='-outsize 
260 260')\n gdaltest.runexternal(test_cli_utilities.get_gdaladdo_path() + ' -ro tmp/tiff_write_103_src.tif 2')\n gdal.Translate('tmp/tiff_write_103_dst.tif', 'tmp/tiff_write_103_src.tif', options='-co COPY_SRC_OVERVIEWS=YES')\n\n src_ds = gdal.Open('tmp/tiff_write_103_src.tif')\n dst_ds = gdal.Open('tmp/tiff_write_103_dst.tif')\n src_cs = src_ds.GetRasterBand(1).GetOverview(0).Checksum()\n dst_cs = dst_ds.GetRasterBand(1).GetOverview(0).Checksum()\n src_ds = None\n dst_ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tiff_write_103_src.tif')\n gdaltest.tiff_drv.Delete('tmp/tiff_write_103_dst.tif')\n\n assert src_cs == dst_cs, 'did not get expected checksum'\n\n\n###############################################################################\n# Confirm as best we can that we can write geotiff files with detailed\n# projection parameters with the correct linear units set. (#3901)\n\ndef test_tiff_write_104():\n\n src_ds = gdal.Open('data/spaf27_correct.tif')\n dst_ds = gdaltest.tiff_drv.CreateCopy('tmp/test_104.tif', src_ds)\n\n src_ds = None\n del dst_ds\n\n ds = gdal.Open('tmp/test_104.tif')\n wkt = ds.GetProjectionRef()\n ds = None\n\n srs = osr.SpatialReference(wkt)\n fe = srs.GetProjParm(osr.SRS_PP_FALSE_EASTING)\n assert abs(fe - 2000000.0) <= 0.001, 'did not get expected false easting'\n\n gdaltest.tiff_drv.Delete('tmp/test_104.tif')\n\n###############################################################################\n# Confirm as best we can that we can write geotiff files with detailed\n# projection parameters with the correct linear units set. (#3901)\n\n\ndef test_tiff_write_105():\n\n # This hangs forever with libtiff 3.8.2, so skip it\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('BigTIFF') == -1:\n pytest.skip()\n\n shutil.copyfile('data/bug4468.tif', 'tmp/bug4468.tif')\n\n # Update a pixel and close again.\n ds = gdal.Open('tmp/bug4468.tif', gdal.GA_Update)\n data = ds.ReadRaster(0, 0, 1, 1)\n ds.WriteRaster(0, 0, 1, 1, data)\n ds = None\n\n # Now check if the image is still intact.\n ds = gdal.Open('tmp/bug4468.tif')\n cs = ds.GetRasterBand(1).Checksum()\n\n assert cs == 2923, ('Did not get expected checksum, got %d.' 
% cs)\n\n ds = None\n\n gdaltest.tiff_drv.Delete('tmp/bug4468.tif')\n\n###############################################################################\n# Test the direct copy mechanism of JPEG source\n\n\ndef test_tiff_write_106(filename='../gdrivers/data/byte_with_xmp.jpg', options=None, check_cs=True):\n\n if options is None:\n options = ['COMPRESS=JPEG']\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('JPEG') == -1:\n pytest.skip()\n\n src_ds = gdal.Open(filename)\n nbands = src_ds.RasterCount\n src_cs = []\n for i in range(nbands):\n src_cs.append(src_ds.GetRasterBand(i + 1).Checksum())\n\n out_ds = gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_106.tif', src_ds, options=options)\n out_ds = None\n\n out_ds = gdal.Open('/vsimem/tiff_write_106.tif')\n cs = []\n for i in range(nbands):\n cs.append(out_ds.GetRasterBand(i + 1).Checksum())\n out_ds = None\n\n gdal.Unlink('/vsimem/tiff_write_106.tif')\n\n if check_cs:\n for i in range(nbands):\n assert cs[i] == src_cs[i], 'did not get expected checksum'\n else:\n for i in range(nbands):\n assert cs[i] != 0, 'did not get expected checksum'\n\n \n\ndef test_tiff_write_107():\n return test_tiff_write_106(options=['COMPRESS=JPEG', 'BLOCKYSIZE=8'])\n\n\ndef test_tiff_write_108():\n return test_tiff_write_106(options=['COMPRESS=JPEG', 'BLOCKYSIZE=20'])\n\n\ndef test_tiff_write_109():\n return test_tiff_write_106(options=['COMPRESS=JPEG', 'TILED=YES', 'BLOCKYSIZE=16', 'BLOCKXSIZE=16'])\n\n# Strip organization of YCbCr does *NOT* give exact pixels w.r.t. original image\n\n\ndef test_tiff_write_110():\n return test_tiff_write_106(filename='../gdrivers/data/albania.jpg', check_cs=False)\n\n# Whole copy of YCbCr *DOES* give exact pixels w.r.t. original image\n\n\ndef test_tiff_write_111():\n return test_tiff_write_106(filename='../gdrivers/data/albania.jpg', options=['COMPRESS=JPEG', 'BLOCKYSIZE=260'])\n\n\ndef test_tiff_write_111_bis():\n return test_tiff_write_106(filename='../gdrivers/data/albania.jpg', options=['COMPRESS=JPEG', 'BLOCKYSIZE=260', 'INTERLEAVE=PIXEL'])\n\n\ndef test_tiff_write_111_ter():\n return test_tiff_write_106(filename='../gdrivers/data/albania.jpg', options=['COMPRESS=JPEG', 'BLOCKYSIZE=260', 'INTERLEAVE=BAND'], check_cs=False)\n\n# Tiled organization of YCbCr does *NOT* give exact pixels w.r.t. 
original image\n\n\ndef test_tiff_write_112():\n return test_tiff_write_106(filename='../gdrivers/data/albania.jpg', options=['COMPRESS=JPEG', 'TILED=YES'], check_cs=False)\n\n# The source is a JPEG in RGB colorspace (usually it is YCbCr).\n\n\ndef test_tiff_write_113():\n return test_tiff_write_106(filename='../gdrivers/data/rgbsmall_rgb.jpg', options=['COMPRESS=JPEG', 'BLOCKYSIZE=8'])\n\n###############################################################################\n# Test CreateCopy() interruption\n\n\ndef test_tiff_write_114():\n\n tst = gdaltest.GDALTest('GTiff', 'byte.tif', 1, 4672)\n\n return tst.testCreateCopy(vsimem=1, interrupt_during_copy=True)\n\n###############################################################################\n# Test writing a pixel interleaved RGBA JPEG-compressed TIFF\n\n\ndef test_tiff_write_115():\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('JPEG') == -1:\n pytest.skip()\n\n tmpfilename = '/vsimem/tiff_write_115.tif'\n\n src_ds = gdal.Open('data/stefan_full_rgba.tif')\n ds = gdaltest.tiff_drv.CreateCopy(tmpfilename, src_ds, options=['COMPRESS=JPEG'])\n assert ds is not None\n ds = None\n src_ds = None\n\n f = gdal.VSIFOpenL(tmpfilename + '.aux.xml', 'rb')\n if f is not None:\n gdal.VSIFCloseL(f)\n gdal.Unlink(tmpfilename)\n pytest.fail()\n\n ds = gdal.Open(tmpfilename)\n md = ds.GetMetadata('IMAGE_STRUCTURE')\n if md['INTERLEAVE'] != 'PIXEL':\n ds = None\n gdal.Unlink(tmpfilename)\n pytest.fail()\n\n expected_cs = [16404, 62700, 37913, 14174]\n for i in range(4):\n cs = ds.GetRasterBand(i + 1).Checksum()\n if cs != expected_cs[i]:\n ds = None\n gdal.Unlink(tmpfilename)\n pytest.fail()\n\n if ds.GetRasterBand(i + 1).GetRasterColorInterpretation() != gdal.GCI_RedBand + i:\n ds = None\n gdal.Unlink(tmpfilename)\n pytest.fail()\n\n ds = None\n gdal.Unlink(tmpfilename)\n\n###############################################################################\n# Test writing a band interleaved RGBA JPEG-compressed TIFF\n\n\ndef test_tiff_write_116():\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('JPEG') == -1:\n pytest.skip()\n\n tmpfilename = '/vsimem/tiff_write_116.tif'\n\n src_ds = gdal.Open('data/stefan_full_rgba.tif')\n ds = gdaltest.tiff_drv.CreateCopy(tmpfilename, src_ds, options=['COMPRESS=JPEG', 'INTERLEAVE=BAND'])\n assert ds is not None\n ds = None\n src_ds = None\n\n f = gdal.VSIFOpenL(tmpfilename + '.aux.xml', 'rb')\n if f is not None:\n gdal.VSIFCloseL(f)\n gdal.Unlink(tmpfilename)\n pytest.fail()\n\n ds = gdal.Open(tmpfilename)\n md = ds.GetMetadata('IMAGE_STRUCTURE')\n if md['INTERLEAVE'] != 'BAND':\n ds = None\n gdal.Unlink(tmpfilename)\n pytest.fail()\n\n expected_cs = [16404, 62700, 37913, 14174]\n for i in range(4):\n cs = ds.GetRasterBand(i + 1).Checksum()\n if cs != expected_cs[i]:\n ds = None\n gdal.Unlink(tmpfilename)\n pytest.fail()\n\n if ds.GetRasterBand(i + 1).GetRasterColorInterpretation() != gdal.GCI_RedBand + i:\n ds = None\n gdal.Unlink(tmpfilename)\n pytest.fail()\n\n ds = None\n gdal.Unlink(tmpfilename)\n\n###############################################################################\n# Test bugfix for ticket #4771 (rewriting of a deflate compressed tile, libtiff bug)\n\n\ndef test_tiff_write_117():\n # This will also fail with a libtiff 4.x older than 2012-08-13\n # Might be good to be able to test internal libtiff presence\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('BigTIFF') == -1:\n pytest.skip()\n\n import random\n\n # so that we 
have always the same random :-)\n random.seed(0)\n\n ds = gdal.GetDriverByName('GTiff').Create('/vsimem/tiff_write_117.tif', 512, 256, 2, options=['COMPRESS=DEFLATE', 'TILED=YES'])\n\n # Write first tile so that its byte count of that tile is 2048 (a multiple of 1024)\n adjust = 1254\n data = '0' * (65536 - adjust) + ''.join([('%c' % random.randint(0, 255)) for _ in range(adjust)])\n ds.GetRasterBand(1).WriteRaster(0, 0, 256, 256, data)\n\n # Second tile will be implicitly written at closing, or we could write\n # any content\n\n ds = None\n\n ds = gdal.Open('/vsimem/tiff_write_117.tif', gdal.GA_Update)\n\n # Will adjust tif_rawdatasize to TIFFroundup_64((uint64)size, 1024) = TIFFroundup_64(2048, 1024) = 2048\n ds.GetRasterBand(1).ReadRaster(0, 0, 256, 256)\n\n # The new bytecount will be greater than 2048\n data = ''.join([('%c' % random.randint(0, 255)) for _ in range(256 * 256)])\n ds.GetRasterBand(1).WriteRaster(0, 0, 256, 256, data)\n\n # Make sure that data is written now\n ds.FlushCache()\n\n # Oops, without fix, the second tile will have been overwritten and an error will be emitted\n data = ds.GetRasterBand(1).ReadRaster(256, 0, 256, 256)\n\n ds = None\n\n gdal.Unlink('/vsimem/tiff_write_117.tif')\n\n assert data is not None, \\\n 'if GDAL is configured with external libtiff 4.x, it can fail if it is older than 4.0.3. With internal libtiff, should not fail'\n\n###############################################################################\n# Test bugfix for ticket #4816\n\n\ndef test_tiff_write_118():\n\n ds = gdal.GetDriverByName('GTiff').Create('/vsimem/tiff_write_118.tif', 1, 1)\n # Should be rejected in a non-XML domain\n ds.SetMetadata('bla', 'foo')\n ds = None\n\n ds = gdal.Open('/vsimem/tiff_write_118.tif')\n md = ds.GetMetadata('foo')\n ds = None\n\n gdal.Unlink('/vsimem/tiff_write_118.tif')\n\n assert not md\n\n###############################################################################\n# Test bugfix for ticket #4816\n\n\ndef test_tiff_write_119():\n\n ds = gdal.GetDriverByName('GTiff').Create('/vsimem/tiff_write_119.tif', 1, 1)\n ds.SetMetadata('foo=bar', 'foo')\n ds = None\n\n ds = gdal.Open('/vsimem/tiff_write_119.tif')\n md = ds.GetMetadata('foo')\n ds = None\n\n gdal.Unlink('/vsimem/tiff_write_119.tif')\n\n assert md['foo'] == 'bar'\n\n###############################################################################\n# Test bugfix for ticket #4816\n\n\ndef test_tiff_write_120():\n\n ds = gdal.GetDriverByName('GTiff').Create('/vsimem/tiff_write_120.tif', 1, 1)\n ds.SetMetadata('<foo/>', 'xml:foo')\n ds = None\n\n ds = gdal.Open('/vsimem/tiff_write_120.tif')\n md = ds.GetMetadata('xml:foo')\n ds = None\n\n gdal.Unlink('/vsimem/tiff_write_120.tif')\n\n assert len(md) == 1\n assert md[0] == '<foo/>'\n\n###############################################################################\n# Test error cases of COPY_SRC_OVERVIEWS creation option\n\n\ndef test_tiff_write_121():\n\n # Test when the overview band is NULL\n src_ds = gdal.Open(\"\"\"<VRTDataset rasterXSize=\"20\" rasterYSize=\"20\">\n <VRTRasterBand dataType=\"Byte\" band=\"1\">\n <SimpleSource>\n <SourceFilename relativeToVRT=\"1\">data/byte.tif</SourceFilename>\n <SourceBand>1</SourceBand>\n </SimpleSource>\n <Overview>\n <SourceFilename relativeToVRT=\"0\">non_existing</SourceFilename>\n <SourceBand>1</SourceBand>\n </Overview>\n </VRTRasterBand>\n</VRTDataset>\"\"\")\n gdal.PushErrorHandler('CPLQuietErrorHandler')\n ds = gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_121.tif', src_ds, 
options=['COPY_SRC_OVERVIEWS=YES'])\n gdal.PopErrorHandler()\n assert ds is None\n src_ds = None\n\n # Test when the overview count isn't the same on all base bands\n src_ds = gdal.Open(\"\"\"<VRTDataset rasterXSize=\"20\" rasterYSize=\"20\">\n <VRTRasterBand dataType=\"Byte\" band=\"1\">\n <SimpleSource>\n <SourceFilename relativeToVRT=\"1\">data/byte.tif</SourceFilename>\n <SourceBand>1</SourceBand>\n </SimpleSource>\n <Overview>\n <SourceFilename relativeToVRT=\"1\">data/byte.tif</SourceFilename>\n <SourceBand>1</SourceBand>\n </Overview>\n </VRTRasterBand>\n <VRTRasterBand dataType=\"Byte\" band=\"2\">\n <SimpleSource>\n <SourceFilename relativeToVRT=\"1\">data/byte.tif</SourceFilename>\n <SourceBand>1</SourceBand>\n </SimpleSource>\n </VRTRasterBand>\n</VRTDataset>\"\"\")\n gdal.PushErrorHandler('CPLQuietErrorHandler')\n ds = gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_121.tif', src_ds, options=['COPY_SRC_OVERVIEWS=YES'])\n gdal.PopErrorHandler()\n assert ds is None\n src_ds = None\n\n # Test when the overview bands of the same level do not have the same dimensions\n src_ds = gdal.Open(\"\"\"<VRTDataset rasterXSize=\"20\" rasterYSize=\"20\">\n <VRTRasterBand dataType=\"Byte\" band=\"1\">\n <SimpleSource>\n <SourceFilename relativeToVRT=\"1\">data/byte.tif</SourceFilename>\n <SourceBand>1</SourceBand>\n </SimpleSource>\n <Overview>\n <SourceFilename relativeToVRT=\"1\">data/byte.tif</SourceFilename>\n <SourceBand>1</SourceBand>\n </Overview>\n </VRTRasterBand>\n <VRTRasterBand dataType=\"Byte\" band=\"2\">\n <SimpleSource>\n <SourceFilename relativeToVRT=\"1\">data/byte.tif</SourceFilename>\n <SourceBand>1</SourceBand>\n </SimpleSource>\n <Overview>\n <SourceFilename relativeToVRT=\"0\">data/rgbsmall.tif</SourceFilename>\n <SourceBand>1</SourceBand>\n </Overview>\n </VRTRasterBand>\n</VRTDataset>\"\"\")\n gdal.PushErrorHandler('CPLQuietErrorHandler')\n ds = gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_121.tif', src_ds, options=['COPY_SRC_OVERVIEWS=YES'])\n gdal.PopErrorHandler()\n assert ds is None\n src_ds = None\n\n###############################################################################\n# Test write and read of some TIFFTAG_RESOLUTIONUNIT tags where '*'/'' is\n# specified (gdalwarp conflicts)\n# Expected to fail (properly) with older libtiff versions (<=3.8.2 for sure)\n\n\ndef test_tiff_write_122():\n new_ds = gdaltest.tiff_drv.Create('tmp/tags122.tif', 1, 1, 1)\n\n new_ds.SetMetadata({\n 'TIFFTAG_RESOLUTIONUNIT': '*',\n })\n\n new_ds = None\n # hopefully it's closed now!\n\n new_ds = gdal.Open('tmp/tags122.tif')\n md = new_ds.GetMetadata()\n\n if 'TIFFTAG_RESOLUTIONUNIT' not in md:\n pytest.fail(\"Couldn't find tag TIFFTAG_RESOLUTIONUNIT\")\n\n elif md['TIFFTAG_RESOLUTIONUNIT'] != '1 (unitless)':\n pytest.fail(\"Got unexpected tag TIFFTAG_RESOLUTIONUNIT='%s' (expected ='1 (unitless)')\" % md['TIFFTAG_RESOLUTIONUNIT'])\n\n new_ds = None\n\n gdaltest.tiff_drv.Delete('tmp/tags122.tif')\n\n###############################################################################\n# Test implicit photometric interpretation\n\n\ndef test_tiff_write_123():\n\n src_ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_123_src.tif', 1, 1, 5, gdal.GDT_Int16)\n src_ds.GetRasterBand(2).SetColorInterpretation(gdal.GCI_GreenBand)\n src_ds.GetRasterBand(5).SetColorInterpretation(gdal.GCI_AlphaBand)\n src_ds.GetRasterBand(3).SetColorInterpretation(gdal.GCI_BlueBand)\n src_ds.GetRasterBand(1).SetColorInterpretation(gdal.GCI_RedBand)\n src_ds = None\n statBuf = 
gdal.VSIStatL('/vsimem/tiff_write_123_src.tif.aux.xml', gdal.VSI_STAT_EXISTS_FLAG | gdal.VSI_STAT_NATURE_FLAG | gdal.VSI_STAT_SIZE_FLAG)\n assert statBuf is None, 'did not expect PAM file'\n src_ds = gdal.Open('/vsimem/tiff_write_123_src.tif')\n assert src_ds.GetMetadataItem('TIFFTAG_GDAL_METADATA', '_DEBUG_') is None, \\\n 'did not expect a TIFFTAG_GDAL_METADATA tag'\n assert src_ds.GetMetadataItem('TIFFTAG_PHOTOMETRIC', '_DEBUG_') == '2'\n assert src_ds.GetRasterBand(1).GetColorInterpretation() == gdal.GCI_RedBand\n assert src_ds.GetRasterBand(4).GetColorInterpretation() == gdal.GCI_Undefined\n assert src_ds.GetRasterBand(5).GetColorInterpretation() == gdal.GCI_AlphaBand\n assert src_ds.GetMetadataItem('TIFFTAG_EXTRASAMPLES', '_DEBUG_') == '0,2'\n\n new_ds = gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_123.tif', src_ds)\n del new_ds\n statBuf = gdal.VSIStatL('/vsimem/tiff_write_123.tif.aux.xml', gdal.VSI_STAT_EXISTS_FLAG | gdal.VSI_STAT_NATURE_FLAG | gdal.VSI_STAT_SIZE_FLAG)\n assert statBuf is None, 'did not expect PAM file'\n ds = gdal.Open('/vsimem/tiff_write_123.tif')\n assert ds.GetMetadataItem('TIFFTAG_GDAL_METADATA', '_DEBUG_') is None, \\\n 'did not expect a TIFFTAG_GDAL_METADATA tag'\n assert ds.GetRasterBand(1).GetColorInterpretation() == gdal.GCI_RedBand\n assert src_ds.GetRasterBand(4).GetColorInterpretation() == gdal.GCI_Undefined\n assert src_ds.GetRasterBand(5).GetColorInterpretation() == gdal.GCI_AlphaBand\n assert ds.GetMetadataItem('TIFFTAG_EXTRASAMPLES', '_DEBUG_') == '0,2'\n ds = None\n\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_123_src.tif')\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_123.tif')\n\n # From implicit RGB to BGR (with Photometric = MinIsBlack)\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_123_bgr.tif', 1, 1, 3, gdal.GDT_Byte)\n assert ds.GetMetadataItem('TIFFTAG_PHOTOMETRIC', '_DEBUG_') == '2'\n assert ds.GetMetadataItem('TIFFTAG_EXTRASAMPLES', '_DEBUG_') is None\n ds.GetRasterBand(1).SetColorInterpretation(gdal.GCI_BlueBand)\n assert ds.GetRasterBand(1).GetColorInterpretation() == gdal.GCI_BlueBand\n ds.GetRasterBand(2).SetColorInterpretation(gdal.GCI_GreenBand)\n ds.GetRasterBand(3).SetColorInterpretation(gdal.GCI_RedBand)\n ds = None\n statBuf = gdal.VSIStatL('/vsimem/tiff_write_123_bgr.tif.aux.xml',\n gdal.VSI_STAT_EXISTS_FLAG | gdal.VSI_STAT_NATURE_FLAG | gdal.VSI_STAT_SIZE_FLAG)\n assert statBuf is None, 'did not expect a PAM file'\n ds = gdal.Open('/vsimem/tiff_write_123_bgr.tif')\n assert ds.GetMetadataItem('TIFFTAG_PHOTOMETRIC', '_DEBUG_') == '1'\n assert ds.GetMetadataItem('TIFFTAG_EXTRASAMPLES', '_DEBUG_') == '0,0'\n assert ds.GetMetadataItem('TIFFTAG_GDAL_METADATA', '_DEBUG_') is not None, \\\n 'expected a TIFFTAG_GDAL_METADATA tag'\n assert ds.GetRasterBand(1).GetColorInterpretation() == gdal.GCI_BlueBand\n assert ds.GetRasterBand(2).GetColorInterpretation() == gdal.GCI_GreenBand\n assert ds.GetRasterBand(3).GetColorInterpretation() == gdal.GCI_RedBand\n ds = None\n\n # Test overriding internal color interpretation with PAM one (read-only mode)\n ds = gdal.Open('/vsimem/tiff_write_123_bgr.tif')\n ds.GetRasterBand(1).SetColorInterpretation(gdal.GCI_RedBand)\n ds = None\n statBuf = gdal.VSIStatL('/vsimem/tiff_write_123_bgr.tif.aux.xml',\n gdal.VSI_STAT_EXISTS_FLAG | gdal.VSI_STAT_NATURE_FLAG | gdal.VSI_STAT_SIZE_FLAG)\n assert statBuf is not None, 'expected a PAM file'\n ds = gdal.Open('/vsimem/tiff_write_123_bgr.tif')\n assert ds.GetRasterBand(1).GetColorInterpretation() == gdal.GCI_RedBand\n assert 
ds.GetRasterBand(2).GetColorInterpretation() == gdal.GCI_GreenBand\n assert ds.GetRasterBand(3).GetColorInterpretation() == gdal.GCI_RedBand\n ds = None\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_123_bgr.tif')\n\n # Create a BGR with PROFILE=BASELINE --> no TIFFTAG_GDAL_METADATA tag, but .aux.xml instead\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_123_bgr.tif', 1, 1, 3,\n options=['PROFILE=BASELINE'])\n ds.GetRasterBand(1).SetColorInterpretation(gdal.GCI_BlueBand)\n ds.GetRasterBand(2).SetColorInterpretation(gdal.GCI_GreenBand)\n ds.GetRasterBand(3).SetColorInterpretation(gdal.GCI_RedBand)\n ds = None\n statBuf = gdal.VSIStatL('/vsimem/tiff_write_123_bgr.tif.aux.xml',\n gdal.VSI_STAT_EXISTS_FLAG | gdal.VSI_STAT_NATURE_FLAG | gdal.VSI_STAT_SIZE_FLAG)\n assert statBuf is not None, 'expected a PAM file'\n ds = gdal.Open('/vsimem/tiff_write_123_bgr.tif')\n assert ds.GetMetadataItem('TIFFTAG_GDAL_METADATA', '_DEBUG_') is None, \\\n 'did not expect a TIFFTAG_GDAL_METADATA tag'\n assert ds.GetRasterBand(1).GetColorInterpretation() == gdal.GCI_BlueBand\n assert ds.GetRasterBand(2).GetColorInterpretation() == gdal.GCI_GreenBand\n assert ds.GetRasterBand(3).GetColorInterpretation() == gdal.GCI_RedBand\n ds = None\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_123_bgr.tif')\n\n # From implicit RGBA to MINISBLACK\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_123_rgba.tif', 1, 1, 4, gdal.GDT_Byte)\n assert ds.GetMetadataItem('TIFFTAG_PHOTOMETRIC', '_DEBUG_') == '2'\n assert ds.GetRasterBand(1).GetColorInterpretation() == gdal.GCI_RedBand\n assert ds.GetRasterBand(4).GetColorInterpretation() == gdal.GCI_AlphaBand\n assert ds.GetMetadataItem('TIFFTAG_EXTRASAMPLES', '_DEBUG_') == '2'\n\n ds.GetRasterBand(1).SetColorInterpretation(gdal.GCI_Undefined)\n assert ds.GetRasterBand(1).GetColorInterpretation() == gdal.GCI_Undefined\n assert ds.GetMetadataItem('TIFFTAG_PHOTOMETRIC', '_DEBUG_') == '1'\n assert ds.GetMetadataItem('TIFFTAG_EXTRASAMPLES', '_DEBUG_') == '0,0,2'\n ds = None\n\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_123_rgba.tif')\n\n # From that implicit RGBA to Gray,Undefined,Undefined,Alpha doesn't\n # produce PAM file\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_123_guua.tif', 1, 1, 4, gdal.GDT_Byte)\n ds.GetRasterBand(1).SetColorInterpretation(gdal.GCI_GrayIndex)\n ds.GetRasterBand(2).SetColorInterpretation(gdal.GCI_Undefined)\n ds.GetRasterBand(3).SetColorInterpretation(gdal.GCI_Undefined)\n ds.GetRasterBand(4).SetColorInterpretation(gdal.GCI_AlphaBand)\n ds = None\n statBuf = gdal.VSIStatL('/vsimem/tiff_write_123_guua.tif.aux.xml', gdal.VSI_STAT_EXISTS_FLAG | gdal.VSI_STAT_NATURE_FLAG | gdal.VSI_STAT_SIZE_FLAG)\n assert statBuf is None, 'did not expect PAM file'\n ds = gdal.Open('/vsimem/tiff_write_123_guua.tif')\n assert ds.GetMetadataItem('TIFFTAG_GDAL_METADATA', '_DEBUG_') is None, \\\n 'did not expect TIFFTAG_GDAL_METADATA tag'\n assert ds.GetRasterBand(1).GetColorInterpretation() == gdal.GCI_GrayIndex\n assert ds.GetRasterBand(4).GetColorInterpretation() == gdal.GCI_AlphaBand\n ds = None\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_123_guua.tif')\n\n # Test that CreateCopy() from a RGB UInt16 doesn't generate ExtraSamples\n src_ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_123_rgb_src.tif',\n 1, 1, 3, gdal.GDT_UInt16, options=['PHOTOMETRIC=RGB'])\n ds = gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_123_rgb.tif', src_ds)\n src_ds = None\n assert ds.GetMetadataItem('TIFFTAG_PHOTOMETRIC', '_DEBUG_') == '2'\n assert 
ds.GetMetadataItem('TIFFTAG_EXTRASAMPLES', '_DEBUG_') is None\n ds = None\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_123_rgb_src.tif')\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_123_rgb.tif')\n\n # Test that PHOTOMETRIC=RGB overrides the source color interpretation of the\n # first 3 bands\n src_ds = gdal.GetDriverByName('MEM').Create('', 1, 1, 3)\n gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_123_rgb.tif', src_ds,\n options=['PHOTOMETRIC=RGB'])\n ds = gdal.Open('/vsimem/tiff_write_123_rgb.tif')\n assert ds.GetRasterBand(1).GetColorInterpretation() == gdal.GCI_RedBand\n ds = None\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_123_rgb.tif')\n\n src_ds = gdal.GetDriverByName('MEM').Create('', 1, 1, 5)\n src_ds.GetRasterBand(5).SetColorInterpretation(gdal.GCI_AlphaBand)\n gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_123_rgbua.tif', src_ds,\n options=['PHOTOMETRIC=RGB'])\n ds = gdal.Open('/vsimem/tiff_write_123_rgbua.tif')\n assert ds.GetRasterBand(1).GetColorInterpretation() == gdal.GCI_RedBand\n assert ds.GetRasterBand(4).GetColorInterpretation() == gdal.GCI_Undefined\n assert ds.GetRasterBand(5).GetColorInterpretation() == gdal.GCI_AlphaBand\n ds = None\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_123_rgbua.tif')\n\n # Test updating alpha to undefined\n gdaltest.tiff_drv.Create('/vsimem/tiff_write_123_rgba_to_undefined.tif', 1, 1, 4,\n options=['PHOTOMETRIC=RGB', 'ALPHA=YES'])\n ds = gdal.Open('/vsimem/tiff_write_123_rgba_to_undefined.tif', gdal.GA_Update)\n ds.GetRasterBand(4).SetColorInterpretation(gdal.GCI_Undefined)\n ds = None\n statBuf = gdal.VSIStatL('/vsimem/tiff_write_123_rgba_to_undefined.tif.aux.xml', gdal.VSI_STAT_EXISTS_FLAG | gdal.VSI_STAT_NATURE_FLAG | gdal.VSI_STAT_SIZE_FLAG)\n assert statBuf is None, 'did not expect PAM file'\n ds = gdal.Open('/vsimem/tiff_write_123_rgba_to_undefined.tif')\n assert ds.GetRasterBand(4).GetColorInterpretation() == gdal.GCI_Undefined\n ds = None\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_123_rgba_to_undefined.tif')\n\n###############################################################################\n# Test error cases with palette creation\n\n\ndef test_tiff_write_124():\n\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_124.tif', 1, 1, 3, gdal.GDT_Byte)\n\n gdal.PushErrorHandler('CPLQuietErrorHandler')\n # Test \"SetColorTable() can only be called on band 1\"\n ret = ds.GetRasterBand(2).SetColorTable(gdal.ColorTable())\n gdal.PopErrorHandler()\n assert ret != 0\n\n gdal.PushErrorHandler('CPLQuietErrorHandler')\n # Test \"SetColorTable() not supported for multi-sample TIFF files\"\n ret = ds.GetRasterBand(1).SetColorTable(gdal.ColorTable())\n gdal.PopErrorHandler()\n assert ret != 0\n\n ds = None\n\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_124.tif', 1, 1, 1, gdal.GDT_UInt32)\n gdal.PushErrorHandler('CPLQuietErrorHandler')\n # Test \"SetColorTable() only supported for Byte or UInt16 bands in TIFF format.\"\n ret = ds.GetRasterBand(1).SetColorTable(gdal.ColorTable())\n gdal.PopErrorHandler()\n assert ret != 0\n ds = None\n\n gdal.PushErrorHandler('CPLQuietErrorHandler')\n # Test \"SetColorTable() only supported for Byte or UInt16 bands in TIFF format.\"\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_124.tif', 1, 1, 1, gdal.GDT_UInt32, options=['PHOTOMETRIC=PALETTE'])\n gdal.PopErrorHandler()\n ds = None\n\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_124.tif')\n\n###############################################################################\n# Test out-of-memory conditions with SplitBand and 
SplitBitmapBand\n\n\ndef test_tiff_write_125():\n\n if gdal.GetConfigOption('SKIP_MEM_INTENSIVE_TEST') is not None:\n pytest.skip()\n\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_125.tif', 2147000000, 5000, 65535, options=['SPARSE_OK=YES', 'BLOCKYSIZE=5000', 'COMPRESS=LZW', 'BIGTIFF=NO'])\n ds = None\n\n ds = gdal.Open('/vsimem/tiff_write_125.tif')\n # Will not open on 32-bit due to overflow\n if ds is not None:\n gdal.PushErrorHandler('CPLQuietErrorHandler')\n ds.GetRasterBand(1).ReadBlock(0, 0)\n gdal.PopErrorHandler()\n\n ds = gdal.GetDriverByName('GTiff').Create('/vsimem/tiff_write_125.tif', 2147000000, 5000, 1, options=['NBITS=1', 'SPARSE_OK=YES', 'BLOCKYSIZE=5000', 'COMPRESS=LZW', 'BIGTIFF=NO'])\n ds = None\n\n ds = gdal.Open('/vsimem/tiff_write_125.tif')\n # Will not open on 32-bit due to overflow\n if ds is not None:\n gdal.PushErrorHandler('CPLQuietErrorHandler')\n ds.GetRasterBand(1).ReadBlock(0, 0)\n gdal.PopErrorHandler()\n\n gdal.Unlink('/vsimem/tiff_write_125.tif')\n\n###############################################################################\n# Test implicit JPEG-in-TIFF overviews\n\n\ndef test_tiff_write_126():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('JPEG') == -1:\n pytest.skip()\n\n src_ds = gdal.Open('../gdrivers/data/small_world_400pct.vrt')\n\n options_list = [(['COMPRESS=JPEG', 'PHOTOMETRIC=YCBCR'], [48788, 56561], [61397, 2463, 2454], [29605, 33654], [10904, 10453]),\n (['COMPRESS=JPEG', 'PHOTOMETRIC=YCBCR', 'JPEGTABLESMODE=0'], [48788, 56561], [61397, 2463, 2454], [29605, 33654], [10904, 10453]),\n (['COMPRESS=JPEG', 'PHOTOMETRIC=YCBCR', 'TILED=YES'], [48788, 56561], [61397, 2463, 2454], [29605, 33654], [10904, 10453]),\n (['COMPRESS=JPEG', 'PHOTOMETRIC=YCBCR', 'BLOCKYSIZE=800'], [48788, 56561], [61397, 2463, 2454], [29605, 33654], [10904, 10453]),\n (['COMPRESS=JPEG', 'PHOTOMETRIC=YCBCR', 'BLOCKYSIZE=64'], [48788, 56561], [61397, 2463, 2454], [29605, 33654], [10904, 10453]),\n (['COMPRESS=JPEG'], [49887, 58937], [59311, 2826], [30829, 34806], [11664, 58937]),\n (['COMPRESS=JPEG', 'INTERLEAVE=BAND'], [49887, 58937], [59311, 2826], [30829, 34806], [11664, 58937]),\n (['COMPRESS=JPEG', 'INTERLEAVE=BAND', 'TILED=YES'], [49887, 58937], [59311, 2826], [30829, 34806], [11664, 58937]),\n (['COMPRESS=JPEG', 'INTERLEAVE=BAND', 'BLOCKYSIZE=800'], [49887, 58937], [59311, 2826], [30829, 34806], [11664, 58937]),\n (['COMPRESS=JPEG', 'INTERLEAVE=BAND', 'BLOCKYSIZE=32'], [49887, 58937], [59311, 2826], [30829, 34806], [11664, 58937]),\n (['COMPRESS=JPEG', 'BLOCKYSIZE=8'], [49887, 58937], [59311, 2826], [30829, 34806], [11664, 58937]),\n ]\n\n for (options, cs1, cs2, cs3, cs4) in options_list:\n os.environ['JPEGMEM'] = '500M'\n ds = gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_126.tif', src_ds, options=options)\n ds = None\n del os.environ['JPEGMEM']\n\n ds = gdal.Open('/vsimem/tiff_write_126.tif')\n # Officially we have 0 public overviews...\n assert ds.GetRasterBand(1).GetOverviewCount() == 0, options\n # But they do exist...\n cs = ds.GetRasterBand(1).GetOverview(0).Checksum()\n assert cs in cs1, options\n cs = ds.GetRasterBand(2).GetOverview(0).Checksum()\n assert cs in cs2, options\n cs = ds.GetRasterBand(1).GetOverview(1).Checksum()\n assert cs in cs3, options\n cs = ds.GetRasterBand(1).GetOverview(2).Checksum()\n assert cs in cs4, options\n assert ds.GetRasterBand(1).GetOverview(-1) is None, options\n assert ds.GetRasterBand(1).GetOverview(3) is None, options\n ovr_1_data = 
ds.GetRasterBand(1).GetOverview(1).GetDataset().ReadRaster(0, 0, 400, 200)\n subsampled_data = ds.ReadRaster(0, 0, 1600, 800, 400, 200)\n assert ovr_1_data == subsampled_data, options\n ds = None\n\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_126.tif')\n\n src_ds = gdal.Open('../gdrivers/data/small_world_400pct_1band.vrt')\n\n options_list = [(['COMPRESS=JPEG'], [49887, 58937], [30829, 34806], [11664, 58937]),\n (['COMPRESS=JPEG', 'TILED=YES'], [49887, 58937], [30829, 34806], [11664, 58937]),\n (['COMPRESS=JPEG', 'BLOCKYSIZE=800'], [49887, 58937], [30829, 34806], [11664, 58937]),\n (['COMPRESS=JPEG', 'BLOCKYSIZE=32'], [49887, 58937], [30829, 34806], [11664, 58937]),\n ]\n\n for (options, cs1, cs3, cs4) in options_list:\n os.environ['JPEGMEM'] = '500M'\n ds = gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_126.tif', src_ds, options=options)\n ds = None\n del os.environ['JPEGMEM']\n\n ds = gdal.Open('/vsimem/tiff_write_126.tif')\n # Officially we have 0 public overviews...\n assert ds.GetRasterBand(1).GetOverviewCount() == 0, options\n # But they do exist...\n cs = ds.GetRasterBand(1).GetOverview(0).Checksum()\n assert cs in cs1, options\n cs = ds.GetRasterBand(1).GetOverview(1).Checksum()\n assert cs in cs3, options\n cs = ds.GetRasterBand(1).GetOverview(2).Checksum()\n assert cs in cs4, options\n ovr_1_data = ds.GetRasterBand(1).GetOverview(1).GetDataset().ReadRaster(0, 0, 400, 200)\n subsampled_data = ds.ReadRaster(0, 0, 1600, 800, 400, 200)\n assert ovr_1_data == subsampled_data, options\n ds = None\n\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_126.tif')\n\n # Test single-strip, opened as split band\n src_ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_126_src.tif', 8, 2001)\n src_ds.GetRasterBand(1).Fill(255)\n ds = gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_126.tif', src_ds, options=['COMPRESS=JPEG', 'BLOCKYSIZE=2001'])\n src_ds = None\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_126_src.tif')\n ds = None\n\n ds = gdal.Open('/vsimem/tiff_write_126.tif')\n assert ds.GetRasterBand(1).GetBlockSize() == [8, 1]\n ovr_ds = ds.GetRasterBand(1).GetOverview(1).GetDataset()\n ovr_1_data = ovr_ds.ReadRaster(0, 0, ovr_ds.RasterXSize, ovr_ds.RasterYSize, 1, 1)\n subsampled_data = ds.ReadRaster(0, 0, ds.RasterXSize, ds.RasterYSize, 1, 1)\n assert ovr_1_data == subsampled_data\n ds = None\n\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_126.tif')\n\n # We need libtiff 4.0.4 (unreleased at that time)\n if md['LIBTIFF'] != 'INTERNAL':\n print('skipping tests that will fail without internal libtiff')\n return\n\n # Test with completely sparse file\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_126.tif', 1024, 1024, options=['COMPRESS=JPEG', 'SPARSE_OK=YES'])\n ds = None\n\n ds = gdal.Open('/vsimem/tiff_write_126.tif')\n assert ds.GetRasterBand(1).GetOverview(0) is not None\n assert ds.GetRasterBand(1).GetMetadataItem('JPEGTABLES', 'TIFF') is not None\n assert ds.GetRasterBand(1).GetMetadataItem('BLOCK_OFFSET_0_0', 'TIFF') is None\n assert ds.GetRasterBand(1).GetMetadataItem('BLOCK_SIZE_0_0', 'TIFF') is None\n ds = None\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_126.tif')\n\n # Test with partially sparse file\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_126.tif', 1024, 1024, 3, options=['COMPRESS=JPEG', 'SPARSE_OK=YES', 'INTERLEAVE=BAND'])\n # Fill band 3, but let blocks of band 1 unwritten.\n ds.GetRasterBand(3).Fill(0)\n ds = None\n\n ds = gdal.Open('/vsimem/tiff_write_126.tif')\n cs = ds.GetRasterBand(1).GetOverview(0).Checksum()\n assert cs == 0\n ds = None\n 
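# (Added note, not in the original test.) Band 1 was deliberately left unwritten\n # above: with SPARSE_OK=YES its blocks have no backing data, so the implicit\n # JPEG-in-TIFF overview is expected to decode as all zeroes, hence the\n # checksum of 0 just checked.\n 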
gdaltest.tiff_drv.Delete('/vsimem/tiff_write_126.tif')\n\n###############################################################################\n# Test setting/unsetting metadata in update mode (#5628)\n\n\ndef test_tiff_write_127():\n\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_127.tif', 1, 1)\n ds = None\n\n for i in range(2):\n\n ds = gdal.Open('/vsimem/tiff_write_127.tif', gdal.GA_Update)\n obj = ds if i == 0 else ds.GetRasterBand(1)\n obj.SetMetadata({'key': 'value'})\n obj = None\n ds = None\n\n ds = gdal.Open('/vsimem/tiff_write_127.tif', gdal.GA_Update)\n obj = ds if i == 0 else ds.GetRasterBand(1)\n if obj.GetMetadataItem('key') != 'value':\n print(i)\n pytest.fail(obj.GetMetadata())\n obj.SetMetadata({})\n obj = None\n ds = None\n\n ds = gdal.Open('/vsimem/tiff_write_127.tif', gdal.GA_Update)\n obj = ds if i == 0 else ds.GetRasterBand(1)\n assert not obj.GetMetadata(), i\n obj.SetMetadataItem('key', 'value')\n obj = None\n ds = None\n\n ds = gdal.Open('/vsimem/tiff_write_127.tif', gdal.GA_Update)\n obj = ds if i == 0 else ds.GetRasterBand(1)\n assert obj.GetMetadataItem('key') == 'value', i\n obj.SetMetadataItem('key', None)\n obj = None\n ds = None\n\n ds = gdal.Open('/vsimem/tiff_write_127.tif', gdal.GA_Update)\n obj = ds if i == 0 else ds.GetRasterBand(1)\n assert not obj.GetMetadata(), i\n obj = None\n ds = None\n\n statBuf = gdal.VSIStatL('/vsimem/tiff_write_127.tif.aux.xml')\n if statBuf is not None:\n print(i)\n pytest.fail('unexpected PAM file')\n\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_127.tif')\n\n###############################################################################\n# Test lossless copying of a CMYK JPEG into JPEG-in-TIFF (#5712)\n\n\ndef test_tiff_write_128():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('JPEG') == -1:\n pytest.skip()\n\n gdal.SetConfigOption('GDAL_JPEG_TO_RGB', 'NO')\n src_ds = gdal.Open('../gdrivers/data/rgb_ntf_cmyk.jpg')\n gdal.SetConfigOption('GDAL_JPEG_TO_RGB', None)\n\n # Will implicitly receive the CMYK photometric interpretation.\n old_val = gdal.GetConfigOption('GDAL_PAM_ENABLED')\n gdal.SetConfigOption('GDAL_PAM_ENABLED', 'NO')\n ds = gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_128.tif', src_ds, options=['COMPRESS=JPEG'])\n ds = None\n gdal.SetConfigOption('GDAL_PAM_ENABLED', old_val)\n\n # We need to reopen in raw mode to keep the automatic CMYK->RGBA translation from triggering\n ds = gdal.Open('GTIFF_RAW:/vsimem/tiff_write_128.tif')\n for i in range(4):\n assert src_ds.GetRasterBand(i + 1).GetColorInterpretation() == ds.GetRasterBand(i + 1).GetColorInterpretation()\n assert src_ds.GetRasterBand(i + 1).Checksum() == ds.GetRasterBand(i + 1).Checksum()\n ds = None\n\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_128.tif')\n\n # Try with explicit CMYK photometric interpretation\n old_val = gdal.GetConfigOption('GDAL_PAM_ENABLED')\n gdal.SetConfigOption('GDAL_PAM_ENABLED', 'NO')\n ds = gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_128.tif', src_ds, options=['COMPRESS=JPEG', 'PHOTOMETRIC=CMYK'])\n ds = None\n gdal.SetConfigOption('GDAL_PAM_ENABLED', old_val)\n\n # We need to reopen in raw mode to keep the automatic CMYK->RGBA translation from triggering\n ds = gdal.Open('GTIFF_RAW:/vsimem/tiff_write_128.tif')\n for i in range(4):\n assert src_ds.GetRasterBand(i + 1).GetColorInterpretation() == ds.GetRasterBand(i + 1).GetColorInterpretation()\n assert src_ds.GetRasterBand(i + 1).Checksum() == ds.GetRasterBand(i + 1).Checksum()\n ds = None\n\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_128.tif')\n\n # Try with more neutral colorspace 
in the case the source JPEG is not really CMYK (yes that happens !)\n old_val = gdal.GetConfigOption('GDAL_PAM_ENABLED')\n gdal.SetConfigOption('GDAL_PAM_ENABLED', 'NO')\n ds = gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_128.tif', src_ds, options=['COMPRESS=JPEG', 'PHOTOMETRIC=MINISBLACK', 'PROFILE=BASELINE'])\n ds = None\n gdal.SetConfigOption('GDAL_PAM_ENABLED', old_val)\n\n # Here we can reopen without GTIFF_RAW trick\n ds = gdal.Open('/vsimem/tiff_write_128.tif')\n for i in range(4):\n # The color interpretation will NOT be CMYK\n assert src_ds.GetRasterBand(i + 1).GetColorInterpretation() != ds.GetRasterBand(i + 1).GetColorInterpretation()\n assert src_ds.GetRasterBand(i + 1).Checksum() == ds.GetRasterBand(i + 1).Checksum()\n ds = None\n\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_128.tif')\n\n###############################################################################\n# Check effective guessing of existing JPEG quality\n\n\ndef test_tiff_write_129():\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('BigTIFF') == -1:\n pytest.skip()\n\n if md['DMD_CREATIONOPTIONLIST'].find('JPEG') == -1:\n pytest.skip()\n\n for jpegtablesmode in ['1', '3']:\n for photometric in ['RGB', 'YCBCR']:\n cs_ref = 0\n for i in range(2):\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_129.tif', 64, 32, 3,\n options=['COMPRESS=JPEG', 'TILED=YES', 'BLOCKXSIZE=32', 'BLOCKYSIZE=32', 'JPEG_QUALITY=50', 'PHOTOMETRIC=' + photometric, 'JPEGTABLESMODE=' + jpegtablesmode])\n src_ds = gdal.Open('data/rgbsmall.tif')\n data = src_ds.ReadRaster(0, 0, 32, 32)\n ds.WriteRaster(0, 0, 32, 32, data)\n\n # In second pass, we re-open the dataset\n if i == 1:\n ds = None\n ds = gdal.Open('/vsimem/tiff_write_129.tif', gdal.GA_Update)\n ds.WriteRaster(32, 0, 32, 32, data)\n ds = None\n\n ds = gdal.Open('/vsimem/tiff_write_129.tif')\n with gdaltest.SetCacheMax(0):\n cs = ds.GetRasterBand(1).Checksum()\n ds = None\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_129.tif')\n\n if i == 0:\n cs_ref = cs\n elif cs != cs_ref:\n print(photometric)\n print(i)\n pytest.fail(jpegtablesmode)\n\n \n###############################################################################\n# Test cases where JPEG quality will fail\n\n\ndef test_tiff_write_130():\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('BigTIFF') == -1:\n pytest.skip()\n\n if md['DMD_CREATIONOPTIONLIST'].find('JPEG') == -1:\n pytest.skip()\n\n shutil.copyfile('data/byte_jpg_unusual_jpegtable.tif', 'tmp/byte_jpg_unusual_jpegtable.tif')\n ds = gdal.Open('tmp/byte_jpg_unusual_jpegtable.tif', gdal.GA_Update)\n assert ds.GetRasterBand(1).Checksum() == 4771\n src_ds = gdal.Open('data/byte.tif', gdal.GA_Update)\n ds.WriteRaster(0, 0, 20, 20, src_ds.ReadRaster())\n src_ds = None\n ds = None\n ds = gdal.Open('tmp/byte_jpg_unusual_jpegtable.tif')\n assert ds.GetRasterBand(1).Checksum() == 4743\n ds = None\n os.unlink('tmp/byte_jpg_unusual_jpegtable.tif')\n\n shutil.copyfile('data/byte_jpg_tablesmodezero.tif', 'tmp/byte_jpg_tablesmodezero.tif')\n ds = gdal.Open('tmp/byte_jpg_tablesmodezero.tif', gdal.GA_Update)\n assert ds.GetRasterBand(1).Checksum() == 4743\n src_ds = gdal.Open('data/byte.tif', gdal.GA_Update)\n ds.WriteRaster(0, 0, 20, 20, src_ds.ReadRaster())\n src_ds = None\n ds = None\n ds = gdal.Open('tmp/byte_jpg_tablesmodezero.tif')\n assert ds.GetRasterBand(1).Checksum() == 4743\n ds = None\n 
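# The checksums above document the current behaviour when the JPEG quality\n # of the pre-existing file cannot be reliably guessed. A minimal,\n # illustrative sketch (not part of the original test) of how the rewritten\n # strip could be located on disk through the 'TIFF' metadata domain:\n #   ds = gdal.Open('tmp/byte_jpg_tablesmodezero.tif')\n #   print(ds.GetRasterBand(1).GetMetadataItem('BLOCK_OFFSET_0_0', 'TIFF'))\n #   print(ds.GetRasterBand(1).GetMetadataItem('BLOCK_SIZE_0_0', 'TIFF'))\n 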
os.unlink('tmp/byte_jpg_tablesmodezero.tif')\n\n###############################################################################\n# Test LZMA compression\n\n\ndef test_tiff_write_131(level=1):\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('LZMA') == -1:\n pytest.skip()\n\n filename = '/vsimem/tiff_write_131.tif'\n src_ds = gdal.Open('data/byte.tif')\n ds = gdaltest.tiff_drv.CreateCopy(filename, src_ds,\n options=['COMPRESS=LZMA', 'LZMA_PRESET=' + str(level)])\n assert ds.GetRasterBand(1).Checksum() == 4672\n ds = None\n\n # LZMA requires an awful amount of memory even on small files\n if gdal.GetLastErrorMsg().find('cannot allocate memory') >= 0:\n gdal.Unlink(filename)\n pytest.skip()\n\n ds = gdal.Open(filename)\n assert ds.GetRasterBand(1).Checksum() == 4672\n ds = None\n\n gdal.Unlink(filename)\n\n\ndef test_tiff_write_131_level_9():\n return test_tiff_write_131(level=9)\n\n\n###############################################################################\n# Test that PAM metadata is cleared when internal metadata is set (#5807)\n\n\ndef test_tiff_write_132():\n\n for i in range(2):\n\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_132.tif', 1, 1)\n ds = None\n\n # Open in read-only\n ds = gdal.Open('/vsimem/tiff_write_132.tif')\n ds.SetMetadataItem('FOO', 'BAR')\n ds.GetRasterBand(1).SetMetadataItem('FOO', 'BAR')\n ds = None\n\n # Check that PAM file exists\n assert gdal.VSIStatL('/vsimem/tiff_write_132.tif.aux.xml') is not None\n\n # Open in read-write\n ds = gdal.Open('/vsimem/tiff_write_132.tif', gdal.GA_Update)\n if i == 0:\n ds.SetMetadataItem('FOO', 'BAZ')\n ds.GetRasterBand(1).SetMetadataItem('FOO', 'BAZ')\n else:\n ds.SetMetadata({'FOO': 'BAZ'})\n ds.GetRasterBand(1).SetMetadata({'FOO': 'BAZ'})\n ds = None\n\n # Check that PAM file no longer exists\n assert gdal.VSIStatL('/vsimem/tiff_write_132.tif.aux.xml') is None, i\n\n ds = gdal.Open('/vsimem/tiff_write_132.tif')\n assert ds.GetMetadataItem('FOO') == 'BAZ' and ds.GetRasterBand(1).GetMetadataItem('FOO') == 'BAZ'\n ds = None\n\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_132.tif')\n\n \n###############################################################################\n# Test streaming capabilities\n\n\ndef test_tiff_write_133():\n\n src_ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_133.tif', 1024, 1000, 3, options=['STREAMABLE_OUTPUT=YES'])\n src_ds.SetGeoTransform([1, 2, 0, 3, 0, -2])\n srs = osr.SpatialReference()\n srs.SetFromUserInput('EPSG:32601')\n src_ds.SetProjection(srs.ExportToWkt())\n src_ds.SetMetadataItem('FOO', 'BAR')\n src_ds.GetRasterBand(1).SetNoDataValue(127)\n src_ds.GetRasterBand(1).Fill(64)\n src_ds.GetRasterBand(2).Fill(127)\n src_ds.GetRasterBand(3).Fill(184)\n\n src_ds.FlushCache()\n gdal.PushErrorHandler()\n ret = src_ds.SetProjection(srs.ExportToWkt())\n gdal.PopErrorHandler()\n assert ret != 0\n gdal.PushErrorHandler()\n ret = src_ds.SetGeoTransform([1, 2, 0, 3, 0, -4])\n gdal.PopErrorHandler()\n assert ret != 0\n gdal.PushErrorHandler()\n ret = src_ds.SetMetadataItem('FOO', 'BAZ')\n gdal.PopErrorHandler()\n assert ret != 0\n gdal.PushErrorHandler()\n ret = src_ds.SetMetadata({})\n gdal.PopErrorHandler()\n assert ret != 0\n gdal.PushErrorHandler()\n ret = src_ds.GetRasterBand(1).SetMetadataItem('FOO', 'BAZ')\n gdal.PopErrorHandler()\n assert ret != 0\n gdal.PushErrorHandler()\n ret = src_ds.GetRasterBand(1).SetMetadata({})\n gdal.PopErrorHandler()\n assert ret != 0\n gdal.PushErrorHandler()\n ret = src_ds.GetRasterBand(1).SetNoDataValue(0)\n 
gdal.PopErrorHandler()\n assert ret != 0\n\n # Pixel interleaved\n out_ds = gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_133_dst.tif', src_ds, options=['STREAMABLE_OUTPUT=YES', 'BLOCKYSIZE=32'])\n out_ds = None\n\n gdal.SetConfigOption('TIFF_READ_STREAMING', 'YES')\n ds = gdal.Open('/vsimem/tiff_write_133_dst.tif')\n gdal.SetConfigOption('TIFF_READ_STREAMING', None)\n assert ds.GetProjectionRef().find('32601') >= 0\n assert ds.GetGeoTransform() == (1.0, 2.0, 0.0, 3.0, 0.0, -2.0)\n assert ds.GetMetadataItem('FOO') == 'BAR'\n assert ds.GetMetadataItem('UNORDERED_BLOCKS', 'TIFF') is None\n\n with gdaltest.SetCacheMax(0):\n for y in range(1000):\n got_data = ds.ReadRaster(0, y, 1024, 1)\n assert got_data is not None\n\n ds.FlushCache()\n for y in range(1000):\n gdal.PushErrorHandler()\n got_data = ds.ReadRaster(0, y, 1024, 1)\n gdal.PopErrorHandler()\n assert got_data is None\n ds = None\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_133_dst.tif')\n\n # Tiled\n out_ds = gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_133_dst.tif', src_ds, options=['STREAMABLE_OUTPUT=YES', 'TILED=YES'])\n out_ds = None\n\n gdal.SetConfigOption('TIFF_READ_STREAMING', 'YES')\n ds = gdal.Open('/vsimem/tiff_write_133_dst.tif')\n gdal.SetConfigOption('TIFF_READ_STREAMING', None)\n assert ds.GetProjectionRef().find('32601') >= 0\n assert ds.GetGeoTransform() == (1.0, 2.0, 0.0, 3.0, 0.0, -2.0)\n assert ds.GetMetadataItem('FOO') == 'BAR'\n assert ds.GetMetadataItem('UNORDERED_BLOCKS', 'TIFF') is None\n\n with gdaltest.SetCacheMax(0):\n for yblock in range(int((1000 + 256 - 1) / 256)):\n y = 256 * yblock\n ysize = 256\n if y + ysize > ds.RasterYSize:\n ysize = ds.RasterYSize - y\n for xblock in range(int((1024 + 256 - 1) / 256)):\n x = 256 * xblock\n xsize = 256\n if x + xsize > ds.RasterXSize:\n xsize = ds.RasterXSize - x\n got_data = ds.ReadRaster(x, y, xsize, ysize)\n assert got_data is not None\n\n ds = None\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_133_dst.tif')\n\n # Band interleaved\n out_ds = gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_133_dst.tif', src_ds, options=['STREAMABLE_OUTPUT=YES', 'INTERLEAVE=BAND'])\n out_ds = None\n\n gdal.SetConfigOption('TIFF_READ_STREAMING', 'YES')\n ds = gdal.Open('/vsimem/tiff_write_133_dst.tif')\n gdal.SetConfigOption('TIFF_READ_STREAMING', None)\n assert ds.GetMetadataItem('UNORDERED_BLOCKS', 'TIFF') is None\n\n with gdaltest.SetCacheMax(0):\n for band in range(3):\n for y in range(1000):\n got_data = ds.GetRasterBand(band + 1).ReadRaster(0, y, 1024, 1)\n assert got_data is not None\n ds = None\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_133_dst.tif')\n\n # BIGTIFF\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('BigTIFF') >= 0:\n out_ds = gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_133_dst.tif', src_ds, options=['STREAMABLE_OUTPUT=YES', 'BIGTIFF=YES'])\n out_ds = None\n\n gdal.SetConfigOption('TIFF_READ_STREAMING', 'YES')\n ds = gdal.Open('/vsimem/tiff_write_133_dst.tif')\n gdal.SetConfigOption('TIFF_READ_STREAMING', None)\n assert ds.GetMetadataItem('UNORDERED_BLOCKS', 'TIFF') is None\n\n with gdaltest.SetCacheMax(0):\n for y in range(1000):\n got_data = ds.ReadRaster(0, y, 1024, 1)\n assert got_data is not None\n\n ds = None\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_133_dst.tif')\n\n # Compression not supported\n gdal.PushErrorHandler()\n out_ds = gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_133_dst.tif', src_ds, options=['STREAMABLE_OUTPUT=YES', 'COMPRESS=DEFLATE'])\n gdal.PopErrorHandler()\n assert out_ds is 
None\n\n # Test writing into a non-authorized file\n ds = gdaltest.tiff_drv.Create('/foo/bar', 1024, 1000, 3, options=['STREAMABLE_OUTPUT=YES', 'BLOCKYSIZE=1'])\n assert ds is None\n\n gdal.PushErrorHandler()\n out_ds = gdaltest.tiff_drv.CreateCopy('/foo/bar', src_ds, options=['STREAMABLE_OUTPUT=YES'])\n gdal.PopErrorHandler()\n assert out_ds is None\n\n src_ds = None\n\n # Classical TIFF with IFD not at offset 8\n gdal.SetConfigOption('TIFF_READ_STREAMING', 'YES')\n gdal.PushErrorHandler()\n ds = gdal.Open('data/byte.tif')\n gdal.PopErrorHandler()\n gdal.SetConfigOption('TIFF_READ_STREAMING', None)\n assert ds is None\n\n # BigTIFF with IFD not at offset 16\n if md['DMD_CREATIONOPTIONLIST'].find('BigTIFF') >= 0:\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_133.tif', 1024, 1000, 3, options=['BIGTIFF=YES'])\n ds.GetRasterBand(1).Fill(0)\n ds.FlushCache()\n ds.SetGeoTransform([1, 2, 0, 3, 0, -2])\n ds = None\n\n gdal.SetConfigOption('TIFF_READ_STREAMING', 'YES')\n gdal.PushErrorHandler()\n ds = gdal.Open('/vsimem/tiff_write_133.tif')\n gdal.PopErrorHandler()\n gdal.SetConfigOption('TIFF_READ_STREAMING', None)\n assert ds is None\n\n # Test reading strips in non-increasing order\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_133.tif', 1024, 1000, 3, options=['BLOCKYSIZE=1'])\n for y in range(1000):\n ds.WriteRaster(0, 1000 - y - 1, 1024, 1, 'a' * (3 * 1024))\n ds.FlushCache()\n ds = None\n\n gdal.SetConfigOption('TIFF_READ_STREAMING', 'YES')\n gdal.PushErrorHandler()\n ds = gdal.Open('/vsimem/tiff_write_133.tif')\n gdal.PopErrorHandler()\n gdal.SetConfigOption('TIFF_READ_STREAMING', None)\n assert ds.GetMetadataItem('UNORDERED_BLOCKS', 'TIFF') == 'YES'\n\n with gdaltest.SetCacheMax(0):\n for y in range(1000):\n got_data = ds.ReadRaster(0, 1000 - y - 1, 1024, 1)\n assert got_data is not None\n\n # Test writing strips in non-increasing order in a streamable output\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_133.tif', 1024, 1000, 3, options=['STREAMABLE_OUTPUT=YES', 'BLOCKYSIZE=1'])\n gdal.ErrorReset()\n gdal.PushErrorHandler()\n ret = ds.WriteRaster(0, 999, 1024, 1, 'a' * (3 * 1024))\n ds.FlushCache()\n gdal.PopErrorHandler()\n assert gdal.GetLastErrorMsg() != ''\n ds = None\n\n # Test writing tiles in non-increasing order in a streamable output\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_133.tif', 1024, 1000, 3, options=['STREAMABLE_OUTPUT=YES', 'TILED=YES'])\n gdal.ErrorReset()\n gdal.PushErrorHandler()\n ret = ds.WriteRaster(256, 256, 256, 256, 'a' * (3 * 256 * 256))\n ds.FlushCache()\n gdal.PopErrorHandler()\n assert gdal.GetLastErrorMsg() != ''\n ds = None\n\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_133.tif')\n\n###############################################################################\n# Test DISCARD_LSB\n\n\ndef test_tiff_write_134():\n\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_134.tif', 1, 1, 3, options=['DISCARD_LSB=0,1,3'])\n ds.GetRasterBand(1).Fill(127)\n ds.GetRasterBand(2).Fill(127)\n ds.GetRasterBand(3).Fill(127)\n ds = None\n ds = gdal.Open('/vsimem/tiff_write_134.tif')\n cs1 = ds.GetRasterBand(1).Checksum()\n cs2 = ds.GetRasterBand(2).Checksum()\n cs3 = ds.GetRasterBand(3).Checksum()\n assert cs1 == 1 and cs2 == 0 and cs3 == 5\n ds = None\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_134.tif')\n\n src_ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_134_src.tif', 1, 1, 3)\n src_ds.GetRasterBand(1).Fill(127)\n src_ds.GetRasterBand(2).Fill(127)\n src_ds.GetRasterBand(3).Fill(127)\n ds = 
gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_134.tif', src_ds, options=['DISCARD_LSB=0,1,3'])\n ds = None\n ds = gdal.Open('/vsimem/tiff_write_134.tif')\n cs1 = ds.GetRasterBand(1).Checksum()\n cs2 = ds.GetRasterBand(2).Checksum()\n cs3 = ds.GetRasterBand(3).Checksum()\n assert cs1 == 1 and cs2 == 0 and cs3 == 5\n ds = None\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_134_src.tif')\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_134.tif')\n\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_134.tif', 1, 1, 3, options=['DISCARD_LSB=3'])\n ds.GetRasterBand(1).Fill(127)\n ds.GetRasterBand(2).Fill(127)\n ds.GetRasterBand(3).Fill(127)\n ds = None\n ds = gdal.Open('/vsimem/tiff_write_134.tif')\n cs1 = ds.GetRasterBand(1).Checksum()\n cs2 = ds.GetRasterBand(2).Checksum()\n cs3 = ds.GetRasterBand(3).Checksum()\n assert cs1 == 5 and cs2 == 5 and cs3 == 5\n ds = None\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_134.tif')\n\n###############################################################################\n# Test clearing GCPs (#5945)\n\n\ndef test_tiff_write_135():\n\n # Simple clear\n src_ds = gdal.Open('data/gcps.vrt')\n ds = gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_135.tif', src_ds)\n ds = None\n\n ds = gdal.Open('/vsimem/tiff_write_135.tif', gdal.GA_Update)\n ds.SetGCPs([], '')\n ds = None\n\n ds = gdal.Open('/vsimem/tiff_write_135.tif')\n assert not ds.GetGCPs()\n assert ds.GetGCPProjection() == ''\n ds = None\n\n # Double clear\n src_ds = gdal.Open('data/gcps.vrt')\n ds = gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_135.tif', src_ds)\n ds = None\n\n ds = gdal.Open('/vsimem/tiff_write_135.tif', gdal.GA_Update)\n ds.SetGCPs([], '')\n ds.SetGCPs([], '')\n ds = None\n\n ds = gdal.Open('/vsimem/tiff_write_135.tif')\n assert not ds.GetGCPs()\n assert ds.GetGCPProjection() == ''\n ds = None\n\n # Clear + set geotransform and new projection\n src_ds = gdal.Open('data/gcps.vrt')\n ds = gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_135.tif', src_ds)\n ds = None\n\n ds = gdal.Open('/vsimem/tiff_write_135.tif', gdal.GA_Update)\n ds.SetGCPs([], '')\n ds.SetGeoTransform([1, 2, 3, 4, 5, -6])\n srs = osr.SpatialReference()\n srs.SetFromUserInput('EPSG:32601')\n ds.SetProjection(srs.ExportToWkt())\n ds = None\n\n ds = gdal.Open('/vsimem/tiff_write_135.tif')\n assert not ds.GetGCPs()\n assert ds.GetGeoTransform() == (1, 2, 3, 4, 5, -6)\n assert ds.GetProjectionRef().find('32601') >= 0\n ds = None\n\n gdal.Unlink('/vsimem/tiff_write_135.tif')\n\n###############################################################################\n# Test writing a single-strip mono-bit dataset\n\n\ndef test_tiff_write_136():\n\n src_ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_136_src.tif', 8, 2001)\n src_ds.GetRasterBand(1).Fill(1)\n expected_cs = src_ds.GetRasterBand(1).Checksum()\n ds = gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_136.tif', src_ds, options=['NBITS=1', 'COMPRESS=DEFLATE', 'BLOCKYSIZE=2001'])\n src_ds = None\n ds = None\n ds = gdal.Open('/vsimem/tiff_write_136.tif')\n cs = ds.GetRasterBand(1).Checksum()\n assert cs == expected_cs\n\n gdal.Unlink('/vsimem/tiff_write_136_src.tif')\n gdal.Unlink('/vsimem/tiff_write_136.tif')\n gdal.Unlink('/vsimem/tiff_write_136.tif.aux.xml')\n\n###############################################################################\n# Test multi-threaded writing\n\n\ndef test_tiff_write_137():\n\n src_ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_137_src.tif', 4000, 4000)\n src_ds.GetRasterBand(1).Fill(1)\n data = src_ds.GetRasterBand(1).ReadRaster()\n 
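# NUM_THREADS parallelizes only the compression stage, so each variant\n # below must reproduce the single-threaded checksum. A sketch of the\n # equivalent process-wide setting (assuming the GDAL_NUM_THREADS config\n # option is wanted instead of per-dataset options):\n #   gdal.SetConfigOption('GDAL_NUM_THREADS', 'ALL_CPUS')\n 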
expected_cs = src_ds.GetRasterBand(1).Checksum()\n\n # Test NUM_THREADS as creation option\n ds = gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_137.tif', src_ds,\n options=['BLOCKYSIZE=16', 'COMPRESS=DEFLATE', 'NUM_THREADS=ALL_CPUS'])\n src_ds = None\n ds = None\n ds = gdal.Open('/vsimem/tiff_write_137.tif')\n cs = ds.GetRasterBand(1).Checksum()\n ds = None\n assert cs == expected_cs\n\n # Test NUM_THREADS as creation option with Create()\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_137.tif', 4000, 4000, 1,\n options=['BLOCKYSIZE=16', 'COMPRESS=DEFLATE', 'NUM_THREADS=ALL_CPUS'])\n ds.GetRasterBand(1).WriteRaster(0, 0, 4000, 4000, data)\n ds = None\n ds = gdal.Open('/vsimem/tiff_write_137.tif')\n cs = ds.GetRasterBand(1).Checksum()\n ds = None\n assert cs == expected_cs\n\n # Test NUM_THREADS as open option\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_137.tif', 4000, 4000,\n options=['TILED=YES', 'COMPRESS=DEFLATE', 'PREDICTOR=2', 'SPARSE_OK=YES'])\n ds = None\n ds = gdal.OpenEx('/vsimem/tiff_write_137.tif', gdal.OF_UPDATE, open_options=['NUM_THREADS=4'])\n ds.GetRasterBand(1).Fill(1)\n ds = None\n ds = gdal.Open('/vsimem/tiff_write_137.tif')\n cs = ds.GetRasterBand(1).Checksum()\n ds = None\n assert cs == expected_cs\n\n # Ask data immediately while the block is compressed\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_137.tif', 4000, 4000,\n options=['BLOCKYSIZE=3999', 'COMPRESS=DEFLATE', 'NUM_THREADS=4'])\n ds.WriteRaster(0, 0, 1, 1, 'A')\n ds.FlushCache()\n val = ds.ReadRaster(0, 0, 1, 1).decode('ascii')\n assert val == 'A'\n ds = None\n\n gdal.Unlink('/vsimem/tiff_write_137_src.tif')\n gdal.Unlink('/vsimem/tiff_write_137.tif')\n\n # Test NUM_THREADS with raster == tile\n src_ds = gdal.Open('data/byte.tif')\n ds = gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_137.tif', src_ds,\n options=['BLOCKYSIZE=20', 'COMPRESS=DEFLATE', 'NUM_THREADS=ALL_CPUS'])\n src_ds = None\n ds = None\n ds = gdal.Open('/vsimem/tiff_write_137.tif')\n cs = ds.GetRasterBand(1).Checksum()\n ds = None\n assert cs == 4672, expected_cs\n gdal.Unlink('/vsimem/tiff_write_137.tif')\n\n###############################################################################\n# Test that pixel-interleaved writing generates optimal size\n\n\ndef test_tiff_write_138():\n\n # Test that consecutive IWriteBlock() calls for the same block but in\n # different bands only generate a single tile write, and not 3 rewrites\n ds = gdal.GetDriverByName('GTiff').Create(\n '/vsimem/tiff_write_138.tif', 10, 1, 3, options=['COMPRESS=DEFLATE'])\n ds.GetRasterBand(1).WriteRaster(0, 0, 10, 1, 'A', buf_xsize=1, buf_ysize=1)\n ds.GetRasterBand(1).FlushCache()\n ds.GetRasterBand(2).WriteRaster(0, 0, 10, 1, 'A', buf_xsize=1, buf_ysize=1)\n ds.GetRasterBand(2).FlushCache()\n ds.GetRasterBand(3).WriteRaster(0, 0, 10, 1, 'A', buf_xsize=1, buf_ysize=1)\n ds.GetRasterBand(3).FlushCache()\n ds = None\n size = gdal.VSIStatL('/vsimem/tiff_write_138.tif').size\n assert size == 181\n\n # Test fix for #5999\n\n # Create a file with a huge block that will saturate the block cache.\n with gdaltest.SetCacheMax(1000000):\n tmp_ds = gdal.GetDriverByName('GTiff').Create(\n '/vsimem/tiff_write_138_saturate.tif', gdal.GetCacheMax(), 1)\n tmp_ds = None\n\n ds = gdal.GetDriverByName('GTiff').Create(\n '/vsimem/tiff_write_138.tif', 10, 1, 3, options=['COMPRESS=DEFLATE'])\n ds.GetRasterBand(1).WriteRaster(0, 0, 10, 1, 'A', buf_xsize=1, buf_ysize=1)\n ds.GetRasterBand(2).WriteRaster(0, 0, 10, 1, 'A', buf_xsize=1, buf_ysize=1)\n 
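# Write the third band of the same tile, then pull the huge block created\n # above through the cache so this file's blocks are evicted while dirty.\n 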
ds.GetRasterBand(3).WriteRaster(0, 0, 10, 1, 'A', buf_xsize=1, buf_ysize=1)\n # When internalizing the huge block, check that the 3 above dirty blocks\n # get written as a single tile write.\n tmp_ds = gdal.Open('/vsimem/tiff_write_138_saturate.tif')\n tmp_ds.GetRasterBand(1).Checksum()\n tmp_ds = None\n ds = None\n size = gdal.VSIStatL('/vsimem/tiff_write_138.tif').size\n assert size == 181\n\n gdal.Unlink('/vsimem/tiff_write_138.tif')\n gdal.Unlink('/vsimem/tiff_write_138_saturate.tif')\n\n###############################################################################\n# Test PREDICTOR=2 and PREDICTOR=3 with various data types and endianness\n\n\ndef test_tiff_write_139():\n import struct\n\n drv = gdal.GetDriverByName('GTiff')\n # Only libtiff post 4.0.5 has the fix for the non-byte swabbing case\n has_inverted_swab_fix = drv.GetMetadataItem('LIBTIFF') == 'INTERNAL'\n\n # In the byte case, there are optimizations for the 3-band and 4-band cases; 1 band is the general case\n for nbands in (1, 3, 4):\n\n ds = drv.Create('/vsimem/tiff_write_139.tif', 4, 1, nbands,\n options=['PREDICTOR=2', 'COMPRESS=DEFLATE'])\n ref_content = struct.pack('B' * 4, 255, 0, 255, 0)\n for i in range(nbands):\n ds.GetRasterBand(i + 1).WriteRaster(0, 0, 4, 1, ref_content)\n ds = None\n ds = gdal.Open('/vsimem/tiff_write_139.tif')\n for i in range(nbands):\n content = ds.GetRasterBand(i + 1).ReadRaster()\n assert ref_content == content\n ds = None\n\n gdal.Unlink('/vsimem/tiff_write_139.tif')\n\n # Int16\n for endianness in ['NATIVE', 'INVERTED']:\n\n if endianness == 'INVERTED' and not has_inverted_swab_fix:\n continue\n\n ds = drv.Create('/vsimem/tiff_write_139.tif', 6, 1, 1, gdal.GDT_Int16,\n options=['PREDICTOR=2', 'COMPRESS=DEFLATE', 'ENDIANNESS=%s' % endianness])\n ref_content = struct.pack('h' * 6, -32768, 32767, -32768, 32767, -32768, 32767)\n ds.GetRasterBand(1).WriteRaster(0, 0, 6, 1, ref_content)\n ds = None\n ds = gdal.Open('/vsimem/tiff_write_139.tif')\n content = ds.GetRasterBand(1).ReadRaster()\n if ref_content != content:\n print(endianness)\n pytest.fail(struct.unpack('h' * 6, content))\n ds = None\n\n gdal.Unlink('/vsimem/tiff_write_139.tif')\n\n # UInt16 (same code path)\n for endianness in ['NATIVE', 'INVERTED']:\n\n if endianness == 'INVERTED' and not has_inverted_swab_fix:\n continue\n\n ds = drv.Create('/vsimem/tiff_write_139.tif', 6, 1, 1, gdal.GDT_UInt16,\n options=['PREDICTOR=2', 'COMPRESS=DEFLATE', 'ENDIANNESS=%s' % endianness])\n ref_content = struct.pack('H' * 6, 0, 65535, 0, 65535, 0, 65535)\n ds.GetRasterBand(1).WriteRaster(0, 0, 6, 1, ref_content)\n ds = None\n ds = gdal.Open('/vsimem/tiff_write_139.tif')\n content = ds.GetRasterBand(1).ReadRaster()\n if ref_content != content:\n print(endianness)\n pytest.fail(struct.unpack('H' * 6, content))\n ds = None\n\n gdal.Unlink('/vsimem/tiff_write_139.tif')\n\n # UInt32\n for endianness in ['NATIVE', 'INVERTED']:\n\n if endianness == 'INVERTED' and not has_inverted_swab_fix:\n continue\n\n ds = drv.Create('/vsimem/tiff_write_139.tif', 6, 1, 1, gdal.GDT_UInt32,\n options=['PREDICTOR=2', 'COMPRESS=DEFLATE', 'ENDIANNESS=%s' % endianness])\n ref_content = struct.pack('I' * 6, 0, 2000000000, 0, 2000000000, 0, 2000000000)\n ds.GetRasterBand(1).WriteRaster(0, 0, 6, 1, ref_content)\n ds = None\n ds = gdal.Open('/vsimem/tiff_write_139.tif')\n content = ds.GetRasterBand(1).ReadRaster()\n if ref_content != content:\n print(endianness)\n pytest.fail(struct.unpack('I' * 6, content))\n ds = None\n\n gdal.Unlink('/vsimem/tiff_write_139.tif')\n\n # Test floating-point predictor\n # Seems to be 
broken with ENDIANNESS=INVERTED\n ds = drv.Create('/vsimem/tiff_write_139.tif', 4, 1, 1, gdal.GDT_Float64,\n options=['PREDICTOR=3', 'COMPRESS=DEFLATE'])\n ref_content = struct.pack('d' * 4, 1, -1e100, 1e10, -1e5)\n ds.GetRasterBand(1).WriteRaster(0, 0, 4, 1, ref_content)\n ds = None\n ds = gdal.Open('/vsimem/tiff_write_139.tif')\n content = ds.GetRasterBand(1).ReadRaster()\n assert ref_content == content, struct.unpack('d' * 4, content)\n ds = None\n\n gdal.Unlink('/vsimem/tiff_write_139.tif')\n\n###############################################################################\n# Test setting a band to alpha\n\n\ndef test_tiff_write_140():\n\n # Nominal case: set alpha to last band\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_140.tif', 1, 1, 5)\n ds.GetRasterBand(5).SetColorInterpretation(gdal.GCI_AlphaBand)\n ds = None\n statBuf = gdal.VSIStatL('/vsimem/tiff_write_140.tif.aux.xml', gdal.VSI_STAT_EXISTS_FLAG | gdal.VSI_STAT_NATURE_FLAG | gdal.VSI_STAT_SIZE_FLAG)\n assert statBuf is None, 'did not expect PAM file'\n ds = gdal.Open('/vsimem/tiff_write_140.tif')\n assert ds.GetRasterBand(5).GetColorInterpretation() == gdal.GCI_AlphaBand\n ds = None\n\n # Strange case: set alpha to a band, but it is already set on another one\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_140.tif', 1, 1, 5)\n ds.GetRasterBand(2).SetColorInterpretation(gdal.GCI_AlphaBand)\n # Should emit a warning\n gdal.ErrorReset()\n with gdaltest.error_handler():\n ret = ds.GetRasterBand(5).SetColorInterpretation(gdal.GCI_AlphaBand)\n assert gdal.GetLastErrorMsg() != ''\n assert ret == 0\n ds = None\n statBuf = gdal.VSIStatL('/vsimem/tiff_write_140.tif.aux.xml', gdal.VSI_STAT_EXISTS_FLAG | gdal.VSI_STAT_NATURE_FLAG | gdal.VSI_STAT_SIZE_FLAG)\n assert statBuf is None, 'did not expect PAM file'\n ds = gdal.Open('/vsimem/tiff_write_140.tif')\n assert ds.GetRasterBand(2).GetColorInterpretation() == gdal.GCI_AlphaBand\n assert ds.GetRasterBand(5).GetColorInterpretation() == gdal.GCI_AlphaBand\n ds = None\n\n # Strange case: set alpha to a band, but it is already set on another one (because of ALPHA=YES)\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_140.tif', 1, 1, 5, options=['ALPHA=YES'])\n # Should emit a warning mentioning ALPHA creation option.\n gdal.ErrorReset()\n with gdaltest.error_handler():\n ret = ds.GetRasterBand(5).SetColorInterpretation(gdal.GCI_AlphaBand)\n assert gdal.GetLastErrorMsg().find('ALPHA') >= 0\n assert ret == 0\n ds = None\n statBuf = gdal.VSIStatL('/vsimem/tiff_write_140.tif.aux.xml', gdal.VSI_STAT_EXISTS_FLAG | gdal.VSI_STAT_NATURE_FLAG | gdal.VSI_STAT_SIZE_FLAG)\n assert statBuf is None, 'did not expect PAM file'\n ds = gdal.Open('/vsimem/tiff_write_140.tif')\n assert ds.GetRasterBand(2).GetColorInterpretation() == gdal.GCI_AlphaBand\n assert ds.GetRasterBand(5).GetColorInterpretation() == gdal.GCI_AlphaBand\n ds = None\n\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_140.tif')\n\n###############################################################################\n# Test GEOTIFF_KEYS_FLAVOR=ESRI_PE with EPSG:3857\n\n\ndef test_tiff_write_141():\n\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_141.tif', 1, 1, options=['GEOTIFF_KEYS_FLAVOR=ESRI_PE'])\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(3857)\n ds.SetProjection(srs.ExportToWkt())\n ds = None\n\n ds = gdal.Open('/vsimem/tiff_write_141.tif')\n wkt = ds.GetProjectionRef()\n ds = None\n\n assert wkt.startswith('PROJCS[\"WGS_1984_Web_Mercator_Auxiliary_Sphere\"')\n\n assert 'EXTENSION[\"PROJ4\"' in wkt\n\n 
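# (For comparison, and as an assumption not exercised by this test: the\n # default GEOTIFF_KEYS_FLAVOR would encode this CRS as a standard EPSG:3857\n # definition rather than the ESRI WKT checked above.)\n 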
gdaltest.tiff_drv.Delete('/vsimem/tiff_write_141.tif')\n\n\n###############################################################################\n# Test PixelIsPoint without SRS (#6225)\n\ndef test_tiff_write_142():\n\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_142.tif', 1, 1)\n ds.SetMetadataItem('AREA_OR_POINT', 'Point')\n ds.SetGeoTransform([10, 1, 0, 100, 0, -1])\n ds = None\n\n src_ds = gdal.Open('/vsimem/tiff_write_142.tif')\n gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_142_2.tif', src_ds)\n src_ds = None\n\n ds = gdal.Open('/vsimem/tiff_write_142_2.tif')\n gt = ds.GetGeoTransform()\n md = ds.GetMetadataItem('AREA_OR_POINT')\n ds = None\n\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_142.tif')\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_142_2.tif')\n\n gt_expected = (10, 1, 0, 100, 0, -1)\n assert gt == gt_expected, 'did not get expected geotransform'\n\n assert md == 'Point', 'did not get expected AREA_OR_POINT value'\n\n###############################################################################\n# Check that we detect that free space isn't sufficient\n\n\ndef test_tiff_write_143():\n\n with gdaltest.error_handler():\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_143.tif', 1000000000, 1000000000)\n assert ds is None\n\n###############################################################################\n# Test creating a real BigTIFF file > 4 GB with multiple directories (on filesystems supporting sparse files)\n\n\ndef test_tiff_write_144():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('BigTIFF') == -1:\n pytest.skip()\n\n # Determine if the filesystem supports sparse files (we don't want to create a real 10 GB\n # file!)\n if not gdaltest.filesystem_supports_sparse_files('tmp'):\n pytest.skip()\n\n ds = gdal.GetDriverByName('GTiff').Create('tmp/tiff_write_144.tif', 20, 20, 1, options=['BIGTIFF=YES'])\n ds.GetRasterBand(1).Fill(255)\n ds = None\n\n # Extend the file to 4 GB\n f = open('tmp/tiff_write_144.tif', 'rb+')\n f.seek(4294967296, 0)\n f.write(' '.encode('ascii'))\n f.close()\n\n ds = gdal.Open('tmp/tiff_write_144.tif', gdal.GA_Update)\n ds.BuildOverviews('NEAR', [2])\n ds = None\n\n ds = gdal.Open('tmp/tiff_write_144.tif')\n got_cs = ds.GetRasterBand(1).Checksum()\n got_cs_ovr = ds.GetRasterBand(1).GetOverview(0).Checksum()\n ds = None\n\n gdal.Unlink('tmp/tiff_write_144.tif')\n\n assert got_cs == 4873 and got_cs_ovr == 1218\n\n###############################################################################\n# Test various warnings / errors of Create()\n\n\ndef test_tiff_write_145():\n\n options_list = [{'bands': 65536, 'expected_failure': True},\n {'creation_options': ['INTERLEAVE=foo'], 'expected_failure': True},\n {'creation_options': ['COMPRESS=foo'], 'expected_failure': False},\n {'creation_options': ['STREAMABLE_OUTPUT=YES', 'SPARSE_OK=YES'], 'expected_failure': True},\n {'creation_options': ['STREAMABLE_OUTPUT=YES', 'COPY_SRC_OVERVIEWS=YES'], 'expected_failure': True},\n {'use_tmp': True, 'xsize': 100000, 'ysize': 100000, 'creation_options': ['BIGTIFF=NO'], 'expected_failure': True},\n {'creation_options': ['ENDIANNESS=foo'], 'expected_failure': False},\n {'creation_options': ['NBITS=9'], 'expected_failure': False},\n {'datatype': gdal.GDT_Float32, 'creation_options': ['NBITS=8'], 'expected_failure': False},\n {'datatype': gdal.GDT_UInt16, 'creation_options': ['NBITS=8'], 'expected_failure': False},\n {'datatype': gdal.GDT_UInt16, 'creation_options': ['NBITS=17'], 'expected_failure': False},\n {'datatype': 
gdal.GDT_UInt32, 'creation_options': ['NBITS=16'], 'expected_failure': False},\n {'datatype': gdal.GDT_UInt32, 'creation_options': ['NBITS=33'], 'expected_failure': False},\n {'bands': 3, 'creation_options': ['PHOTOMETRIC=YCBCR'], 'expected_failure': True},\n {'bands': 3, 'creation_options': ['PHOTOMETRIC=YCBCR', 'COMPRESS=JPEG', 'INTERLEAVE=BAND'], 'expected_failure': True},\n {'bands': 1, 'creation_options': ['PHOTOMETRIC=YCBCR', 'COMPRESS=JPEG'], 'expected_failure': True},\n {'creation_options': ['PHOTOMETRIC=foo'], 'expected_failure': False},\n {'creation_options': ['PHOTOMETRIC=RGB'], 'expected_failure': False},\n {'creation_options': ['TILED=YES', 'BLOCKSIZE=1', 'BLOCKYSIZE=1'], 'expected_failure': True},\n ]\n\n for options in options_list:\n xsize = options.get('xsize', 1)\n ysize = options.get('ysize', 1)\n bands = options.get('bands', 1)\n datatype = options.get('datatype', gdal.GDT_Byte)\n use_tmp = options.get('use_tmp', False)\n if use_tmp:\n filename = 'tmp/tiff_write_145.tif'\n else:\n filename = '/vsimem/tiff_write_145.tif'\n creation_options = options.get('creation_options', [])\n gdal.Unlink(filename)\n gdal.ErrorReset()\n with gdaltest.error_handler():\n ds = gdaltest.tiff_drv.Create(filename, xsize, ysize, bands, datatype, options=creation_options)\n if ds is not None and options.get('expected_failure', False):\n print(options)\n pytest.fail('expected failure, but did not get it')\n elif ds is None and not options.get('expected_failure', False):\n print(options)\n pytest.fail('got failure, but did not expect it')\n ds = None\n # print(gdal.GetLastErrorMsg())\n if gdal.GetLastErrorMsg() == '':\n print(options)\n pytest.fail('did not get any warning/error')\n gdal.Unlink(filename)\n\n \n###############################################################################\n# Test implicit JPEG-in-TIFF overviews with RGBA (not completely sure this\n# is a legal formulation since 4 bands should probably be seen as CMYK)\n\n\ndef test_tiff_write_146():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('JPEG') == -1:\n pytest.skip()\n\n tmp_ds = gdal.Translate('', 'data/stefan_full_rgba.tif', format='MEM')\n original_stats = [tmp_ds.GetRasterBand(i + 1).ComputeStatistics(True) for i in range(4)]\n gdal.Translate('/vsimem/tiff_write_146.tif', 'data/stefan_full_rgba.tif', options='-outsize 1000% 1000% -co COMPRESS=JPEG')\n out_ds = gdal.Open('/vsimem/tiff_write_146.tif')\n got_stats = [out_ds.GetRasterBand(i + 1).GetOverview(2).ComputeStatistics(True) for i in range(4)]\n out_ds = None\n gdal.GetDriverByName('GTiff').Delete('/vsimem/tiff_write_146.tif')\n\n for i in range(4):\n for j in range(4):\n assert i == 2 or j < 2 or abs(original_stats[i][j] - got_stats[i][j]) <= 5, \\\n 'did not get expected statistics'\n\n \n###############################################################################\n# Test that we don't use implicit JPEG-in-TIFF overviews with CMYK when converting\n# to RGBA\n\n\ndef test_tiff_write_147():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('JPEG') == -1:\n pytest.skip()\n\n gdal.SetConfigOption('GDAL_JPEG_TO_RGB', 'NO')\n gdal.SetConfigOption('GDAL_PAM_ENABLED', 'NO')\n gdal.Translate('/vsimem/tiff_write_147.tif', '../gdrivers/data/rgb_ntf_cmyk.jpg', options='-outsize 1000% 1000% -co COMPRESS=JPEG -co PHOTOMETRIC=CMYK')\n gdal.SetConfigOption('GDAL_JPEG_TO_RGB', None)\n gdal.SetConfigOption('GDAL_PAM_ENABLED', None)\n out_ds = gdal.Open('/vsimem/tiff_write_147.tif')\n assert 
out_ds.GetRasterBand(1).GetOverview(0) is None, 'did not expect an overview'\n out_ds = None\n gdal.GetDriverByName('GTiff').Delete('/vsimem/tiff_write_147.tif')\n\n###############################################################################\n# Test that we can use implicit JPEG-in-TIFF overviews with CMYK in raw mode\n\n\ndef test_tiff_write_148():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('JPEG') == -1:\n pytest.skip()\n\n gdal.SetConfigOption('GDAL_JPEG_TO_RGB', 'NO')\n tmp_ds = gdal.Translate('', '../gdrivers/data/rgb_ntf_cmyk.jpg', format='MEM')\n gdal.SetConfigOption('GDAL_JPEG_TO_RGB', None)\n original_stats = [tmp_ds.GetRasterBand(i + 1).ComputeStatistics(True) for i in range(4)]\n gdal.SetConfigOption('GDAL_JPEG_TO_RGB', 'NO')\n gdal.SetConfigOption('GDAL_PAM_ENABLED', 'NO')\n gdal.Translate('/vsimem/tiff_write_148.tif', '../gdrivers/data/rgb_ntf_cmyk.jpg', options='-outsize 1000% 1000% -co COMPRESS=JPEG -co PHOTOMETRIC=CMYK')\n gdal.SetConfigOption('GDAL_JPEG_TO_RGB', None)\n gdal.SetConfigOption('GDAL_PAM_ENABLED', None)\n out_ds = gdal.Open('GTIFF_RAW:/vsimem/tiff_write_148.tif')\n got_stats = [out_ds.GetRasterBand(i + 1).GetOverview(0).ComputeStatistics(True) for i in range(4)]\n out_ds = None\n gdal.GetDriverByName('GTiff').Delete('/vsimem/tiff_write_148.tif')\n\n for i in range(4):\n for j in range(4):\n assert j < 2 or abs(original_stats[i][j] - got_stats[i][j]) <= 5, \\\n 'did not get expected statistics'\n\n \n###############################################################################\n# Test filling missing blocks with nodata\n\n\ndef test_tiff_write_149():\n\n # Power-of-two bit depth\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_149.tif', 1, 1)\n ds.GetRasterBand(1).SetNoDataValue(127)\n ds = None\n ds = gdal.Open('/vsimem/tiff_write_149.tif')\n cs = ds.GetRasterBand(1).Checksum()\n ds = None\n assert cs == 1\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_149.tif')\n\n # Test implicit blocks\n expected_cs = 13626\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_149.tif', 40, 30, 2, gdal.GDT_UInt16, options=['NBITS=12', 'TILED=YES', 'BLOCKXSIZE=16', 'BLOCKYSIZE=16', 'INTERLEAVE=BAND', 'SPARSE_OK=YES'])\n ds.GetRasterBand(1).SetNoDataValue(127)\n ds.GetRasterBand(2).SetNoDataValue(127)\n ds = None\n ds = gdal.Open('/vsimem/tiff_write_149.tif')\n cs = ds.GetRasterBand(1).Checksum()\n ds = None\n assert cs == expected_cs\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_149.tif')\n\n # NBITS=12, SEPARATE. Checksum must be the same as in the implicit blocks case\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_149.tif', 40, 30, 2, gdal.GDT_UInt16, options=['NBITS=12', 'TILED=YES', 'BLOCKXSIZE=16', 'BLOCKYSIZE=16', 'INTERLEAVE=BAND'])\n ds.GetRasterBand(1).SetNoDataValue(127)\n ds.GetRasterBand(2).SetNoDataValue(127)\n ds = None\n ds = gdal.Open('/vsimem/tiff_write_149.tif')\n cs = ds.GetRasterBand(1).Checksum()\n ds = None\n assert cs == expected_cs\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_149.tif')\n\n # NBITS=12, CONTIG. 
Checksum must be the same as in the implicit blocks case\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_149.tif', 40, 30, 2, gdal.GDT_UInt16, options=['NBITS=12', 'TILED=YES', 'BLOCKXSIZE=16', 'BLOCKYSIZE=16', 'INTERLEAVE=PIXEL'])\n ds.GetRasterBand(1).SetNoDataValue(127)\n ds.GetRasterBand(2).SetNoDataValue(127)\n ds = None\n ds = gdal.Open('/vsimem/tiff_write_149.tif')\n cs = ds.GetRasterBand(1).Checksum()\n ds = None\n assert cs == expected_cs\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_149.tif')\n\n###############################################################################\n# Test failure when loading block from disk in IWriteBlock()\n\n\ndef test_tiff_write_150():\n\n shutil.copy('data/tiled_bad_offset.tif', 'tmp/tiled_bad_offset.tif')\n ds = gdal.Open('tmp/tiled_bad_offset.tif', gdal.GA_Update)\n ds.GetRasterBand(1).Fill(0)\n gdal.ErrorReset()\n with gdaltest.error_handler():\n ds.FlushCache()\n assert gdal.GetLastErrorMsg() != ''\n ds = None\n gdaltest.tiff_drv.Delete('tmp/tiled_bad_offset.tif')\n\n###############################################################################\n# Test IWriteBlock() with more than 10 bands\n\n\ndef test_tiff_write_151():\n\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_151.tif', 1, 1, 11)\n ds = None\n ds = gdal.Open('/vsimem/tiff_write_151.tif', gdal.GA_Update)\n ds.GetRasterBand(1).Fill(1)\n ds = None\n ds = gdal.Open('/vsimem/tiff_write_151.tif', gdal.GA_Update)\n ds.GetRasterBand(1).Checksum()\n ds.GetRasterBand(2).Fill(1)\n ds.GetRasterBand(3).Fill(1)\n ds = None\n ds = gdal.Open('/vsimem/tiff_write_151.tif')\n assert ds.GetRasterBand(1).Checksum() == 1\n assert ds.GetRasterBand(2).Checksum() == 1\n assert ds.GetRasterBand(3).Checksum() == 1\n ds = None\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_151.tif')\n\n###############################################################################\n# Test flushing of blocks in a contig multi band file with Create()\n\n\ndef test_tiff_write_152():\n\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_152.tif', 1, 1, 2, options=['NBITS=2'])\n ds.GetRasterBand(2).SetNoDataValue(3)\n ds.GetRasterBand(2).Fill(1)\n ds = None\n ds = gdal.Open('/vsimem/tiff_write_152.tif')\n assert ds.GetRasterBand(1).Checksum() == 0\n assert ds.GetRasterBand(2).Checksum() == 1\n ds = None\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_152.tif')\n\n###############################################################################\n# Test that empty blocks are created in a filesystem sparse way\n\n\ndef test_tiff_write_153():\n\n target_dir = 'tmp'\n\n if gdal.VSISupportsSparseFiles(target_dir) == 0:\n pytest.skip()\n\n gdaltest.tiff_drv.Create(target_dir + '/tiff_write_153.tif', 500, 500)\n\n f = gdal.VSIFOpenL(target_dir + '/tiff_write_153.tif', 'rb')\n ret = gdal.VSIFGetRangeStatusL(f, 500 * 500, 1)\n gdal.VSIFCloseL(f)\n\n gdaltest.tiff_drv.Delete(target_dir + '/tiff_write_153.tif')\n\n assert ret != gdal.VSI_RANGE_STATUS_DATA\n\n###############################################################################\n# Test empty block writing skipping and SPARSE_OK in CreateCopy() and Open()\n\n\ndef test_tiff_write_154():\n\n import struct\n\n src_ds = gdal.GetDriverByName('MEM').Create('', 500, 500)\n\n ds = gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_154.tif', src_ds, options=['BLOCKYSIZE=256'])\n ds.FlushCache()\n # At that point empty blocks have not yet been flushed\n assert gdal.VSIStatL('/vsimem/tiff_write_154.tif').size == 162\n ds = None\n # Now they are and that's done in a filesystem sparse way. 
TODO: check this\n assert gdal.VSIStatL('/vsimem/tiff_write_154.tif').size == 256162\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_154.tif')\n\n ds = gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_154.tif', src_ds, options=['BLOCKYSIZE=256', 'COMPRESS=DEFLATE'])\n ds.FlushCache()\n # With compression, empty blocks are written right away\n assert gdal.VSIStatL('/vsimem/tiff_write_154.tif').size == 462\n ds = None\n assert gdal.VSIStatL('/vsimem/tiff_write_154.tif').size == 462\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_154.tif')\n\n # SPARSE_OK in CreateCopy(): blocks are not written\n ds = gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_154.tif', src_ds, options=['SPARSE_OK=YES', 'BLOCKYSIZE=256'])\n ds = None\n assert gdal.VSIStatL('/vsimem/tiff_write_154.tif').size == 162\n # SPARSE_OK in Open()/update: blocks are not written\n ds = gdal.OpenEx('/vsimem/tiff_write_154.tif', gdal.OF_UPDATE, open_options=['SPARSE_OK=YES'])\n ds.GetRasterBand(1).Fill(0)\n ds = None\n assert gdal.VSIStatL('/vsimem/tiff_write_154.tif').size == 162\n ds = None\n # Default behaviour in Open()/update: blocks are written\n ds = gdal.OpenEx('/vsimem/tiff_write_154.tif', gdal.OF_UPDATE)\n ds.GetRasterBand(1).Fill(0)\n ds = None\n assert gdal.VSIStatL('/vsimem/tiff_write_154.tif').size == 250162\n ds = None\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_154.tif')\n\n # SPARSE_OK in CreateCopy() in compressed case (strips): blocks are not written\n ds = gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_154.tif', src_ds, options=['SPARSE_OK=YES', 'BLOCKYSIZE=256', 'COMPRESS=DEFLATE'])\n ds = None\n assert gdal.VSIStatL('/vsimem/tiff_write_154.tif').size == 174\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_154.tif')\n\n # SPARSE_OK in CreateCopy() in compressed case (tiling): blocks are not written\n ds = gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_154.tif', src_ds, options=['SPARSE_OK=YES', 'TILED=YES'])\n ds = None\n assert gdal.VSIStatL('/vsimem/tiff_write_154.tif').size == 190\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_154.tif')\n\n # Test detection of 0 blocks for all data types\n for dt in ['signedbyte', gdal.GDT_Int16, gdal.GDT_UInt16,\n gdal.GDT_Int32, gdal.GDT_UInt32,\n gdal.GDT_Float32, gdal.GDT_Float64]:\n # SPARSE_OK in CreateCopy(): blocks are not written\n if dt == 'signedbyte':\n src_ds = gdal.GetDriverByName('MEM').Create('', 500, 500, 1, gdal.GDT_Byte)\n options = ['SPARSE_OK=YES', 'BLOCKYSIZE=256', 'PIXELTYPE=SIGNEDBYTE']\n else:\n src_ds = gdal.GetDriverByName('MEM').Create('', 500, 500, 1, dt)\n options = ['SPARSE_OK=YES', 'BLOCKYSIZE=256']\n gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_154.tif', src_ds, options=options)\n assert gdal.VSIStatL('/vsimem/tiff_write_154.tif').size == 162, dt\n\n # Test detection of nodata blocks with nodata != 0 for all data types\n for dt in ['signedbyte', gdal.GDT_Int16, gdal.GDT_UInt16,\n gdal.GDT_Int32, gdal.GDT_UInt32,\n gdal.GDT_Float32, gdal.GDT_Float64]:\n # SPARSE_OK in CreateCopy(): blocks are not written\n if dt == 'signedbyte':\n src_ds = gdal.GetDriverByName('MEM').Create('', 500, 500, 1, gdal.GDT_Byte)\n options = ['SPARSE_OK=YES', 'BLOCKYSIZE=256', 'PIXELTYPE=SIGNEDBYTE']\n else:\n src_ds = gdal.GetDriverByName('MEM').Create('', 500, 500, 1, dt)\n options = ['SPARSE_OK=YES', 'BLOCKYSIZE=256']\n src_ds.GetRasterBand(1).Fill(1)\n src_ds.GetRasterBand(1).SetNoDataValue(1)\n ds = gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_154.tif', src_ds, options=options)\n ds = None\n assert gdal.VSIStatL('/vsimem/tiff_write_154.tif').size == 
174, dt\n\n # Test optimized detection when nodata==0, and with the last pixel != 0\n src_ds = gdal.GetDriverByName('MEM').Create('', 100, 1, 1)\n src_ds.GetRasterBand(1).Fill(0)\n src_ds.GetRasterBand(1).WriteRaster(99, 0, 1, 1, struct.pack('B' * 1, 1))\n gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_154.tif', src_ds, options=['SPARSE_OK=YES'])\n assert gdal.VSIStatL('/vsimem/tiff_write_154.tif').size == 246\n\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_154.tif')\n\n # Test that setting nodata doesn't prevent blocks from being written (#6706)\n ds = gdal.GetDriverByName('GTiff').Create('/vsimem/tiff_write_154.tif', 1, 100, 1)\n ds.GetRasterBand(1).SetNoDataValue(1)\n ds = None\n ds = gdal.Open('/vsimem/tiff_write_154.tif')\n offset = ds.GetRasterBand(1).GetMetadataItem('BLOCK_OFFSET_0_0', 'TIFF')\n ds = None\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_154.tif')\n assert not (offset is None or int(offset) == 0)\n\n###############################################################################\n# Test reading and writing band description\n\n\ndef test_tiff_write_155():\n\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_155.tif', 1, 1)\n ds.GetRasterBand(1).SetDescription('foo')\n ds = None\n\n assert gdal.VSIStatL('/vsimem/tiff_write_155.tif.aux.xml') is None\n\n ds = gdal.Open('/vsimem/tiff_write_155.tif')\n assert ds.GetRasterBand(1).GetDescription() == 'foo'\n ds = None\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_155.tif')\n\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_155.tif', 1, 1, options=['PROFILE=GeoTIFF'])\n ds.GetRasterBand(1).SetDescription('foo')\n ds = None\n\n assert gdal.VSIStatL('/vsimem/tiff_write_155.tif.aux.xml') is not None\n\n ds = gdal.Open('/vsimem/tiff_write_155.tif')\n assert ds.GetRasterBand(1).GetDescription() == 'foo'\n ds = None\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_155.tif')\n\n###############################################################################\n# Test GetDataCoverageStatus()\n\n\ndef test_tiff_write_156():\n\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_156.tif', 64, 64, options=['SPARSE_OK=YES', 'TILED=YES', 'BLOCKXSIZE=32', 'BLOCKYSIZE=32'])\n ds.GetRasterBand(1).WriteRaster(0, 0, 1, 1, 'X')\n\n (flags, pct) = ds.GetRasterBand(1).GetDataCoverageStatus(0, 0, 32, 32)\n assert flags == gdal.GDAL_DATA_COVERAGE_STATUS_DATA and pct == 100.0\n\n (flags, pct) = ds.GetRasterBand(1).GetDataCoverageStatus(32, 0, 32, 32)\n assert flags == gdal.GDAL_DATA_COVERAGE_STATUS_EMPTY and pct == 0.0\n\n (flags, pct) = ds.GetRasterBand(1).GetDataCoverageStatus(16, 16, 32, 32)\n assert flags == gdal.GDAL_DATA_COVERAGE_STATUS_DATA | gdal.GDAL_DATA_COVERAGE_STATUS_EMPTY and pct == 25.0\n\n ds = None\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_156.tif')\n\n # Test fix for #6703\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_156.tif', 1, 512, options=['SPARSE_OK=YES', 'BLOCKYSIZE=1'])\n ds.GetRasterBand(1).WriteRaster(0, 100, 1, 1, 'X')\n ds = None\n ds = gdal.Open('/vsimem/tiff_write_156.tif')\n flags, _ = ds.GetRasterBand(1).GetDataCoverageStatus(0, 100, 1, 1)\n assert flags == gdal.GDAL_DATA_COVERAGE_STATUS_DATA\n ds = None\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_156.tif')\n\n###############################################################################\n# Test Float16\n\n\ndef test_tiff_write_157():\n\n import struct\n\n # Write controlled values of Float16\n vals = struct.pack('H' * 14,\n 0x0000, # Positive zero\n 0x8000, # Negative zero\n 0x7C00, # Positive infinity\n 0xFC00, # Negative infinity\n 0x7E00, # Some 
positive quiet NaN\n 0xFE00, # Some negative quiet NaN\n 0x3D00, # 1.25\n 0xBD00, # -1.25\n 0x0001, # Smallest positive denormalized value\n 0x8001, # Smallest negative denormalized value\n 0x03FF, # Largest positive denormalized value\n 0x83FF, # Largest negative denormalized value\n 0x0400, # Smallest positive normalized value\n 0x8400, # Smallest negative normalized value\n )\n\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_157.tif', 14, 1, 1, gdal.GDT_Float32, options=['NBITS=16'])\n ds = None\n ds = gdal.Open('/vsimem/tiff_write_157.tif')\n offset = int(ds.GetRasterBand(1).GetMetadataItem('BLOCK_OFFSET_0_0', 'TIFF'))\n ds = None\n\n f = gdal.VSIFOpenL('/vsimem/tiff_write_157.tif', 'rb+')\n gdal.VSIFSeekL(f, offset, 0)\n gdal.VSIFWriteL(vals, 1, len(vals), f)\n gdal.VSIFCloseL(f)\n\n # Check that we properly deserialize Float16 values\n ds = gdal.Open('/vsimem/tiff_write_157.tif')\n assert ds.GetRasterBand(1).GetMetadataItem('NBITS', 'IMAGE_STRUCTURE') == '16'\n got = struct.unpack('f' * 14, ds.ReadRaster())\n expected = [0.0, -0.0, gdaltest.posinf(), -gdaltest.posinf(), gdaltest.NaN(), gdaltest.NaN(), 1.25, -1.25, 5.9604644775390625e-08, -5.9604644775390625e-08, 6.0975551605224609e-05, -6.0975551605224609e-05, 6.103515625e-05, -6.103515625e-05]\n for i in range(14):\n if i == 4 or i == 5:\n assert got[i] != got[i]\n elif abs(got[i] - expected[i]) > 1e-15:\n print(got[i])\n print(expected[i])\n pytest.fail(i)\n\n # Check that we properly decode & re-encode Float16 values\n gdal.Translate('/vsimem/tiff_write_157_dst.tif', ds)\n ds = None\n\n ds = gdal.Open('/vsimem/tiff_write_157_dst.tif')\n offset = int(ds.GetRasterBand(1).GetMetadataItem('BLOCK_OFFSET_0_0', 'TIFF'))\n ds = None\n\n f = gdal.VSIFOpenL('/vsimem/tiff_write_157_dst.tif', 'rb')\n gdal.VSIFSeekL(f, offset, 0)\n vals_copied = gdal.VSIFReadL(1, 14 * 2, f)\n gdal.VSIFCloseL(f)\n\n if vals != vals_copied:\n print(struct.unpack('H' * 14, vals))\n pytest.fail(struct.unpack('H' * 14, vals_copied))\n\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_157.tif')\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_157_dst.tif')\n\n # Now try Float32 -> Float16 conversion\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_157.tif', 18, 1, 1, gdal.GDT_Float32, options=['NBITS=16'])\n vals = struct.pack('I' * 18,\n 0x00000000, # Positive zero\n 0x80000000, # Negative zero\n 0x7f800000, # Positive infinity\n 0xff800000, # Negative infinity\n 0x7fc00000, # Some positive quiet NaN\n 0xffc00000, # Some negative quiet NaN\n 0x7f800001, # Some positive signaling NaN with significand bits that will get lost\n 0xff800001, # Some negative signaling NaN with significand bits that will get lost\n 0x3fa00000, # 1.25\n 0xbfa00000, # -1.25\n 0x00000001, # Smallest positive denormalized value\n 0x80000001, # Smallest negative denormalized value\n 0x007fffff, # Largest positive denormalized value\n 0x807fffff, # Largest negative denormalized value\n 0x00800000, # Smallest positive normalized value\n 0x80800000, # Smallest negative normalized value\n 0x33800000, # 5.9604644775390625e-08 = Smallest number that can be converted as a float16 denormalized value\n 0x47800000, # 65536 --> converted to infinity\n )\n ds.GetRasterBand(1).WriteRaster(0, 0, 18, 1, vals, buf_type=gdal.GDT_Float32)\n with gdaltest.error_handler():\n ds.FlushCache()\n ds = None\n\n ds = gdal.Open('/vsimem/tiff_write_157.tif')\n got = struct.unpack('f' * 18, ds.ReadRaster())\n ds = None\n expected = (0.0, -0.0, gdaltest.posinf(), -gdaltest.posinf(),\n gdaltest.NaN(), gdaltest.NaN(), gdaltest.NaN(), 
gdaltest.NaN(),\n 1.25, -1.25, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 5.9604644775390625e-08, gdaltest.posinf())\n for i in range(18):\n if i == 4 or i == 5:\n assert got[i] != got[i]\n elif abs(got[i] - expected[i]) > 1e-15:\n print(got[i])\n print(expected[i])\n pytest.fail(i)\n\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_157.tif')\n\n # Test pixel interleaved\n gdal.Translate('/vsimem/tiff_write_157.tif', '../gdrivers/data/small_world.tif', options='-co NBITS=16 -ot Float32')\n ds = gdal.Open('/vsimem/tiff_write_157.tif')\n cs = ds.GetRasterBand(1).Checksum()\n assert cs == 30111\n cs = ds.GetRasterBand(2).Checksum()\n assert cs == 32302\n ds = None\n\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_157.tif')\n\n###############################################################################\n# Test GetActualBlockSize() (perhaps not the best place for that...)\n\n\ndef test_tiff_write_158():\n\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_158.tif', 20, 40, 1, options=['TILED=YES', 'BLOCKXSIZE=16', 'BLOCKYSIZE=32'])\n (w, h) = ds.GetRasterBand(1).GetActualBlockSize(0, 0)\n assert (w, h) == (16, 32)\n (w, h) = ds.GetRasterBand(1).GetActualBlockSize(1, 1)\n assert (w, h) == (4, 8)\n res = ds.GetRasterBand(1).GetActualBlockSize(2, 0)\n assert res is None\n res = ds.GetRasterBand(1).GetActualBlockSize(0, 2)\n assert res is None\n res = ds.GetRasterBand(1).GetActualBlockSize(-1, 0)\n assert res is None\n res = ds.GetRasterBand(1).GetActualBlockSize(0, -1)\n assert res is None\n ds = None\n\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_158.tif')\n\n###############################################################################\n# Test that COPY_SRC_OVERVIEWS creation option with JPEG compression\n# results in a https://trac.osgeo.org/gdal/wiki/CloudOptimizedGeoTIFF\n\n\ndef test_tiff_write_159():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('JPEG') == -1:\n pytest.skip()\n if md['DMD_CREATIONOPTIONLIST'].find('BIGTIFF') == -1:\n pytest.skip()\n\n prev_table = ''\n for options in [[], ['JPEG_QUALITY=50'], ['PHOTOMETRIC=YCBCR']]:\n\n src_ds = gdal.Translate('', '../gdrivers/data/small_world.tif', format='MEM')\n src_ds.BuildOverviews('NEAR', overviewlist=[2, 4])\n ds = gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_159.tif', src_ds,\n options=['COPY_SRC_OVERVIEWS=YES', 'COMPRESS=JPEG'] + options)\n ds = None\n src_ds = None\n\n ds = gdal.Open('/vsimem/tiff_write_159.tif')\n cs0 = ds.GetRasterBand(1).Checksum()\n cs1 = ds.GetRasterBand(1).GetOverview(0).Checksum()\n cs2 = ds.GetRasterBand(1).GetOverview(1).Checksum()\n assert not (cs0 == 0 or cs1 == 0 or cs2 == 0), options\n ifd_main = int(ds.GetRasterBand(1).GetMetadataItem('IFD_OFFSET', 'TIFF'))\n ifd_ovr_0 = int(ds.GetRasterBand(1).GetOverview(0).GetMetadataItem('IFD_OFFSET', 'TIFF'))\n ifd_ovr_1 = int(ds.GetRasterBand(1).GetOverview(1).GetMetadataItem('IFD_OFFSET', 'TIFF'))\n data_ovr_1 = int(ds.GetRasterBand(1).GetOverview(1).GetMetadataItem('BLOCK_OFFSET_0_0', 'TIFF'))\n data_ovr_0 = int(ds.GetRasterBand(1).GetOverview(0).GetMetadataItem('BLOCK_OFFSET_0_0', 'TIFF'))\n data_main = int(ds.GetRasterBand(1).GetMetadataItem('BLOCK_OFFSET_0_0', 'TIFF'))\n assert (ifd_main < ifd_ovr_0 and ifd_ovr_0 < ifd_ovr_1 and ifd_ovr_1 < data_ovr_1 and data_ovr_1 < data_ovr_0 and data_ovr_0 < data_main), \\\n options\n table_main = ds.GetRasterBand(1).GetMetadataItem('JPEGTABLES', 'TIFF')\n table_ovr_0 = ds.GetRasterBand(1).GetOverview(0).GetMetadataItem('JPEGTABLES', 'TIFF')\n table_ovr_1 = 
ds.GetRasterBand(1).GetOverview(1).GetMetadataItem('JPEGTABLES', 'TIFF')\n assert table_main == table_ovr_0 and table_ovr_0 == table_ovr_1, options\n # Check that the JPEG tables are different in the 3 modes\n assert table_main != prev_table, options\n prev_table = table_main\n ds = None\n\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_159.tif')\n\n for value in range(4):\n\n src_ds = gdal.Translate('', 'data/byte.tif', format='MEM')\n src_ds.BuildOverviews('NEAR', overviewlist=[2])\n ds = gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_159.tif', src_ds,\n options=['COPY_SRC_OVERVIEWS=YES', 'COMPRESS=JPEG', 'JPEGTABLESMODE=%d' % value])\n ds = None\n src_ds = None\n\n ds = gdal.Open('/vsimem/tiff_write_159.tif')\n cs0 = ds.GetRasterBand(1).Checksum()\n cs1 = ds.GetRasterBand(1).GetOverview(0).Checksum()\n assert cs0 == 4743 and cs1 == 1133, value\n ds = None\n\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_159.tif')\n\n \n\n###############################################################################\n# Test the Create() interface with a BLOCKYSIZE > image height\n\ndef test_tiff_write_160():\n\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_160.tif', 10, 10, options=['BLOCKYSIZE=11'])\n ds.GetRasterBand(1).Fill(255)\n ds = None\n\n ds = gdal.Open('/vsimem/tiff_write_160.tif')\n cs = ds.GetRasterBand(1).Checksum()\n assert cs == 1218\n ds = None\n\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_160.tif')\n\n###############################################################################\n# Test setting GCPs on an image with already a geotransform and vice-versa (#6751)\n\n\ndef test_tiff_write_161():\n\n ds = gdaltest.tiff_drv.Create('/vsimem/tiff_write_161.tif', 1, 1)\n ds.SetGeoTransform([0, 1, 2, 3, 4, 5])\n ds = None\n\n ds = gdal.Open('/vsimem/tiff_write_161.tif', gdal.GA_Update)\n src_ds = gdal.Open('data/gcps.vrt')\n with gdaltest.error_handler():\n assert ds.SetGCPs(src_ds.GetGCPs(), '') == 0\n assert ds.GetGeoTransform(can_return_null=True) is None\n ds = None\n\n ds = gdal.Open('/vsimem/tiff_write_161.tif', gdal.GA_Update)\n assert ds.GetGCPs()\n assert ds.GetGeoTransform(can_return_null=True) is None\n with gdaltest.error_handler():\n assert ds.SetGeoTransform([0, 1, 2, 3, 4, 5]) == 0\n assert ds.GetGeoTransform() == (0.0, 1.0, 2.0, 3.0, 4.0, 5.0)\n assert not ds.GetGCPs()\n ds = None\n\n ds = gdal.Open('/vsimem/tiff_write_161.tif', gdal.GA_Update)\n assert not ds.GetGCPs()\n assert ds.GetGeoTransform() == (0.0, 1.0, 2.0, 3.0, 4.0, 5.0)\n ds = None\n\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_161.tif')\n\n###############################################################################\n# Test creating a JPEG compressed file with big tiles (#6757)\n\n\ndef test_tiff_write_162():\n\n src_ds = gdal.GetDriverByName('MEM').Create('', 512, 512, 3)\n\n options = ['TILED=YES', 'BLOCKXSIZE=512', 'BLOCKYSIZE=512', 'COMPRESS=JPEG']\n\n gdaltest.tiff_drv.CreateCopy('/vsimem/tiff_write_162.tif', src_ds,\n options=options)\n\n assert gdal.GetLastErrorMsg() == ''\n\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_162.tif')\n\n###############################################################################\n# Test creating a file that would trigger strip chopping (#6924)\n\n\ndef test_tiff_write_163():\n\n # Was a libtiff 4.0.8 regression\n if gdaltest.tiff_drv.GetMetadataItem('LIBTIFF').find('4.0.8') >= 0:\n pytest.skip('Test broken with libtiff 4.0.8')\n\n gdal.Translate('/vsimem/tiff_write_163.tif', 'data/byte.tif',\n options='-outsize 1 20000 -co BLOCKYSIZE=20000 -co 
PROFILE=BASELINE')\n ds = gdal.Open('/vsimem/tiff_write_163.tif')\n cs = ds.GetRasterBand(1).Checksum()\n assert cs == 47567\n # Check that IsBlockAvailable() works properly in that mode\n offset_0_2 = ds.GetRasterBand(1).GetMetadataItem('BLOCK_OFFSET_0_2', 'TIFF')\n assert offset_0_2 == str(146 + 2 * 8192)\n ds = None\n\n gdaltest.tiff_drv.Delete('/vsimem/tiff_write_163.tif')\n\n###############################################################################\n# Test that we handle [0,1,0,0,0,1] geotransform as a regular geotransform\n\n\ndef test_tiff_write_164():\n\n ds = gdaltest.tiff_drv.Create('/vsimem/test.tif', 1, 1)\n ds.SetGeoTransform([0, 1, 0, 0, 0, 1])\n ds = None\n\n ds = gdal.Open('/vsimem/test.tif')\n gt = ds.GetGeoTransform(can_return_null=True)\n ds = None\n\n assert gt == (0, 1, 0, 0, 0, 1)\n\n # Test [0,1,0,0,0,-1] as well\n ds = gdaltest.tiff_drv.Create('/vsimem/test.tif', 1, 1)\n ds.SetGeoTransform([0, 1, 0, 0, 0, -1])\n ds = None\n\n ds = gdal.Open('/vsimem/test.tif')\n gt = ds.GetGeoTransform(can_return_null=True)\n ds = None\n\n assert gt == (0, 1, 0, 0, 0, -1)\n\n gdal.Unlink('/vsimem/test.tif')\n\n###############################################################################\n# Test the current behaviour of per-band nodata vs per-dataset serialization\n\n\ndef test_tiff_write_165():\n\n ds = gdaltest.tiff_drv.Create('/vsimem/test.tif', 1, 1, 3)\n ret = ds.GetRasterBand(1).SetNoDataValue(100)\n assert ret == 0\n\n with gdaltest.error_handler():\n ret = ds.GetRasterBand(2).SetNoDataValue(200)\n assert gdal.GetLastErrorMsg() != '', 'warning expected, but not emitted'\n assert ret == 0\n\n nd = ds.GetRasterBand(1).GetNoDataValue()\n assert nd == 100\n\n nd = ds.GetRasterBand(2).GetNoDataValue()\n assert nd == 200\n\n ds = None\n\n ds = gdal.Open('/vsimem/test.tif')\n nd = ds.GetRasterBand(1).GetNoDataValue()\n ds = None\n\n assert nd == 200\n\n gdal.Unlink('/vsimem/test.tif')\n\n###############################################################################\n# Test reading & writing Z dimension for ModelTiepointTag and ModelPixelScaleTag (#7093)\n\n\ndef test_tiff_write_166():\n\n ds = gdal.Open('data/tiff_vertcs_scale_offset.tif')\n assert ds.GetRasterBand(1).GetScale() == 2.0\n\n assert ds.GetRasterBand(1).GetOffset() == 10.0\n\n # Scale + offset through CreateCopy()\n gdal.Translate('/vsimem/tiff_write_166.tif', 'data/byte.tif',\n options='-a_srs EPSG:26711+5773 -a_scale 2.0 -a_offset 10 -co PROFILE=GEOTIFF')\n assert gdal.VSIStatL('/vsimem/tiff_write_166.tif.aux.xml') is None\n\n ds = gdal.Open('/vsimem/tiff_write_166.tif')\n assert ds.GetRasterBand(1).GetScale() == 2.0\n\n assert ds.GetRasterBand(1).GetOffset() == 10.0\n ds = None\n gdal.Unlink('/vsimem/tiff_write_166.tif')\n\n # Offset only through CreateCopy()\n gdal.Translate('/vsimem/tiff_write_166.tif', 'data/byte.tif',\n options='-a_srs EPSG:26711+5773 -a_offset 10 -co PROFILE=GEOTIFF')\n assert gdal.VSIStatL('/vsimem/tiff_write_166.tif.aux.xml') is None\n\n ds = gdal.Open('/vsimem/tiff_write_166.tif')\n assert ds.GetRasterBand(1).GetScale() == 1.0\n\n assert ds.GetRasterBand(1).GetOffset() == 10.0\n ds = None\n gdal.Unlink('/vsimem/tiff_write_166.tif')\n\n # Scale + offset through Create()\n ds = gdal.GetDriverByName('GTiff').Create('/vsimem/tiff_write_166.tif', 1, 1, options=['PROFILE=GEOTIFF'])\n sr = osr.SpatialReference()\n sr.SetFromUserInput('EPSG:26711+5773')\n ds.SetProjection(sr.ExportToWkt())\n ds.SetGeoTransform([440720, 60, 0, 3751320, 0, -60])\n ds.GetRasterBand(1).SetScale(2)\n 
ds.GetRasterBand(1).SetOffset(10)\n ds = None\n assert gdal.VSIStatL('/vsimem/tiff_write_166.tif.aux.xml') is None\n\n ds = gdal.Open('/vsimem/tiff_write_166.tif')\n assert ds.GetRasterBand(1).GetScale() == 2.0\n assert ds.GetRasterBand(1).GetOffset() == 10.0\n ds = None\n gdal.Unlink('/vsimem/tiff_write_166.tif')\n\n # Scale only through Create()\n ds = gdal.GetDriverByName('GTiff').Create('/vsimem/tiff_write_166.tif', 1, 1, options=['PROFILE=GEOTIFF'])\n sr = osr.SpatialReference()\n sr.SetFromUserInput('EPSG:26711+5773')\n ds.SetProjection(sr.ExportToWkt())\n ds.SetGeoTransform([440720, 60, 0, 3751320, 0, -60])\n ds.GetRasterBand(1).SetScale(2)\n ds = None\n assert gdal.VSIStatL('/vsimem/tiff_write_166.tif.aux.xml') is None\n\n ds = gdal.Open('/vsimem/tiff_write_166.tif')\n assert ds.GetRasterBand(1).GetScale() == 2.0\n assert ds.GetRasterBand(1).GetOffset() == 0.0\n ds = None\n gdal.Unlink('/vsimem/tiff_write_166.tif')\n\n # Offset only through through Create()\n ds = gdal.GetDriverByName('GTiff').Create('/vsimem/tiff_write_166.tif', 1, 1, options=['PROFILE=GEOTIFF'])\n sr = osr.SpatialReference()\n sr.SetFromUserInput('EPSG:26711+5773')\n ds.SetProjection(sr.ExportToWkt())\n ds.SetGeoTransform([440720, 60, 0, 3751320, 0, -60])\n ds.GetRasterBand(1).SetOffset(10)\n ds = None\n assert gdal.VSIStatL('/vsimem/tiff_write_166.tif.aux.xml') is None\n\n ds = gdal.Open('/vsimem/tiff_write_166.tif')\n assert ds.GetRasterBand(1).GetScale() == 1.0\n assert ds.GetRasterBand(1).GetOffset() == 10.0\n ds = None\n gdal.Unlink('/vsimem/tiff_write_166.tif')\n\n###############################################################################\n\n\ndef test_tiff_write_167_deflate_zlevel():\n\n src_ds = gdal.Open('data/byte.tif')\n gdal.GetDriverByName('GTiff').CreateCopy('/vsimem/out.tif', src_ds,\n options=['COMPRESS=DEFLATE',\n 'ZLEVEL=1'])\n size1 = gdal.VSIStatL('/vsimem/out.tif').size\n\n gdal.GetDriverByName('GTiff').CreateCopy('/vsimem/out.tif', src_ds,\n options=['COMPRESS=DEFLATE',\n 'NUM_THREADS=2',\n 'ZLEVEL=9'])\n size2 = gdal.VSIStatL('/vsimem/out.tif').size\n gdal.Unlink('/vsimem/out.tif')\n\n assert size2 < size1\n\n ds = gdal.GetDriverByName('GTiff').Create('/vsimem/out.tif', 20, 20, 1,\n options=['COMPRESS=DEFLATE',\n 'ZLEVEL=9'])\n ds.SetProjection(src_ds.GetProjectionRef())\n ds.SetGeoTransform(src_ds.GetGeoTransform())\n ds.WriteRaster(0, 0, 20, 20, src_ds.ReadRaster())\n ds = None\n\n size2_create = gdal.VSIStatL('/vsimem/out.tif').size\n gdal.Unlink('/vsimem/out.tif')\n\n assert size2 == size2_create\n\n###############################################################################\n# Test CCITTFAX3\n\n\ndef test_tiff_write_168_ccitfax3():\n\n ut = gdaltest.GDALTest('GTiff', 'oddsize1bit.tif', 1, 5918,\n options=['NBITS=1', 'COMPRESS=CCITTFAX3'])\n return ut.testCreateCopy()\n\n###############################################################################\n# Test CCITTRLE\n\n\ndef test_tiff_write_169_ccitrle():\n\n ut = gdaltest.GDALTest('GTiff', 'oddsize1bit.tif', 1, 5918,\n options=['NBITS=1', 'COMPRESS=CCITTRLE'])\n return ut.testCreateCopy()\n\n###############################################################################\n# Test invalid compression method\n\n\ndef test_tiff_write_170_invalid_compresion():\n\n src_ds = gdal.Open('data/byte.tif')\n with gdaltest.error_handler():\n gdal.GetDriverByName('GTiff').CreateCopy('/vsimem/out.tif', src_ds,\n options=['COMPRESS=INVALID'])\n assert gdal.GetLastErrorMsg() != ''\n 
gdal.Unlink('/vsimem/out.tif')\n\n###############################################################################\n# Test ZSTD compression\n\n\ndef test_tiff_write_171_zstd():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('ZSTD') == -1:\n pytest.skip()\n\n ut = gdaltest.GDALTest('GTiff', 'byte.tif', 1, 4672,\n options=['COMPRESS=ZSTD', 'ZSTD_LEVEL=1'])\n return ut.testCreateCopy()\n\n###############################################################################\n# Test ZSTD compression with PREDICTOR = 2\n\n\ndef test_tiff_write_171_zstd_predictor():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('ZSTD') == -1:\n pytest.skip()\n\n ut = gdaltest.GDALTest('GTiff', 'byte.tif', 1, 4672,\n options=['COMPRESS=ZSTD', 'ZSTD_LEVEL=1', 'PREDICTOR=2'])\n return ut.testCreateCopy()\n\n###############################################################################\n# Test WEBP compression\n\n\ndef test_tiff_write_webp():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('WEBP') == -1:\n pytest.skip()\n\n ut = gdaltest.GDALTest('GTiff', 'md_ge_rgb_0010000.tif', 0, None,\n options=['COMPRESS=WEBP'])\n return ut.testCreateCopy()\n\n###############################################################################\n# Test WEBP compression with internal tiling\n\n\ndef test_tiff_write_tiled_webp():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('WEBP') == -1:\n pytest.skip()\n\n if md['DMD_CREATIONOPTIONLIST'].find('WEBP_LOSSLESS') == -1:\n pytest.skip()\n\n filename = '/vsimem/tiff_write_tiled_webp.tif'\n src_ds = gdal.Open('data/md_ge_rgb_0010000.tif')\n gdaltest.tiff_drv.CreateCopy(filename, src_ds,\n options=['COMPRESS=WEBP',\n 'WEBP_LOSSLESS=true',\n 'TILED=true'])\n ds = gdal.Open(filename)\n cs = [ds.GetRasterBand(i+1).Checksum() for i in range(3)]\n assert cs == [21212, 21053, 21349]\n\n gdaltest.tiff_drv.Delete(filename)\n gdal.Unlink('data/md_ge_rgb_0010000.tif.aux.xml')\n\n###############################################################################\n# Test WEBP compression with huge single strip\n\n\ndef test_tiff_write_webp_huge_single_strip():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('WEBP') == -1:\n pytest.skip()\n\n filename = '/vsimem/tif_webp_huge_single_strip.tif'\n src_ds = gdal.Open('data/tif_webp_huge_single_strip.tif')\n gdaltest.tiff_drv.CreateCopy(filename, src_ds,\n options=['COMPRESS=WEBP',\n 'BLOCKYSIZE=2001'])\n ds = gdal.Open(filename)\n original_stats = [src_ds.GetRasterBand(i + 1).ComputeStatistics(True) for i in range(3)]\n got_stats = [ds.GetRasterBand(i + 1).ComputeStatistics(True) for i in range(3)]\n ds = None\n src_ds = None\n\n for i in range(3):\n for j in range(4):\n assert abs(original_stats[i][j] - got_stats[i][j]) <= 1e-1 * abs(original_stats[i][j]), \\\n 'did not get expected statistics'\n\n gdaltest.tiff_drv.Delete(filename)\n gdal.Unlink('data/tif_webp_huge_single_strip.tif.aux.xml')\n\n\n###############################################################################\n# GeoTIFF DGIWG tags\n\n\ndef test_tiff_write_172_geometadata_tiff_rsid():\n\n tmpfilename = '/vsimem/tiff_write_172_geometadata_tiff_rsid.tiff'\n ds = gdal.GetDriverByName('GTiff').Create(tmpfilename, 1, 1)\n ds.SetMetadataItem('GEO_METADATA', 'foo')\n ds.SetMetadataItem('TIFF_RSID', 'bar')\n ds = None\n\n ds = gdal.Open(tmpfilename, gdal.GA_Update)\n assert ds.GetMetadataItem('GEO_METADATA') == 'foo', ds.GetMetadata()\n assert 
ds.GetMetadataItem('TIFF_RSID') == 'bar', ds.GetMetadata()\n ds.SetMetadata({})\n ds = None\n\n ds = gdal.Open(tmpfilename)\n assert ds.GetMetadataItem('GEO_METADATA') is None, ds.GetMetadata()\n assert ds.GetMetadataItem('TIFF_RSID') is None, ds.GetMetadata()\n ds = None\n\n gdal.Unlink(tmpfilename)\n\n###############################################################################\n# Test LERC compression\n\n\ndef test_tiff_write_173_lerc():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('LERC') == -1:\n pytest.skip()\n\n ut = gdaltest.GDALTest('GTiff', 'byte.tif', 1, 4672,\n options=['COMPRESS=LERC'])\n return ut.testCreateCopy()\n\n###############################################################################\n# Test LERC_DEFLATE compression\n\n\ndef test_tiff_write_174_lerc_deflate():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('LERC_DEFLATE') == -1:\n pytest.skip()\n\n ut = gdaltest.GDALTest('GTiff', 'byte.tif', 1, 4672,\n options=['COMPRESS=LERC_DEFLATE'])\n return ut.testCreateCopy()\n\n###############################################################################\n# Test LERC_DEFLATE compression\n\n\ndef test_tiff_write_174_lerc_deflate_with_level():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('LERC_DEFLATE') == -1:\n pytest.skip()\n\n ut = gdaltest.GDALTest('GTiff', 'byte.tif', 1, 4672,\n options=['COMPRESS=LERC_DEFLATE', 'ZLEVEL=1'])\n return ut.testCreateCopy()\n\n###############################################################################\n# Test LERC_ZSTD compression\n\n\ndef test_tiff_write_175_lerc_zstd():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('LERC_ZSTD') == -1:\n pytest.skip()\n\n ut = gdaltest.GDALTest('GTiff', 'byte.tif', 1, 4672,\n options=['COMPRESS=LERC_ZSTD'])\n return ut.testCreateCopy()\n\n###############################################################################\n# Test LERC_ZSTD compression\n\n\ndef test_tiff_write_175_lerc_zstd_with_level():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('LERC_ZSTD') == -1:\n pytest.skip()\n\n ut = gdaltest.GDALTest('GTiff', 'byte.tif', 1, 4672,\n options=['COMPRESS=LERC_ZSTD', 'ZSTD_LEVEL=1'])\n return ut.testCreateCopy()\n\n###############################################################################\n# Test LERC compression with MAX_Z_ERROR\n\n\ndef test_tiff_write_176_lerc_max_z_error():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('LERC') == -1:\n pytest.skip()\n\n ut = gdaltest.GDALTest('GTiff', 'byte.tif', 1, 4529,\n options=['COMPRESS=LERC', 'MAX_Z_ERROR=1'])\n return ut.testCreateCopy(skip_preclose_test=1)\n\n###############################################################################\n# Test LERC compression with several bands and tiling\n\n\ndef test_tiff_write_177_lerc_several_bands_tiling():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('LERC') == -1:\n pytest.skip()\n\n filename = '/vsimem/tiff_write_177_lerc_several_bands_tiling.tif'\n gdal.Translate(filename, '../gdrivers/data/small_world.tif',\n creationOptions=['COMPRESS=LERC', 'TILED=YES'])\n ds = gdal.Open(filename)\n cs = [ds.GetRasterBand(i+1).Checksum() for i in range(3)]\n ds = None\n gdal.Unlink(filename)\n assert cs == [30111, 32302, 40026]\n\n###############################################################################\n# Test LERC compression with alpha band\n\n\ndef 
test_tiff_write_178_lerc_with_alpha():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('LERC') == -1:\n pytest.skip()\n\n filename = '/vsimem/tiff_write_178_lerc_with_alpha.tif'\n gdal.Translate(filename, 'data/stefan_full_rgba.tif',\n creationOptions=['COMPRESS=LERC'])\n ds = gdal.Open(filename)\n cs = [ds.GetRasterBand(i+1).Checksum() for i in range(4)]\n ds = None\n gdal.Unlink(filename)\n assert cs == [12603, 58561, 36064, 10807]\n\n###############################################################################\n# Test LERC compression with alpha band with only 0 and 255\n\n\ndef test_tiff_write_178_lerc_with_alpha_0_and_255():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('LERC') == -1:\n pytest.skip()\n\n filename = '/vsimem/tiff_write_178_lerc_with_alpha_0_and_255.tif'\n gdal.Translate(filename, 'data/rgba_with_alpha_0_and_255.tif',\n creationOptions=['COMPRESS=LERC'])\n ds = gdal.Open(filename)\n cs = [ds.GetRasterBand(i+1).Checksum() for i in range(4)]\n ds = None\n gdal.Unlink(filename)\n assert cs == [13, 13, 13, 13]\n\n###############################################################################\n# Test LERC compression with different data types\n\n\ndef test_tiff_write_179_lerc_data_types():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('LERC') == -1:\n pytest.skip()\n\n filename = '/vsimem/tiff_write_179_lerc_data_types.tif'\n for src_filename in ['uint16.tif', 'int16.tif', 'uint32.tif', 'int32.tif',\n 'float32.tif', 'float64.tif']:\n gdal.Translate(filename, 'data/' + src_filename,\n creationOptions=['COMPRESS=LERC'])\n ds = gdal.Open(filename)\n cs = ds.GetRasterBand(1).Checksum()\n ds = None\n gdal.Unlink(filename)\n assert cs == 4672\n\n filename_tmp = filename + \".tmp.tif\"\n gdal.Translate(filename_tmp, 'data/byte.tif',\n creationOptions=['PIXELTYPE=SIGNEDBYTE'])\n gdal.Translate(filename, filename_tmp, creationOptions=['COMPRESS=LERC'])\n gdal.Unlink(filename_tmp)\n ds = gdal.Open(filename)\n cs = ds.GetRasterBand(1).Checksum()\n ds = None\n gdal.Unlink(filename)\n assert cs == 4672\n\n gdal.ErrorReset()\n with gdaltest.error_handler():\n gdal.Translate(filename, 'data/cfloat32.tif', creationOptions=['COMPRESS=LERC'])\n assert gdal.GetLastErrorMsg() != ''\n gdal.Unlink(filename)\n\n###############################################################################\n# Test LERC compression with several bands and separate\n\n\ndef test_tiff_write_180_lerc_separate():\n\n md = gdaltest.tiff_drv.GetMetadata()\n if md['DMD_CREATIONOPTIONLIST'].find('LERC') == -1:\n pytest.skip()\n\n filename = '/vsimem/tiff_write_180_lerc_separate.tif'\n gdal.Translate(filename, '../gdrivers/data/small_world.tif',\n creationOptions=['COMPRESS=LERC', 'INTERLEAVE=BAND'])\n ds = gdal.Open(filename)\n cs = [ds.GetRasterBand(i+1).Checksum() for i in range(3)]\n ds = None\n gdal.Unlink(filename)\n assert cs == [30111, 32302, 40026]\n\n###############################################################################\n# Test set XMP metadata\n\n\ndef test_tiff_write_181_xmp():\n\n src_ds = gdal.Open('data/utmsmall.tif')\n\n new_ds = gdaltest.tiff_drv.CreateCopy('tmp/test_181.tif', src_ds)\n src_ds = None\n\n xmp_ds = gdal.Open('../gdrivers/data/byte_with_xmp.tif')\n xmp = xmp_ds.GetMetadata('xml:XMP')\n xmp_ds = None\n assert 'W5M0MpCehiHzreSzNTczkc9d' in xmp[0], 'Wrong input file without XMP'\n\n new_ds.SetMetadata(xmp, 'xml:XMP')\n new_ds = None\n\n # hopefully it's closed now!\n\n new_ds = 
gdal.Open('tmp/test_181.tif')\n read_xmp = new_ds.GetMetadata('xml:XMP')\n assert read_xmp and 'W5M0MpCehiHzreSzNTczkc9d' in read_xmp[0], \\\n 'No XMP data written in output file'\n new_ds = None\n\n gdaltest.tiff_drv.Delete('tmp/test_181.tif')\n\n###############################################################################\n# Test delete XMP from a dataset\n\n\ndef test_tiff_write_182_xmp_delete():\n\n shutil.copyfile('../gdrivers/data/byte_with_xmp.tif', 'tmp/test_182.tif')\n\n chg_ds = gdal.Open('tmp/test_182.tif', gdal.GA_Update)\n read_xmp = chg_ds.GetMetadata('xml:XMP')\n assert read_xmp and 'W5M0MpCehiHzreSzNTczkc9d' in read_xmp[0], \\\n 'No XMP data written in output file'\n chg_ds.SetMetadata(None, 'xml:XMP')\n chg_ds = None\n\n again_ds = gdal.Open('tmp/test_182.tif')\n read_xmp = again_ds.GetMetadata('xml:XMP')\n assert not read_xmp, 'XMP data not removed'\n again_ds = None\n\n gdaltest.tiff_drv.Delete('tmp/test_182.tif')\n\n###############################################################################\n\n\ndef test_tiff_write_183_createcopy_append_subdataset():\n\n tmpfilename = '/vsimem/test_tiff_write_183_createcopy_append_subdataset.tif'\n gdal.Translate(tmpfilename, 'data/byte.tif')\n gdal.Translate(tmpfilename, 'data/utmsmall.tif',\n creationOptions=['APPEND_SUBDATASET=YES'])\n\n ds = gdal.Open(tmpfilename)\n assert ds.GetRasterBand(1).Checksum() == 4672\n\n ds = gdal.Open('GTIFF_DIR:2:' + tmpfilename)\n assert ds.GetRasterBand(1).Checksum() == 50054\n\n ds = None\n gdal.Unlink(tmpfilename)\n\n###############################################################################\n\n\ndef test_tiff_write_184_create_append_subdataset():\n\n tmpfilename = '/vsimem/test_tiff_write_184_create_append_subdataset.tif'\n gdal.Translate(tmpfilename, 'data/byte.tif')\n ds = gdal.GetDriverByName('GTiff').Create(tmpfilename, 1, 1,\n options=['APPEND_SUBDATASET=YES'])\n ds.GetRasterBand(1).Fill(255)\n ds = None\n\n ds = gdal.Open(tmpfilename)\n assert ds.GetRasterBand(1).Checksum() == 4672\n\n ds = gdal.Open('GTIFF_DIR:2:' + tmpfilename)\n assert ds.GetRasterBand(1).Checksum() == 3\n\n ds = None\n gdal.Unlink(tmpfilename)\n\n###############################################################################\n# Ask to run again tests with GDAL_API_PROXY=YES\n\n\ndef tiff_write_api_proxy():\n\n if not run_tiff_write_api_proxy:\n pytest.skip()\n\n import test_py_scripts\n ret = test_py_scripts.run_py_script_as_external_script('.', 'tiff_write', ' -api_proxy', display_live_on_parent_stdout=True)\n\n assert ret.find('Failed: 0') != -1\n\n###############################################################################\n\n\ndef test_tiff_write_cleanup():\n gdaltest.tiff_drv = None\n" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sumiya-NJU/da-faster-rcnn-PyTorch
[ "62a7286d8e40c6625f32de8d49039c7f623909bd" ]
[ "lib/model/faster_rcnn/faster_rcnn.py" ]
[ "import random\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom torch.autograd import Variable\r\nimport torchvision.models as models\r\nfrom torch.autograd import Variable\r\nimport numpy as np\r\nfrom model.utils.config import cfg\r\nfrom model.rpn.rpn import _RPN\r\nfrom model.roi_pooling.modules.roi_pool import _RoIPooling\r\nfrom model.roi_crop.modules.roi_crop import _RoICrop\r\nfrom model.roi_align.modules.roi_align import RoIAlignAvg\r\nfrom model.rpn.proposal_target_layer_cascade import _ProposalTargetLayer\r\nimport time\r\nimport pdb\r\nfrom model.utils.net_utils import _smooth_l1_loss, _crop_pool_layer, _affine_grid_gen, _affine_theta\r\n\r\nclass _fasterRCNN(nn.Module):\r\n \"\"\" faster RCNN \"\"\"\r\n def __init__(self, classes, class_agnostic):\r\n super(_fasterRCNN, self).__init__()\r\n self.classes = classes\r\n self.n_classes = len(classes)\r\n self.class_agnostic = class_agnostic\r\n # loss\r\n self.RCNN_loss_cls = 0\r\n self.RCNN_loss_bbox = 0\r\n\r\n # define rpn\r\n self.RCNN_rpn = _RPN(self.dout_base_model)\r\n self.RCNN_proposal_target = _ProposalTargetLayer(self.n_classes)\r\n self.RCNN_roi_pool = _RoIPooling(cfg.POOLING_SIZE, cfg.POOLING_SIZE, 1.0/16.0)\r\n self.RCNN_roi_align = RoIAlignAvg(cfg.POOLING_SIZE, cfg.POOLING_SIZE, 1.0/16.0)\r\n\r\n self.grid_size = cfg.POOLING_SIZE * 2 if cfg.CROP_RESIZE_WITH_MAX_POOL else cfg.POOLING_SIZE\r\n self.RCNN_roi_crop = _RoICrop()\r\n\r\n def forward(self, im_data, im_info, gt_boxes, num_boxes):\r\n batch_size = im_data.size(0)\r\n\r\n im_info = im_info.data #(size1,size2, image ratio(new image / source image) )\r\n gt_boxes = gt_boxes.data\r\n num_boxes = num_boxes.data\r\n\r\n\r\n # feed image data to base model to obtain base feature map\r\n base_feat = self.RCNN_base(im_data)\r\n\r\n # feed base feature map tp RPN to obtain rois\r\n rois, rpn_loss_cls, rpn_loss_bbox = self.RCNN_rpn(base_feat, im_info, gt_boxes, num_boxes)\r\n\r\n # if it is training phrase, then use ground trubut bboxes for refining\r\n if self.training:\r\n roi_data = self.RCNN_proposal_target(rois, gt_boxes, num_boxes)\r\n rois, rois_label, rois_target, rois_inside_ws, rois_outside_ws = roi_data\r\n\r\n rois_label = Variable(rois_label.view(-1).long())\r\n rois_target = Variable(rois_target.view(-1, rois_target.size(2)))\r\n rois_inside_ws = Variable(rois_inside_ws.view(-1, rois_inside_ws.size(2)))\r\n rois_outside_ws = Variable(rois_outside_ws.view(-1, rois_outside_ws.size(2)))\r\n else:\r\n rois_label = None\r\n rois_target = None\r\n rois_inside_ws = None\r\n rois_outside_ws = None\r\n rpn_loss_cls = 0\r\n rpn_loss_bbox = 0\r\n\r\n rois = Variable(rois)\r\n # do roi pooling based on predicted rois\r\n\r\n if cfg.POOLING_MODE == 'crop':\r\n # pdb.set_trace()\r\n # pooled_feat_anchor = _crop_pool_layer(base_feat, rois.view(-1, 5))\r\n grid_xy = _affine_grid_gen(rois.view(-1, 5), base_feat.size()[2:], self.grid_size)\r\n grid_yx = torch.stack([grid_xy.data[:,:,:,1], grid_xy.data[:,:,:,0]], 3).contiguous()\r\n pooled_feat = self.RCNN_roi_crop(base_feat, Variable(grid_yx).detach())\r\n if cfg.CROP_RESIZE_WITH_MAX_POOL:\r\n pooled_feat = F.max_pool2d(pooled_feat, 2, 2)\r\n elif cfg.POOLING_MODE == 'align':\r\n pooled_feat = self.RCNN_roi_align(base_feat, rois.view(-1, 5))\r\n elif cfg.POOLING_MODE == 'pool':\r\n pooled_feat = self.RCNN_roi_pool(base_feat, rois.view(-1,5))\r\n\r\n # feed pooled features to top model\r\n pooled_feat = self._head_to_tail(pooled_feat)\r\n\r\n # compute bbox offset\r\n bbox_pred = 
self.RCNN_bbox_pred(pooled_feat)\r\n if self.training and not self.class_agnostic:\r\n # select the corresponding columns according to roi labels\r\n bbox_pred_view = bbox_pred.view(bbox_pred.size(0), int(bbox_pred.size(1) / 4), 4)\r\n bbox_pred_select = torch.gather(bbox_pred_view, 1, rois_label.view(rois_label.size(0), 1, 1).expand(rois_label.size(0), 1, 4))\r\n bbox_pred = bbox_pred_select.squeeze(1)\r\n\r\n # compute object classification probability\r\n cls_score = self.RCNN_cls_score(pooled_feat)\r\n cls_prob = F.softmax(cls_score, 1)\r\n\r\n RCNN_loss_cls = 0\r\n RCNN_loss_bbox = 0\r\n\r\n if self.training:\r\n # classification loss\r\n RCNN_loss_cls = F.cross_entropy(cls_score, rois_label)\r\n\r\n # bounding box regression L1 loss\r\n RCNN_loss_bbox = _smooth_l1_loss(bbox_pred, rois_target, rois_inside_ws, rois_outside_ws)\r\n\r\n\r\n cls_prob = cls_prob.view(batch_size, rois.size(1), -1)\r\n bbox_pred = bbox_pred.view(batch_size, rois.size(1), -1)\r\n\r\n return rois, cls_prob, bbox_pred, rpn_loss_cls, rpn_loss_bbox, RCNN_loss_cls, RCNN_loss_bbox, rois_label\r\n\r\n def _init_weights(self):\r\n def normal_init(m, mean, stddev, truncated=False):\r\n \"\"\"\r\n weight initalizer: truncated normal and random normal.\r\n \"\"\"\r\n # x is a parameter\r\n if truncated:\r\n m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation\r\n else:\r\n m.weight.data.normal_(mean, stddev)\r\n m.bias.data.zero_()\r\n\r\n normal_init(self.RCNN_rpn.RPN_Conv, 0, 0.01, cfg.TRAIN.TRUNCATED)\r\n normal_init(self.RCNN_rpn.RPN_cls_score, 0, 0.01, cfg.TRAIN.TRUNCATED)\r\n normal_init(self.RCNN_rpn.RPN_bbox_pred, 0, 0.01, cfg.TRAIN.TRUNCATED)\r\n normal_init(self.RCNN_cls_score, 0, 0.01, cfg.TRAIN.TRUNCATED)\r\n normal_init(self.RCNN_bbox_pred, 0, 0.001, cfg.TRAIN.TRUNCATED)\r\n\r\n def create_architecture(self):\r\n self._init_modules()\r\n self._init_weights()\r\n" ]
[ [ "torch.nn.functional.softmax", "torch.nn.functional.cross_entropy", "torch.stack", "torch.nn.functional.max_pool2d", "torch.autograd.Variable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yjc9696/cci_PPP
[ "69cbf059c2f2c2d0de9ecba6865202f7e5e09998" ]
[ "out/production/cci/datasets/tissue.py" ]
[ "import pandas as pd\nimport dgl\nfrom time import time\nimport torch\nfrom sklearn.decomposition import PCA\nimport numpy as np\nfrom torchlight import set_seed\n\n\ndef load_tissue(params=None):\n random_seed = params.random_seed\n dense_dim = params.dense_dim \n set_seed(random_seed)\n # 400 0.7895\n # 200 0.5117\n # 100 0.3203\n # 50 0.2083\n \"\"\"\n root = '../data/mammary_gland'\n num = 2915\n data_path = f'{root}/mouse_Mammary_gland{num}_data.csv'\n type_path = f'{root}/mouse_Mammary_gland{num}_celltype.csv'\n \"\"\"\n data_path = '../data/cell_cell_interaction/mouse_small_intestine_1189_data.csv'\n type_path = '../data/cell_cell_interaction/mouse_brain_2915_celltype.csv'\n\n # load celltype file then update labels accordingly\n cell2type = pd.read_csv(type_path, index_col=0)\n cell2type.columns = ['cell', 'type']\n\n id2label = cell2type['type'].drop_duplicates(keep='first').tolist()\n label2id = {label: idx for idx, label in enumerate(id2label)}\n print(f'{len(id2label)} classes in total')\n \n cell2type['id'] = cell2type['type'].map(label2id)\n assert not cell2type['id'].isnull().any(), 'something wrong about celltype file.'\n\n # load data file\n data = pd.read_csv(data_path, index_col=0)\n data = data.transpose(copy=True)\n assert cell2type['cell'].tolist() == data.index.tolist()\n print(f'{data.shape[0]} cells, {data.shape[1]} genes.')\n # genes\n id2gene = data.columns.tolist()\n gene2id = {gene: idx for idx, gene in enumerate(id2gene)}\n\n # construct graph and add nodes and edges\n graph = dgl.DGLGraph()\n start = time()\n # 1. add all genes as nodes\n num_genes = len(id2gene)\n graph.add_nodes(num_genes)\n # maintain a kind of sparse idx for Graph\n row_idx, col_idx = data.to_numpy().nonzero()\n row_idx = row_idx + num_genes\n # 2. add cell nodes and edges\n num_cells = data.shape[0]\n graph.add_nodes(num_cells)\n graph.add_edges(row_idx, col_idx)\n graph.add_edges(col_idx, row_idx)\n print(f'Added {num_cells} nodes and {len(row_idx)} edges.')\n print(f'#Nodes: {graph.number_of_nodes()}, #Edges: {graph.number_of_edges()}.')\n print(data.head())\n\n # reduce sparse features to dense features\n cell_pca = PCA(n_components=dense_dim, random_state=random_seed)\n cell_pca.fit(data.values)\n cell_feat = cell_pca.transform(data.values)\n cell_feat = torch.FloatTensor(cell_feat)\n\n gene_pca = PCA(n_components=dense_dim, random_state=random_seed)\n gene_pca.fit(data.T.values)\n gene_feat = gene_pca.transform(data.T.values)\n gene_feat = torch.FloatTensor(gene_feat)\n\n feat = torch.cat([gene_feat, cell_feat], dim=0)\n # feat = torch.zeros(graph.number_of_nodes(), dense_dim).normal_()\n\n cell_evr = sum(cell_pca.explained_variance_ratio_) * 100\n gene_evr = sum(gene_pca.explained_variance_ratio_) * 100\n print(f'[PCA] Cell EVR: {cell_evr:.2f}%. Gene EVR: {gene_evr:.2f} %.')\n # generate labels for training and testing\n labels = torch.LongTensor(cell2type['id'].tolist())\n train_mask = torch.zeros(num_cells, dtype=torch.bool)\n train_randidx = torch.randperm(num_cells)[:int(num_cells * 0.8)]\n # generate mask\n train_mask[train_randidx] = True\n test_mask = ~train_mask\n return num_cells, num_genes, graph, feat, labels, train_mask, test_mask\n\n\nif __name__=='__main__':\n load_tissue()\n" ]
[ [ "pandas.read_csv", "torch.zeros", "torch.cat", "torch.randperm", "torch.FloatTensor", "sklearn.decomposition.PCA" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
gozian2811/slic_multilevel
[ "af0b1132e055bb95512f11a28ee55ee51b2f3295" ]
[ "toolbox/View_CT.py" ]
[ "import numpy as np\nfrom CTViewer import view_CT\n\nfilename = \"nodule_cubes/train/npy_random/LKDS-00249_cc_0_random.npy\"\nvolume = np.load(filename)\nview_CT(volume)\n" ]
[ [ "numpy.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
makemebitter/Panorama-UCSD
[ "bdb89d00472e449318dae322eab42b0376d6e1f3" ]
[ "panorama/data/data_splitter.py" ]
[ "# Copyright 2019 Yuhao Zhang\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os\nfrom lxml import etree\n\nfrom shutil import copy\nfrom PIL import Image\nimport panorama.data.pathmagic # noqa\nfrom sklearn.model_selection import train_test_split\nfrom panorama.data.utilities import CONSTANTS\nfrom panorama.data.utilities import make_dirs\nfrom panorama.data.utilities import base_from_path\nfrom panorama.data.utilities import root\nfrom panorama.data.utilities import obj_to_xml\nfrom collections import defaultdict\nfrom collections import OrderedDict\n\n\nclass DataSplitter():\n splitter_name = None\n final_dir_names = None\n destination = None\n\n def __init__(self):\n raise NotImplementedError\n\n def detect_all(self):\n raise NotImplementedError\n\n def run(self):\n make_dirs(self.final_dir_names)\n image = Image.open(self.all_file_list[0])\n self.width, self.height = image.size\n self.detect_all()\n self.total_detected_list = sorted(self.filename_bbx_label_raw.keys())\n self.generate_annotations()\n self.split()\n self.move_all()\n self.get_class_count()\n self.write_summary()\n\n def generate_annotations(self):\n self.filename_annotation = {}\n self.filename_label = defaultdict(set)\n for img_path in self.filename_bbx_label_raw.keys():\n basename = os.path.basename(img_path)\n annotation = root(\n CONSTANTS.PASCAL,\n basename, self.height, self.width, self.splitter_name)\n for bb, predicted_class \\\n in self.filename_bbx_label_raw[img_path]:\n annotation.append(obj_to_xml(bb, predicted_class))\n self.filename_label[img_path].add(predicted_class)\n self.filename_annotation[img_path] = annotation\n\n def split(self):\n self.in_voc, self.out_voc = self.in_out_voc_split()\n self.trian_file_list, self.test_file_list = train_test_split(\n self.total_detected_list,\n test_size=0.20, shuffle=False,\n random_state=CONSTANTS.RANDOM_STATE)\n self.trian_file_list, self.val_file_list = train_test_split(\n self.trian_file_list,\n test_size=0.25, shuffle=False,\n random_state=CONSTANTS.RANDOM_STATE)\n self.all_lists = self.in_out_data_split()\n\n def in_out_voc_split(self):\n self.total_vocabulary = set.union(*self.filename_label.values())\n self.total_vocabulary = sorted(list(self.total_vocabulary))\n in_voc, out_voc = train_test_split(\n self.total_vocabulary,\n test_size=0.20, shuffle=False, random_state=CONSTANTS.RANDOM_STATE)\n return in_voc, out_voc\n\n def move_all(self):\n for i, file_list in enumerate(self.all_lists):\n out_ann_dir = self.final_dir_names[2 * i]\n out_jpg_dir = self.final_dir_names[2 * i + 1]\n for filename in file_list:\n annotation = self.filename_annotation[filename]\n base = base_from_path(filename)\n annot_filename = os.extsep.join([base, 'xml'])\n ann_output_path = os.path.join(out_ann_dir, annot_filename)\n copy(filename, out_jpg_dir)\n etree.ElementTree(annotation).write(ann_output_path)\n\n def in_out_data_split_helper(self, filename_list):\n 
in_voc_filename_list = []\n out_voc_filename_list = []\n for filename in filename_list:\n labels = self.filename_label[filename]\n if labels.intersection(self.out_voc):\n out_voc_filename_list.append(filename)\n else:\n in_voc_filename_list.append(filename)\n return in_voc_filename_list, out_voc_filename_list\n\n def in_out_data_split(self):\n all_lists = []\n for filename_list in [\n self.trian_file_list, self.val_file_list, self.test_file_list]:\n in_voc_filename_list, \\\n out_voc_filename_list = self.in_out_data_split_helper(\n filename_list)\n all_lists.append(in_voc_filename_list)\n all_lists.append(out_voc_filename_list)\n return all_lists\n\n def get_class_count(self):\n self.class_count = {}\n for k, vs in self.filename_bbx_label_raw.items():\n for _, class_name in vs:\n if class_name in self.class_count:\n self.class_count[class_name] += 1\n else:\n self.class_count[class_name] = 1\n sorted_dict_tuples = [(x, self.class_count[x])\n for x in sorted(self.class_count.keys())]\n self.class_count = OrderedDict(sorted_dict_tuples)\n\n def write_summary(self):\n summary_file = os.path.join(self.destination, 'summary.txt')\n total_count = len(self.total_detected_list)\n in_voc_train_count = len(self.all_lists[0])\n out_voc_train_count = len(self.all_lists[1])\n in_voc_val_count = len(self.all_lists[2])\n out_voc_val_count = len(self.all_lists[3])\n in_voc_test_count = len(self.all_lists[4])\n out_voc_test_count = len(self.all_lists[5])\n with open(summary_file, 'w+') as f:\n f.write(\"\"\"\n Total voc:{self.total_vocabulary}\\n\n In voc:{self.in_voc}\\n\n Out voc:{self.out_voc}\\n\n Total count:{total_count}\\n\n In voc train count:{in_voc_train_count}\\n\n Out voc train count:{out_voc_train_count}\\n\n In voc val count:{in_voc_val_count}\\n\n Out voc val count:{out_voc_val_count}\\n\n In voc test count:{in_voc_test_count}\\n\n Out voc test count:{out_voc_test_count}\\n\n Each class count:{self.class_count}\\n\n \"\"\".format(**locals()))\n" ]
[ [ "sklearn.model_selection.train_test_split" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Muktan/pandas
[ "ffa6e20d7dadd262d9035a647dffed9903fc5929" ]
[ "pandas/core/arrays/timedeltas.py" ]
[ "from __future__ import annotations\n\nfrom datetime import timedelta\nfrom typing import (\n List,\n Optional,\n Union,\n)\n\nimport numpy as np\n\nfrom pandas._libs import (\n lib,\n tslibs,\n)\nfrom pandas._libs.tslibs import (\n BaseOffset,\n NaT,\n NaTType,\n Period,\n Tick,\n Timedelta,\n Timestamp,\n iNaT,\n to_offset,\n)\nfrom pandas._libs.tslibs.conversion import (\n ensure_timedelta64ns,\n precision_from_unit,\n)\nfrom pandas._libs.tslibs.fields import get_timedelta_field\nfrom pandas._libs.tslibs.timedeltas import (\n array_to_timedelta64,\n ints_to_pytimedelta,\n parse_timedelta_unit,\n)\nfrom pandas._typing import NpDtype\nfrom pandas.compat.numpy import function as nv\n\nfrom pandas.core.dtypes.cast import astype_td64_unit_conversion\nfrom pandas.core.dtypes.common import (\n DT64NS_DTYPE,\n TD64NS_DTYPE,\n is_categorical_dtype,\n is_dtype_equal,\n is_float_dtype,\n is_integer_dtype,\n is_object_dtype,\n is_scalar,\n is_string_dtype,\n is_timedelta64_dtype,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.dtypes import DatetimeTZDtype\nfrom pandas.core.dtypes.generic import (\n ABCSeries,\n ABCTimedeltaIndex,\n)\nfrom pandas.core.dtypes.missing import isna\n\nfrom pandas.core import nanops\nfrom pandas.core.algorithms import checked_add_with_arr\nfrom pandas.core.arrays import (\n IntegerArray,\n datetimelike as dtl,\n)\nfrom pandas.core.arrays._ranges import generate_regular_range\nimport pandas.core.common as com\nfrom pandas.core.construction import extract_array\nfrom pandas.core.ops.common import unpack_zerodim_and_defer\n\n\ndef _field_accessor(name: str, alias: str, docstring: str):\n def f(self) -> np.ndarray:\n values = self.asi8\n result = get_timedelta_field(values, alias)\n if self._hasnans:\n result = self._maybe_mask_results(\n result, fill_value=None, convert=\"float64\"\n )\n\n return result\n\n f.__name__ = name\n f.__doc__ = f\"\\n{docstring}\\n\"\n return property(f)\n\n\nclass TimedeltaArray(dtl.TimelikeOps):\n \"\"\"\n Pandas ExtensionArray for timedelta data.\n\n .. versionadded:: 0.24.0\n\n .. warning::\n\n TimedeltaArray is currently experimental, and its API may change\n without warning. In particular, :attr:`TimedeltaArray.dtype` is\n expected to change to be an instance of an ``ExtensionDtype``\n subclass.\n\n Parameters\n ----------\n values : array-like\n The timedelta data.\n\n dtype : numpy.dtype\n Currently, only ``numpy.dtype(\"timedelta64[ns]\")`` is accepted.\n freq : Offset, optional\n copy : bool, default False\n Whether to copy the underlying array of data.\n\n Attributes\n ----------\n None\n\n Methods\n -------\n None\n \"\"\"\n\n _typ = \"timedeltaarray\"\n _scalar_type = Timedelta\n _recognized_scalars = (timedelta, np.timedelta64, Tick)\n _is_recognized_dtype = is_timedelta64_dtype\n _infer_matches = (\"timedelta\", \"timedelta64\")\n\n __array_priority__ = 1000\n # define my properties & methods for delegation\n _other_ops: List[str] = []\n _bool_ops: List[str] = []\n _object_ops = [\"freq\"]\n _field_ops = [\"days\", \"seconds\", \"microseconds\", \"nanoseconds\"]\n _datetimelike_ops = _field_ops + _object_ops + _bool_ops\n _datetimelike_methods = [\n \"to_pytimedelta\",\n \"total_seconds\",\n \"round\",\n \"floor\",\n \"ceil\",\n ]\n\n # Note: ndim must be defined to ensure NaT.__richcmp__(TimedeltaArray)\n # operates pointwise.\n\n def _box_func(self, x) -> Union[Timedelta, NaTType]:\n return Timedelta(x, unit=\"ns\")\n\n @property\n def dtype(self) -> np.dtype:\n \"\"\"\n The dtype for the TimedeltaArray.\n\n .. 
warning::\n\n A future version of pandas will change dtype to be an instance\n of a :class:`pandas.api.extensions.ExtensionDtype` subclass,\n not a ``numpy.dtype``.\n\n Returns\n -------\n numpy.dtype\n \"\"\"\n return TD64NS_DTYPE\n\n # ----------------------------------------------------------------\n # Constructors\n\n _freq = None\n\n def __init__(self, values, dtype=TD64NS_DTYPE, freq=lib.no_default, copy=False):\n values = extract_array(values)\n\n inferred_freq = getattr(values, \"_freq\", None)\n explicit_none = freq is None\n freq = freq if freq is not lib.no_default else None\n\n if isinstance(values, type(self)):\n if explicit_none:\n # dont inherit from values\n pass\n elif freq is None:\n freq = values.freq\n elif freq and values.freq:\n freq = to_offset(freq)\n freq, _ = dtl.validate_inferred_freq(freq, values.freq, False)\n values = values._ndarray\n\n if not isinstance(values, np.ndarray):\n msg = (\n f\"Unexpected type '{type(values).__name__}'. 'values' must be a \"\n \"TimedeltaArray ndarray, or Series or Index containing one of those.\"\n )\n raise ValueError(msg)\n if values.ndim not in [1, 2]:\n raise ValueError(\"Only 1-dimensional input arrays are supported.\")\n\n if values.dtype == \"i8\":\n # for compat with datetime/timedelta/period shared methods,\n # we can sometimes get here with int64 values. These represent\n # nanosecond UTC (or tz-naive) unix timestamps\n values = values.view(TD64NS_DTYPE)\n\n _validate_td64_dtype(values.dtype)\n dtype = _validate_td64_dtype(dtype)\n\n if freq == \"infer\":\n msg = (\n \"Frequency inference not allowed in TimedeltaArray.__init__. \"\n \"Use 'pd.array()' instead.\"\n )\n raise ValueError(msg)\n\n if copy:\n values = values.copy()\n if freq:\n freq = to_offset(freq)\n\n self._ndarray = values\n self._dtype = dtype\n self._freq = freq\n\n if inferred_freq is None and freq is not None:\n type(self)._validate_frequency(self, freq)\n\n @classmethod\n def _simple_new(\n cls, values, freq: Optional[BaseOffset] = None, dtype=TD64NS_DTYPE\n ) -> TimedeltaArray:\n assert dtype == TD64NS_DTYPE, dtype\n assert isinstance(values, np.ndarray), type(values)\n if values.dtype != TD64NS_DTYPE:\n assert values.dtype == \"i8\"\n values = values.view(TD64NS_DTYPE)\n\n result = object.__new__(cls)\n result._ndarray = values\n result._freq = to_offset(freq)\n result._dtype = TD64NS_DTYPE\n return result\n\n @classmethod\n def _from_sequence(\n cls, data, *, dtype=TD64NS_DTYPE, copy: bool = False\n ) -> TimedeltaArray:\n if dtype:\n _validate_td64_dtype(dtype)\n\n data, inferred_freq = sequence_to_td64ns(data, copy=copy, unit=None)\n freq, _ = dtl.validate_inferred_freq(None, inferred_freq, False)\n\n return cls._simple_new(data, freq=freq)\n\n @classmethod\n def _from_sequence_not_strict(\n cls,\n data,\n dtype=TD64NS_DTYPE,\n copy: bool = False,\n freq=lib.no_default,\n unit=None,\n ) -> TimedeltaArray:\n if dtype:\n _validate_td64_dtype(dtype)\n\n explicit_none = freq is None\n freq = freq if freq is not lib.no_default else None\n\n freq, freq_infer = dtl.maybe_infer_freq(freq)\n\n data, inferred_freq = sequence_to_td64ns(data, copy=copy, unit=unit)\n freq, freq_infer = dtl.validate_inferred_freq(freq, inferred_freq, freq_infer)\n if explicit_none:\n freq = None\n\n result = cls._simple_new(data, freq=freq)\n\n if inferred_freq is None and freq is not None:\n # this condition precludes `freq_infer`\n cls._validate_frequency(result, freq)\n\n elif freq_infer:\n # Set _freq directly to bypass duplicative _validate_frequency\n # check.\n 
result._freq = to_offset(result.inferred_freq)\n\n return result\n\n @classmethod\n def _generate_range(cls, start, end, periods, freq, closed=None):\n\n periods = dtl.validate_periods(periods)\n if freq is None and any(x is None for x in [periods, start, end]):\n raise ValueError(\"Must provide freq argument if no data is supplied\")\n\n if com.count_not_none(start, end, periods, freq) != 3:\n raise ValueError(\n \"Of the four parameters: start, end, periods, \"\n \"and freq, exactly three must be specified\"\n )\n\n if start is not None:\n start = Timedelta(start)\n\n if end is not None:\n end = Timedelta(end)\n\n left_closed, right_closed = dtl.validate_endpoints(closed)\n\n if freq is not None:\n index = generate_regular_range(start, end, periods, freq)\n else:\n index = np.linspace(start.value, end.value, periods).astype(\"i8\")\n\n if not left_closed:\n index = index[1:]\n if not right_closed:\n index = index[:-1]\n\n return cls._simple_new(index, freq=freq)\n\n # ----------------------------------------------------------------\n # DatetimeLike Interface\n\n def _unbox_scalar(self, value, setitem: bool = False) -> np.timedelta64:\n if not isinstance(value, self._scalar_type) and value is not NaT:\n raise ValueError(\"'value' should be a Timedelta.\")\n self._check_compatible_with(value, setitem=setitem)\n return np.timedelta64(value.value, \"ns\")\n\n def _scalar_from_string(self, value):\n return Timedelta(value)\n\n def _check_compatible_with(self, other, setitem: bool = False):\n # we don't have anything to validate.\n pass\n\n # ----------------------------------------------------------------\n # Array-Like / EA-Interface Methods\n\n def astype(self, dtype, copy: bool = True):\n # We handle\n # --> timedelta64[ns]\n # --> timedelta64\n # DatetimeLikeArrayMixin super call handles other cases\n dtype = pandas_dtype(dtype)\n\n if dtype.kind == \"m\":\n return astype_td64_unit_conversion(self._ndarray, dtype, copy=copy)\n\n return dtl.DatetimeLikeArrayMixin.astype(self, dtype, copy=copy)\n\n def __iter__(self):\n if self.ndim > 1:\n for i in range(len(self)):\n yield self[i]\n else:\n # convert in chunks of 10k for efficiency\n data = self.asi8\n length = len(self)\n chunksize = 10000\n chunks = (length // chunksize) + 1\n for i in range(chunks):\n start_i = i * chunksize\n end_i = min((i + 1) * chunksize, length)\n converted = ints_to_pytimedelta(data[start_i:end_i], box=True)\n yield from converted\n\n # ----------------------------------------------------------------\n # Reductions\n\n def sum(\n self,\n *,\n axis=None,\n dtype: Optional[NpDtype] = None,\n out=None,\n keepdims: bool = False,\n initial=None,\n skipna: bool = True,\n min_count: int = 0,\n ):\n nv.validate_sum(\n (), {\"dtype\": dtype, \"out\": out, \"keepdims\": keepdims, \"initial\": initial}\n )\n\n result = nanops.nansum(\n self._ndarray, axis=axis, skipna=skipna, min_count=min_count\n )\n return self._wrap_reduction_result(axis, result)\n\n def std(\n self,\n *,\n axis=None,\n dtype: Optional[NpDtype] = None,\n out=None,\n ddof: int = 1,\n keepdims: bool = False,\n skipna: bool = True,\n ):\n nv.validate_stat_ddof_func(\n (), {\"dtype\": dtype, \"out\": out, \"keepdims\": keepdims}, fname=\"std\"\n )\n\n result = nanops.nanstd(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)\n if axis is None or self.ndim == 1:\n return self._box_func(result)\n return self._from_backing_data(result)\n\n # ----------------------------------------------------------------\n # Rendering Methods\n\n def _formatter(self, 
boxed=False):\n from pandas.io.formats.format import get_format_timedelta64\n\n return get_format_timedelta64(self, box=True)\n\n @dtl.ravel_compat\n def _format_native_types(self, na_rep=\"NaT\", date_format=None, **kwargs):\n from pandas.io.formats.format import get_format_timedelta64\n\n formatter = get_format_timedelta64(self._ndarray, na_rep)\n return np.array([formatter(x) for x in self._ndarray])\n\n # ----------------------------------------------------------------\n # Arithmetic Methods\n\n def _add_offset(self, other):\n assert not isinstance(other, Tick)\n raise TypeError(\n f\"cannot add the type {type(other).__name__} to a {type(self).__name__}\"\n )\n\n def _add_period(self, other: Period):\n \"\"\"\n Add a Period object.\n \"\"\"\n # We will wrap in a PeriodArray and defer to the reversed operation\n from pandas.core.arrays.period import PeriodArray\n\n i8vals = np.broadcast_to(other.ordinal, self.shape)\n oth = PeriodArray(i8vals, freq=other.freq)\n return oth + self\n\n def _add_datetime_arraylike(self, other):\n \"\"\"\n Add DatetimeArray/Index or ndarray[datetime64] to TimedeltaArray.\n \"\"\"\n if isinstance(other, np.ndarray):\n # At this point we have already checked that dtype is datetime64\n from pandas.core.arrays import DatetimeArray\n\n other = DatetimeArray(other)\n\n # defer to implementation in DatetimeArray\n return other + self\n\n def _add_datetimelike_scalar(self, other):\n # adding a timedeltaindex to a datetimelike\n from pandas.core.arrays import DatetimeArray\n\n assert other is not NaT\n other = Timestamp(other)\n if other is NaT:\n # In this case we specifically interpret NaT as a datetime, not\n # the timedelta interpretation we would get by returning self + NaT\n result = self.asi8.view(\"m8[ms]\") + NaT.to_datetime64()\n return DatetimeArray(result)\n\n i8 = self.asi8\n result = checked_add_with_arr(i8, other.value, arr_mask=self._isnan)\n result = self._maybe_mask_results(result)\n dtype = DatetimeTZDtype(tz=other.tz) if other.tz else DT64NS_DTYPE\n return DatetimeArray(result, dtype=dtype, freq=self.freq)\n\n def _addsub_object_array(self, other, op):\n # Add or subtract Array-like of objects\n try:\n # TimedeltaIndex can only operate with a subset of DateOffset\n # subclasses. 
Incompatible classes will raise AttributeError,\n # which we re-raise as TypeError\n return super()._addsub_object_array(other, op)\n except AttributeError as err:\n raise TypeError(\n f\"Cannot add/subtract non-tick DateOffset to {type(self).__name__}\"\n ) from err\n\n @unpack_zerodim_and_defer(\"__mul__\")\n def __mul__(self, other) -> TimedeltaArray:\n if is_scalar(other):\n # numpy will accept float and int, raise TypeError for others\n result = self._ndarray * other\n freq = None\n if self.freq is not None and not isna(other):\n freq = self.freq * other\n return type(self)(result, freq=freq)\n\n if not hasattr(other, \"dtype\"):\n # list, tuple\n other = np.array(other)\n if len(other) != len(self) and not is_timedelta64_dtype(other.dtype):\n # Exclude timedelta64 here so we correctly raise TypeError\n # for that instead of ValueError\n raise ValueError(\"Cannot multiply with unequal lengths\")\n\n if is_object_dtype(other.dtype):\n # this multiplication will succeed only if all elements of other\n # are int or float scalars, so we will end up with\n # timedelta64[ns]-dtyped result\n result = [self[n] * other[n] for n in range(len(self))]\n result = np.array(result)\n return type(self)(result)\n\n # numpy will accept float or int dtype, raise TypeError for others\n result = self._ndarray * other\n return type(self)(result)\n\n __rmul__ = __mul__\n\n @unpack_zerodim_and_defer(\"__truediv__\")\n def __truediv__(self, other):\n # timedelta / X is well-defined for timedelta-like or numeric X\n\n if isinstance(other, self._recognized_scalars):\n other = Timedelta(other)\n if other is NaT:\n # specifically timedelta64-NaT\n result = np.empty(self.shape, dtype=np.float64)\n result.fill(np.nan)\n return result\n\n # otherwise, dispatch to Timedelta implementation\n return self._ndarray / other\n\n elif lib.is_scalar(other):\n # assume it is numeric\n result = self._ndarray / other\n freq = None\n if self.freq is not None:\n # Tick division is not implemented, so operate on Timedelta\n freq = self.freq.delta / other\n return type(self)(result, freq=freq)\n\n if not hasattr(other, \"dtype\"):\n # e.g. 
list, tuple\n other = np.array(other)\n\n if len(other) != len(self):\n raise ValueError(\"Cannot divide vectors with unequal lengths\")\n\n elif is_timedelta64_dtype(other.dtype):\n # let numpy handle it\n return self._ndarray / other\n\n elif is_object_dtype(other.dtype):\n # We operate on raveled arrays to avoid problems in inference\n # on NaT\n srav = self.ravel()\n orav = other.ravel()\n result = [srav[n] / orav[n] for n in range(len(srav))]\n result = np.array(result).reshape(self.shape)\n\n # We need to do dtype inference in order to keep DataFrame ops\n # behavior consistent with Series behavior\n inferred = lib.infer_dtype(result)\n if inferred == \"timedelta\":\n flat = result.ravel()\n result = type(self)._from_sequence(flat).reshape(result.shape)\n elif inferred == \"floating\":\n result = result.astype(float)\n\n return result\n\n else:\n result = self._ndarray / other\n return type(self)(result)\n\n @unpack_zerodim_and_defer(\"__rtruediv__\")\n def __rtruediv__(self, other):\n # X / timedelta is defined only for timedelta-like X\n if isinstance(other, self._recognized_scalars):\n other = Timedelta(other)\n if other is NaT:\n # specifically timedelta64-NaT\n result = np.empty(self.shape, dtype=np.float64)\n result.fill(np.nan)\n return result\n\n # otherwise, dispatch to Timedelta implementation\n return other / self._ndarray\n\n elif lib.is_scalar(other):\n raise TypeError(\n f\"Cannot divide {type(other).__name__} by {type(self).__name__}\"\n )\n\n if not hasattr(other, \"dtype\"):\n # e.g. list, tuple\n other = np.array(other)\n\n if len(other) != len(self):\n raise ValueError(\"Cannot divide vectors with unequal lengths\")\n\n elif is_timedelta64_dtype(other.dtype):\n # let numpy handle it\n return other / self._ndarray\n\n elif is_object_dtype(other.dtype):\n # Note: unlike in __truediv__, we do not _need_ to do type\n # inference on the result. It does not raise, a numeric array\n # is returned. GH#23829\n result = [other[n] / self[n] for n in range(len(self))]\n return np.array(result)\n\n else:\n raise TypeError(\n f\"Cannot divide {other.dtype} data by {type(self).__name__}\"\n )\n\n @unpack_zerodim_and_defer(\"__floordiv__\")\n def __floordiv__(self, other):\n\n if is_scalar(other):\n if isinstance(other, self._recognized_scalars):\n other = Timedelta(other)\n if other is NaT:\n # treat this specifically as timedelta-NaT\n result = np.empty(self.shape, dtype=np.float64)\n result.fill(np.nan)\n return result\n\n # dispatch to Timedelta implementation\n result = other.__rfloordiv__(self._ndarray)\n return result\n\n # at this point we should only have numeric scalars; anything\n # else will raise\n result = self.asi8 // other\n np.putmask(result, self._isnan, iNaT)\n freq = None\n if self.freq is not None:\n # Note: freq gets division, not floor-division\n freq = self.freq / other\n if freq.nanos == 0 and self.freq.nanos != 0:\n # e.g. if self.freq is Nano(1) then dividing by 2\n # rounds down to zero\n freq = None\n return type(self)(result.view(\"m8[ns]\"), freq=freq)\n\n if not hasattr(other, \"dtype\"):\n # list, tuple\n other = np.array(other)\n if len(other) != len(self):\n raise ValueError(\"Cannot divide with unequal lengths\")\n\n elif is_timedelta64_dtype(other.dtype):\n other = type(self)(other)\n\n # numpy timedelta64 does not natively support floordiv, so operate\n # on the i8 values\n result = self.asi8 // other.asi8\n mask = self._isnan | other._isnan\n if mask.any():\n result = result.astype(np.float64)\n np.putmask(result, mask, np.nan)\n return result\n\n elif is_object_dtype(other.dtype):\n result = [self[n] // other[n] for n in range(len(self))]\n result = np.array(result)\n if lib.infer_dtype(result, skipna=False) == \"timedelta\":\n result, _ = sequence_to_td64ns(result)\n return type(self)(result)\n return result\n\n elif is_integer_dtype(other.dtype) or is_float_dtype(other.dtype):\n result = self._ndarray // other\n return type(self)(result)\n\n else:\n dtype = getattr(other, \"dtype\", type(other).__name__)\n raise TypeError(f\"Cannot divide {dtype} by {type(self).__name__}\")\n\n @unpack_zerodim_and_defer(\"__rfloordiv__\")\n def __rfloordiv__(self, other):\n\n if is_scalar(other):\n if isinstance(other, self._recognized_scalars):\n other = Timedelta(other)\n if other is NaT:\n # treat this specifically as timedelta-NaT\n result = np.empty(self.shape, dtype=np.float64)\n result.fill(np.nan)\n return result\n\n # dispatch to Timedelta implementation\n result = other.__floordiv__(self._ndarray)\n return result\n\n raise TypeError(\n f\"Cannot divide {type(other).__name__} by {type(self).__name__}\"\n )\n\n if not hasattr(other, \"dtype\"):\n # list, tuple\n other = np.array(other)\n\n if len(other) != len(self):\n raise ValueError(\"Cannot divide with unequal lengths\")\n\n elif is_timedelta64_dtype(other.dtype):\n other = type(self)(other)\n # numpy timedelta64 does not natively support floordiv, so operate\n # on the i8 values\n result = other.asi8 // self.asi8\n mask = self._isnan | other._isnan\n if mask.any():\n result = result.astype(np.float64)\n np.putmask(result, mask, np.nan)\n return result\n\n elif is_object_dtype(other.dtype):\n result = [other[n] // self[n] for n in range(len(self))]\n result = np.array(result)\n return result\n\n else:\n dtype = getattr(other, \"dtype\", type(other).__name__)\n raise TypeError(f\"Cannot divide {dtype} by {type(self).__name__}\")\n\n @unpack_zerodim_and_defer(\"__mod__\")\n def __mod__(self, other):\n # Note: This is a naive implementation, can likely be optimized\n if isinstance(other, self._recognized_scalars):\n other = Timedelta(other)\n return self - (self // other) * other\n\n @unpack_zerodim_and_defer(\"__rmod__\")\n def __rmod__(self, other):\n # Note: This is a naive implementation, can likely be optimized\n if isinstance(other, self._recognized_scalars):\n other = Timedelta(other)\n return other - (other // self) * self\n\n @unpack_zerodim_and_defer(\"__divmod__\")\n def __divmod__(self, other):\n # Note: This is a naive implementation, can likely be optimized\n if isinstance(other, self._recognized_scalars):\n other = Timedelta(other)\n\n res1 = self // other\n res2 = self - res1 * other\n return res1, res2\n\n @unpack_zerodim_and_defer(\"__rdivmod__\")\n def __rdivmod__(self, other):\n # Note: This is a naive implementation, can likely be optimized\n if isinstance(other, self._recognized_scalars):\n other = Timedelta(other)\n\n res1 = other // self\n res2 = other - res1 * self\n return res1, res2\n\n def __neg__(self) -> TimedeltaArray:\n if self.freq is not None:\n return type(self)(-self._ndarray, freq=-self.freq)\n return type(self)(-self._ndarray)\n\n def __pos__(self) -> TimedeltaArray:\n return type(self)(self._ndarray, freq=self.freq)\n\n def __abs__(self) -> TimedeltaArray:\n # Note: freq is not preserved\n return type(self)(np.abs(self._ndarray))\n\n # ----------------------------------------------------------------\n # Conversion Methods - Vectorized analogues of Timedelta methods\n\n def total_seconds(self) -> np.ndarray:\n \"\"\"\n Return total duration of each element expressed in seconds.\n\n This method is available directly on TimedeltaArray, TimedeltaIndex\n and on Series containing timedelta values under the ``.dt`` namespace.\n\n Returns\n -------\n seconds : [ndarray, Float64Index, Series]\n When the calling object is a TimedeltaArray, the return type\n is ndarray. When the calling object is a TimedeltaIndex,\n the return type is a Float64Index. When the calling object\n is a Series, the return type is Series of type `float64` whose\n index is the same as the original.\n\n See Also\n --------\n datetime.timedelta.total_seconds : Standard library version\n of this method.\n TimedeltaIndex.components : Return a DataFrame with components of\n each Timedelta.\n\n Examples\n --------\n **Series**\n\n >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='d'))\n >>> s\n 0 0 days\n 1 1 days\n 2 2 days\n 3 3 days\n 4 4 days\n dtype: timedelta64[ns]\n\n >>> s.dt.total_seconds()\n 0 0.0\n 1 86400.0\n 2 172800.0\n 3 259200.0\n 4 345600.0\n dtype: float64\n\n **TimedeltaIndex**\n\n >>> idx = pd.to_timedelta(np.arange(5), unit='d')\n >>> idx\n TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],\n dtype='timedelta64[ns]', freq=None)\n\n >>> idx.total_seconds()\n Float64Index([0.0, 86400.0, 172800.0, 259200.00000000003, 345600.0],\n dtype='float64')\n \"\"\"\n return self._maybe_mask_results(1e-9 * self.asi8, fill_value=None)\n\n def to_pytimedelta(self) -> np.ndarray:\n \"\"\"\n Return Timedelta Array/Index as object ndarray of datetime.timedelta\n objects.\n\n Returns\n -------\n timedeltas : ndarray\n \"\"\"\n return tslibs.ints_to_pytimedelta(self.asi8)\n\n days = _field_accessor(\"days\", \"days\", \"Number of days for each element.\")\n seconds = _field_accessor(\n \"seconds\",\n \"seconds\",\n \"Number of seconds (>= 0 and less than 1 day) for each element.\",\n )\n microseconds = _field_accessor(\n \"microseconds\",\n \"microseconds\",\n \"Number of microseconds (>= 0 and less than 1 second) for each element.\",\n )\n nanoseconds = _field_accessor(\n \"nanoseconds\",\n \"nanoseconds\",\n \"Number of nanoseconds (>= 0 and less than 1 microsecond) for each element.\",\n )\n\n @property\n def components(self):\n \"\"\"\n Return a dataframe of the components (days, hours, minutes,\n seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas.\n\n Returns\n -------\n a DataFrame\n \"\"\"\n from pandas import DataFrame\n\n columns = [\n \"days\",\n \"hours\",\n \"minutes\",\n \"seconds\",\n \"milliseconds\",\n \"microseconds\",\n \"nanoseconds\",\n ]\n hasnans = self._hasnans\n if hasnans:\n\n def f(x):\n if isna(x):\n return [np.nan] * len(columns)\n return x.components\n\n else:\n\n def f(x):\n return x.components\n\n result = DataFrame([f(x) for x in self], columns=columns)\n if not hasnans:\n result = result.astype(\"int64\")\n return result\n\n\n# ---------------------------------------------------------------------\n# Constructor Helpers\n\n\ndef sequence_to_td64ns(data, copy=False, unit=None, errors=\"raise\"):\n \"\"\"\n Parameters\n ----------\n data : list-like\n copy : bool, default False\n unit : str, optional\n The timedelta unit to treat integers as multiples of. For numeric\n data this defaults to ``'ns'``.\n Must be unspecified if the data contains a str and ``errors==\"raise\"``.\n errors : {\"raise\", \"coerce\", \"ignore\"}, default \"raise\"\n How to handle elements that cannot be converted to timedelta64[ns].\n See ``pandas.to_timedelta`` for details.\n\n Returns\n -------\n converted : numpy.ndarray\n The sequence converted to a numpy array with dtype ``timedelta64[ns]``.\n inferred_freq : Tick or None\n The inferred frequency of the sequence.\n\n Raises\n ------\n ValueError : Data cannot be converted to timedelta64[ns].\n\n Notes\n -----\n Unlike `pandas.to_timedelta`, setting ``errors=ignore`` will not cause\n errors to be ignored; they are caught and subsequently ignored at a\n higher level.\n \"\"\"\n inferred_freq = None\n if unit is not None:\n unit = parse_timedelta_unit(unit)\n\n # Unwrap whatever we have into a np.ndarray\n if not hasattr(data, \"dtype\"):\n # e.g. list, tuple\n if np.ndim(data) == 0:\n # i.e. generator\n data = list(data)\n data = np.array(data, copy=False)\n elif isinstance(data, ABCSeries):\n data = data._values\n elif isinstance(data, ABCTimedeltaIndex):\n inferred_freq = data.freq\n data = data._data._ndarray\n elif isinstance(data, TimedeltaArray):\n inferred_freq = data.freq\n data = data._ndarray\n elif isinstance(data, IntegerArray):\n data = data.to_numpy(\"int64\", na_value=tslibs.iNaT)\n elif is_categorical_dtype(data.dtype):\n data = data.categories.take(data.codes, fill_value=NaT)._values\n copy = False\n\n # Convert whatever we have into timedelta64[ns] dtype\n if is_object_dtype(data.dtype) or is_string_dtype(data.dtype):\n # no need to make a copy, need to convert if string-dtyped\n data = objects_to_td64ns(data, unit=unit, errors=errors)\n copy = False\n\n elif is_integer_dtype(data.dtype):\n # treat as multiples of the given unit\n data, copy_made = ints_to_td64ns(data, unit=unit)\n copy = copy and not copy_made\n\n elif is_float_dtype(data.dtype):\n # cast the unit, multiply base/frac separately\n # to avoid precision issues from float -> int\n mask = np.isnan(data)\n m, p = precision_from_unit(unit or \"ns\")\n base = data.astype(np.int64)\n frac = data - base\n if p:\n frac = np.round(frac, p)\n data = (base * m + (frac * m).astype(np.int64)).view(\"timedelta64[ns]\")\n data[mask] = iNaT\n copy = False\n\n elif is_timedelta64_dtype(data.dtype):\n if data.dtype != TD64NS_DTYPE:\n # non-nano unit\n data = ensure_timedelta64ns(data)\n copy = False\n\n else:\n # This includes datetime64-dtype, see GH#23539, GH#29794\n raise TypeError(f\"dtype {data.dtype} cannot be converted to timedelta64[ns]\")\n\n data = np.array(data, copy=copy)\n\n assert data.dtype == \"m8[ns]\", data\n return data, inferred_freq\n\n\ndef ints_to_td64ns(data, unit=\"ns\"):\n \"\"\"\n Convert an ndarray with integer-dtype to timedelta64[ns] dtype, treating\n the integers as multiples of the given timedelta unit.\n\n Parameters\n ----------\n data : numpy.ndarray with integer-dtype\n unit : str, default \"ns\"\n The timedelta unit to treat integers as multiples of.\n\n Returns\n -------\n numpy.ndarray : timedelta64[ns] array converted from data\n bool : whether a copy was made\n \"\"\"\n copy_made = False\n unit = unit if unit is not None else \"ns\"\n\n if data.dtype != np.int64:\n # converting to int64 makes a copy, so we can avoid\n # re-copying later\n data = data.astype(np.int64)\n copy_made = True\n\n if unit != \"ns\":\n dtype_str = f\"timedelta64[{unit}]\"\n data = data.view(dtype_str)\n\n data = ensure_timedelta64ns(data)\n\n # the astype conversion makes a copy, so we can avoid re-copying later\n copy_made = True\n\n else:\n data = data.view(\"timedelta64[ns]\")\n\n return data, copy_made\n\n\ndef objects_to_td64ns(data, unit=None, errors=\"raise\"):\n \"\"\"\n Convert an object-dtyped or string-dtyped array into a\n timedelta64[ns]-dtyped array.\n\n Parameters\n ----------\n data : ndarray or Index\n unit : str, default \"ns\"\n The timedelta unit to treat integers as multiples of.\n Must not be specified if the data contains a str.\n errors : {\"raise\", \"coerce\", \"ignore\"}, default \"raise\"\n How to handle elements that cannot be converted to timedelta64[ns].\n See ``pandas.to_timedelta`` for details.\n\n Returns\n -------\n numpy.ndarray : timedelta64[ns] array converted from data\n\n Raises\n ------\n ValueError : Data cannot be converted to timedelta64[ns].\n\n Notes\n -----\n Unlike `pandas.to_timedelta`, setting `errors=ignore` will not cause\n errors to be ignored; they are caught and subsequently ignored at a\n higher level.\n \"\"\"\n # coerce Index to np.ndarray, converting string-dtype if necessary\n values = np.array(data, dtype=np.object_, copy=False)\n\n result = array_to_timedelta64(values, unit=unit, errors=errors)\n return result.view(\"timedelta64[ns]\")\n\n\ndef _validate_td64_dtype(dtype):\n dtype = pandas_dtype(dtype)\n if is_dtype_equal(dtype, np.dtype(\"timedelta64\")):\n # no precision disallowed GH#24806\n msg = (\n \"Passing in 'timedelta' dtype with no precision is not allowed. \"\n \"Please pass in 'timedelta64[ns]' instead.\"\n )\n raise ValueError(msg)\n\n if not is_dtype_equal(dtype, TD64NS_DTYPE):\n raise ValueError(f\"dtype {dtype} cannot be converted to timedelta64[ns]\")\n\n return dtype\n" ]
[ [ "pandas.core.arrays.datetimelike.DatetimeLikeArrayMixin.astype", "pandas._libs.tslibs.Timestamp", "numpy.linspace", "pandas.core.dtypes.common.is_dtype_equal", "pandas.core.dtypes.dtypes.DatetimeTZDtype", "pandas._libs.lib.is_scalar", "numpy.dtype", "numpy.round", "pandas.core.ops.common.unpack_zerodim_and_defer", "pandas.core.arrays.datetimelike.validate_inferred_freq", "pandas.core.nanops.nanstd", "pandas.core.nanops.nansum", "pandas._libs.tslibs.to_offset", "pandas.io.formats.format.get_format_timedelta64", "pandas._libs.tslibs.timedeltas.parse_timedelta_unit", "pandas.core.arrays._ranges.generate_regular_range", "pandas.core.arrays.period.PeriodArray", "pandas._libs.tslibs.conversion.ensure_timedelta64ns", "pandas.core.dtypes.common.is_float_dtype", "pandas.core.dtypes.common.is_string_dtype", "pandas.core.dtypes.cast.astype_td64_unit_conversion", "pandas.core.arrays.datetimelike.maybe_infer_freq", "pandas.core.dtypes.common.is_categorical_dtype", "pandas.core.dtypes.common.is_integer_dtype", "numpy.putmask", "pandas.core.dtypes.common.pandas_dtype", "pandas.core.algorithms.checked_add_with_arr", "pandas.core.dtypes.common.is_timedelta64_dtype", "pandas.core.arrays.datetimelike.validate_endpoints", "numpy.isnan", "pandas.core.arrays.DatetimeArray", "numpy.timedelta64", "numpy.ndim", "pandas._libs.tslibs.timedeltas.ints_to_pytimedelta", "pandas._libs.tslibs.ints_to_pytimedelta", "numpy.array", "pandas.compat.numpy.function.validate_sum", "pandas._libs.tslibs.Timedelta", "pandas._libs.tslibs.timedeltas.array_to_timedelta64", "pandas.core.common.count_not_none", "numpy.abs", "pandas.core.dtypes.common.is_scalar", "pandas._libs.tslibs.fields.get_timedelta_field", "pandas.core.construction.extract_array", "pandas.core.dtypes.common.is_object_dtype", "pandas._libs.tslibs.conversion.precision_from_unit", "numpy.broadcast_to", "pandas._libs.tslibs.NaT.to_datetime64", "pandas.core.dtypes.missing.isna", "pandas._libs.lib.infer_dtype", "pandas.core.arrays.datetimelike.validate_periods", "numpy.empty", "pandas.compat.numpy.function.validate_stat_ddof_func" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
idc9/mvlearn
[ "c9d5cd10ac34e0f901a4b0b8804397f2c0d75401" ]
[ "mvlearn/model_selection/validation.py" ]
[ "\"\"\"Validation utils.\"\"\"\n# Copyright 2019 NeuroData (http://neurodata.io)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# Authors: Pierre Ablin\nimport numpy as np\n\nfrom sklearn.model_selection import cross_validate as sk_cross_validate\nfrom sklearn.pipeline import Pipeline\n\nfrom ..utils import check_Xs\nfrom ..compose import SimpleSplitter\n\n\ndef cross_validate(estimator, Xs, y, *args, **kwargs):\n \"\"\"Evaluate metric(s) by cross-validation and also record fit/score times.\n\n Works on multiview data, by wrapping\n `sklearn.model_selection.cross_validate`.\n\n Parameters\n ----------\n estimator : estimator object implementing 'fit'\n The object to use to fit the data.\n\n Xs : list of array-likes\n - Xs shape: (n_views,)\n - Xs[i] shape: (n_samples, n_features_i)\n The multiview data to fit\n\n y : array-like of shape (n_samples,) or (n_samples, n_outputs), \\\n default=None\n The target variable to try to predict in the case of\n supervised learning.\n\n args : any\n Additional arguments passed to `sklearn.model_selection.cross_validate`\n\n kwargs : any\n Additional named arguments to `sklearn.model_selection.cross_validate`\n\n Returns\n -------\n scores : dict of float arrays of shape (n_splits,)\n The output of `sklearn.model_selection.cross_validate`.\n \"\"\"\n X_transformed, _, _, n_features = check_Xs(\n Xs, copy=True, return_dimensions=True\n )\n pipeline = Pipeline([('splitter', SimpleSplitter(n_features)),\n ('estimator', estimator)])\n return sk_cross_validate(pipeline, np.hstack(Xs), y, *args, **kwargs)\n" ]
[ [ "numpy.hstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vinzmc/AutoEq
[ "e6b1648ea09ae3eade14f92c6f9d5afd87e400ac" ]
[ "measurements/referenceaudioanalyzer/reference_audio_analyzer_crawler.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport numpy as np\nimport pandas as pd\nimport re\nimport matplotlib.pyplot as plt\nfrom PIL import Image, ImageDraw\nimport colorsys\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n\nfrom measurements.name_prompt import NamePrompt\n\nsys.path.insert(1, os.path.realpath(os.path.join(sys.path[0], os.pardir, os.pardir)))\nfrom measurements.name_index import NameIndex, NameItem\nfrom measurements.crawler import Crawler\nfrom frequency_response import FrequencyResponse\n\nDIR_PATH = os.path.abspath(os.path.join(__file__, os.pardir))\n\n\nclass ReferenceAudioAnalyzerCrawler(Crawler):\n pro_report_regex = re.compile(r'^pro report$', re.I)\n performed_on_stand_regex = re.compile(r'^Measurements were performed on the stands?:$')\n\n def __init__(self, driver=None):\n if driver is None:\n opts = Options()\n opts.add_argument('--headless')\n driver = webdriver.Chrome(os.path.abspath(os.path.join(DIR_PATH, '..', 'chromedriver')), options=opts)\n super().__init__(driver=driver)\n\n @staticmethod\n def read_name_index():\n return NameIndex.read_tsv(os.path.join(DIR_PATH, 'name_index.tsv'))\n\n def write_name_index(self):\n self.name_index.write_tsv(os.path.join(DIR_PATH, 'name_index.tsv'))\n\n @staticmethod\n def get_existing():\n suffix_regex = re.compile(r' \\(.*\\)$')\n name_index = NameIndex.read_files(os.path.join(DIR_PATH, 'data', '**', '*.csv'))\n # Add models without mod suffixes which don't exist\n for i, row in name_index.df.iterrows():\n model = re.sub(suffix_regex, '', row.true_name)\n if model != row.true_name:\n name_index.df = name_index.df.append(pd.DataFrame(\n [[row.false_name, model, row.form]],\n columns=name_index.df.columns\n ))\n name_index.df.drop_duplicates()\n return name_index\n\n def get_urls(self):\n if self.driver is None:\n raise TypeError('self.driver cannot be None')\n\n document = self.get_beautiful_soup('https://reference-audio-analyzer.pro/en/catalog-reports.php?sp_1=1&tp=1')\n anchors = document.find(name='article', attrs={'class': 'ar3'}).find_all('a')\n urls = {}\n for anchor in anchors:\n if not anchor.has_attr('href'):\n continue\n urls[anchor.text] = f'https://reference-audio-analyzer.pro{anchor[\"href\"]}'\n return urls\n\n @staticmethod\n def find_curve(im, inspection, min_hue, max_hue, min_saturation, max_saturation, a_max, a_res):\n amplitude = []\n # Iterate each column\n for x in range(im.size[0]):\n pxs = [] # Graph pixels\n # Iterate each row (pixel in column)\n for y in range(im.size[1]):\n # Read RGB pixel values and convert them to HSV\n h, s, v = colorsys.rgb_to_hsv(*[v / 255.0 for v in im.getpixel((x, y))][:3])\n # Graph pixels are colored\n if min_saturation < s < max_saturation and min_hue / 360 < h < max_hue / 360:\n pxs.append(float(y))\n else:\n p = im.getpixel((x, y))\n inspection[x, y] = (int(0.3 * p[0]), int(255 * 0.7 + 0.3 * p[1]), int(0 + 0.3 * p[2]))\n if not pxs:\n # No graph pixels found on this column\n amplitude.append(None)\n else:\n # Mean of recorded pixels\n v = np.mean(pxs)\n # Convert to dB value\n v = a_max - v * a_res\n amplitude.append(v)\n return amplitude, im, inspection\n\n @staticmethod\n def parse_image(im, model):\n \"\"\"Parses graph image downloaded from reference-audio-analyzer.pro\"\"\"\n # Crop by left and right edges\n box = (69, 31, 550, 290)\n im = im.crop(box)\n\n px_a_max = 0\n px_a_min = im.size[1]\n # im.show()\n\n # X axis\n f_min = 20\n f_max = 20000\n f_step = (f_max / f_min) ** (1 / im.size[0])\n f = [f_min]\n for _ in range(1, im.size[0]):\n f.append(f[-1] * f_step)\n\n # Y axis\n a_max = 150\n a_min = 66\n a_res = (a_max - a_min) / (px_a_min - px_a_max)\n\n # Try blue curve\n _im = im.copy()\n inspection = _im.load()\n amplitude, _im, _inspection = ReferenceAudioAnalyzerCrawler.find_curve(\n _im, inspection, 203, 206, 0.8, 1.0, a_max, a_res\n )\n if len([x for x in amplitude if x is None]) >= 0.5 * len(amplitude):\n # More than half of the pixels were discarded, try green curve\n _im = im.copy()\n inspection = _im.load()\n amplitude, _im, _inspection = ReferenceAudioAnalyzerCrawler.find_curve(\n _im, inspection, 119, 121, 0.8, 1.0, a_max, a_res\n )\n\n # Inspection image\n draw = ImageDraw.Draw(_im)\n x0 = np.log(30 / f_min) / np.log(f_step)\n x1 = np.log(10000 / f_min) / np.log(f_step)\n y_0 = px_a_max + 12 / a_res\n y_1 = px_a_min - 12 / a_res\n draw.rectangle(((x0, y_0), (x1, y_1)), outline='magenta')\n draw.rectangle(((x0 + 1, y_0 + 1), (x1 - 1, y_1 - 1)), outline='magenta')\n\n # Create frequency response\n fr = FrequencyResponse(model, f, amplitude)\n fr.interpolate()\n if len(fr.frequency) < 2:\n im.show()\n raise ValueError(f'Failed to parse image for {fr.name}')\n fr.smoothen_fractional_octave(window_size=1/3, treble_window_size=1/3)\n fr.raw = fr.smoothed.copy()\n fr.smoothed = np.array([])\n fr.center()\n\n return fr, _im\n\n def process(self, item, url):\n if item.form == 'ignore':\n return\n\n image_dir = os.path.join(DIR_PATH, 'images')\n inspection_dir = os.path.join(DIR_PATH, 'inspection')\n data_dir = os.path.join(DIR_PATH, 'data')\n\n os.makedirs(image_dir, exist_ok=True)\n os.makedirs(os.path.join(inspection_dir, 'parse'), exist_ok=True)\n os.makedirs(os.path.join(inspection_dir, 'fr'), exist_ok=True)\n\n # Download and parse image\n self.download_images(url, item, data_dir, image_dir, inspection_dir, callback=self.process_image)\n\n def download_images(self, url, item, data_dir, image_dir, inspection_dir, callback):\n document = self.get_beautiful_soup(url) # Reports page\n for label in document.find_all(name='span', text=self.pro_report_regex):\n parent = label.parent.parent.parent\n anchor = parent.find_all('a')[1]\n report_url = f'https://reference-audio-analyzer.pro{anchor[\"href\"]}'\n suffix = anchor.text.lower().strip()\n name = item.true_name\n if suffix != item.false_name.lower() and suffix != 'default':\n name += f' ({suffix})'\n # The suffixes above are read automatically from the reports compilation page.\n # However, these might not be the names that should exist in AutoEq.\n mods = self.name_index.find(false_name=name)\n if mods:\n # Find an item in name index which has the given name with automatic\n # suffixes as false name and replace the name with its true name.\n true_name = mods.items[0].true_name\n image_path, rig = self.download_image(report_url, image_dir, item.false_name, true_name, item.form)\n if image_path:\n callback(image_path, rig, true_name, item.form, data_dir, inspection_dir)\n else:\n # Not in the name index, prompt user\n manufacturer, manufacturer_match = self.manufacturers.find(name)\n if manufacturer:\n model = re.sub(re.escape(manufacturer_match), '', name, flags=re.IGNORECASE).strip()\n name_proposals = self.get_name_proposals(name)\n similar_names = self.get_name_proposals(name, n=6, normalize_digits=True, threshold=0)\n similar_names = [item.true_name for item in similar_names.items]\n else:\n model = name\n name_proposals = None\n similar_names = None\n prompt = NamePrompt(\n model,\n self.prompt_mod_callback(name, report_url, data_dir, image_dir, inspection_dir, callback),\n manufacturer=manufacturer,\n name_proposals=name_proposals,\n search_callback=self.search,\n false_name=item.false_name,\n similar_names=similar_names\n ).widget\n if len(self.prompts.children) > 0:\n if type(self.prompts.children) == tuple:\n self.prompts.children = [x for x in self.prompts.children] + [prompt]\n else:\n self.prompts.children.append(prompt)\n else:\n self.prompts.children = [prompt]\n else:\n true_name = name\n image_path, rig = self.download_image(report_url, image_dir, item.false_name, true_name, item.form)\n if image_path:\n callback(image_path, rig, true_name, item.form, data_dir, inspection_dir)\n\n def prompt_mod_callback(self, false_name, report_url, data_dir, image_dir, inspection_dir, callback):\n def fn(true_name, form):\n self.name_index.add(NameItem(false_name, true_name, form))\n self.write_name_index()\n image_path, rig = self.download_image(report_url, image_dir, false_name, true_name, form)\n if image_path:\n callback(image_path, rig, true_name, form, data_dir, inspection_dir)\n return fn\n\n def download_image(self, report_url, image_dir, false_name, true_name, form):\n document = self.get_beautiful_soup(report_url) # Sets the driver also\n el = document.find(name='li', text=self.performed_on_stand_regex)\n try:\n rig = el.parent.find(name='ul').find(name='a').text\n except AttributeError as err:\n rig = 'HDM-X' if form == 'onear' else 'SIEC'\n print(f'Measurement rig could not be read for \"{false_name}\", guessing {rig}')\n try:\n graph = self.driver.find_element_by_id('response9').find_element_by_tag_name('div') # FR Graph\n except Exception:\n print(f'No graph for {false_name}')\n return None, None\n # Background image\n report_url = graph.value_of_css_property('background-image').replace('url(\"', '').replace('\")', '')\n file_path = self.download(report_url, true_name, image_dir)\n return file_path, rig\n\n def process_image(self, image_path, rig, name, form, data_dir, inspection_dir):\n im = Image.open(image_path)\n if im is None:\n print(f'Could not open image in \"{image_path}\"')\n return\n\n mod = self.name_index.find(true_name=name)\n # Get the form from name index if an entry already exists\n form = mod.items[0].form if mod else form\n\n fr, inspection = ReferenceAudioAnalyzerCrawler.parse_image(im, name)\n\n out_dir = os.path.join(data_dir, form, rig, name)\n os.makedirs(out_dir, exist_ok=True)\n\n # Save inspection images\n inspection.save(os.path.join(inspection_dir, 'parse', f'{name}.png'))\n fig, ax = fr.plot_graph(show=False, file_path=os.path.join(inspection_dir, 'fr', f'{name}.png'))\n plt.close(fig)\n\n # Write to CSV\n fr.write_to_csv(os.path.join(out_dir, f'{name}.csv'))\n print(f'Wrote CSV to \"{os.path.join(out_dir, f\"{name}.csv\")}\"')\n\n\ndef main():\n crawler = ReferenceAudioAnalyzerCrawler()\n crawler.process_new(prompt=False)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.log", "pandas.DataFrame", "numpy.mean", "matplotlib.pyplot.close", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
Gopalbansal8106/python-machine-learning-book
[ "d0c8598bb499b3c535356da5d1226c39bba85986" ]
[ "code/ch13/mnist_keras_mlp.py" ]
[ "import os\nimport struct\nimport numpy as np\nimport theano\nfrom keras.utils import np_utils\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense\nfrom keras.optimizers import SGD\n\n\ndef load_mnist(path, kind='train'):\n \"\"\"Load MNIST data from `path`\"\"\"\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte'\n % kind)\n\n with open(labels_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II',\n lbpath.read(8))\n labels = np.fromfile(lbpath,\n dtype=np.uint8)\n\n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack(\">IIII\",\n imgpath.read(16))\n images = np.fromfile(imgpath,\n dtype=np.uint8).reshape(len(labels), 784)\n\n return images, labels\n\n#### Loading the data\n\nX_train, y_train = load_mnist('mnist', kind='train')\nX_test, y_test = load_mnist('mnist', kind='t10k')\n\n#### Preparing the data\n\nX_train = X_train.astype(theano.config.floatX)\nX_test = X_test.astype(theano.config.floatX)\ny_train_ohe = np_utils.to_categorical(y_train)\n\n\n#### Training\n\nnp.random.seed(1)\n\nmodel = Sequential()\nmodel.add(Dense(input_dim=X_train.shape[1],\n output_dim=50,\n init='uniform',\n activation='tanh'))\n\nmodel.add(Dense(input_dim=50,\n output_dim=50,\n init='uniform',\n activation='tanh'))\n\nmodel.add(Dense(input_dim=50,\n output_dim=y_train_ohe.shape[1],\n init='uniform',\n activation='softmax'))\n\nsgd = SGD(lr=0.001, decay=1e-7, momentum=.9)\nmodel.compile(loss='categorical_crossentropy', optimizer=sgd)\n\nmodel.fit(X_train, y_train_ohe,\n nb_epoch=50,\n batch_size=300,\n verbose=1,\n validation_split=0.1,\n show_accuracy=True)\n\ny_train_pred = model.predict_classes(X_train, verbose=0)\ntrain_acc = np.sum(y_train == y_train_pred, axis=0) / X_train.shape[0]\nprint('Training accuracy: %.2f%%' % (train_acc * 100))\n\ny_test_pred = model.predict_classes(X_test, verbose=0)\ntest_acc = np.sum(y_test == y_test_pred, axis=0) / X_test.shape[0]\nprint('Test accuracy: %.2f%%' % (test_acc * 100))\n" ]
[ [ "numpy.fromfile", "numpy.sum", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]