Columns:
repo_name: string (value lengths 6 to 130)
hexsha: list
file_path: list
code: list
apis: list
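Each record below pairs a repository name with parallel lists: commit hashes, file paths, the raw source of each file, and the library APIs called in that file. A minimal sketch of reading such records, assuming the dump is stored as JSON Lines under a hypothetical name records.jsonl (the filename and storage format are assumptions, not stated by this dump):

import json

def iter_records(path="records.jsonl"):
    # Each line is one record with the columns listed above; the
    # list-valued fields are parallel (one entry per file in the snapshot).
    with open(path, "r", encoding="utf-8") as fid:
        for line in fid:
            record = json.loads(line)
            assert len(record["file_path"]) == len(record["code"]) == len(record["apis"])
            yield record

for record in iter_records():
    print(record["repo_name"])
    for file_path, api_list in zip(record["file_path"], record["apis"]):
        print("  %s: %d API calls" % (file_path, len(api_list)))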
microsoft/DualOctreeGNN
[ "29eed84653d4f0c1681c8227714cf84e76c31abe", "29eed84653d4f0c1681c8227714cf84e76c31abe" ]
[ "tools/shapenet.py", "solver/sampler.py" ]
[ "# --------------------------------------------------------\n# Dual Octree Graph Networks\n# Copyright (c) 2022 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Peng-Shuai Wang\n# --------------------------------------------------------\n\nimport os\nimport time\nimport wget\nimport shutil\nimport torch\nimport ocnn\nimport trimesh\nimport logging\nimport mesh2sdf\nimport zipfile\nimport argparse\nimport numpy as np\nfrom tqdm import tqdm\nfrom plyfile import PlyData, PlyElement\n\nlogger = logging.getLogger(\"trimesh\")\nlogger.setLevel(logging.ERROR)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--run', type=str, required=True)\nparser.add_argument('--start', type=int, default=0)\nparser.add_argument('--end', type=int, default=45572)\nargs = parser.parse_args()\n\nsize = 128 # resolution of SDF\nlevel = 0.015 # 2/128 = 0.015625\nshape_scale = 0.5 # rescale the shape into [-0.5, 0.5]\nproject_folder = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\nroot_folder = os.path.join(project_folder, 'data/ShapeNet')\n\n\ndef create_flag_file(filename):\n r''' Creates a flag file to indicate whether some time-consuming works\n have been done.\n '''\n\n folder = os.path.dirname(filename)\n if not os.path.exists(folder):\n os.makedirs(folder)\n with open(filename, 'w') as fid:\n fid.write('succ @ ' + time.ctime())\n\n\ndef check_folder(filenames: list):\n r''' Checks whether the folder contains the filename exists.\n '''\n\n for filename in filenames:\n folder = os.path.dirname(filename)\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n\ndef get_filenames(filelist):\n r''' Gets filenames from a filelist.\n '''\n\n filelist = os.path.join(root_folder, 'filelist', filelist)\n with open(filelist, 'r') as fid:\n lines = fid.readlines()\n filenames = [line.split()[0] for line in lines]\n return filenames\n\n\ndef unzip_shapenet():\n r''' Unzip the ShapeNetCore.v1\n '''\n\n filename = os.path.join(root_folder, 'ShapeNetCore.v1.zip')\n flag_file = os.path.join(root_folder, 'flags/unzip_shapenet_succ')\n if not os.path.exists(flag_file):\n print('-> Unzip ShapeNetCore.v1.zip.')\n with zipfile.ZipFile(filename, 'r') as zip_ref:\n zip_ref.extractall(root_folder)\n create_flag_file(flag_file)\n\n folder = os.path.join(root_folder, 'ShapeNetCore.v1')\n flag_file = os.path.join(root_folder, 'flags/unzip_shapenet_all_succ')\n if not os.path.exists(flag_file):\n print('-> Unzip all zip files in ShapeNetCore.v1.')\n filenames = os.listdir(folder)\n for filename in filenames:\n if filename.endswith('.zip'):\n print('- Unzip %s' % filename)\n zipname = os.path.join(folder, filename)\n with zipfile.ZipFile(zipname, 'r') as zip_ref:\n zip_ref.extractall(folder)\n os.remove(zipname)\n create_flag_file(flag_file)\n\n\ndef download_filelist():\n r''' Downloads the filelists used for learning.\n '''\n\n flag_file = os.path.join(root_folder, 'flags/download_filelist_succ')\n if not os.path.exists(flag_file):\n print('-> Download the filelist.')\n url = 'https://www.dropbox.com/s/4jvam486l8961t7/shapenet.filelist.zip?dl=1'\n filename = os.path.join(root_folder, 'filelist.zip')\n wget.download(url, filename, bar=None)\n\n folder = os.path.join(root_folder, 'filelist')\n with zipfile.ZipFile(filename, 'r') as zip_ref:\n zip_ref.extractall(path=folder)\n os.remove(filename)\n create_flag_file(flag_file)\n\n\ndef run_mesh2sdf():\n r''' Converts the meshes from ShapeNet to SDFs and manifold meshes.\n '''\n\n print('-> Run mesh2sdf.')\n mesh_scale = 0.8\n 
filenames = get_filenames('all.txt')\n for i in tqdm(range(args.start, args.end), ncols=80):\n filename = filenames[i]\n filename_raw = os.path.join(\n root_folder, 'ShapeNetCore.v1', filename, 'model.obj')\n filename_obj = os.path.join(root_folder, 'mesh', filename + '.obj')\n filename_box = os.path.join(root_folder, 'bbox', filename + '.npz')\n filename_npy = os.path.join(root_folder, 'sdf', filename + '.npy')\n check_folder([filename_obj, filename_box, filename_npy])\n if os.path.exists(filename_obj): continue\n\n # load the raw mesh\n mesh = trimesh.load(filename_raw, force='mesh')\n\n # rescale mesh to [-1, 1] for mesh2sdf, note the factor **mesh_scale**\n vertices = mesh.vertices\n bbmin, bbmax = vertices.min(0), vertices.max(0)\n center = (bbmin + bbmax) * 0.5\n scale = 2.0 * mesh_scale / (bbmax - bbmin).max()\n vertices = (vertices - center) * scale\n\n # run mesh2sdf\n sdf, mesh_new = mesh2sdf.compute(vertices, mesh.faces, size, fix=True,\n level=level, return_mesh=True)\n mesh_new.vertices = mesh_new.vertices * shape_scale\n\n # save\n np.savez(filename_box, bbmax=bbmax, bbmin=bbmin, mul=mesh_scale)\n np.save(filename_npy, sdf)\n mesh_new.export(filename_obj)\n\n\ndef sample_pts_from_mesh():\n r''' Samples 10k points with normals from the ground-truth meshes.\n '''\n\n print('-> Run sample_pts_from_mesh.')\n num_samples = 40000\n mesh_folder = os.path.join(root_folder, 'mesh')\n output_folder = os.path.join(root_folder, 'dataset')\n filenames = get_filenames('all.txt')\n for i in tqdm(range(args.start, args.end), ncols=80):\n filename = filenames[i]\n filename_obj = os.path.join(mesh_folder, filename + '.obj')\n filename_pts = os.path.join(output_folder, filename, 'pointcloud.npz')\n check_folder([filename_pts])\n if os.path.exists(filename_pts): continue\n\n # sample points\n mesh = trimesh.load(filename_obj, force='mesh')\n points, idx = trimesh.sample.sample_surface(mesh, num_samples)\n normals = mesh.face_normals[idx]\n\n # save points\n np.savez(filename_pts, points=points.astype(np.float16),\n normals=normals.astype(np.float16))\n\n\ndef sample_sdf():\n r''' Samples ground-truth SDF values for training.\n '''\n\n # constants\n depth, full_depth = 6, 4\n sample_num = 4 # number of samples in each octree node\n grid = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1],\n [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]])\n\n print('-> Sample SDFs from the ground truth.')\n filenames = get_filenames('all.txt')\n for i in tqdm(range(args.start, args.end), ncols=80):\n filename = filenames[i]\n dataset_folder = os.path.join(root_folder, 'dataset')\n filename_sdf = os.path.join(root_folder, 'sdf', filename + '.npy')\n filename_pts = os.path.join(dataset_folder, filename, 'pointcloud.npz')\n filename_out = os.path.join(dataset_folder, filename, 'sdf.npz')\n if os.path.exists(filename_out): continue\n\n # load data\n pts = np.load(filename_pts)\n sdf = np.load(filename_sdf)\n sdf = torch.from_numpy(sdf)\n points = pts['points'].astype(np.float32)\n normals = pts['normals'].astype(np.float32)\n points = points / shape_scale # rescale points to [-1, 1]\n\n # build octree\n points = ocnn.points_new(\n torch.from_numpy(points), torch.from_numpy(normals),\n torch.Tensor(), torch.Tensor())\n octree2points = ocnn.Points2Octree(depth=depth, full_depth=full_depth)\n octree = octree2points(points)\n\n # sample points and grads according to the xyz\n xyzs, grads, sdfs = [], [], []\n for d in range(full_depth, depth + 1):\n xyz = ocnn.octree_property(octree, 'xyz', d)\n xyz = 
ocnn.octree_decode_key(xyz)\n\n # sample k points in each octree node\n xyz = xyz[:, :3].float() # + 0.5 -> octree node center\n xyz = xyz.unsqueeze(1) + torch.rand(xyz.shape[0], sample_num, 3)\n xyz = xyz.view(-1, 3) # (N, 3)\n xyz = xyz * (size / 2 ** d) # normalize to [0, 2^sdf_depth]\n xyz = xyz[(xyz < 127).all(dim=1)] # remove out-of-bound points\n xyzs.append(xyz)\n\n # interpolate the sdf values\n xyzi = torch.floor(xyz) # the integer part (N, 3)\n corners = xyzi.unsqueeze(1) + grid # (N, 8, 3)\n coordsf = xyz.unsqueeze(1) - corners # (N, 8, 3), in [-1.0, 1.0]\n weights = (1 - coordsf.abs()).prod(dim=-1) # (N, 8, 1)\n corners = corners.long().view(-1, 3)\n x, y, z = corners[:, 0], corners[:, 1], corners[:, 2]\n s = sdf[x, y, z].view(-1, 8)\n sw = torch.sum(s * weights, dim=1)\n sdfs.append(sw)\n\n # calc the gradient\n gx = s[:, 4] - s[:, 0] + s[:, 5] - s[:, 1] + \\\n s[:, 6] - s[:, 2] + s[:, 7] - s[:, 3] # noqa\n gy = s[:, 2] - s[:, 0] + s[:, 3] - s[:, 1] + \\\n s[:, 6] - s[:, 4] + s[:, 7] - s[:, 5] # noqa\n gz = s[:, 1] - s[:, 0] + s[:, 3] - s[:, 2] + \\\n s[:, 5] - s[:, 4] + s[:, 7] - s[:, 6] # noqa\n grad = torch.stack([gx, gy, gz], dim=-1)\n norm = torch.sqrt(torch.sum(grad ** 2, dim=-1, keepdims=True))\n grad = grad / (norm + 1.0e-8)\n grads.append(grad)\n\n # concat the results\n xyzs = torch.cat(xyzs, dim=0).numpy()\n points = (xyzs / 64 - 1).astype(np.float16) * shape_scale # !shape_scale\n grads = torch.cat(grads, dim=0).numpy().astype(np.float16)\n sdfs = torch.cat(sdfs, dim=0).numpy().astype(np.float16)\n\n # save results\n # points = (points * args.scale).astype(np.float16) # in [-scale, scale]\n np.savez(filename_out, points=points, grad=grads, sdf=sdfs)\n\n\ndef sample_occu():\n r''' Samples occupancy values for evaluating the IoU following ConvONet.\n '''\n\n num_samples = 100000\n grid = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1],\n [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]])\n\n # filenames = get_filenames('all.txt')\n filenames = get_filenames('test.txt') + get_filenames('test_unseen5.txt')\n for filename in tqdm(filenames, ncols=80):\n filename_sdf = os.path.join(root_folder, 'sdf', filename + '.npy')\n filename_occu = os.path.join(root_folder, 'dataset', filename, 'points')\n if os.path.exists(filename_occu) or (not os.path.exists(filename_sdf)):\n continue\n\n sdf = np.load(filename_sdf)\n factor = 127.0 / 128.0 # make sure the interpolation is well defined\n points_uniform = np.random.rand(num_samples, 3) * factor # in [0, 1)\n points = (points_uniform - 0.5) * (2 * shape_scale) # !!! 
rescale\n points = points.astype(np.float16)\n\n # interpolate the sdf values\n xyz = points_uniform * 128 # in [0, 127)\n xyzi = np.floor(xyz) # the integer part (N, 3)\n corners = np.expand_dims(xyzi, 1) + grid # (N, 8, 3)\n coordsf = np.expand_dims(xyz, 1) - corners # (N, 8, 3), in [-1.0, 1.0]\n weights = np.prod(1 - np.abs(coordsf), axis=-1) # (N, 8)\n\n corners = np.reshape(corners.astype(np.int64), (-1, 3))\n x, y, z = corners[:, 0], corners[:, 1], corners[:, 2]\n values = np.reshape(sdf[x, y, z], (-1, 8))\n value = np.sum(values * weights, axis=1)\n occu = value < 0\n occu = np.packbits(occu)\n\n # save\n np.savez(filename_occu, points=points, occupancies=occu)\n\n\ndef generate_test_points():\n r''' Generates points in `ply` format for testing.\n '''\n\n noise_std = 0.005\n point_sample_num = 3000\n # filenames = get_filenames('all.txt')\n filenames = get_filenames('test.txt') + get_filenames('test_unseen5.txt')\n for filename in tqdm(filenames, ncols=80):\n filename_pts = os.path.join(\n root_folder, 'dataset', filename, 'pointcloud.npz')\n filename_ply = os.path.join(\n root_folder, 'test.input', filename + '.ply')\n if not os.path.exists(filename_pts): continue\n check_folder([filename_ply])\n\n # sample points\n pts = np.load(filename_pts)\n points = pts['points'].astype(np.float32)\n noise = noise_std * np.random.randn(point_sample_num, 3)\n rand_idx = np.random.choice(points.shape[0], size=point_sample_num)\n points_noise = points[rand_idx] + noise\n\n # save ply\n vertices = []\n py_types = (float, float, float)\n npy_types = [('x', 'f4'), ('y', 'f4'), ('z', 'f4')]\n for idx in range(points_noise.shape[0]):\n vertices.append(\n tuple(dtype(d) for dtype, d in zip(py_types, points_noise[idx])))\n structured_array = np.array(vertices, dtype=npy_types)\n el = PlyElement.describe(structured_array, 'vertex')\n PlyData([el]).write(filename_ply)\n\n\ndef download_dataset():\n r''' Directly downloads the dataset.\n '''\n\n flag_file = os.path.join(root_folder, 'flags/download_dataset_succ')\n if not os.path.exists(flag_file):\n print('-> Download the dataset.')\n url = 'https://www.dropbox.com/s/mc3lrwqpmnfq3j8/shapenet.dataset.zip?dl=1'\n filename = os.path.join(root_folder, 'shapenet.dataset.zip')\n wget.download(url, filename)\n\n with zipfile.ZipFile(filename, 'r') as zip_ref:\n zip_ref.extractall(path=root_folder)\n # os.remove(filename)\n create_flag_file(flag_file)\n\n\ndef generate_dataset_unseen5():\n r'''Creates the unseen5 dataset\n '''\n\n dataset_folder = os.path.join(root_folder, 'dataset')\n unseen5_folder = os.path.join(root_folder, 'dataset.unseen5')\n if not os.path.exists(unseen5_folder):\n os.makedirs(unseen5_folder)\n for folder in ['02808440', '02773838', '02818832', '02876657', '03938244']:\n curr_folder = os.path.join(dataset_folder, folder)\n if os.path.exists(curr_folder):\n shutil.move(os.path.join(dataset_folder, folder), unseen5_folder)\n\n\ndef copy_convonet_filelists():\n r''' Copies the filelist of ConvONet to the datasets, which are needed when\n calculating the evaluation metrics.\n '''\n\n with open(os.path.join(root_folder, 'filelist/lists.txt'), 'r') as fid:\n lines = fid.readlines()\n filenames = [line.split()[0] for line in lines]\n filelist_folder = os.path.join(root_folder, 'filelist')\n for filename in filenames:\n src_name = os.path.join(filelist_folder, filename)\n des_name = src_name.replace('filelist/convonet.filelist', 'dataset') \\\n .replace('filelist/unseen5.filelist', 'dataset.unseen5')\n if not os.path.exists(des_name):\n 
shutil.copy(src_name, des_name)\n\n\ndef convert_mesh_to_sdf():\n unzip_shapenet()\n download_filelist()\n run_mesh2sdf()\n\n\ndef generate_dataset():\n sample_pts_from_mesh()\n sample_sdf()\n sample_occu()\n generate_test_points()\n generate_dataset_unseen5()\n copy_convonet_filelists()\n\n\nif __name__ == '__main__':\n eval('%s()' % args.run)\n", "# --------------------------------------------------------\n# Dual Octree Graph Networks\n# Copyright (c) 2022 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Peng-Shuai Wang\n# --------------------------------------------------------\n\nimport torch\nfrom torch.utils.data import Sampler, DistributedSampler, Dataset\n\n\nclass InfSampler(Sampler):\n def __init__(self, dataset: Dataset, shuffle: bool = True) -> None:\n self.dataset = dataset\n self.shuffle = shuffle\n self.reset_sampler()\n\n def reset_sampler(self):\n num = len(self.dataset)\n indices = torch.randperm(num) if self.shuffle else torch.arange(num)\n self.indices = indices.tolist()\n self.iter_num = 0\n\n def __iter__(self):\n return self\n\n def __next__(self):\n value = self.indices[self.iter_num]\n self.iter_num = self.iter_num + 1\n\n if self.iter_num >= len(self.indices):\n self.reset_sampler()\n return value\n\n def __len__(self):\n return len(self.dataset)\n\n\nclass DistributedInfSampler(DistributedSampler):\n def __init__(self, dataset: Dataset, shuffle: bool = True) -> None:\n super().__init__(dataset, shuffle=shuffle)\n self.reset_sampler()\n\n def reset_sampler(self):\n self.indices = list(super().__iter__())\n self.iter_num = 0\n\n def __iter__(self):\n return self\n\n def __next__(self):\n value = self.indices[self.iter_num]\n self.iter_num = self.iter_num + 1\n\n if self.iter_num >= len(self.indices):\n self.reset_sampler()\n return value\n" ]
[ [ "numpy.savez", "numpy.expand_dims", "torch.cat", "torch.sum", "numpy.random.randn", "numpy.reshape", "torch.from_numpy", "numpy.save", "numpy.packbits", "torch.rand", "numpy.load", "torch.floor", "numpy.random.choice", "numpy.random.rand", "numpy.floor", "torch.stack", "numpy.array", "numpy.sum", "numpy.abs", "torch.Tensor" ], [ "torch.randperm", "torch.arange" ] ]
ahmedtaiye/tfeatslekan
[ "fc6bbfe9f1cfdb56b002c03f611725120be0d9c4", "fc6bbfe9f1cfdb56b002c03f611725120be0d9c4" ]
[ "L1.py", "set.py" ]
[ "\r\nfrom __future__ import print_function\r\nfrom sklearn.metrics.pairwise import cosine_similarity\r\nfrom sklearn.decomposition import TruncatedSVD\r\nfrom sklearn.pipeline import make_pipeline\r\nfrom sklearn.preprocessing import Normalizer\r\nfrom sklearn import metrics\r\nfrom sklearn.decomposition import TruncatedSVD\r\nfrom sklearn.decomposition import NMF\r\nfrom sklearn.decomposition import LatentDirichletAllocation\r\nfrom sklearn.decomposition import ProjectedGradientNMF\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.metrics.pairwise import euclidean_distances\r\nfrom sklearn.metrics import jaccard_similarity_score\r\nfrom sklearn.metrics.pairwise import paired_distances\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.cluster import KMeans, MiniBatchKMeans\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom nltk.corpus import stopwords\r\n#import codecs, difflib, Levenshtein, distance\r\nimport logging\r\nfrom optparse import OptionParser\r\nimport sys\r\nfrom time import time\r\nimport numpy as np\r\n# Display progress logs on stdout\r\nlogging.basicConfig(level=logging.INFO,\r\n format='%(asctime)s %(levelname)s %(message)s')\r\n\r\n# parse commandline arguments\r\nop = OptionParser()\r\nop.add_option(\"--lsa\",\r\n dest=\"n_components\", type=\"int\",\r\n help=\"Preprocess documents with latent semantic analysis.\")\r\nop.add_option(\"--no-minibatch\",\r\n action=\"store_false\", dest=\"minibatch\", default=True,\r\n help=\"Use ordinary k-means algorithm (in batch mode).\")\r\nop.add_option(\"--no-idf\",\r\n action=\"store_false\", dest=\"use_idf\", default=True,\r\n help=\"Disable Inverse Document Frequency feature weighting.\")\r\nop.add_option(\"--use-hashing\",\r\n action=\"store_true\", default=False,\r\n help=\"Use a hashing feature vectorizer\")\r\nop.add_option(\"--n-features\", type=int, default=10000,\r\n help=\"Maximum number of features (dimensions)\"\r\n \" to extract from text.\")\r\nop.add_option(\"--verbose\",\r\n action=\"store_true\", dest=\"verbose\", default=False,\r\n help=\"Print progress reports inside k-means algorithm.\")\r\n\r\nprint(__doc__)\r\nop.print_help()\r\ndef is_interactive():\r\n return not hasattr(sys.modules['__main__'], '__file__')\r\n\r\n# work-around for Jupyter notebook and IPython console\r\nargv = [] if is_interactive() else sys.argv[1:]\r\n(opts, args) = op.parse_args(argv)\r\nif len(args) > 0:\r\n op.error(\"this script takes no arguments.\")\r\n sys.exit(1)\r\n\r\ndef is_interactive():\r\n return not hasattr(sys.modules['__main__'], '__file__')\r\n# Bring in standard stopwords\r\nwith np.errstate(divide='ignore'):\r\n np.float64(1.0) / 0.0\r\nfrom nltk.corpus import stopwords\r\n\r\n# Bring in the default English NLTK stop words\r\nstoplist = stopwords.words('english')\r\n\r\n# Define additional stopwords in a string\r\nadditional_stopwords = \"\"\"To [ ] I you am As it can't <<...>> sincerely, . 
> - < Kenneth Lay/Corp/Enron@Enron Best regards Sincerely From Sent Original Message Q <-> * | /\\ 100% 12345678910 () \"\"\"\r\n\r\n# Split the the additional stopwords string on each word and then add\r\n# those words to the NLTK stopwords list\r\nstoplist += additional_stopwords.split()\r\n\r\nstopWords = stopwords.words('english')\r\n\r\nprint (\"\\nCalculating document similarity scores...\")\r\n\r\n# Open and read a bunch of files\r\nf = open('ken-lay_body.txt')\r\ndoc1 = str(f.read())\r\nf = open('jeff-skilling_body.txt')\r\ndoc2 = str(f.read())\r\nf = open('Richard-shapiro_body.txt')\r\ndoc3 = str(f.read())\r\nf = open('kay-mann_body.txt')\r\ndoc4 = str(f.read())\r\nf = open('Jeff-dasovich_body.txt',)\r\ndoc5 = str(f.read())\r\nf = open('tana jones_body.txt')\r\ndoc6 = str(f.read())\r\nf = open('steven kean_body.txt')\r\ndoc7 = str(f.read())\r\nf = open('shackleton sara_body.txt')\r\ndoc8 = str(f.read())\r\nf = open('james steffes_body.txt')\r\ndoc9 = str(f.read())\r\nf = open('Mark taylor_body.txt')\r\ndoc10 = str(f.read())\r\nf = open('davis pete_body.txt')\r\ndoc11 = str(f.read())\r\nf = open('Chris g_body.txt')\r\ndoc12 = str(f.read())\r\nf = open('kate symes_body.txt')\r\ndoc13 = str(f.read())\r\nf = open('Mcconnell.body.txt')\r\ndoc14 = str(f.read())\r\nf = open('kaminski_body.txt')\r\ndoc15 = str(f.read())\r\n#train_string = 'By these proceedings for judicial review the Claimant seeks to challenge the decision of the Defendant dated the 23rd of May 2014 refusing the Claimant’s application of the 3rd of January 2012 for naturalisation as a British citizen'\r\n# Construct the training set as a list\r\ntrain_set = [ doc1, doc2, doc3, doc4, doc5, doc6,doc7, doc8, doc9, doc10, doc11, doc12, doc13, doc14, doc15]\r\n\r\n# Set up the vectoriser, passing in the stop words\r\ntfidf_vectorizer = TfidfVectorizer(stop_words=stopWords)\r\nvectorizer = TfidfVectorizer(min_df = 1, stop_words = 'english')\r\n# Apply the vectoriser to the training set\r\ntfidf_matrix_train = tfidf_vectorizer.fit_transform(train_set)\r\nC = cosine_similarity(tfidf_matrix_train)\r\n#print (\"\\nSimilarity Score [*] \",cosine_similarity(tfidf_matrix_train[0:1], tfidf_matrix_train))\r\n\r\ntfidf_matrix_train.shape\r\n#print(tfidf_matrix_train.toarray())\r\n#print(vector.toarray())\r\n\r\n\r\n\r\n\r\nprint(\"Top terms per cluster:\")\r\n\r\nif opts.n_components:\r\n print(\"Performing dimensionality reduction using LSA\")\r\n t0 = time()\r\n # Vectorizer results are normalized, which makes KMeans behave as\r\n # spherical k-means for better results. 
Since LSA/SVD results are\r\n # not normalized, we have to redo the normalization.\r\n svd = TruncatedSVD(opts.n_components)\r\n normalizer = Normalizer(copy=False)\r\n lsa = make_pipeline(svd, normalizer)\r\n tfidf_matrix_train = lsa.fit_transform(tfidf_matrix_train)\r\n print(\"done in %fs\" % (time() - t0))\r\n\r\n explained_variance = svd.explained_variance_ratio_.sum()\r\n print(\"Explained variance of the SVD step: {}%\".format(\r\n int(explained_variance * 100)))\r\n\r\n print()\r\n\r\n\r\n# #############################################################################\r\n# Do the actual clustering\r\ntrue_k = 5\r\nkm = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1)\r\n\r\ndata_vectorized=km.fit(tfidf_matrix_train)\r\nlabels = km.labels_\r\n\r\nif opts.minibatch:\r\n km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,\r\n init_size=1000, batch_size=1000, verbose=opts.verbose)\r\nelse:\r\n km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,\r\n verbose=opts.verbose)\r\n\r\nNUM_TOPICS= labels\r\nprint(\"Clustering sparse data with %s\" % km)\r\nt0 = time()\r\nkm.fit(tfidf_matrix_train)\r\nprint(\"done in %0.3fs\" % (time() - t0))\r\nprint()\r\n\r\nprint(\"Homogeneity: %0.3f\" % metrics.homogeneity_score(labels,km.labels_))\r\nprint(\"Completeness: %0.3f\" % metrics.completeness_score(labels,km.labels_))\r\nprint(\"V-measure: %0.3f\" % metrics.v_measure_score(labels,km.labels_))\r\nprint(\"Adjusted Rand-Index: %.3f\"\r\n % metrics.adjusted_rand_score( labels, km.labels_))\r\nprint(\"Silhouette Coefficient: %0.3f\"\r\n % metrics.silhouette_score(tfidf_matrix_train, km.labels_, sample_size=1000))\r\n\r\nprint()\r\n\r\n\r\n\r\n# Build a Latent Dirichlet Allocation Model\r\nlda_model = LatentDirichletAllocation(n_topics=NUM_TOPICS, max_iter=10, learning_method='online')\r\nlda_Z = lda_model.fit_transform(data_vectorized)\r\nprint(lda_Z.shape) # (NO_DOCUMENTS, NO_TOPICS)\r\n\r\n\r\n\r\npgnmf_model= ProjectedGradientNMF(n_components=NUM_TOPICS)\r\npgnmf_z= pgnmf_model.fit_transform(data_vectorized)\r\nprint(pgnmf_z.shape) # (NO_DOCUMENTS, NO_TOPICS)\r\n\r\n\r\n# Build a Non-Negative Matrix Factorization Model\r\nnmf_model = NMF(n_components=NUM_TOPICS)\r\nnmf_Z = nmf_model.fit_transform(data_vectorized)\r\nprint(nmf_Z.shape) # (NO_DOCUMENTS, NO_TOPICS)\r\n\r\n# Build a Latent Semantic Indexing Model\r\nlsi_model = TruncatedSVD(n_components=NUM_TOPICS)\r\nlsi_Z = lsi_model.fit_transform(data_vectorized)\r\nprint(lsi_Z.shape) # (NO_DOCUMENTS, NO_TOPICS)\r\n\r\n\r\n# Let's see how the first document in the corpus looks like in different topic spaces\r\nprint(lda_Z[0])\r\nprint(nmf_Z[0])\r\nprint(lsi_Z[0])\r\nprint(pgnmf_z[0])\r\n# Let's see how the first document in the corpus looks like in different topic spaces\r\nprint(lda_Z[0])\r\nprint(nmf_Z[0])\r\nprint(lsi_Z[0])\r\nprint(pgnmf_z[0])\r\ndef print_topics(model, vectorizer, top_n=10):\r\n for idx, topic in enumerate(model.components_):\r\n print(\"Concepts %d:\" % (idx))\r\n print([(vectorizer.get_feature_names()[i], topic[i])\r\n for i in topic.argsort()[:-top_n - 1:-1]])\r\n\r\nprint(\"LDA Model:\")\r\nprint_topics(lda_model, vectorizer)\r\nprint(\"=\" * 20)\r\n\r\n# #############################################################################\r\n# Do the actual clustering\r\n\r\nif opts.minibatch:\r\n km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,\r\n init_size=10000, batch_size=1000, verbose=opts.verbose)\r\nelse:\r\n km = KMeans(n_clusters=true_k, 
init='k-means++', max_iter=100, n_init=1,\r\n verbose=opts.verbose)\r\n\r\nprint(\"Clustering sparse data with %s\" % km)\r\nt0 = time()\r\nkm.fit(tfidf_matrix_train)\r\nprint(\"done in %0.3fs\" % (time() - t0))\r\nprint()\r\n\r\nprint(\"Homogeneity: %0.3f\" % metrics.homogeneity_score(labels, km.labels_))\r\nprint(\"Completeness: %0.3f\" % metrics.completeness_score(labels, km.labels_))\r\nprint(\"V-measure: %0.3f\" % metrics.v_measure_score(labels, km.labels_))\r\nprint(\"Adjusted Rand-Index: %.3f\"\r\n % metrics.adjusted_rand_score(labels, km.labels_))\r\nprint(\"Silhouette Coefficient: %0.3f\"\r\n % metrics.silhouette_score(tfidf_matrix_train, km.labels_, sample_size=1000))\r\n\r\nprint()\r\n\r\nprint(\"NMF Model:\")\r\nprint_topics(nmf_model, vectorizer)\r\nprint(\"=\" * 20)\r\n\r\n# #############################################################################\r\n# Do the actual clustering\r\n\r\nif opts.minibatch:\r\n km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,\r\n init_size=10000, batch_size=1000, verbose=opts.verbose)\r\nelse:\r\n km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,\r\n verbose=opts.verbose)\r\n\r\nprint(\"Clustering sparse data with %s\" % km)\r\nt0 = time()\r\nkm.fit(tfidf_matrix_train)\r\nprint(\"done in %0.3fs\" % (time() - t0))\r\nprint()\r\n\r\nprint(\"Homogeneity: %0.3f\" % metrics.homogeneity_score(labels, km.labels_))\r\nprint(\"Completeness: %0.3f\" % metrics.completeness_score(labels, km.labels_))\r\nprint(\"V-measure: %0.3f\" % metrics.v_measure_score(labels, km.labels_))\r\nprint(\"Adjusted Rand-Index: %.3f\"\r\n % metrics.adjusted_rand_score(labels, km.labels_))\r\nprint(\"Silhouette Coefficient: %0.3f\"\r\n % metrics.silhouette_score(tfidf_matrix_train, km.labels_, sample_size=1000))\r\n\r\nprint()\r\n\r\n\r\nprint(\"PGNMF Model:\")\r\nprint_topics(pgnmf_model, vectorizer)\r\nprint(\"=\" * 20)\r\n\r\n# #############################################################################\r\n# Do the actual clustering\r\n\r\nif opts.minibatch:\r\n km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,\r\n init_size=10000, batch_size=1000, verbose=opts.verbose)\r\nelse:\r\n km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,\r\n verbose=opts.verbose)\r\n\r\nprint(\"Clustering sparse data with %s\" % km)\r\nt0 = time()\r\nkm.fit(tfidf_matrix_train)\r\nprint(\"done in %0.3fs\" % (time() - t0))\r\nprint()\r\n\r\nprint(\"Homogeneity: %0.3f\" % metrics.homogeneity_score(labels, km.labels_))\r\nprint(\"Completeness: %0.3f\" % metrics.completeness_score(labels, km.labels_))\r\nprint(\"V-measure: %0.3f\" % metrics.v_measure_score(labels, km.labels_))\r\nprint(\"Adjusted Rand-Index: %.3f\"\r\n % metrics.adjusted_rand_score(labels, km.labels_))\r\nprint(\"Silhouette Coefficient: %0.3f\"\r\n % metrics.silhouette_score(tfidf_matrix_train, km.labels_, sample_size=1000))\r\n\r\nprint()\r\n\r\n\r\nif not opts.use_hashing:\r\n print(\"Top terms per cluster:\")\r\n\r\n if opts.n_components:\r\n original_space_centroids = svd.inverse_transform(km.cluster_centers_)\r\n order_centroids = original_space_centroids.argsort()[:, ::-1]\r\n else:\r\n order_centroids = km.cluster_centers_.argsort()[:, ::-1]\r\n\r\n terms = tfidf_vectorizer .get_feature_names()\r\n for i in range(true_k):\r\n print(\"Cluster %d:\" % i, end='')\r\n for ind in order_centroids[i, :10]:\r\n print(' %s' % terms[ind], end='')\r\n print()\r\n\r\n", "\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\n\nedata = 
pd.read_csv(\"docword.enron.txt\", skiprows=3, sep = ' ', header=None)\nevocab = pd.read_csv(\"vocab.enron.txt\", header=None)\n\nprint (evocab)\nevocab.columns = ['word']\nedata.columns = ['docid','wordid','freq']\n# Taking a sample data set\nedata = edata.iloc[:100,:]\nevocab.index = evocab.index + 1\n\nwc = edata.groupby('wordid')['freq'].sum()\n#wc = egrouped['freq'].agg(np.sum)\nprint (wc)\n\nin_list = wc\n\n\n# In[ ]:\n\nm = 1\n# Input parameter 'm'\n# while(True):\n# m = input(\"Enter no. of randomized iterations required: \")\n# try:\n# m = int(m)\n# break\n# except:\n# print(\"Enter valid number.\")\n# continue\n\n\n# Calculating the dissimilarities and smoothing factors.\n\n# Set the value of parameter m = the no. of iterations you require\nCard = pd.Series(np.NAN)\nDS=pd.Series(np.NAN)\nidx_added = pd.Series(np.NAN)\npos = 0\nfor j in range(1,m+1):\n np.random.seed(42)\n new_indices = np.random.choice(in_list.index,len(in_list),replace=False)\n for i in pd.Series(new_indices).index:\n idx_added[i+pos] = new_indices[i]\n DS[i+pos]=np.var(in_list[new_indices[:i+1]])\n Card[i+pos] = len(in_list[:i+1])\n pos = pos+i+1\ndf = pd.DataFrame({'Index_added':idx_added,'DS':DS,'Card':Card})\ndf ['DS_Prev'] = df.DS.shift(1)\ndf['Card_prev'] = df.Card.shift(1)\ndf.Card_prev[(df.Card == 1)] = 0\ndf = df.fillna(0)\ndf['Smoothing'] = (df.Card - df.Card_prev)*(df.DS - df.DS_Prev)\n\n\n\n# In[ ]:\n\n\n# displaying the first 100 dissimilarities in decreasing order of smoothing factor.\nA =df.sort_values(['Smoothing'],ascending=False)['Index_added'].head(100)\ndiss = df.sort_values(['Smoothing'],ascending=False)['DS'].head(100)\nfreq = wc[A]\nword = evocab.word[A]\nout = pd.DataFrame([word.values,freq.values,diss.values],index=['word','freq','diss']).transpose()\n#print(out)\n#cnt = 0\n\n# for i in df.sort_values(by=['Smoothing'], ascending=False).index:\n#\n# if cnt < 100:\n#\n# print(evocab.word[df.Index_added[i]])\n#\n# cnt+=1\n\n# find indexes of sets with max sf\n\nmaxsf = []\n\nfor i in range(len(df.DS)):\n\n if df.Smoothing[i] == df.Smoothing.max():\n\n maxsf.append(i)\n\nprint(maxsf)\n\nN = len(in_list)\n\nexcp_set = []\n\nfor i in range(len(maxsf)):\n\n j = maxsf[i]\n\n k=j+1\n\n temp = []\n\n temp.append(df.Index_added[j])\n\n excp_set.append(temp.copy())\n\n temp_prev = pd.Series()\n\n temp_j = pd.Series()\n\n a=j\n\n while(a%N!=0):\n\n temp_j.set_value(len(temp_j),in_list[df.Index_added[a]])\n\n a=a-1\n\n temp_j.set_value(len(temp_j),in_list[df.Index_added[a]]) # Ij\n\n temp_prev = temp_j.copy() # Ij-1\n\n del(temp_prev[0])\n\n temp_prev.index = np.arange(len(temp_prev))\n\n while(k%N!=0):\n\n K_element = in_list[df.Index_added[k]] # K th element\n\n temp_prev.set_value(len(temp_prev),K_element) # Ij-1 U {ik}\n\n temp_j.set_value(len(temp_j),K_element) # Ij U {ik}\n\n Dk0 = np.var(temp_prev) - df.DS[j-1]\n\n Dk1 = np.var(temp_j) - df.DS[j]\n\n if Dk0-Dk1 >= df.DS[j]: # If Dk0 - Dk1 >= Dj we add the element to exception set.\n\n excp_set[i].append(df.Index_added[k])\n\n del(temp_prev[len(temp_prev)-1])\n\n del(temp_j[len(temp_j)-1])\n\n k+=1\n\nprint(excp_set) # contains the indices of exception elements.\n\n" ]
[ [ "sklearn.decomposition.TruncatedSVD", "sklearn.decomposition.NMF", "sklearn.cluster.KMeans", "sklearn.metrics.silhouette_score", "sklearn.preprocessing.Normalizer", "sklearn.pipeline.make_pipeline", "sklearn.metrics.pairwise.cosine_similarity", "sklearn.metrics.v_measure_score", "sklearn.metrics.homogeneity_score", "sklearn.metrics.completeness_score", "sklearn.decomposition.LatentDirichletAllocation", "sklearn.decomposition.ProjectedGradientNMF", "numpy.float64", "numpy.errstate", "sklearn.metrics.adjusted_rand_score", "sklearn.cluster.MiniBatchKMeans", "sklearn.feature_extraction.text.TfidfVectorizer" ], [ "pandas.read_csv", "pandas.Series", "numpy.random.seed", "pandas.DataFrame", "numpy.var" ] ]
hashstat/cvxpy
[ "20d667ebe8614821fa38e41b1e333257512d9594", "86307f271819bb78fcdf64a9c3a424773e8269fa" ]
[ "examples/extensions/feature_selection.py", "cvxpy/atoms/elementwise/elementwise.py" ]
[ "\"\"\"\nCopyright 2013 Steven Diamond\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom cvxpy import Card, norm, Minimize, Parameter, pos, Problem, Variable\nfrom mixed_integer import *\nimport numpy as np\n\n# Feature selection on a linear kernel SVM classifier.\n# Uses the Alternating Direction Method of Multipliers\n# with a (non-convex) cardinality constraint.\n\n# Generate data.\nnp.random.seed(1)\nN = 50\nM = 40\nn = 10\ndata = []\nfor i in range(N):\n data += [(1, np.random.normal(1.0, 2.0, (n, 1)))]\nfor i in range(M):\n data += [(-1, np.random.normal(-1.0, 2.0, (n, 1)))]\n\n# Construct problem.\ngamma = Parameter(nonneg=True)\ngamma.value = 0.1\n# 'a' is a variable constrained to have at most 6 non-zero entries.\na = Card(n, k=6)\nb = Variable()\n\nslack = [pos(1 - label*(sample.T*a - b)) for (label, sample) in data]\nobjective = Minimize(norm(a, 2) + gamma*sum(slack))\np = Problem(objective)\n# Extensions can attach new solve methods to the CVXPY Problem class.\np.solve(method=\"admm\")\n\n# Count misclassifications.\nerror = 0\nfor label, sample in data:\n if not label*(a.value.T*sample - b.value)[0] >= 0:\n error += 1\n\nprint(\"%s misclassifications\" % error)\nprint(a.value)\nprint(b.value)\n", "\"\"\"\nCopyright 2013 Steven Diamond\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport abc\nfrom typing import Tuple\n\nfrom cvxpy.atoms.atom import Atom\nimport cvxpy.utilities as u\nimport cvxpy.lin_ops.lin_utils as lu\nimport numpy as np\nimport scipy.sparse as sp\n\n\nclass Elementwise(Atom):\n \"\"\" Abstract base class for elementwise atoms. 
\"\"\"\n __metaclass__ = abc.ABCMeta\n\n def shape_from_args(self):\n \"\"\"Shape is the same as the sum of the arguments.\n \"\"\"\n return u.shape.sum_shapes([arg.shape for arg in self.args])\n\n def validate_arguments(self) -> None:\n \"\"\"\n Verify that all the shapes are the same\n or can be promoted.\n \"\"\"\n u.shape.sum_shapes([arg.shape for arg in self.args])\n super(Elementwise, self).validate_arguments()\n\n def is_symmetric(self) -> bool:\n \"\"\"Is the expression symmetric?\n \"\"\"\n symm_args = all(arg.is_symmetric() for arg in self.args)\n return self.shape[0] == self.shape[1] and symm_args\n\n @staticmethod\n def elemwise_grad_to_diag(value, rows, cols):\n \"\"\"Converts elementwise gradient into a diagonal matrix for Atom._grad()\n\n Args:\n value: A scalar or NumPy matrix.\n\n Returns:\n A SciPy CSC sparse matrix.\n \"\"\"\n if not np.isscalar(value):\n value = value.ravel(order='F')\n return sp.dia_matrix((value, [0]), shape=(rows, cols)).tocsc()\n\n @staticmethod\n def _promote(arg, shape: Tuple[int, ...]):\n \"\"\"Promotes the lin op if necessary.\n\n Parameters\n ----------\n arg : LinOp\n LinOp to promote.\n shape : tuple\n The shape desired.\n\n Returns\n -------\n tuple\n Promoted LinOp.\n \"\"\"\n if arg.shape != shape:\n return lu.promote(arg, shape)\n else:\n return arg\n" ]
[ [ "numpy.random.normal", "numpy.random.seed" ], [ "scipy.sparse.dia_matrix", "numpy.isscalar" ] ]
NuTufts/chroma_lartpc
[ "ea6d1a62d22eeeaac069efdef1068a56be683fcc" ]
[ "chroma/uboone/materials.py" ]
[ "import os,sys\nimport numpy as np\n\n# This module has functions and defintions to load the optical \n# properties required by the MicroBooNE detector\n\nmaterialnames = [\"LAr\", # liquid argon [ may have its own module one day ]\n \"ArGas\", # gaseous argon\n \"Titanium\", # for the wires (fancy)\n \"STEEL_STAINLESS_Fe7Cr2Ni\", # cryostat walls\n \"Acrylic\", # wavelength shifting plates\n \"Glass\", # pmt window\n \"bialkali\", # pmt photocathode\n \"Vacuum\",\n \"PU_foam_light\", # mastic insulation. Irrelevant.\n \"PU_foam_dense\", # mastic insulation. Irrelevant.\n \"Air\", # lab air, Irrelevant\n \"G10\", # fiberglass\n \"Concrete\",] # Irrelevant\n# --------------------------------------------------------------------------------\n# what needs to be specified.\n# Materials need:\n# - refractive_index (can be function of wavelength)\n# - absorption_length (function of wavelength)\n# - scattering_length (function of wavelength)\n# See chroma.geometry: class Material for more information\n# --------------------------------------------------------------------------------\n# LAr: Liquid Argon\n# * Refractice index from\n# Sinnock, A. C. Refractive indices of the condensed rare gases, argon, krypton and xenon. \n# Journal of Physics C: Solid State Physics 13, 2375 (1980).\n# Measured at 83 K at 546.1 nm\n# Values at 260 and 400 are dummpy values\n# * Scattering Length from\n# Ishida et al. NIMA 384 (1997) 380-386: 66+/-3 cm\n# [USED] Seidel et al. NIMA 489 (2002) 189–194: 90 cm (calculated)\n# * Absorption Length\n# Going to be a function of puity and other inputs. \n# 80.9 cm from (from C. Rubbia)\n# 2000.0 cm from LArSoft\n# refractive from LArSoft\n#lar_refractive_index = np.array( [ (260.0, 1.2316),\n# (400.0, 1,2316),\n# (546.1, 1.2316) ] )\n# below in mm\nlar_refractive_index = np.array( [ (114.1, 1.60),\n (117.4, 1.56),\n (122.5, 1.45),\n (125.2, 1.39),\n (135.3, 1.35),\n (160.2, 1.29),\n (200.3, 1.26),\n (278.7, 1.24),\n (401.3, 1.23),\n (681.3, 1.23) ] )\nlar_scattering_length = np.array( [ (117.3, 100.0),\n (124.6, 380.0),\n (128.2, 900.0),\n (145.9, 1920.0),\n (164.7, 4100.0),\n (190.5, 9300.0),\n (217.9, 18500.0),\n (250.5, 37900.0) ] )\n\ndef load_lar_material_info( matclass ):\n matclass.set( 'refractive_index', lar_refractive_index[:,1], lar_refractive_index[:,0] )\n matclass.set( 'scattering_length', lar_scattering_length[:,1], lar_scattering_length[:,0] )\n matclass.set( 'absorption_length', 20000.0 ) # mm\n\n# --------------------------------------------------------------------------------\n# Gaseous Argon\ndef load_argas_material_info( matclass ):\n matclass.set('refractive_index', 1.0)\n matclass.absorption_length = np.array( 1.0e6 )\n matclass.set('scattering_length', 1000.0 )\n\n# --------------------------------------------------------------------------------\n# Acrylic\n# This can vary based on mnufacturer, even batch to batch...especially bellow 440 nm\n# We use data from RPT #1 from MiniClean report in \n# Bodmer et al., http://arxiv.org/pdf/1310.6454v2.pdf\n\ndef load_acrylic_material_info( matclass ):\n matclass.set('refractive_index', 1.49)\n matclass.absorption_length = np.array( [(375.0,29.0), (405.0, 155.0), (440.0, 261.0), (543, 3360.0), (632.0, 1650.0), (800, 1650.0)] )\n matclass.set('scattering_length', 1000.0 )\n\n# --------------------------------------------------------------------------------\n# Matclass\n\ndef load_glass_material_info( matclass ):\n # Taken from chroma.demo.optics as a starting point\n matclass.set('refractive_index', 1.49)\n 
matclass.absorption_length = \\\n np.array([(200, 0.1e-6), (300, 0.1e-6), (330, 1000.0), (500, 2000.0), (600, 1000.0), (770, 500.0), (800, 0.1e-6)])\n matclass.set('scattering_length', 1e6)\n\n# --------------------------------------------------------------------------------\n# Vacuum\n\ndef load_vacuum_material_info( matclass ):\n # Taken from chroma.demo.optics as a starting point\n matclass.set('refractive_index', 1.0)\n matclass.set('absorption_length', 1.0e6)\n matclass.set('scattering_length', 1.0e6)\n\n# --------------------------------------------------------------------------------\n# Dummy values for non-transmissive materials\n\ndef load_stainless_material_info( matclass ):\n # using dummy values, as we never expect photons to be propagating here\n matclass.set( 'refractive_index', 1.0 )\n matclass.set( 'scattering_length', 1.0 )\n matclass.set( 'absorption_length', 1.0 )\n\ndef load_titanium_material_info( matclass ):\n # using dummy values, as we never expect photons to be propagating here\n matclass.set( 'refractive_index', 1.0 )\n matclass.set( 'scattering_length', 1.0 )\n matclass.set( 'absorption_length', 1.0 )\n\ndef load_bialkali_material_info( matclass ):\n # using dummy values, as we never expect photons to be propagating here\n matclass.set( 'refractive_index', 1.0 )\n matclass.set( 'scattering_length', 1.0 )\n matclass.set( 'absorption_length', 1.0 )\n\ndef load_concrete_material_info( matclass ):\n # using dummy values, as we never expect photons to be propagating here\n matclass.set( 'refractive_index', 1.0 )\n matclass.set( 'scattering_length', 1.0 )\n matclass.set( 'absorption_length', 1.0 )\n\ndef load_air_material_info( matclass ):\n # using dummy values, as we never expect photons to be propagating here\n matclass.set( 'refractive_index', 1.0 )\n matclass.set( 'scattering_length', 1.0 )\n matclass.set( 'absorption_length', 1.0 )\n\ndef load_pufoam_material_info( matclass ):\n # using dummy values, as we never expect photons to be propagating here\n matclass.set( 'refractive_index', 1.0 )\n matclass.set( 'scattering_length', 1.0 )\n matclass.set( 'absorption_length', 1.0 )\n\ndef load_G10_material_info( matclass ):\n # using dummy values, as we never expect photons to be propagating here\n matclass.set( 'refractive_index', 1.0 )\n matclass.set( 'scattering_length', 1.0 )\n matclass.set( 'absorption_length', 1.0 )\n\ndef load_dummy_material_info( matclass ):\n # using dummy values, as we never expect photons to be propagating here\n matclass.set( 'refractive_index', 1.0 )\n matclass.set( 'scattering_length', 1.0 )\n matclass.set( 'absorption_length', 1.0 )\n\n\n# --------------------------------------------------------------------------------\ndef load_uboone_materials( c2cclass ):\n \"\"\"\n c2cclass: collada_to_chroma class instance\n \"\"\"\n if not isinstance(c2class, ColladaToChroma):\n raise TypeError('input to function should be instance of ColladaToChroma')\n loaders = { \"LAr\":load_lar_material_info,\n \"ArGas\":load_argas_material_info,\n \"Titanium\":load_titanium_material_info,\n \"Acrylic\":load_acrylic_material_info,\n \"Glass\":load_glass_material_info,\n \"Vacuum\":load_vacuum_material_info,\n \"STEEL_STAINLESS_Fe7Cr2Ni\":load_vacuum_material_info,\n \"PU_foam_light\":load_pufoam_material_info,\n \"PU_foam_dense\":load_pufoam_material_info,\n \"Concrete\":load_concrete_material_info,\n \"Concrete\":load_G10_material_info }\n \ndef clean_material_name( matname ):\n # pointer addresses attached to names\n return matname.split(\"0x\")[0]\n" ]
[ [ "numpy.array" ] ]
Mu-L/TheAlgorithmsOfPython
[ "2d3d660155241113b23e4ed810e05479b2fc4bba", "2d3d660155241113b23e4ed810e05479b2fc4bba" ]
[ "machine_learning/polymonial_regression.py", "fuzzy_logic/fuzzy_operations.py" ]
[ "import matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.linear_model import LinearRegression\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\n\n# Fitting Polynomial Regression to the dataset\nfrom sklearn.preprocessing import PolynomialFeatures\n\n# Importing the dataset\ndataset = pd.read_csv(\n \"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/\"\n \"position_salaries.csv\"\n)\nX = dataset.iloc[:, 1:2].values\ny = dataset.iloc[:, 2].values\n\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)\n\n\npoly_reg = PolynomialFeatures(degree=4)\nX_poly = poly_reg.fit_transform(X)\npol_reg = LinearRegression()\npol_reg.fit(X_poly, y)\n\n\n# Visualizing the Polymonial Regression results\ndef viz_polymonial():\n plt.scatter(X, y, color=\"red\")\n plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color=\"blue\")\n plt.title(\"Truth or Bluff (Linear Regression)\")\n plt.xlabel(\"Position level\")\n plt.ylabel(\"Salary\")\n plt.show()\n return\n\n\nif __name__ == \"__main__\":\n viz_polymonial()\n\n # Predicting a new result with Polymonial Regression\n pol_reg.predict(poly_reg.fit_transform([[5.5]]))\n # output should be 132148.43750003\n", "\"\"\"README, Author - Jigyasa Gandhi(mailto:[email protected])\nRequirements:\n - scikit-fuzzy\n - numpy\n - matplotlib\nPython:\n - 3.5\n\"\"\"\nimport numpy as np\nimport skfuzzy as fuzz\n\n\nif __name__ == \"__main__\":\n # Create universe of discourse in Python using linspace ()\n X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)\n\n # Create two fuzzy sets by defining any membership function\n # (trapmf(), gbellmf(), gaussmf(), etc).\n abc1 = [0, 25, 50]\n abc2 = [25, 50, 75]\n young = fuzz.membership.trimf(X, abc1)\n middle_aged = fuzz.membership.trimf(X, abc2)\n\n # Compute the different operations using inbuilt functions.\n one = np.ones(75)\n zero = np.zeros((75,))\n # 1. Union = max(µA(x), µB(x))\n union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]\n # 2. Intersection = min(µA(x), µB(x))\n intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]\n # 3. Complement (A) = (1- min(µA(x))\n complement_a = fuzz.fuzzy_not(young)\n # 4. Difference (A/B) = min(µA(x),(1- µB(x)))\n difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]\n # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]\n alg_sum = young + middle_aged - (young * middle_aged)\n # 6. Algebraic Product = (µA(x) * µB(x))\n alg_product = young * middle_aged\n # 7. Bounded Sum = min[1,(µA(x), µB(x))]\n bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]\n # 8. 
Bounded difference = min[0,(µA(x), µB(x))]\n bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]\n\n # max-min composition\n # max-product composition\n\n # Plot each set A, set B and each operation result using plot() and subplot().\n import matplotlib.pyplot as plt\n\n plt.figure()\n\n plt.subplot(4, 3, 1)\n plt.plot(X, young)\n plt.title(\"Young\")\n plt.grid(True)\n\n plt.subplot(4, 3, 2)\n plt.plot(X, middle_aged)\n plt.title(\"Middle aged\")\n plt.grid(True)\n\n plt.subplot(4, 3, 3)\n plt.plot(X, union)\n plt.title(\"union\")\n plt.grid(True)\n\n plt.subplot(4, 3, 4)\n plt.plot(X, intersection)\n plt.title(\"intersection\")\n plt.grid(True)\n\n plt.subplot(4, 3, 5)\n plt.plot(X, complement_a)\n plt.title(\"complement_a\")\n plt.grid(True)\n\n plt.subplot(4, 3, 6)\n plt.plot(X, difference)\n plt.title(\"difference a/b\")\n plt.grid(True)\n\n plt.subplot(4, 3, 7)\n plt.plot(X, alg_sum)\n plt.title(\"alg_sum\")\n plt.grid(True)\n\n plt.subplot(4, 3, 8)\n plt.plot(X, alg_product)\n plt.title(\"alg_product\")\n plt.grid(True)\n\n plt.subplot(4, 3, 9)\n plt.plot(X, bdd_sum)\n plt.title(\"bdd_sum\")\n plt.grid(True)\n\n plt.subplot(4, 3, 10)\n plt.plot(X, bdd_difference)\n plt.title(\"bdd_difference\")\n plt.grid(True)\n\n plt.subplots_adjust(hspace=0.5)\n plt.show()\n" ]
[ [ "pandas.read_csv", "matplotlib.pyplot.scatter", "matplotlib.pyplot.title", "sklearn.preprocessing.PolynomialFeatures", "sklearn.model_selection.train_test_split", "sklearn.linear_model.LinearRegression", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ], [ "numpy.linspace", "matplotlib.pyplot.title", "numpy.ones", "matplotlib.pyplot.plot", "matplotlib.pyplot.subplot", "matplotlib.pyplot.grid", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.show", "numpy.zeros", "matplotlib.pyplot.figure" ] ]
AleksaC/gym-snake
[ "216a1af7cc1edd3d95be8a5ae2effc5f420452b0" ]
[ "gym-snake/gym_snake/envs/snake_env.py" ]
[ "from collections import deque\nimport time\n\nimport gym\nimport numpy as np\n\nfrom gym import spaces, logger\nfrom gym.utils import seeding\nfrom gym.envs.classic_control import rendering\n\n\nclass SnakeEnv(gym.Env):\n metadata = {\n \"render.modes\": [\"human\", \"rgb_array\"],\n \"video.frames_per_second\": \"35\"\n }\n\n def __init__(self, height=20, width=20, scaling_factor=6,\n starting_position=(7, 5), snake_size=3, direction=(0, 1),\n time_penalty=-0.01, food_reward=1, loss_penalty=-1, win_reward=10):\n self.action_space = spaces.Discrete(3)\n self.ACTIONS = [\"STRAIGHT\", \"LEFT\", \"RIGHT\"]\n self.observation_space = spaces.Box(0, 2, (height + 2, width + 2), dtype=\"uint8\")\n self.viewer = None\n self.seed()\n\n # rewards and penalties\n self.time_penalty = time_penalty\n self.food_reward = food_reward\n self.loss_penalty = loss_penalty\n self.win_reward = win_reward\n if loss_penalty > 0 or time_penalty > 0:\n logger.warn(\"Values of penalties should not be positive.\")\n\n # initialize size and position properties\n self.height = height\n self.width = width\n if height + 1 > starting_position[0] > 0 and width + 1 > starting_position[1] > snake_size:\n self.starting_position = starting_position\n else:\n raise ValueError(\"starting_position of snake should be in range (0 - height + 1, snake_size - width + 1)\")\n self.scaling_factor = scaling_factor\n self.initial_size = snake_size\n self.snake_size = snake_size\n self.max_size = height * width\n self.state = np.zeros((height + 2, width + 2), dtype=\"uint8\")\n self.game_over = False\n\n # set bounds of the environment\n self.state[:, 0] = self.state[:, -1] = 1\n self.state[0, :] = self.state[-1, :] = 1\n\n # initialize snake properties\n self.initial_direction = direction\n self.direction = direction\n self.snake = deque()\n\n # initialize position of the snake\n self._init_field(starting_position, snake_size)\n\n # place food on the field\n self.food = self._generate_food()\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def _init_field(self, starting_position, snake_size):\n y, x = starting_position\n for i in range(snake_size):\n self.state[y][x] = 1\n self.snake.appendleft((y, x))\n x -= 1\n\n def _generate_food(self):\n y, x = self.np_random.randint(self.height), self.np_random.randint(self.width)\n while self.state[y][x]:\n y, x = self.np_random.randint(self.height), self.np_random.randint(self.width)\n self.state[y][x] = 2\n\n return y, x\n\n def _check_for_collision(self, y, x):\n done = False\n pop = True\n reward = self.time_penalty\n\n if self.state[y][x]:\n if self.state[y][x] == 2:\n pop = False\n reward += self.food_reward\n self.snake_size += 1\n if self.snake_size == self.max_size:\n reward += self.win_reward\n self.game_over = done = True\n self.food = self._generate_food()\n else:\n reward += self.loss_penalty\n self.game_over = done = True\n pop = False\n\n self.state[y][x] = 1\n\n return reward, done, pop\n\n def step(self, action):\n y, x = self.snake[-1]\n if action == 0:\n y += self.direction[0]\n x += self.direction[1]\n elif action == 1:\n if self.direction[0] == 0:\n self.direction = (-self.direction[1], 0)\n y += self.direction[0]\n else:\n self.direction = (0, self.direction[0])\n x += self.direction[1]\n elif action == 2:\n if self.direction[0] == 0:\n self.direction = (self.direction[1], 0)\n y += self.direction[0]\n else:\n self.direction = (0, -self.direction[0])\n x += self.direction[1]\n else:\n raise ValueError(\"Action can only be 0, 
1 or 2\")\n\n if self.game_over:\n raise RuntimeError(\"You're calling step() even though the environment has returned done = True.\"\n \"You should restart the environment after receiving done = True\")\n\n reward, done, pop = self._check_for_collision(y, x)\n\n if not done:\n self.snake.append((y, x))\n\n if pop:\n y, x = self.snake.popleft()\n self.state[y][x] = 0\n\n observation = self.state\n\n info = {\n \"snake\": self.snake,\n \"snake_size\": self.snake_size,\n \"direction\": self.direction,\n \"food\": self.food\n }\n\n return observation, reward, done, info\n\n def reset(self):\n self.game_over = False\n self.direction = self.initial_direction\n\n while self.snake:\n y, x = self.snake.pop()\n self.state[y][x] = 0\n\n self.state[self.food[0]][self.food[1]] = 0\n\n self._init_field(self.starting_position, self.initial_size)\n self.food = self._generate_food()\n self.snake_size = self.initial_size\n\n return self.state\n\n def _to_rgb(self, scaling_factor):\n scaled_grid = np.zeros(((self.height + 2) * scaling_factor, (self.width + 2) * scaling_factor), dtype=\"uint8\")\n scaled_grid[:, :scaling_factor] = scaled_grid[:, -scaling_factor:] = 255\n scaled_grid[:scaling_factor, :] = scaled_grid[-scaling_factor:, :] = 255\n\n y, x = self.food\n scaled_y, scaled_x = y * scaling_factor, x * scaling_factor\n scaled_grid[scaled_y : scaled_y + scaling_factor, scaled_x : scaled_x + scaling_factor] = 255\n\n for (y, x) in self.snake:\n scaled_y, scaled_x = y * scaling_factor, x * scaling_factor\n scaled_grid[scaled_y : scaled_y + scaling_factor, scaled_x : scaled_x + scaling_factor] = 255\n\n img = np.empty(((self.height + 2) * scaling_factor, (self.width + 2) * scaling_factor, 3), dtype=\"uint8\")\n img[:, :, 0] = img[:, :, 1] = img[:, :, 2] = scaled_grid\n\n return img\n\n def render(self, mode=\"human\", close=False):\n img = self._to_rgb(self.scaling_factor)\n if mode == \"rgb_array\":\n return img\n elif mode == \"human\":\n if self.viewer is None:\n self.viewer = rendering.SimpleImageViewer()\n self.viewer.imshow(img)\n time.sleep(0.027)\n\n return self.viewer.isopen\n\n def close(self):\n if self.viewer:\n self.viewer.close()\n self.viewer = None\n" ]
[ [ "numpy.zeros", "numpy.empty" ] ]
moghadas76/test_bigcity
[ "607b9602c5b1113b23e1830455e174b0901d7558", "607b9602c5b1113b23e1830455e174b0901d7558", "607b9602c5b1113b23e1830455e174b0901d7558", "607b9602c5b1113b23e1830455e174b0901d7558" ]
[ "libcity/model/traffic_speed_prediction/STAGGCN.py", "test/test_gwnet.py", "libcity/data/dataset/stdn_dataset.py", "libcity/utils/dataset.py" ]
[ "import math\nfrom logging import getLogger\nfrom typing import Optional\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.utils import weight_norm\n\nfrom libcity.model import loss\nfrom libcity.model.abstract_traffic_state_model import AbstractTrafficStateModel\n\n\ndef remove_self_loops(edge_index: torch.Tensor):\n return edge_index[:, edge_index[0] != edge_index[1]]\n\n\ndef maybe_num_nodes(edge_index: torch.Tensor, num_nodes: Optional[int] = None):\n if num_nodes is not None:\n return num_nodes\n else:\n return int(edge_index.max()) + 1\n\n\ndef add_self_loops(edge_index: torch.Tensor, num_nodes: Optional[int] = None):\n return torch.cat((edge_index,\n torch.arange(maybe_num_nodes(edge_index, num_nodes))\n .repeat(2, 1)\n .to(edge_index.device)), dim=1)\n\n\ndef softmax(x: torch.Tensor, index: torch.Tensor, num_nodes: Optional[int] = None, dim: int = 0):\n N = maybe_num_nodes(index, num_nodes)\n x_max = scatter(x, index, dim, dim_size=N, reduce='max').index_select(dim, index)\n out = (x - x_max).exp()\n out_sum = scatter(out, index, dim, dim_size=N, reduce='sum').index_select(dim, index)\n return out / out_sum\n\n\nclass STAGGCN(AbstractTrafficStateModel):\n def __init__(self, config, data_feature):\n super().__init__(config, data_feature)\n self._scaler = self.data_feature.get('scaler')\n self.adj_mx = self.data_feature.get('adj_mx', 1)\n self.num_nodes = self.data_feature.get('num_nodes', 1)\n self.input_dim = self.data_feature.get('feature_dim', 1)\n self.output_dim = self.data_feature.get('output_dim', 1)\n self.ext_dim = self.data_feature.get('ext_dim', 1)\n\n # 以下两项是STAG-GCN对数据集额外进行预处理得到的边关系数据\n # 对数据集预处理得到的空间邻接边集\n self.edge_index = self.data_feature.get('edge_index', torch.tensor([[], []], dtype=torch.long)) # 空间邻接边\n # 对数据集预处理得到的语义邻接边集\n self.dtw_edge_index = self.data_feature.get('dtw_edge_index', torch.tensor([[], []], dtype=torch.long)) # 语义邻接边\n\n self._logger = getLogger()\n self.device = config.get('device', torch.device('cpu'))\n self.input_window = config.get('input_window', 1)\n self.output_window = config.get('output_window', 1)\n self.graph_dim = config.get('graph_dim', 32)\n self.tcn_dim = config.get('tcn_dim', [10])\n self.attn_head = config.get('atten_head', 3)\n self.choice = config.get('choice', [1, 1, 1])\n self.batch_size = config.get('batch_size', 64)\n\n self.edge_index = self.edge_index.to(self.device)\n self.dtw_edge_index = self.dtw_edge_index.to(self.device)\n\n self.model = STAGGCNModel(input_dim=self.input_dim,\n output_dim=self.output_dim,\n node_num=self.num_nodes,\n seq_len=self.input_window,\n pred_len=self.output_window,\n graph_dim=self.graph_dim,\n tcn_dim=self.tcn_dim,\n attn_head=self.attn_head,\n choice=self.choice).to(self.device)\n\n def forward(self, batch):\n x = batch['X'] # shape = (batch_size, input_length, num_nodes, input_dim)\n\n # [batch_size, pred_len, num_nodes, output_dim]\n return self.model(x, self.edge_index, self.dtw_edge_index)\n\n def calculate_loss(self, batch):\n y_true = batch['y']\n y_predicted = self.predict(batch)\n y_true = self._scaler.inverse_transform(y_true[..., :self.output_dim])\n y_predicted = self._scaler.inverse_transform(y_predicted[..., :self.output_dim])\n return loss.masked_mae_torch(y_predicted, y_true)\n\n def predict(self, batch):\n # one-inference multi-step prediction\n return self.forward(batch)\n\n\nclass STAGGCNModel(nn.Module):\n def __init__(self, input_dim=1, output_dim=1,\n node_num=325, seq_len=12, pred_len=6, graph_dim=32,\n 
tcn_dim=[10], attn_head=4, choice=[1, 1, 1]):\n super(STAGGCNModel, self).__init__()\n self.node_num = node_num\n self.seq_len = seq_len\n self.pred_len = pred_len\n self.graph_dim = graph_dim\n # self.output_dim = seq_len + np.sum(choice) * graph_dim\n self.pred_len_raw = np.sum(choice) * graph_dim\n\n self.STCell = STCell(node_num, seq_len, graph_dim, tcn_dim,\n choice=choice, attn_head=attn_head,\n input_dim=input_dim, output_dim=output_dim)\n self.output_linear = nn.Linear(in_features=self.pred_len_raw, out_features=self.pred_len)\n # self.output_linear_0 = nn.Linear(in_features=self.graph_dim, out_features=256)\n # self.output_linear_1 = nn.Linear(in_features=256, out_features=self.pred_len)\n\n def forward(self, x, edge_index, dtw_edge_index):\n # x: [batch_size, seq_len, num_nodes, input_dim]\n # st_output: [batch_size, num_nodes, output_dim, sum(choice)*graph_dim ==\n # [batch_size, num_nodes, output_dim, pred_len_raw]]\n st_output = self.STCell(x, edge_index, dtw_edge_index)\n output = st_output\n\n # [batch_size, num_nodes, output_dim, pred_len]\n output = self.output_linear(output)\n # output = F.relu(self.output_linear_0(output))\n # output = self.output_linear_1(output)\n # output = torch.reshape(output, (-1, self.node_num, self.pred_len))\n\n # [batch_size, pred_len, num_nodes, output_dim]\n return output.permute(0, 3, 1, 2).contiguous()\n\n\nclass Chomp1d(nn.Module):\n def __init__(self, chomp_size):\n super(Chomp1d, self).__init__()\n self.chomp_size = chomp_size\n\n def forward(self, x):\n return x[:, :, :-self.chomp_size].contiguous()\n\n\nclass TemporalBlock(nn.Module):\n def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding, dropout=0.2):\n super(TemporalBlock, self).__init__()\n self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size,\n stride=stride, padding=padding, dilation=dilation))\n self.chomp1 = Chomp1d(padding)\n self.relu1 = nn.ReLU()\n self.dropout1 = nn.Dropout(dropout)\n\n self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs, kernel_size,\n stride=stride, padding=padding, dilation=dilation))\n self.chomp2 = Chomp1d(padding)\n self.relu2 = nn.ReLU()\n self.dropout2 = nn.Dropout(dropout)\n\n self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1, self.dropout1,\n self.conv2, self.chomp2, self.relu2, self.dropout2)\n self.downsample = nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None\n self.relu = nn.ReLU()\n self.init_weights()\n\n def init_weights(self):\n self.conv1.weight.data.normal_(0, 0.01)\n self.conv2.weight.data.normal_(0, 0.01)\n if self.downsample is not None:\n self.downsample.weight.data.normal_(0, 0.01)\n\n def forward(self, x):\n # x: [batch_size*input_dim*num_nodes, n_inputs, seq_len]\n # self.conv1(x): [batch_size*input_dim*num_nodes, n_outputs, ...]\n # self.chomp1(self.conv2(x)): [batch_size*input_dim*num_nodes, n_outputs, seq_len]\n # return: [batch_size*input_dim*num_nodes, n_outputs, seq_len]\n out = self.net(x)\n res = x if self.downsample is None else self.downsample(x)\n return self.relu(out + res)\n\n\nclass TemporalConvNet(nn.Module):\n def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.2):\n super(TemporalConvNet, self).__init__()\n layers = []\n num_levels = len(num_channels)\n for i in range(num_levels):\n dilation_size = 2 ** i\n in_channels = num_inputs if i == 0 else num_channels[i - 1]\n out_channels = num_channels[i]\n layers += [TemporalBlock(in_channels, out_channels, kernel_size, stride=1, dilation=dilation_size,\n padding=(kernel_size - 
1) * dilation_size, dropout=dropout)]\n\n self.network = nn.Sequential(*layers)\n\n def forward(self, x):\n # x: [batch_size*num_nodes, input_dim, seq_len]\n # return: [batch_size*num_nodes, output_dim*num_channels[-1], seq_len]\n return self.network(x)\n\n\nclass LearnedGCN(nn.Module):\n def __init__(self, node_num, in_feature, out_feature):\n super(LearnedGCN, self).__init__()\n self.node_num = node_num\n self.in_feature = in_feature\n self.out_feature = out_feature\n\n self.source_embed = nn.Parameter(torch.Tensor(self.node_num, 10))\n self.target_embed = nn.Parameter(torch.Tensor(10, self.node_num))\n self.linear = nn.Linear(self.in_feature, self.out_feature)\n self.reset_parameters()\n\n def reset_parameters(self):\n stdv = 1. / math.sqrt(self.source_embed.size(0))\n self.source_embed.data.uniform_(-stdv, stdv)\n self.target_embed.data.uniform_(-stdv, stdv)\n\n def forward(self, input):\n learned_matrix = F.softmax(F.relu(torch.mm(self.source_embed, self.target_embed)), dim=1)\n output = learned_matrix.matmul(input)\n output = self.linear(output)\n return output\n\n\nclass GATConv(nn.Module):\n def __init__(self,\n in_channels: int, out_channels: int,\n heads: int = 1, concat: bool = True,\n negative_slope: float = 0.2, dropout: float = 0.0,\n add_self_loops: bool = True, bias: bool = True):\n super(GATConv, self).__init__()\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.attn_heads = heads\n\n self.negative_slope = negative_slope\n self.dropout = dropout\n\n self.bias = bias\n self.concat = concat\n self.add_self_loops = add_self_loops\n\n self.linear = nn.Linear(self.in_channels, self.attn_heads * self.out_channels, bias=False)\n self.attn_j = nn.Parameter(torch.Tensor(1, self.attn_heads, self.out_channels))\n self.attn_i = nn.Parameter(torch.Tensor(1, self.attn_heads, self.out_channels))\n\n if bias and concat:\n self.bias = nn.Parameter(torch.Tensor(self.attn_heads * self.out_channels))\n elif bias and not concat:\n self.bias = nn.Parameter(torch.Tensor(self.out_channels))\n else:\n self.register_parameter('bias', None)\n\n self._alpha = None\n\n self.init_weights()\n\n def init_weights(self):\n self._glorot(self.linear.weight)\n self._glorot(self.attn_j)\n self._glorot(self.attn_i)\n self._zeros(self.bias)\n\n @staticmethod\n def _glorot(t: torch.Tensor):\n if t is None:\n return\n stdv = math.sqrt(6. 
/ (t.size(-2) * t.size(-1)))\n t.data.uniform_(-stdv, stdv)\n\n @staticmethod\n def _zeros(t: torch.Tensor):\n if t is None:\n return\n t.data.fill_(0.)\n\n def forward(self, x: torch.Tensor, edge_index: torch.Tensor):\n num_nodes = x.size(0)\n\n edge_index = remove_self_loops(edge_index)\n edge_index = add_self_loops(edge_index, num_nodes=num_nodes)\n\n edge_index_j, edge_index_i = edge_index\n\n # x: [num_nodes, num_features]\n # [num_edges, attn_heads, out_channels]\n x_j = self.linear(x).view(-1, self.attn_heads, self.out_channels)[edge_index_j]\n x_i = self.linear(x).view(-1, self.attn_heads, self.out_channels)[edge_index_i]\n\n # [num_edges, attn_heads]\n alpha_j = (x_j * self.attn_j).sum(dim=-1)[edge_index_j]\n alpha_i = (x_i * self.attn_i).sum(dim=-1)[edge_index_i]\n\n # message passing\n # [num_edges, attn_heads]\n alpha = alpha_j + alpha_i\n alpha = F.leaky_relu(alpha, self.negative_slope)\n alpha = softmax(alpha, edge_index_i, x_i.size(0))\n alpha = F.dropout(alpha, p=self.dropout, training=self.training)\n # [num_edges, attn_heads, out_channels]\n message = x_j * alpha.unsqueeze(-1)\n\n out = scatter(message, edge_index_i, dim=0, reduce='add')\n\n if self.concat:\n out = out.view(-1, self.attn_heads * self.out_channels)\n else:\n out = out.mean(dim=1)\n if self.bias is not None:\n out += self.bias\n\n return out\n\n\nclass STCell(nn.Module):\n def __init__(self, node_num=524, seq_len=12, graph_dim=16, tcn_dim=[10],\n choice=[1, 1, 1], attn_head=2, input_dim=1, output_dim=1):\n super(STCell, self).__init__()\n self.node_num = node_num\n self.seq_len = seq_len\n self.graph_dim = graph_dim\n self.tcn_dim = tcn_dim\n self.pred_len_raw = np.sum(choice) * graph_dim\n self.choice = choice\n # self.jklayer = JumpingKnowledge(\"max\")\n # self.jklayer = JumpingKnowledge(\"lstm\", self.graph_dim, 1)\n\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.in_features = seq_len * input_dim\n\n self.seq_linear = nn.Linear(in_features=self.input_dim * seq_len, out_features=self.input_dim * seq_len)\n\n if choice[0] == 1:\n print(\"[TCN]\")\n print(\"node_num:\", node_num, \"\\tattn_head:\", attn_head)\n # one node of one input feature per embedding element\n self.self_attn = nn.MultiheadAttention(embed_dim=node_num * input_dim, num_heads=attn_head)\n # expand convolution output_dimension by output_dim\n self.tcn = TemporalConvNet(num_inputs=self.input_dim,\n num_channels=[x * self.output_dim for x in self.tcn_dim])\n self.tlinear = nn.Linear(in_features=self.output_dim * self.tcn_dim[-1] * self.seq_len,\n out_features=self.output_dim * self.graph_dim)\n\n if choice[1] == 1:\n print(\"[SP]\")\n self.sp_origin = nn.Linear(in_features=self.input_dim * seq_len, out_features=self.output_dim * graph_dim)\n self.sp_gconv1 = GATConv(self.input_dim * seq_len, self.output_dim * graph_dim, heads=3, concat=False)\n self.sp_gconv2 = GATConv(self.output_dim * graph_dim, self.output_dim * graph_dim, heads=3, concat=False)\n self.sp_gconv3 = GATConv(self.output_dim * graph_dim, self.output_dim * graph_dim, heads=3, concat=False)\n self.sp_gconv4 = GATConv(self.output_dim * graph_dim, self.output_dim * graph_dim, heads=1, concat=False)\n # self.sp_gconv5 = GATConv(graph_dim, graph_dim, heads = 1, concat = False)\n self.sp_source_embed = nn.Parameter(torch.Tensor(self.node_num, 12))\n self.sp_target_embed = nn.Parameter(torch.Tensor(12, self.node_num))\n self.sp_linear_1 = nn.Linear(self.input_dim * seq_len, self.output_dim * self.graph_dim)\n self.sp_linear_2 = nn.Linear(self.output_dim * 
self.graph_dim, self.output_dim * self.graph_dim)\n self.sp_linear_3 = nn.Linear(self.output_dim * self.graph_dim, self.output_dim * self.graph_dim)\n self.sp_linear_4 = nn.Linear(self.output_dim * self.graph_dim, self.output_dim * self.graph_dim)\n # self.sp_linear_5 = nn.Linear(self.graph_dim, self.graph_dim)\n # self.sp_jklayer = JumpingKnowledge(\"max\")\n\n nn.init.xavier_uniform_(self.sp_source_embed)\n nn.init.xavier_uniform_(self.sp_target_embed)\n\n if choice[2] == 1:\n print(\"[DTW]\")\n self.dtw_origin = nn.Linear(in_features=self.input_dim * seq_len, out_features=self.output_dim * graph_dim)\n self.dtw_gconv1 = GATConv(self.input_dim * seq_len, self.output_dim * graph_dim, heads=3, concat=False)\n self.dtw_gconv2 = GATConv(self.output_dim * graph_dim, self.output_dim * graph_dim, heads=3, concat=False)\n self.dtw_gconv3 = GATConv(self.output_dim * graph_dim, self.output_dim * graph_dim, heads=3, concat=False)\n self.dtw_gconv4 = GATConv(self.output_dim * graph_dim, self.output_dim * graph_dim, heads=3, concat=False)\n # self.dtw_gconv5 = GATConv(graph_dim, graph_dim, heads = 1, concat = False)\n self.dtw_source_embed = nn.Parameter(torch.Tensor(self.node_num, 12))\n self.dtw_target_embed = nn.Parameter(torch.Tensor(12, self.node_num))\n self.dtw_linear_1 = nn.Linear(self.input_dim * self.seq_len, self.output_dim * self.graph_dim)\n self.dtw_linear_2 = nn.Linear(self.output_dim * self.graph_dim, self.output_dim * self.graph_dim)\n self.dtw_linear_3 = nn.Linear(self.output_dim * self.graph_dim, self.output_dim * self.graph_dim)\n self.dtw_linear_4 = nn.Linear(self.output_dim * self.graph_dim, self.output_dim * self.graph_dim)\n # self.dtw_linear_5 = nn.Linear(self.graph_dim, self.graph_dim)\n # self.dtw_jklayer = JumpingKnowledge(\"max\")\n\n nn.init.xavier_uniform_(self.dtw_source_embed)\n nn.init.xavier_uniform_(self.dtw_target_embed)\n\n def forward(self, x, edge_index, dtw_edge_index):\n # x: [batch_size, seq_len, num_nodes, input_dim]\n output_list = [0, 0, 0]\n batch_size = x.shape[0]\n\n if self.choice[0] == 1:\n # [seq_len, batch_size, input_dim*num_nodes]\n attn_input = x.permute(1, 0, 3, 2).reshape(self.seq_len, batch_size, -1).contiguous()\n # [seq_len, batch_size, input_dim*num_nodes]\n # input_dim*num_nodes is the embedding dimension\n attn_output, _ = self.self_attn(attn_input, attn_input, attn_input)\n # [seq_len, batch_size, input_dim*num_nodes]\n attn_output = torch.tanh(attn_output + attn_input)\n # [batch_size*num_nodes, input_dim, seq_len]\n attn_output = attn_output.reshape(self.seq_len, batch_size, self.input_dim, self.node_num) \\\n .permute(1, 3, 2, 0) \\\n .reshape(-1, self.input_dim, self.seq_len)\n\n # [batch_size*num_nodes, input_dim, seq_len]\n tcn_input = attn_output\n # [batch_size*num_nodes, output_dim*self.tcn_dim[-1], seq_len]\n tcn_output = self.tcn(tcn_input)\n # [batch_size*num_nodes, output_dim*self.tcn_dim[-1]*seq_len]\n tcn_output = torch.reshape(tcn_output,\n (-1, self.output_dim * self.tcn_dim[-1] * self.seq_len))\n # [batch_size*num_nodes, output_dim*self.graph_dim]\n tcn_output = self.tlinear(tcn_output)\n # [batch_size, num_nodes, output_dim, self.graph_dim]\n tcn_output = torch.reshape(tcn_output, (batch_size, self.node_num, self.output_dim, self.graph_dim))\n\n output_list[0] = tcn_output\n\n if self.choice[1] == 1 or self.choice[2] == 1:\n # [batch_size, num_nodes, input_dim*seq_len]\n sp_gout_0 = x.permute(0, 2, 3, 1).reshape(-1, self.input_dim * self.seq_len).contiguous()\n dtw_gout_0 = sp_gout_0.detach().clone()\n\n if 
self.choice[1] == 1:\n # [batch_size*num_nodes, input_dim*seq_len]\n sp_gout_0 = self.seq_linear(sp_gout_0) + sp_gout_0\n\n # [num_nodes, num_nodes]\n sp_learned_matrix = F.softmax(F.relu(torch.mm(self.sp_source_embed, self.sp_target_embed)), dim=1)\n\n # GATConv: [input_dim*seq_len, output_dim*graph_dim]\n # [batch_size*num_nodes, output_dim*graph_dim]\n sp_gout_1 = self.sp_gconv1(sp_gout_0, edge_index)\n # [batch_size, num_nodes, input_dim*seq_len]\n adp_input_1 = torch.reshape(sp_gout_0, (-1, self.node_num, self.input_dim * self.seq_len))\n # [batch_size, num_nodes, output_dim*graph_dim]\n sp_adp_1 = self.sp_linear_1(sp_learned_matrix.matmul(F.dropout(adp_input_1, p=0.1)))\n # [batch_size*num_nodes, output_dim*graph_dim]\n sp_adp_1 = torch.reshape(sp_adp_1, (-1, self.output_dim * self.graph_dim))\n # [batch_size*num_nodes, output_dim*graph_dim]\n sp_origin = self.sp_origin(sp_gout_0)\n # [batch_size*num_nodes, output_dim*graph_dim]\n sp_output_1 = torch.tanh(sp_gout_1) * torch.sigmoid(sp_adp_1) + sp_origin * (1 - torch.sigmoid(sp_adp_1))\n\n # [batch_size*num_nodes, output_dim*graph_dim]\n sp_gout_2 = self.sp_gconv2(torch.tanh(sp_output_1), edge_index)\n # [batch_size, num_nodes, output_dim*graph_dim]\n adp_input_2 = torch.reshape(torch.tanh(sp_output_1), (-1, self.node_num, self.output_dim * self.graph_dim))\n # [batch_size, num_nodes, output_dim*graph_dim]\n sp_adp_2 = self.sp_linear_2(sp_learned_matrix.matmul(F.dropout(adp_input_2, p=0.1)))\n # [batch_size*num_nodes, output_dim*graph_dim]\n sp_adp_2 = torch.reshape(sp_adp_2, (-1, self.output_dim * self.graph_dim))\n # [batch_size*num_nodes, output_dim*graph_dim]\n sp_output_2 = F.leaky_relu(sp_gout_2) * torch.sigmoid(sp_adp_2) + \\\n sp_output_1 * (1 - torch.sigmoid(sp_adp_2))\n\n # [batch_size*num_nodes, output_dim*graph_dim]\n sp_gout_3 = self.sp_gconv3(F.relu(sp_output_2), edge_index)\n # [batch_size, num_nodes, output_dim*graph_dim]\n adp_input_3 = torch.reshape(F.relu(sp_output_2), (-1, self.node_num, self.output_dim * self.graph_dim))\n # [batch_size, num_nodes, output_dim*graph_dim]\n sp_adp_3 = self.sp_linear_3(sp_learned_matrix.matmul(F.dropout(adp_input_3, p=0.1)))\n # [batch_size*num_nodes, output_dim*graph_dim]\n sp_adp_3 = torch.reshape(sp_adp_3, (-1, self.output_dim * self.graph_dim))\n # [batch_size*num_nodes, output_dim*graph_dim]\n sp_output_3 = F.relu(sp_gout_3) * torch.sigmoid(sp_adp_3) + sp_output_2 * (1 - torch.sigmoid(sp_adp_3))\n\n sp_gout_4 = self.sp_gconv4(F.relu(sp_output_3), edge_index)\n adp_input_4 = torch.reshape(F.relu(sp_output_3), (-1, self.node_num, self.output_dim * self.graph_dim))\n sp_adp_4 = self.sp_linear_4(sp_learned_matrix.matmul(F.dropout(adp_input_4, p=0.1)))\n sp_adp_4 = torch.reshape(sp_adp_4, (-1, self.output_dim * self.graph_dim))\n # [batch_size*num_nodes, output_dim*graph_dim]\n sp_output_4 = F.relu(sp_gout_4) * torch.sigmoid(sp_adp_4) + sp_output_3 * (1 - torch.sigmoid(sp_adp_4))\n\n # sp_gout_5 = self.sp_gconv5(F.relu(sp_output_4), edge_index)\n # adp_input_5 = torch.reshape(F.relu(sp_output_4), (-1, self.node_num, self.graph_dim))\n # sp_adp_5 = self.sp_linear_5(sp_learned_matrix.matmul(F.dropout(adp_input_5,p=0.1)))\n # sp_adp_5 = torch.reshape(sp_adp_5, (-1, self.graph_dim))\n # sp_output_5 = F.relu(sp_gout_5) * torch.sigmoid(sp_adp_5) + sp_output_4 * (1 - torch.sigmoid(sp_adp_5))\n\n # [batch_size, num_nodes, output_dim, graph_dim]\n sp_output = torch.reshape(sp_output_4, (batch_size, self.node_num, self.output_dim, self.graph_dim))\n # sp_output = sp_output_4\n output_list[1] = 
sp_output\n\n if self.choice[2] == 1:\n dtw_gout_0 = self.seq_linear(dtw_gout_0) + dtw_gout_0\n\n dtw_learned_matrix = F.softmax(F.relu(torch.mm(self.dtw_source_embed, self.dtw_target_embed)), dim=1)\n\n dtw_gout_1 = self.dtw_gconv1(dtw_gout_0, dtw_edge_index)\n adp_input_1 = torch.reshape(dtw_gout_0, (-1, self.node_num, self.input_dim * self.seq_len))\n dtw_adp_1 = self.dtw_linear_1(dtw_learned_matrix.matmul(F.dropout(adp_input_1, p=0.1)))\n dtw_adp_1 = torch.reshape(dtw_adp_1, (-1, self.output_dim * self.graph_dim))\n dtw_origin = self.dtw_origin(dtw_gout_0)\n dtw_output_1 = torch.tanh(dtw_gout_1) * torch.sigmoid(dtw_adp_1) + \\\n dtw_origin * (1 - torch.sigmoid(dtw_adp_1))\n\n dtw_gout_2 = self.dtw_gconv2(torch.tanh(dtw_output_1), dtw_edge_index)\n adp_input_2 = torch.reshape(torch.tanh(dtw_output_1), (-1, self.node_num, self.output_dim * self.graph_dim))\n dtw_adp_2 = self.dtw_linear_2(dtw_learned_matrix.matmul(F.dropout(adp_input_2, p=0.1)))\n dtw_adp_2 = torch.reshape(dtw_adp_2, (-1, self.output_dim * self.graph_dim))\n dtw_output_2 = F.leaky_relu(dtw_gout_2) * torch.sigmoid(dtw_adp_2) + \\\n dtw_output_1 * (1 - torch.sigmoid(dtw_adp_2))\n\n dtw_gout_3 = self.dtw_gconv3(F.relu(dtw_output_2), dtw_edge_index)\n adp_input_3 = torch.reshape(F.relu(dtw_output_2), (-1, self.node_num, self.output_dim * self.graph_dim))\n dtw_adp_3 = self.dtw_linear_3(dtw_learned_matrix.matmul(F.dropout(adp_input_3, p=0.1)))\n dtw_adp_3 = torch.reshape(dtw_adp_3, (-1, self.output_dim * self.graph_dim))\n dtw_output_3 = F.relu(dtw_gout_3) * torch.sigmoid(dtw_adp_3) + dtw_output_2 * (1 - torch.sigmoid(dtw_adp_3))\n\n dtw_gout_4 = self.dtw_gconv4(F.relu(dtw_output_3), dtw_edge_index)\n adp_input_4 = torch.reshape(F.relu(dtw_output_3), (-1, self.node_num, self.output_dim * self.graph_dim))\n dtw_adp_4 = self.dtw_linear_4(dtw_learned_matrix.matmul(F.dropout(adp_input_4, p=0.1)))\n dtw_adp_4 = torch.reshape(dtw_adp_4, (-1, self.output_dim * self.graph_dim))\n # [batch_size*num_nodes, output_dim*graph_dim]\n dtw_output_4 = F.relu(dtw_gout_4) * torch.sigmoid(dtw_adp_4) + dtw_output_3 * (1 - torch.sigmoid(dtw_adp_4))\n\n # dtw_gout_5 = self.dtw_gconv5(F.relu(dtw_output_4), dtw_edge_index)\n # adp_input_5 = torch.reshape(F.relu(dtw_output_4), (-1, self.node_num, self.graph_dim))\n # dtw_adp_5 = self.dtw_linear_5(dtw_learned_matrix.matmul(F.dropout(adp_input_5,p=0.1)))\n # dtw_adp_5 = torch.reshape(dtw_adp_5, (-1, self.graph_dim))\n # dtw_output_5 = \\\n # F.relu(dtw_gout_5) * torch.sigmoid(dtw_adp_5) + dtw_output_4 * (1 - torch.sigmoid(dtw_adp_5))\n\n # [batch_size, num_nodes, output_dim, graph_dim]\n dtw_output = torch.reshape(dtw_output_4, (batch_size, self.node_num, self.output_dim, self.graph_dim))\n # dtw_output = dtw_output_4\n output_list[2] = dtw_output\n\n # output_list[*]: [batch_size, num_nodes, output_dim, graph_dim]\n # cell_output: [batch_size, num_nodes, output_dim, sum(choice)*graph_dim]\n step = 0\n for i in range(len(self.choice)):\n if self.choice[i] == 1 and step == 0:\n cell_output = output_list[i]\n step += 1\n elif self.choice[i] == 1:\n cell_output = torch.cat((cell_output, output_list[i]), dim=3)\n\n # cell_output = self.jklayer([output_list[0], output_list[1], output_list[2]])\n # cell_output = self.out(cell_output)\n\n # cell_output = torch.reshape(cell_output, (-1, self.pred_len_raw))\n\n return cell_output\n", "from libcity.data import get_dataset\nfrom libcity.utils import get_logger, get_executor, get_model\n\nif __name__ == '__main__':\n config = {\n 'log_level': 'INFO',\n 
'input_window': 12,\n 'output_window': 12,\n 'train_rate': 0.7,\n 'eval_rate': 0.1,\n 'cache_dataset': True,\n 'batch_size': 64,\n 'num_workers': 1,\n\n 'evaluator': 'TrafficStateEvaluator',\n 'dataset_class': 'TrafficStatePointDataset',\n 'executor': 'TrafficStateExecutor',\n 'model': 'GWNET',\n\n 'learning_rate': 0.001,\n 'learner': 'adam',\n 'lr_decay': False,\n 'weight_decay': 0.0001,\n 'dropout': 0.3,\n 'max_epoch': 100,\n 'epoch': 0,\n 'max_grad_norm': 5,\n 'clip_grad_norm': True,\n\n 'metrics': ['MAE', 'MSE', 'RMSE', 'MAPE', 'masked_MAE', 'masked_MSE', 'masked_RMSE', 'masked_MAPE', 'R2', 'EVAR'],\n 'gpu': True,\n 'gpu_id': '1',\n 'dataset': 'METR_LA',\n 'weight_col': 'cost',\n 'data_col': ['traffic_speed'],\n 'calculate_weight': True,\n 'add_time_in_day': False,\n 'add_day_in_week': False,\n 'scaler': \"standard\",\n 'use_early_stop': False,\n }\n import os\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = config['gpu_id']\n import torch\n config['device'] = torch.device(\"cuda\" if torch.cuda.is_available() and config['gpu'] else \"cpu\")\n\n logger = get_logger(config)\n dataset = get_dataset(config)\n train_data, valid_data, test_data = dataset.get_data()\n print(len(train_data.dataset), train_data.dataset[0][0].shape, train_data.dataset[0][1].shape,\n train_data.batch_size)\n print(len(valid_data.dataset), valid_data.dataset[0][0].shape, valid_data.dataset[0][1].shape,\n valid_data.batch_size)\n print(len(test_data.dataset), test_data.dataset[0][0].shape, test_data.dataset[0][1].shape, test_data.batch_size)\n\n data_feature = dataset.get_data_feature()\n print(data_feature['adj_mx'].shape)\n print(data_feature['adj_mx'].sum())\n model = get_model(config, data_feature)\n executor = get_executor(config, model)\n executor.train(train_data, valid_data)\n model_cache_file = './libcity/cache/model_cache/' + config['model'] + '_' + config['dataset'] + '.m'\n executor.save_model(model_cache_file)\n executor.load_model(model_cache_file)\n # 评估,评估结果将会放在 cache/evaluate_cache 下\n executor.evaluate(test_data)\n", "import numpy as np\nimport os\nimport pandas as pd\n\nfrom libcity.data.dataset import TrafficStateDataset\nfrom libcity.data.utils import generate_dataloader\nfrom libcity.utils import StandardScaler, NormalScaler, NoneScaler, MinMax01Scaler, MinMax11Scaler, ensure_dir\n\n\nclass STDNDataset(TrafficStateDataset):\n\n def __init__(self, config):\n super().__init__(config)\n # lstm_seq_len = input_window\n self.input_window = self.config.get('input_window', 7)\n self.output_window = self.config.get('output_window', 1)\n self.att_lstm_num = self.config.get('att_lstm_num', 3)\n self.att_lstm_seq_len = self.config.get('att_lstm_seq_len', 3)\n self.hist_feature_daynum = self.config.get('hist_feature_daynum', 7)\n self.last_feature_num = self.config.get('last_feature_num', 48)\n self.points_per_hour = 3600 // self.time_intervals\n self.timeslot_daynum = self.points_per_hour * 24\n self.cnn_nbhd_size = self.config.get('cnn_nbhd_size', 3)\n self.nbhd_size = self.config.get('nbhd_size', 2)\n self.scaler = None\n self.flow_scaler = None\n self.feature_name = {'X': 'float', 'y': 'float', 'flatten_att_nbhd_inputs': 'float',\n 'flatten_att_flow_inputs': 'float', 'att_lstm_inputs': 'float', 'nbhd_inputs': 'float',\n 'flow_inputs': 'float', 'lstm_inputs': 'float'}\n self.batch_size = self.config.get('batch_size', 1)\n\n def _load_geo(self):\n super()._load_grid_geo()\n\n def _load_rel(self):\n super()._load_grid_rel()\n\n def _load_grid(self, filename):\n return super()._load_grid_4d(filename)\n\n def 
_load_gridod(self, filename):\n gridodfile = pd.read_csv(self.data_path + filename + '.gridod')\n # if self.data_col != '': # 根据指定的列加载数据集\n # if isinstance(self.data_col, list):\n # data_col = self.data_col.copy()\n # else: # str\n # data_col = [self.data_col.copy()]\n # data_col.insert(0, 'time')\n # data_col.insert(1, 'origin_row_id')\n # data_col.insert(2, 'origin_column_id')\n # data_col.insert(3, 'destination_row_id')\n # data_col.insert(4, 'destination_column_id')\n # gridodfile = gridodfile[data_col]\n # else: # 不指定则加载所有列\n # gridodfile = gridodfile[gridodfile.columns[2:]] # 从time列开始所有列\n gridodfile = gridodfile[gridodfile.columns[2:]]\n # 求时间序列\n self.timesolts = list(gridodfile['time'][:int(gridodfile.shape[0] / len(self.geo_ids) / len(self.geo_ids))])\n self.idx_of_timesolts = dict()\n if not gridodfile['time'].isna().any(): # 时间没有空值\n self.timesolts = list(map(lambda x: x.replace('T', ' ').replace('Z', ''), self.timesolts))\n self.timesolts = np.array(self.timesolts, dtype='datetime64[ns]')\n for idx, _ts in enumerate(self.timesolts):\n self.idx_of_timesolts[_ts] = idx\n # 转6-d数组\n feature_dim = len(gridodfile.columns) - 5\n df = gridodfile[gridodfile.columns[-feature_dim:]]\n len_time = len(self.timesolts)\n data = np.zeros((self.len_row, self.len_column, self.len_row, self.len_column, len_time, feature_dim))\n for oi in range(self.len_row):\n for oj in range(self.len_column):\n origin_index = (oi * self.len_column + oj) * len_time * len(self.geo_ids) # 每个起点占据len_t*n行\n for di in range(self.len_row):\n for dj in range(self.len_column):\n destination_index = (di * self.len_column + dj) * len_time # 每个终点占据len_t行\n index = origin_index + destination_index\n # print(index, index + len_time)\n data[oi][oj][di][dj] = df[index:index + len_time].values\n data = data.transpose((4, 0, 1, 2, 3, 5)) # (len_time, len_row, len_column, len_row, len_column, feature_dim)\n self._logger.info(\"Loaded file \" + filename + '.gridod' + ', shape=' + str(data.shape))\n return data\n\n def _sample_stdn(self, volume_df, flow_df):\n cnn_att_features = []\n lstm_att_features = []\n flow_att_features = []\n for i in range(self.att_lstm_num):\n cnn_att_features.append([])\n lstm_att_features.append([])\n flow_att_features.append([])\n for j in range(self.att_lstm_seq_len):\n cnn_att_features[i].append([])\n flow_att_features[i].append([])\n\n # cnn_features是一个长度为lstm_seq_len的列表,\n # cnn_features[i]也是一个列表,排列顺序为时间t, 横坐标x, 纵坐标y的顺序\n # 记录了t - (lstm_seq_len - i)时间以(x,y)为中心的7 * 7区域的volume数据\n cnn_features = []\n # flow_features是一个长度为lstm_seq_len的列表,\n # flow_features[i]也是一个列表,排列顺序为时间t, 横坐标x, 纵坐标y的顺序\n # 记录了t - (lstm_seq_len - i)时间以(x,y)为中心的7 * 7区域的flow数据\n flow_features = []\n for i in range(self.input_window):\n cnn_features.append([])\n flow_features.append([])\n\n time_start = (self.hist_feature_daynum + self.att_lstm_num) * self.timeslot_daynum + self.att_lstm_seq_len\n time_end = volume_df.shape[0]\n volume_type = volume_df.shape[-1]\n\n short_term_lstm_features = []\n for t in range(time_start, time_end):\n for x in range(self.len_row):\n for y in range(self.len_column):\n short_term_lstm_samples = []\n for seqn in range(self.input_window):\n real_t = t - (self.input_window - seqn)\n\n # cnn_feature表示在real_t时间以(x, y)为中心的7 * 7区域的volume数据\n cnn_feature = np.zeros((2 * self.cnn_nbhd_size + 1, 2 * self.cnn_nbhd_size + 1, volume_type))\n for cnn_nbhd_x in range(x - self.cnn_nbhd_size, x + self.cnn_nbhd_size + 1):\n for cnn_nbhd_y in range(y - self.cnn_nbhd_size, y + self.cnn_nbhd_size + 1):\n if not (0 <= 
cnn_nbhd_x < self.len_row and 0 <= cnn_nbhd_y < self.len_column):\n continue\n cnn_feature[cnn_nbhd_x - (x - self.cnn_nbhd_size),\n cnn_nbhd_y - (y - self.cnn_nbhd_size), :] = volume_df[real_t, cnn_nbhd_x, cnn_nbhd_y, :]\n cnn_features[seqn].append(cnn_feature)\n\n flow_feature_curr_out = flow_df[real_t, x, y, :, :, 0]\n flow_feature_curr_in = flow_df[real_t, :, :, x, y, 0]\n flow_feature_last_out_to_curr = flow_df[real_t - 1, x, y, :, :, 1]\n flow_feature_curr_in_from_last = flow_df[real_t - 1, :, :, x, y, 1]\n flow_feature = np.zeros(flow_feature_curr_in.shape + (4,))\n flow_feature[:, :, 0] = flow_feature_curr_out\n flow_feature[:, :, 1] = flow_feature_curr_in\n flow_feature[:, :, 2] = flow_feature_last_out_to_curr\n flow_feature[:, :, 3] = flow_feature_curr_in_from_last\n # local_flow_feature表示在real_t时间以(x, y)为中心的7 * 7区域的flow数据\n local_flow_feature = np.zeros((2 * self.cnn_nbhd_size + 1, 2 * self.cnn_nbhd_size + 1, 4))\n for cnn_nbhd_x in range(x - self.cnn_nbhd_size, x + self.cnn_nbhd_size + 1):\n for cnn_nbhd_y in range(y - self.cnn_nbhd_size, y + self.cnn_nbhd_size + 1):\n if not (0 <= cnn_nbhd_x < self.len_row and 0 <= cnn_nbhd_y < self.len_column):\n continue\n local_flow_feature[cnn_nbhd_x - (x - self.cnn_nbhd_size),\n cnn_nbhd_y - (y - self.cnn_nbhd_size), :] = flow_feature[cnn_nbhd_x, cnn_nbhd_y, :]\n flow_features[seqn].append(local_flow_feature)\n\n hist_feature = volume_df[\n real_t - self.hist_feature_daynum * self.timeslot_daynum: real_t: self.timeslot_daynum,\n x, y, :].flatten()\n last_feature = volume_df[real_t - self.last_feature_num: real_t, x, y, :].flatten()\n nbhd_feature = np.zeros((2 * self.nbhd_size + 1, 2 * self.nbhd_size + 1, volume_type))\n for nbhd_x in range(x - self.nbhd_size, x + self.nbhd_size + 1):\n for nbhd_y in range(y - self.nbhd_size, y + self.nbhd_size + 1):\n if not (0 <= nbhd_x < self.len_row and 0 <= nbhd_y < self.len_column):\n continue\n nbhd_feature[nbhd_x - (x - self.nbhd_size), nbhd_y - (y - self.nbhd_size),\n :] = volume_df[real_t, nbhd_x, nbhd_y, :]\n nbhd_feature = nbhd_feature.flatten()\n feature_vec = np.concatenate((hist_feature, last_feature))\n feature_vec = np.concatenate((feature_vec, nbhd_feature))\n short_term_lstm_samples.append(feature_vec)\n short_term_lstm_features.append(np.array(short_term_lstm_samples))\n\n for att_lstm_cnt in range(self.att_lstm_num):\n long_term_lstm_samples = []\n att_t = t - (self.att_lstm_num - att_lstm_cnt) * self.timeslot_daynum + (\n self.att_lstm_seq_len - 1) / 2 + 1\n att_t = int(att_t)\n for seqn in range(self.att_lstm_seq_len):\n real_t = att_t - (self.att_lstm_seq_len - seqn)\n\n cnn_feature = np.zeros(\n (2 * self.cnn_nbhd_size + 1, 2 * self.cnn_nbhd_size + 1, volume_type))\n for cnn_nbhd_x in range(x - self.cnn_nbhd_size, x + self.cnn_nbhd_size + 1):\n for cnn_nbhd_y in range(y - self.cnn_nbhd_size, y + self.cnn_nbhd_size + 1):\n if not (0 <= cnn_nbhd_x < self.len_row and 0 <= cnn_nbhd_y < self.len_column):\n continue\n cnn_feature[cnn_nbhd_x - (x - self.cnn_nbhd_size),\n cnn_nbhd_y - (y - self.cnn_nbhd_size), :] = volume_df[real_t, cnn_nbhd_x,\n cnn_nbhd_y, :]\n cnn_att_features[att_lstm_cnt][seqn].append(cnn_feature)\n\n flow_feature_curr_out = flow_df[real_t, x, y, :, :, 0]\n flow_feature_curr_in = flow_df[real_t, :, :, x, y, 0]\n flow_feature_last_out_to_curr = flow_df[real_t - 1, x, y, :, :, 1]\n flow_feature_curr_in_from_last = flow_df[real_t - 1, :, :, x, y, 1]\n flow_feature = np.zeros(flow_feature_curr_in.shape + (4,))\n flow_feature[:, :, 0] = flow_feature_curr_out\n 
flow_feature[:, :, 1] = flow_feature_curr_in\n flow_feature[:, :, 2] = flow_feature_last_out_to_curr\n flow_feature[:, :, 3] = flow_feature_curr_in_from_last\n local_flow_feature = np.zeros((2 * self.cnn_nbhd_size + 1, 2 * self.cnn_nbhd_size + 1, 4))\n for cnn_nbhd_x in range(x - self.cnn_nbhd_size, x + self.cnn_nbhd_size + 1):\n for cnn_nbhd_y in range(y - self.cnn_nbhd_size, y + self.cnn_nbhd_size + 1):\n if not (0 <= cnn_nbhd_x < self.len_row and 0 <= cnn_nbhd_y < self.len_column):\n continue\n local_flow_feature[cnn_nbhd_x - (x - self.cnn_nbhd_size),\n cnn_nbhd_y - (y - self.cnn_nbhd_size), :] = flow_feature[cnn_nbhd_x, cnn_nbhd_y, :]\n flow_att_features[att_lstm_cnt][seqn].append(local_flow_feature)\n\n hist_feature = volume_df[\n real_t - self.hist_feature_daynum * self.timeslot_daynum: real_t: self.timeslot_daynum,\n x, y, :].flatten()\n last_feature = volume_df[real_t - self.last_feature_num: real_t, x, y, :].flatten()\n nbhd_feature = np.zeros((2 * self.nbhd_size + 1, 2 * self.nbhd_size + 1, volume_type))\n for nbhd_x in range(x - self.nbhd_size, x + self.nbhd_size + 1):\n for nbhd_y in range(y - self.nbhd_size, y + self.nbhd_size + 1):\n if not (0 <= nbhd_x < self.len_row and 0 <= nbhd_y < self.len_column):\n continue\n nbhd_feature[nbhd_x - (x - self.nbhd_size), nbhd_y - (y - self.nbhd_size),\n :] = volume_df[real_t, nbhd_x, nbhd_y, :]\n nbhd_feature = nbhd_feature.flatten()\n feature_vec = np.concatenate((hist_feature, last_feature))\n feature_vec = np.concatenate((feature_vec, nbhd_feature))\n long_term_lstm_samples.append(feature_vec)\n lstm_att_features[att_lstm_cnt].append(np.array(long_term_lstm_samples))\n\n output_cnn_att_features = []\n output_flow_att_features = []\n for i in range(self.att_lstm_num):\n lstm_att_features[i] = np.array(lstm_att_features[i])\n for j in range(self.att_lstm_seq_len):\n cnn_att_features[i][j] = np.array(cnn_att_features[i][j])\n flow_att_features[i][j] = np.array(flow_att_features[i][j])\n output_cnn_att_features.append(cnn_att_features[i][j])\n output_flow_att_features.append(flow_att_features[i][j])\n output_cnn_att_features = np.stack(output_cnn_att_features, axis=0)\n output_cnn_att_features = np.swapaxes(output_cnn_att_features, 0, 1)\n output_cnn_att_features = np.reshape(output_cnn_att_features,\n (-1, self.len_row, self.len_column, *output_cnn_att_features.shape[1:]))\n output_flow_att_features = np.stack(output_flow_att_features, axis=0)\n output_flow_att_features = np.swapaxes(output_flow_att_features, 0, 1)\n output_flow_att_features = np.reshape(output_flow_att_features,\n (-1, self.len_row, self.len_column, *output_flow_att_features.shape[1:]))\n lstm_att_features = np.stack(lstm_att_features, axis=0)\n lstm_att_features = np.swapaxes(lstm_att_features, 0, 1)\n lstm_att_features = np.reshape(lstm_att_features,\n (-1, self.len_row, self.len_column, *lstm_att_features.shape[1:]))\n for i in range(self.input_window):\n cnn_features[i] = np.array(cnn_features[i])\n flow_features[i] = np.array(flow_features[i])\n cnn_features = np.stack(cnn_features, axis=0)\n cnn_features = np.swapaxes(cnn_features, 0, 1)\n cnn_features = np.reshape(cnn_features,\n (-1, self.len_row, self.len_column, *cnn_features.shape[1:]))\n flow_features = np.stack(flow_features, axis=0)\n flow_features = np.swapaxes(flow_features, 0, 1)\n flow_features = np.reshape(flow_features,\n (-1, self.len_row, self.len_column, *flow_features.shape[1:]))\n short_term_lstm_features = np.array(short_term_lstm_features)\n short_term_lstm_features = 
np.reshape(short_term_lstm_features,\n (-1, self.len_row, self.len_column, *short_term_lstm_features.shape[1:]))\n return output_cnn_att_features, output_flow_att_features, lstm_att_features, cnn_features, flow_features, short_term_lstm_features # , inputs, labels\n\n def _generate_input_data_stdn(self, volume_df, flow_df):\n flatten_att_nbhd_input, flatten_att_flow_input, att_lstm_input, nbhd_input, flow_input, lstm_input = self._sample_stdn(\n volume_df, flow_df)\n num_samples = lstm_input.shape[0]\n x_offsets = np.sort(np.concatenate((np.arange(-self.input_window + 1, 1, 1),)))\n y_offsets = np.sort(np.arange(1, self.output_window + 1, 1))\n\n flatten_att_nbhd_inputs = []\n flatten_att_flow_inputs = []\n att_lstm_inputs = []\n nbhd_inputs = []\n flow_inputs = []\n lstm_inputs = []\n x = []\n y = []\n\n min_t = abs(min(x_offsets))\n max_t = abs(num_samples - abs(max(y_offsets)))\n for t in range(min_t, max_t):\n flatten_att_nbhd_inputs_t = flatten_att_nbhd_input[t + y_offsets, ...]\n flatten_att_flow_inputs_t = flatten_att_flow_input[t + y_offsets, ...]\n att_lstm_inputs_t = att_lstm_input[t + y_offsets, ...]\n nbhd_inputs_t = nbhd_input[t + y_offsets, ...]\n flow_inputs_t = flow_input[t + y_offsets, ...]\n lstm_inputs_t = lstm_input[t + y_offsets, ...]\n x_t = volume_df[t + x_offsets, ...]\n y_t = volume_df[t + y_offsets, ...]\n\n flatten_att_nbhd_inputs.append(flatten_att_nbhd_inputs_t)\n flatten_att_flow_inputs.append(flatten_att_flow_inputs_t)\n att_lstm_inputs.append(att_lstm_inputs_t)\n nbhd_inputs.append(nbhd_inputs_t)\n flow_inputs.append(flow_inputs_t)\n lstm_inputs.append(lstm_inputs_t)\n x.append(x_t)\n y.append(y_t)\n x = np.stack(x, axis=0)\n y = np.stack(y, axis=0)\n flatten_att_nbhd_inputs = np.stack(flatten_att_nbhd_inputs, axis=0)\n flatten_att_flow_inputs = np.stack(flatten_att_flow_inputs, axis=0)\n att_lstm_inputs = np.stack(att_lstm_inputs, axis=0)\n nbhd_inputs = np.stack(nbhd_inputs, axis=0)\n flow_inputs = np.stack(flow_inputs, axis=0)\n lstm_inputs = np.stack(lstm_inputs, axis=0)\n return x, y, flatten_att_nbhd_inputs, flatten_att_flow_inputs, att_lstm_inputs, nbhd_inputs, flow_inputs, lstm_inputs\n\n def _generate_data(self):\n volume_df = self._load_grid(self.data_files[0])\n flow_df = self._load_gridod(self.data_files[0])\n\n x, y, flatten_att_nbhd_inputs, flatten_att_flow_inputs, att_lstm_inputs, nbhd_inputs, flow_inputs, lstm_inputs = self._generate_input_data_stdn(\n volume_df, flow_df)\n\n x = np.concatenate([x])\n y = np.concatenate([y])\n flatten_att_nbhd_inputs = np.concatenate([flatten_att_nbhd_inputs])\n flatten_att_flow_inputs = np.concatenate([flatten_att_flow_inputs])\n att_lstm_inputs = np.concatenate([att_lstm_inputs])\n nbhd_inputs = np.concatenate([nbhd_inputs])\n flow_inputs = np.concatenate([flow_inputs])\n lstm_inputs = np.concatenate([lstm_inputs])\n\n self._logger.info(\"Dataset created\")\n self._logger.info(\n \"x shape: \" + str(x.shape) + \", y shape: \" + str(y.shape) + \", flatten_att_nbhd_inputs shape: \" + str(\n flatten_att_nbhd_inputs.shape) + \", flatten_att_flow_inputs shape: \" + str(\n flatten_att_flow_inputs.shape) + \", att_lstm_inputs shape: \" + str(\n att_lstm_inputs.shape) + \", nbhd_inputs shape: \" + str(\n nbhd_inputs.shape) + \", flow_inputs shape: \" + str(flow_inputs.shape) + \", lstm_inputs shape: \" + str(\n lstm_inputs.shape))\n return x, y, flatten_att_nbhd_inputs, flatten_att_flow_inputs, att_lstm_inputs, nbhd_inputs, flow_inputs, lstm_inputs\n\n def _split_train_val_test_stdn(self, x, y, 
flatten_att_nbhd_inputs, flatten_att_flow_inputs, att_lstm_inputs,\n nbhd_inputs, flow_inputs, lstm_inputs):\n \"\"\"\n 划分训练集、测试集、验证集,并缓存数据集\n\n Args:\n x(np.ndarray): 输入数据 (num_samples, input_length, ..., feature_dim)\n y(np.ndarray): 输出数据 (num_samples, input_length, ..., feature_dim)\n\n Returns:\n tuple: tuple contains:\n x_train: (num_samples, input_length, ..., feature_dim) \\n\n y_train: (num_samples, input_length, ..., feature_dim) \\n\n x_val: (num_samples, input_length, ..., feature_dim) \\n\n y_val: (num_samples, input_length, ..., feature_dim) \\n\n x_test: (num_samples, input_length, ..., feature_dim) \\n\n y_test: (num_samples, input_length, ..., feature_dim)\n \"\"\"\n test_rate = 1 - self.train_rate - self.eval_rate\n num_samples = x.shape[0]\n num_test = round(num_samples * test_rate)\n num_train = round(num_samples * self.train_rate)\n num_val = num_samples - num_test - num_train\n\n # train\n x_train = x[:num_train]\n y_train = y[:num_train]\n flatten_att_nbhd_inputs_train = flatten_att_nbhd_inputs[:num_train]\n flatten_att_flow_inputs_train = flatten_att_flow_inputs[:num_train]\n att_lstm_inputs_train = att_lstm_inputs[:num_train]\n nbhd_inputs_train = nbhd_inputs[:num_train]\n flow_inputs_train = flow_inputs[:num_train]\n lstm_inputs_train = lstm_inputs[:num_train]\n # val\n x_val = x[num_train: num_train + num_val]\n y_val = y[num_train: num_train + num_val]\n flatten_att_nbhd_inputs_val = flatten_att_nbhd_inputs[num_train: num_train + num_val]\n flatten_att_flow_inputs_val = flatten_att_flow_inputs[num_train: num_train + num_val]\n att_lstm_inputs_val = att_lstm_inputs[num_train: num_train + num_val]\n nbhd_inputs_val = nbhd_inputs[num_train: num_train + num_val]\n flow_inputs_val = flow_inputs[num_train: num_train + num_val]\n lstm_inputs_val = lstm_inputs[num_train: num_train + num_val]\n # test\n x_test = x[-num_test:]\n y_test = y[-num_test:]\n flatten_att_nbhd_inputs_test = flatten_att_nbhd_inputs[-num_test:]\n flatten_att_flow_inputs_test = flatten_att_flow_inputs[-num_test:]\n att_lstm_inputs_test = att_lstm_inputs[-num_test:]\n nbhd_inputs_test = nbhd_inputs[-num_test:]\n flow_inputs_test = flow_inputs[-num_test:]\n lstm_inputs_test = lstm_inputs[-num_test:]\n self._logger.info(\n \"train\\t\" + \"x: \" + str(x_train.shape) + \"y: \" + str(y_train.shape) + \"flatten_att_nbhd_inputs: \" + str(\n flatten_att_nbhd_inputs_train.shape) + \"flatten_att_flow_inputs: \" + str(\n flatten_att_flow_inputs_train.shape) + \"att_lstm_inputs: \" + str(\n att_lstm_inputs_train.shape) + \"nbhd_inputs: \" + str(nbhd_inputs_train.shape) + \"flow_inputs: \" + str(\n flow_inputs_train.shape) + \"lstm_inputs: \" + str(lstm_inputs_train.shape))\n self._logger.info(\n \"eval\\t\" + \"x: \" + str(x_val.shape) + \"y: \" + str(y_val.shape) + \"flatten_att_nbhd_inputs: \" + str(\n flatten_att_nbhd_inputs_val.shape) + \"flatten_att_flow_inputs: \" + str(\n flatten_att_flow_inputs_val.shape) + \"att_lstm_inputs: \" + str(\n att_lstm_inputs_val.shape) + \"nbhd_inputs: \" + str(nbhd_inputs_val.shape) + \"flow_inputs: \" + str(\n flow_inputs_val.shape) + \"lstm_inputs: \" + str(lstm_inputs_val.shape))\n self._logger.info(\n \"test\\t\" + \"x: \" + str(x_test.shape) + \"y: \" + str(y_test.shape) + \"flatten_att_nbhd_inputs: \" + str(\n flatten_att_nbhd_inputs_test.shape) + \"flatten_att_flow_inputs: \" + str(\n flatten_att_flow_inputs_test.shape) + \"att_lstm_inputs: \" + str(\n att_lstm_inputs_test.shape) + \"nbhd_inputs: \" + str(nbhd_inputs_test.shape) + \"flow_inputs: \" + str(\n 
flow_inputs_test.shape) + \"lstm_inputs: \" + str(lstm_inputs_test.shape))\n\n if self.cache_dataset:\n ensure_dir(self.cache_file_folder)\n np.savez_compressed(\n self.cache_file_name,\n x_train=x_train,\n y_train=y_train,\n flatten_att_nbhd_inputs_train=flatten_att_nbhd_inputs_train,\n flatten_att_flow_inputs_train=flatten_att_flow_inputs_train,\n att_lstm_inputs_train=att_lstm_inputs_train,\n nbhd_inputs_train=nbhd_inputs_train,\n flow_inputs_train=flow_inputs_train,\n lstm_inputs_train=lstm_inputs_train,\n x_test=x_test,\n y_test=y_test,\n flatten_att_nbhd_inputs_test=flatten_att_nbhd_inputs_test,\n flatten_att_flow_inputs_test=flatten_att_flow_inputs_test,\n att_lstm_inputs_test=att_lstm_inputs_test,\n nbhd_inputs_test=nbhd_inputs_test,\n flow_inputs_test=flow_inputs_test,\n lstm_inputs_test=lstm_inputs_test,\n x_val=x_val,\n y_val=y_val,\n flatten_att_nbhd_inputs_val=flatten_att_nbhd_inputs_val,\n flatten_att_flow_inputs_val=flatten_att_flow_inputs_val,\n att_lstm_inputs_val=att_lstm_inputs_val,\n nbhd_inputs_val=nbhd_inputs_val,\n flow_inputs_val=flow_inputs_val,\n lstm_inputs_val=lstm_inputs_val,\n )\n self._logger.info('Saved at ' + self.cache_file_name)\n return x_train, y_train, flatten_att_nbhd_inputs_train, flatten_att_flow_inputs_train, att_lstm_inputs_train, nbhd_inputs_train, flow_inputs_train, lstm_inputs_train, \\\n x_val, y_val, flatten_att_nbhd_inputs_val, flatten_att_flow_inputs_val, att_lstm_inputs_val, nbhd_inputs_val, flow_inputs_val, lstm_inputs_val, \\\n x_test, y_test, flatten_att_nbhd_inputs_test, flatten_att_flow_inputs_test, att_lstm_inputs_test, nbhd_inputs_test, flow_inputs_test, lstm_inputs_test\n\n def _generate_train_val_test(self):\n \"\"\"\n 加载数据集,并划分训练集、测试集、验证集,并缓存数据集\n \"\"\"\n x, y, flatten_att_nbhd_inputs, flatten_att_flow_inputs, att_lstm_inputs, nbhd_inputs, flow_inputs, lstm_inputs = self._generate_data()\n return self._split_train_val_test_stdn(x, y, flatten_att_nbhd_inputs, flatten_att_flow_inputs, att_lstm_inputs,\n nbhd_inputs, flow_inputs, lstm_inputs)\n\n def _load_cache_train_val_test(self):\n \"\"\"\n 加载之前缓存好的训练集、测试集、验证集\n \"\"\"\n self._logger.info('Loading ' + self.cache_file_name)\n cat_data = np.load(self.cache_file_name)\n x_train = cat_data['x_train']\n y_train = cat_data['y_train']\n flatten_att_nbhd_inputs_train = cat_data['flatten_att_nbhd_inputs_train']\n flatten_att_flow_inputs_train = cat_data['flatten_att_flow_inputs_train']\n att_lstm_inputs_train = cat_data['att_lstm_inputs_train']\n nbhd_inputs_train = cat_data['nbhd_inputs_train']\n flow_inputs_train = cat_data['flow_inputs_train']\n lstm_inputs_train = cat_data['lstm_inputs_train']\n x_test = cat_data['x_test']\n y_test = cat_data['y_test']\n flatten_att_nbhd_inputs_test = cat_data['flatten_att_nbhd_inputs_test']\n flatten_att_flow_inputs_test = cat_data['flatten_att_flow_inputs_test']\n att_lstm_inputs_test = cat_data['att_lstm_inputs_test']\n nbhd_inputs_test = cat_data['nbhd_inputs_test']\n flow_inputs_test = cat_data['flow_inputs_test']\n lstm_inputs_test = cat_data['lstm_inputs_test']\n x_val = cat_data['x_val']\n y_val = cat_data['y_val']\n flatten_att_nbhd_inputs_val = cat_data['flatten_att_nbhd_inputs_val']\n flatten_att_flow_inputs_val = cat_data['flatten_att_flow_inputs_val']\n att_lstm_inputs_val = cat_data['att_lstm_inputs_val']\n nbhd_inputs_val = cat_data['nbhd_inputs_val']\n flow_inputs_val = cat_data['flow_inputs_val']\n lstm_inputs_val = cat_data['lstm_inputs_val']\n self._logger.info(\n \"train\\t\" + \"x: \" + str(x_train.shape) + \"y: \" + 
str(y_train.shape) + \"flatten_att_nbhd_inputs: \" + str(\n flatten_att_nbhd_inputs_train.shape) + \"flatten_att_flow_inputs: \" + str(\n flatten_att_flow_inputs_train.shape) + \"att_lstm_inputs: \" + str(\n att_lstm_inputs_train.shape) + \"nbhd_inputs: \" + str(nbhd_inputs_train.shape) + \"flow_inputs: \" + str(\n flow_inputs_train.shape) + \"lstm_inputs: \" + str(lstm_inputs_train.shape))\n self._logger.info(\n \"eval\\t\" + \"x: \" + str(x_val.shape) + \"y: \" + str(y_val.shape) + \"flatten_att_nbhd_inputs: \" + str(\n flatten_att_nbhd_inputs_val.shape) + \"flatten_att_flow_inputs: \" + str(\n flatten_att_flow_inputs_val.shape) + \"att_lstm_inputs: \" + str(\n att_lstm_inputs_val.shape) + \"nbhd_inputs: \" + str(nbhd_inputs_val.shape) + \"flow_inputs: \" + str(\n flow_inputs_val.shape) + \"lstm_inputs: \" + str(lstm_inputs_val.shape))\n self._logger.info(\n \"test\\t\" + \"x: \" + str(x_test.shape) + \"y: \" + str(y_test.shape) + \"flatten_att_nbhd_inputs: \" + str(\n flatten_att_nbhd_inputs_test.shape) + \"flatten_att_flow_inputs: \" + str(\n flatten_att_flow_inputs_test.shape) + \"att_lstm_inputs: \" + str(\n att_lstm_inputs_test.shape) + \"nbhd_inputs: \" + str(nbhd_inputs_test.shape) + \"flow_inputs: \" + str(\n flow_inputs_test.shape) + \"lstm_inputs: \" + str(lstm_inputs_test.shape))\n return x_train, y_train, flatten_att_nbhd_inputs_train, flatten_att_flow_inputs_train, att_lstm_inputs_train, nbhd_inputs_train, flow_inputs_train, lstm_inputs_train, \\\n x_val, y_val, flatten_att_nbhd_inputs_val, flatten_att_flow_inputs_val, att_lstm_inputs_val, nbhd_inputs_val, flow_inputs_val, lstm_inputs_val, \\\n x_test, y_test, flatten_att_nbhd_inputs_test, flatten_att_flow_inputs_test, att_lstm_inputs_test, nbhd_inputs_test, flow_inputs_test, lstm_inputs_test\n\n def _get_scalar_stdn(self, x_train, y_train, flow_inputs_train):\n if self.scaler_type == \"normal\":\n volume_scaler = NormalScaler(maxx=max(x_train.max(), y_train.max()))\n flow_scaler = NormalScaler(maxx=flow_inputs_train.max())\n self._logger.info(\n 'NormalScaler volume max: ' + str(volume_scaler.max) + ' flow max: ' + str(flow_scaler.max))\n elif self.scaler_type == \"standard\":\n volume_scaler = StandardScaler(mean=x_train.mean(), std=x_train.std())\n flow_scaler = StandardScaler(mean=flow_inputs_train.mean(), std=flow_inputs_train.std())\n self._logger.info('StandardScaler volume mean: ' + str(volume_scaler.mean) + ', volume std: ' + str(\n volume_scaler.std) + ', flow mean: ' + str(flow_scaler.mean) + ', flow std: ' + str(flow_scaler.std))\n elif self.scaler_type == \"minmax01\":\n volume_scaler = MinMax01Scaler(maxx=max(x_train.max(), y_train.max()),\n minn=min(x_train.min(), y_train.min()))\n flow_scaler = MinMax01Scaler(maxx=flow_inputs_train.max(), minn=flow_inputs_train.min())\n self._logger.info('MinMax01Scaler volume max: ' + str(volume_scaler.max) + ', volume min: ' + str(\n volume_scaler.min) + ', flow max: ' + str(flow_scaler.max) + ', flow min: ' + str(flow_scaler.min))\n elif self.scaler_type == \"minmax11\":\n volume_scaler = MinMax11Scaler(maxx=max(x_train.max(), y_train.max()),\n minn=min(x_train.min(), y_train.min()))\n flow_scaler = MinMax11Scaler(maxx=flow_inputs_train.max(), minn=flow_inputs_train.min())\n self._logger.info('MinMax11Scaler volume max: ' + str(volume_scaler.max) + ', volume min: ' + str(\n volume_scaler.min) + ', flow max: ' + str(flow_scaler.max) + ', flow min: ' + str(flow_scaler.min))\n elif self.scaler_type == \"none\":\n volume_scaler = NoneScaler()\n flow_scaler = NoneScaler()\n 
self._logger.info('NoneScaler')\n else:\n raise ValueError('Scaler type error!')\n return volume_scaler, flow_scaler\n\n def get_data(self):\n x_train, y_train, flatten_att_nbhd_inputs_train, flatten_att_flow_inputs_train, att_lstm_inputs_train, nbhd_inputs_train, flow_inputs_train, lstm_inputs_train = [], [], [], [], [], [], [], []\n x_val, y_val, flatten_att_nbhd_inputs_val, flatten_att_flow_inputs_val, att_lstm_inputs_val, nbhd_inputs_val, flow_inputs_val, lstm_inputs_val = [], [], [], [], [], [], [], []\n x_test, y_test, flatten_att_nbhd_inputs_test, flatten_att_flow_inputs_test, att_lstm_inputs_test, nbhd_inputs_test, flow_inputs_test, lstm_inputs_test = [], [], [], [], [], [], [], []\n if self.data is None:\n self.data = {}\n if self.cache_dataset and os.path.exists(self.cache_file_name):\n x_train, y_train, flatten_att_nbhd_inputs_train, flatten_att_flow_inputs_train, att_lstm_inputs_train, nbhd_inputs_train, flow_inputs_train, lstm_inputs_train, \\\n x_val, y_val, flatten_att_nbhd_inputs_val, flatten_att_flow_inputs_val, att_lstm_inputs_val, nbhd_inputs_val, flow_inputs_val, lstm_inputs_val, \\\n x_test, y_test, flatten_att_nbhd_inputs_test, flatten_att_flow_inputs_test, att_lstm_inputs_test, nbhd_inputs_test, flow_inputs_test, lstm_inputs_test = self._load_cache_train_val_test()\n else:\n x_train, y_train, flatten_att_nbhd_inputs_train, flatten_att_flow_inputs_train, att_lstm_inputs_train, nbhd_inputs_train, flow_inputs_train, lstm_inputs_train, \\\n x_val, y_val, flatten_att_nbhd_inputs_val, flatten_att_flow_inputs_val, att_lstm_inputs_val, nbhd_inputs_val, flow_inputs_val, lstm_inputs_val, \\\n x_test, y_test, flatten_att_nbhd_inputs_test, flatten_att_flow_inputs_test, att_lstm_inputs_test, nbhd_inputs_test, flow_inputs_test, lstm_inputs_test = self._generate_train_val_test()\n self.feature_dim = x_train.shape[-1]\n self.feature_vec_len = lstm_inputs_train.shape[-1]\n self.nbhd_type = nbhd_inputs_train.shape[-1]\n self.scaler, self.flow_scaler = self._get_scalar_stdn(x_train, y_train, flow_inputs_train)\n x_train = self.scaler.transform(x_train)\n y_train = self.scaler.transform(y_train)\n flatten_att_nbhd_inputs_train = self.scaler.transform(flatten_att_nbhd_inputs_train)\n att_lstm_inputs_train = self.scaler.transform(att_lstm_inputs_train)\n nbhd_inputs_train = self.scaler.transform(nbhd_inputs_train)\n lstm_inputs_train = self.scaler.transform(lstm_inputs_train)\n x_val = self.scaler.transform(x_val)\n y_val = self.scaler.transform(y_val)\n flatten_att_nbhd_inputs_val = self.scaler.transform(flatten_att_nbhd_inputs_val)\n att_lstm_inputs_val = self.scaler.transform(att_lstm_inputs_val)\n nbhd_inputs_val = self.scaler.transform(nbhd_inputs_val)\n lstm_inputs_val = self.scaler.transform(lstm_inputs_val)\n x_test = self.scaler.transform(x_test)\n y_test = self.scaler.transform(y_test)\n flatten_att_nbhd_inputs_test = self.scaler.transform(flatten_att_nbhd_inputs_test)\n att_lstm_inputs_test = self.scaler.transform(att_lstm_inputs_test)\n nbhd_inputs_test = self.scaler.transform(nbhd_inputs_test)\n lstm_inputs_test = self.scaler.transform(lstm_inputs_test)\n\n flatten_att_flow_inputs_train = self.flow_scaler.transform(flatten_att_flow_inputs_train)\n flow_inputs_train = self.flow_scaler.transform(flow_inputs_train)\n flatten_att_flow_inputs_val = self.flow_scaler.transform(flatten_att_flow_inputs_val)\n flow_inputs_val = self.flow_scaler.transform(flow_inputs_val)\n flatten_att_flow_inputs_test = self.flow_scaler.transform(flatten_att_flow_inputs_test)\n flow_inputs_test = 
self.flow_scaler.transform(flow_inputs_test)\n\n train_data = list(\n zip(x_train, y_train, flatten_att_nbhd_inputs_train, flatten_att_flow_inputs_train, att_lstm_inputs_train,\n nbhd_inputs_train, flow_inputs_train, lstm_inputs_train))\n eval_data = list(\n zip(x_val, y_val, flatten_att_nbhd_inputs_val, flatten_att_flow_inputs_val, att_lstm_inputs_val,\n nbhd_inputs_val, flow_inputs_val, lstm_inputs_val))\n test_data = list(\n zip(x_test, y_test, flatten_att_nbhd_inputs_test, flatten_att_flow_inputs_test, att_lstm_inputs_test,\n nbhd_inputs_test, flow_inputs_test, lstm_inputs_test))\n self.train_dataloader, self.eval_dataloader, self.test_dataloader = \\\n generate_dataloader(train_data, eval_data, test_data, self.feature_name,\n self.batch_size, self.num_workers, pad_with_last_sample=self.pad_with_last_sample)\n return self.train_dataloader, self.eval_dataloader, self.test_dataloader\n\n def get_data_feature(self):\n return {\"scaler\": self.scaler, \"adj_mx\": self.adj_mx,\n \"num_nodes\": self.num_nodes, \"feature_dim\": self.feature_dim,\n \"output_dim\": self.output_dim, \"len_row\": self.len_row, \"len_column\": self.len_column,\n \"feature_vec_len\": self.feature_vec_len, \"nbhd_type\": self.nbhd_type}\n", "\"\"\"\n数据预处理阶段相关的工具函数\n\"\"\"\nimport numpy as np\nimport time\nfrom datetime import datetime, timedelta\nfrom collections import defaultdict\n\n\ndef parse_time(time_in, timezone_offset_in_minute=0):\n \"\"\"\n 将 json 中 time_format 格式的 time 转化为 local datatime\n \"\"\"\n date = datetime.strptime(time_in, '%Y-%m-%dT%H:%M:%SZ') # 这是 UTC 时间\n return date + timedelta(minutes=timezone_offset_in_minute)\n\n\ndef cal_basetime(start_time, base_zero):\n \"\"\"\n 用于切分轨迹成一个 session,\n 思路为:给定一个 start_time 找到一个基准时间 base_time,\n 在该 base_time 到 base_time + time_length 区间的点划分到一个 session 内,\n 选取 base_time 来做的理由是:这样可以保证同一个小时段总是被 encode 成同一个数\n \"\"\"\n if base_zero:\n return start_time - timedelta(hours=start_time.hour,\n minutes=start_time.minute,\n seconds=start_time.second,\n microseconds=start_time.microsecond)\n else:\n # time length = 12\n if start_time.hour < 12:\n return start_time - timedelta(hours=start_time.hour,\n minutes=start_time.minute,\n seconds=start_time.second,\n microseconds=start_time.microsecond)\n else:\n return start_time - timedelta(hours=start_time.hour - 12,\n minutes=start_time.minute,\n seconds=start_time.second,\n microseconds=start_time.microsecond)\n\n\ndef cal_timeoff(now_time, base_time):\n \"\"\"\n 计算两个时间之间的差值,返回值以小时为单位\n \"\"\"\n # 先将 now 按小时对齐\n delta = now_time - base_time\n return delta.days * 24 + delta.seconds / 3600\n\n\ndef caculate_time_sim(data):\n time_checkin_set = defaultdict(set)\n tim_size = data['tim_size']\n data_neural = data['data']\n for uid in data_neural:\n uid_sessions = data_neural[uid]\n for session in uid_sessions:\n for checkin in session:\n timid = checkin[1]\n locid = checkin[0]\n if timid not in time_checkin_set:\n time_checkin_set[timid] = set()\n time_checkin_set[timid].add(locid)\n sim_matrix = np.zeros((tim_size, tim_size))\n for i in range(tim_size):\n for j in range(tim_size):\n set_i = time_checkin_set[i]\n set_j = time_checkin_set[j]\n if len(set_i | set_j) != 0:\n jaccard_ij = len(set_i & set_j) / len(set_i | set_j)\n sim_matrix[i][j] = jaccard_ij\n return sim_matrix\n\n\ndef parse_coordinate(coordinate):\n items = coordinate[1:-1].split(',')\n return float(items[0]), float(items[1])\n\n\ndef string2timestamp(strings, offset_frame):\n ts = []\n for t in strings:\n dtstr = '-'.join([t[:4].decode(), t[4:6].decode(), 
t[6:8].decode()])\n slot = int(t[8:]) - 1\n ts.append(np.datetime64(dtstr, 'm') + slot * offset_frame)\n return ts # [numpy.datetime64('2014-01-01T00:00'), ...]\n\n\ndef timestamp2array(timestamps, t):\n \"\"\"\n 把时间戳的序列中的每一个时间戳转成特征数组,考虑了星期和小时,\n 时间戳: numpy.datetime64('2013-07-01T00:00:00.000000000')\n\n Args:\n timestamps: 时间戳序列\n t: 一天有多少个时间步\n\n Returns:\n np.ndarray: 特征数组,shape: (len(timestamps), ext_dim)\n \"\"\"\n vec_wday = [time.strptime(\n str(t)[:10], '%Y-%m-%d').tm_wday for t in timestamps]\n vec_hour = [time.strptime(str(t)[11:13], '%H').tm_hour for t in timestamps]\n vec_minu = [time.strptime(str(t)[14:16], '%M').tm_min for t in timestamps]\n ret = []\n for idx, wday in enumerate(vec_wday):\n # day\n v = [0 for _ in range(7)]\n v[wday] = 1\n if wday >= 5: # 0是周一, 6是周日\n v.append(0) # weekend\n else:\n v.append(1) # weekday len(v)=8\n # hour\n v += [0 for _ in range(t)] # len(v)=8+T\n hour = vec_hour[idx]\n minu = vec_minu[idx]\n # 24*60/T 表示一个时间步是多少分钟\n # hour * 60 + minu 是从0:0开始到现在是多少分钟,相除计算是第几个时间步\n # print(hour, minu, T, (hour * 60 + minu) / (24 * 60 / T))\n v[int((hour * 60 + minu) / (24 * 60 / t))] = 1\n # +8是因为v前边有表示星期的8位\n if hour >= 18 or hour < 6:\n v.append(0) # night\n else:\n v.append(1) # day\n ret.append(v) # len(v)=7+1+T+1=T+9\n return np.asarray(ret)\n\n\ndef timestamp2vec_origin(timestamps):\n \"\"\"\n 把时间戳的序列中的每一个时间戳转成特征数组,只考虑星期,\n 时间戳: numpy.datetime64('2013-07-01T00:00:00.000000000')\n\n Args:\n timestamps: 时间戳序列\n\n Returns:\n np.ndarray: 特征数组,shape: (len(timestamps), 8)\n \"\"\"\n vec = [time.strptime(str(t)[:10], '%Y-%m-%d').tm_wday for t in timestamps]\n ret = []\n for i in vec:\n v = [0 for _ in range(7)]\n v[i] = 1\n if i >= 5:\n v.append(0) # weekend\n else:\n v.append(1) # weekday\n ret.append(v)\n return np.asarray(ret)\n" ]
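The softmax helper and GATConv in the STAGGCN.py source above call `scatter(...)`, which is never imported in the dumped file (it presumably came from torch_scatter). Below is a minimal, self-contained sketch of the grouped softmax they implement, written with plain PyTorch so no extra dependency is assumed:

```python
import torch

def grouped_softmax(x: torch.Tensor, index: torch.Tensor, num_nodes: int) -> torch.Tensor:
    # x: [num_edges, heads] attention logits; index: [num_edges] destination node of each edge.
    # The softmax is taken over all edges that share the same destination node.
    idx = index.unsqueeze(-1).expand_as(x)
    x_max = torch.full((num_nodes, x.size(1)), float('-inf')).scatter_reduce(0, idx, x, reduce='amax')
    out = (x - x_max[index]).exp()                                   # subtract per-group max for stability
    denom = torch.zeros(num_nodes, x.size(1)).scatter_add(0, idx, out)
    return out / denom[index]

alpha = torch.randn(5, 2)                 # 5 edges, 2 attention heads
dst = torch.tensor([0, 0, 1, 2, 2])       # destination node of each edge
w = grouped_softmax(alpha, dst, num_nodes=3)
# weights of the edges sharing a destination sum to ~1 per head:
print(torch.zeros(3, 2).scatter_add(0, dst.unsqueeze(-1).expand_as(w), w))
```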
[ [ "torch.nn.functional.dropout", "torch.cat", "torch.tanh", "torch.device", "torch.nn.Dropout", "torch.mm", "torch.nn.MultiheadAttention", "torch.reshape", "torch.tensor", "torch.nn.functional.relu", "torch.nn.Sequential", "torch.sigmoid", "torch.nn.Linear", "torch.nn.functional.leaky_relu", "torch.nn.Conv1d", "numpy.sum", "torch.Tensor", "torch.nn.init.xavier_uniform_", "torch.nn.ReLU" ], [ "torch.cuda.is_available" ], [ "numpy.swapaxes", "pandas.read_csv", "numpy.reshape", "numpy.arange", "numpy.stack", "numpy.concatenate", "numpy.savez_compressed", "numpy.load", "numpy.array", "numpy.zeros" ], [ "numpy.asarray", "numpy.zeros", "numpy.datetime64" ] ]
Sunnyfred/Atlantic_Hurricane_Simulations
[ "ee5d6d0f975876a01c4a21bebd3089bf3bbb843a", "ee5d6d0f975876a01c4a21bebd3089bf3bbb843a", "ee5d6d0f975876a01c4a21bebd3089bf3bbb843a", "ee5d6d0f975876a01c4a21bebd3089bf3bbb843a", "ee5d6d0f975876a01c4a21bebd3089bf3bbb843a" ]
[ "section3_change_pars_for_weak_hurricanes/Source_code_for_extracting_data/source_code_change_Clz/1_Calculate_wind_track.py", "section1_default_comparison_for_strong_hurricanes/Source_code_for_plotting/2_Plot_all_16km.py", "section2_change_pars_for_strong_hurricanes/Source_code_for_plotting/2_Plot_all_16km_Clz.py", "section3_change_pars_for_weak_hurricanes/Source_code_for_extracting_data/source_code_change_Clz_isftcflx_1/1_Calculate_z0_time_series_at_eyewall_2km.py", "section2_change_pars_for_strong_hurricanes/Source_code_for_extracting_data/source_code_change_A/1_Calculate_wind_intensity_time_series.py" ]
[ "import numpy as np\nfrom netCDF4 import Dataset\nimport matplotlib.pyplot as plt\nfrom plotly.graph_objs import Scattergeo, Layout\nfrom plotly import offline\nfrom cartopy import config\nimport matplotlib as matplot\nfrom matplotlib.image import imread\nimport cartopy.crs as crs\nimport os\nimport shapely.geometry as sgeom\nfrom cartopy.feature import NaturalEarthFeature\nimport csv\nfrom wrf import (to_np, getvar, smooth2d, get_cartopy, cartopy_xlim,\n cartopy_ylim, latlon_coords)\nimport json\nfrom math import sin, cos, sqrt, atan2, radians\nimport pickle\n\n\n\n\n\n\n\nmainpath = '/project/momen/meng/COAWST/results/WRF_VS_WRFSWAN_2/'\nHurricaneall = ['Gert','Nicole','Joaquin','Cristobal','Ike']\nReal_Hurricane_Data = ['Gert_Real_Track_Time_NOAA.csv',\n 'Nicole_Real_Track_Time_NOAA.csv',\n 'Joaquin_Real_Track_Time_NOAA.csv',\n 'Cristobal_Real_Track_Time_NOAA.csv',\n 'Ike_Real_Track_Time_NOAA.csv']\ndays = [15, 14, 4, 26, 10] # start day\nhours = [-6, -6, -6, -6, -6] # start hour\noutput_interval=6\ngridsize = ['8km','16km']\nswansize = ['swgr8p0', 'swgr16p0']\nprefix = 'WRFSWAN_NoTurb_swdt10_cpdt7200_'\nDirall = ['_swh8_swt14_Clz0p0001',\n '_swh8_swt14_Clz0p01',\n '_swh8_swt14_A1200B4p5C0P11',\n '_swh8_swt14_Clz100p00']\noutputpath = '/project/momen/meng/COAWST/results/WRF_VS_WRFSWAN_2/postprocessing_WRFONLY/0_Paper_figures/section3_change_pars_for_weak_winds/source_code_outputs_change_Clz/'\n\n\n# This function returns a list of all wrf files in the directory.\ndef list_files(Dir, ncfiles):\n \tfor f in os.listdir(Dir):\n \t \tif f.startswith('wrfout'):\n \t \t \tncfiles.append(f)\n \treturn (ncfiles)\n\n\n\n\nfor gk in range(len(gridsize)):\n count1=0\n\n for Hurricane in Hurricaneall:\n \n \n\n \n # Initiate the lists that will contain all the necessary data to plot the hurricane's truck.\n Real_Times = []\n Real_Lat = []\n Real_Long =[]\n Real_hour=[]\n Real_day=[]\n real_dic={}\n with open(outputpath+Real_Hurricane_Data[count1]) as f:\n \t reader = csv.reader(f)\n \t # Move to the row containing the row headers. 
\n \t next (reader)\n \t row_header = next(reader)\n \t # Extract the data necessary to plot the real truck.\n \t for row in reader:\n \t \t Real_Lat.append(float(row[row_header.index('Lat')]))\n \t \t Real_Long.append(float(row[row_header.index('Lon')]))\n \t \t Real_hour.append(int(row[row_header.index('Time - hour')]))\t\n \t \t Real_day.append(int(row[row_header.index('Time - day')]))\t\t\n\n for i in range(len(Real_day)):\n real_dic[Real_day[i]]=[]\n\n for i in range(len(Real_day)):\n real_dic[Real_day[i]].append([Real_hour[i],Real_Lat[i],Real_Long[i]])\n print(real_dic)\n # with open(outputpath+Hurricane+'_track.txt', 'w') as outfile:\n # json.dump(real_dic, outfile)\n \n \n \n \n\n\n \n\n results=[] \n for Dir in Dirall:\n \n \n \n\n print('Current folder is: ')\n Dir_local = mainpath+Hurricane+ '/' +gridsize[gk]+ '/' +prefix+swansize[gk]+Dir\n print(Dir_local)\n #row.append(Hurricane+Dir)\n \n \n simu_dic = {}\n for i in range(len(Real_day)):\n simu_dic[Real_day[i]]=[]\n \n \n day=days[count1]\n hour=hours[count1]\n day_count=0\n # Set the working space>\n os.chdir(Dir_local)\n # initiate the list that will contain all wrf files in Dir directory.\n ncfiles = []\n # Use the list_files function to list all the wrf files in the directory.\n ncfiles = list_files(Dir_local, ncfiles)\n # Sort the ncfiles \n ncfiles = sorted(ncfiles)\n #print (ncfiles)\n # initiate the list that will contain the hurricane-track data\n min_slp = []\n min_lat = []\n min_long = []\n\n for ncfile in ncfiles: \n \n \t #print (ncfile)\n \t ncfile = Dataset(ncfile)\n \t # Get the latitude and longitude data.\n \t LAT = np.array(getvar(ncfile, \"XLAT\"))\n \t latitudes = (LAT[:,0])\n \t LONG = np.array(getvar(ncfile, \"XLONG\")) \n \t longitudes = (LONG[0,:])\n \t # Get the sea level pressure for each wrf output file.\n \t slp2D = getvar(ncfile, \"slp\")\n \t slp = np.array(slp2D)\n \t # Get theindex of the minimum value of pressure.\n \t idx = np.where(slp == np.amin(slp))\n \t #print (idx)\n \t # List the data of the minimum SLP\n \t min_slp.append(np.amin(slp)) \n \t min_lat.append(latitudes[idx[0]])\n \t min_long.append(longitudes[idx[1]])\n \t if day_count > 3:\n \t \t if day==31:\n \t \t \t day=1\n \t \t else:\n \t \t \t day+=1\n \t \t day_count=0\n \t day_count += 1 \n \n \t hour += output_interval\n \t if hour == 24:\n \t \t hour=0 \n \t print(day, hour)\n \t simu_dic[day].append([hour,latitudes[idx[0]].tolist()[0],longitudes[idx[1]].tolist()[0]])\n results.append(simu_dic)\n print(results)\n\n\n with open(outputpath+Hurricane+'_track_'+gridsize[gk]+'.txt', 'w') as outfile: \n json.dump(real_dic, outfile) \n for i in range(len(results)): \n json.dump(results[i], outfile) \n \n pickle.dump( slp2D, open( outputpath+Hurricane+'_'+gridsize[gk]+'.p', \"wb\" ) )\n \n \n count1=count1+1 ", "import csv\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom collections import OrderedDict\nimport matplotlib as mpl\n# import matplotlib.gridspec as gridspec\nfrom matplotlib.ticker import MaxNLocator\nfrom matplotlib.ticker import StrMethodFormatter\nimport matplotlib.font_manager as font_manager\nfrom matplotlib.patches import Patch\nimport string\nfrom netCDF4 import Dataset\nimport json\nfrom cartopy.feature import NaturalEarthFeature\nimport cartopy.crs as crs\nimport pickle\nfrom wrf import (to_np, getvar, smooth2d, get_cartopy, cartopy_xlim,\n cartopy_ylim, latlon_coords)\n\nimport cartopy\nimport os\nfrom PIL import Image\n\nImage.MAX_IMAGE_PIXELS = None\nmap_location = 
\"C:/Users/limgr/.spyder-py3/Map\"\nos.environ[\"CARTOPY_USER_BACKGROUNDS\"] = map_location\n\n\n# List the colors that will be used for tracing the track.\ncsfont = {'fontname':'Times New Roman'}\nfont = font_manager.FontProperties(family='Times New Roman', size=25)\nfontbar = font_manager.FontProperties(family='Times New Roman', size=12)\nfont_wt = font_manager.FontProperties(family='Times New Roman', size=20)\ncolors = ['k','green','purple','darkblue', 'deepskyblue', 'tomato', \\\n 'blue', 'gray', 'lightcoral', 'turquoise','red','blue','green','pink']\npatterns = ['-', '--','-.','-',':',':','--','--', ':','-', '--', ':','-', '--', ':',\\\n '-.', '-.', '-.', ':', '--', '-']\nmarkers = ['s','D','^','o','*','>','+','x','X','D','^','<','>','v'] \nsizes = [7, 7, 7, 7, 7, 7, 4, 3, 3, 3, 3, 3, 6,5,4,3,2,2]\n\n\n\n\noptions = [\"Best Track\",\\\n \"WRF-MYJ\",\\\n \"WRF-YSU-0\",\\\n \"WRF-COAWST\",\\\n \"WRF-YSU-1\",\\\n \"WRF-YSU-2\"]\n\n\n \n \nmodels = [\"WRF-MYJ\",\\\n \"WRF-YSU-0\",\\\n \"WRF-COAWST\",\\\n \"WRF-YSU-1\",\\\n \"WRF-YSU-2\"]\n\n\n\n \nhurricanes = [\"Katrina\",\\\n \"Maria\",\\\n \"Irma\",\\\n \"Dorian\",\\\n \"Lorenzo\"]\n \n# subplot positions \nposition = [[0,0,2],[0,2,4],[0,4,6],[1,0,2],[1,2,4]]\nposition2 = [[0,4,0,7],[0,4,8,15],[0,4,16,23],[5,9,0,7],[5,9,8,15]]\n\n\nlinestyles = OrderedDict(\n [('solid', (0, ())),\n ('dashdotted', (0, (3, 3, 1, 3))),\n ('dashdotdotted', (0, (3, 2, 1, 2, 1, 2))),\n ('dashed', (0, (3, 3))),\n ('dotted', (0, (1, 3))),\n ('dashed', (0, (3, 3))),\n ('loosely dashed', (0, (5, 5))),\n ('loosely dotted', (0, (1, 10))),\n ('densely dotted', (0, (1, 1))),\n ('densely dashed', (0, (5, 1))),\n ('loosely dashdotted', (0, (3, 10, 1, 10))),\n ('densely dashdotted', (0, (3, 1, 1, 1))),\n ('loosely dashdotdotted', (0, (3, 10, 1, 10, 1, 10))),\n ('densely dashdotdotted', (0, (3, 1, 1, 1, 1, 1)))])\n\n\n\nR = 6373.0 # approxiamte radius of earth in km\n\n\n\n\n# folder for wi and wt files\n\n\ndir_wi = ['C:/Users/limgr/Desktop/Katrina_wind_intensity_16km.csv',\\\n 'C:/Users/limgr/Desktop/Maria_wind_intensity_16km.csv',\\\n 'C:/Users/limgr/Desktop/Irma_wind_intensity_16km.csv',\\\n 'C:/Users/limgr/Desktop/Dorian_wind_intensity_16km.csv',\\\n 'C:/Users/limgr/Desktop/Lorenzo_wind_intensity_16km.csv']\n\n \ndir_wt = ['C:/Users/limgr/Desktop/Katrina_track_16km.txt',\\\n 'C:/Users/limgr/Desktop/Maria_track_16km.txt',\\\n 'C:/Users/limgr/Desktop/Irma_track_16km.txt',\\\n 'C:/Users/limgr/Desktop/Dorian_track_16km.txt',\\\n 'C:/Users/limgr/Desktop/Lorenzo_track_16km.txt'] \n\ndir_p = ['C:/Users/limgr/Desktop/Katrina_16km.p',\\\n 'C:/Users/limgr/Desktop/Maria_16km.p',\\\n 'C:/Users/limgr/Desktop/Irma_16km.p',\\\n 'C:/Users/limgr/Desktop/Dorian_16km.p',\\\n 'C:/Users/limgr/Desktop/Lorenzo_16km.p'] \n\ndir_znt_eye = ['C:/Users/limgr/Desktop/Katrina_ZNT_eye_16km.csv',\\\n 'C:/Users/limgr/Desktop/Maria_ZNT_eye_16km.csv',\\\n 'C:/Users/limgr/Desktop/Irma_ZNT_eye_16km.csv',\\\n 'C:/Users/limgr/Desktop/Dorian_ZNT_eye_16km.csv',\\\n 'C:/Users/limgr/Desktop/Lorenzo_ZNT_eye_16km.csv'] \n \ndir_znt_eyewall = ['C:/Users/limgr/Desktop/Katrina_ZNT_eyewall_16km.csv',\\\n 'C:/Users/limgr/Desktop/Maria_ZNT_eyewall_16km.csv',\\\n 'C:/Users/limgr/Desktop/Irma_ZNT_eyewall_16km.csv',\\\n 'C:/Users/limgr/Desktop/Dorian_ZNT_eyewall_16km.csv',\\\n 'C:/Users/limgr/Desktop/Lorenzo_ZNT_eyewall_16km.csv'] \n\n \nlat_log_bound = [[-90.5, -84.5, 23, 29],\\\n [-74, -68, 19.5, 25.5],\\\n [-47, -39, 14, 22],\\\n [-76.5, -70.5, 23, 29],\\\n [-45.5, -39.5, 16.5, 22.5]]\n \nlat_log_bound = [[-93, 
-83, 24, 34],\\\n [-77, -67, 19, 29],\\\n [-51, -39, 14, 22],\\\n [-80, -69, 23, 29],\\\n [-47, -40, 16.5, 25.5]] \n \nlat_log_bound = [[-91, -85, 24, 30],\\\n [-77, -67, 19, 29],\\\n [-51, -39, 14, 22],\\\n [-78, -70, 23, 29],\\\n [-47, -40, 16.5, 25.5]] \n \n\ndef Calculate_Distance_Haversine1(x):\n return (np.sin(x[0]/2))**2\ndef Calculate_Distance_Haversine2(x):\n return np.cos(x[0])\ndef Calculate_Distance_Haversine3(x):\n return (np.sin(x[1]/2))**2\n\n\n\n\n\n\n\n\n\n#########################################\n# Plot normalized intensity time series #\n#########################################\n\nfig = plt.figure(figsize=(20,13))\nspec = mpl.gridspec.GridSpec(ncols=23, nrows=9)\n\n\nfor kk in range(len(hurricanes)):\n \n c=0\n rows=[]\n Times=[]\n Times=[]\n values=[]\n with open(dir_wi[kk], mode='r') as csv_file:\n csv_reader = csv.DictReader(csv_file)\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n print(f'Column names are {\", \".join(row)}')\n Times.append(list(row.keys()))\n line_count += 1\n #print(row)\n rows.append(row)\n values.append(list(row.values()))\n line_count += 1\n print(f'Processed {line_count} lines.')\n \n Times0=Times[0]\n print(Times0)\n print(values[0])\n print(position[kk])\n \n \n ax = fig.add_subplot(spec[position2[kk][0]:position2[kk][1],\\\n position2[kk][2]:position2[kk][3]])\n ax.text(0.05, 0.85, '('+string.ascii_lowercase[kk]+')', transform=ax.transAxes, \n size=30, **csfont)\n\n\n\n # for i in range(0,line_count-1):\n # if i==0:\n # tmp=[float(i)*0.5144444 for i in values[i]]\n # #tmp=[float(i) for i in values[i]]\n # # elif (i!=2 and i!=3):\n # else:\n # tmp=[float(i) for i in values[i]]\n # else:\n # continue\n # print('tmp')\n # print(tmp) \n \n for i in range(0,line_count-1):\n if i==0:\n tmp=[float(i)*0.5144444 for i in values[i]]\n #tmp=[float(i) for i in values[i]]\n # elif (i!=2 and i!=3):\n else:\n tmp=[float(i) for i in values[i]]\n # else:\n # continue \n \n if hurricanes[kk]=='Katrina':\n if c==0:\n plt.plot( Times0[:5], tmp[:5], color = colors[c], \\\n linestyle=list(linestyles.values())[0],\\\n linewidth=5, markersize=sizes[c])\n else:\n plt.plot( Times0[:5], tmp[:5], color = colors[c], \\\n linestyle=list(linestyles.values())[i],\\\n linewidth=5, markersize=sizes[c])\n plt.xticks(fontsize=25, **csfont)\n plt.yticks(fontsize=25, **csfont)\n plt.ylim([25, 80])\n elif hurricanes[kk]=='Dorian':\n if c==0:\n plt.plot( Times0[:-2], tmp[:-2], color = colors[c], \\\n linestyle=list(linestyles.values())[0],\\\n linewidth=5, markersize=sizes[c])\n else:\n plt.plot( Times0[:-2], tmp[:-2], color = colors[c], \\\n linestyle=list(linestyles.values())[i],\\\n linewidth=5, markersize=sizes[c]) \n plt.xticks(fontsize=25, **csfont)\n plt.yticks(fontsize=25, **csfont)\n plt.ylim([25, 80])\n else:\n if c==0:\n plt.plot( Times0, tmp, color = colors[c], \\\n linestyle=list(linestyles.values())[0],\\\n linewidth=5, markersize=sizes[c])\n else:\n plt.plot( Times0, tmp, color = colors[c], \\\n linestyle=list(linestyles.values())[i],\\\n linewidth=5, markersize=sizes[c]) \n plt.xticks(fontsize=25, **csfont)\n plt.yticks(fontsize=25, **csfont)\n plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.0f}'))\n plt.ylim([25, 80])\n\n c+=1\n\n for axis in ['top','bottom','left','right']:\n ax.spines[axis].set_linewidth(2)\n ax.tick_params(length=5, width=2)\n fig.legend(options, bbox_to_anchor=(0.87, 0.42), prop=font, \\\n frameon=False)\n \n\n if kk==0 or kk==3:\n plt.ylabel(r'Intensity (m/s)', **csfont, fontsize=35)\n if kk==2 or kk==3 or 
kk==4:\n plt.xlabel(r\"Time Series (hr)\", fontsize=30, **csfont)\n plt.title(hurricanes[kk], {'size': 30}, **csfont)\n\n \n \nplt.savefig('C:/Users/limgr/Desktop/'+hurricanes[kk]+'_wind_intensity_A.png', dpi=500)\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n########################\n# Plot ZNT time series #\n########################\n\n\nfig = plt.figure(figsize=(20,13))\nspec = mpl.gridspec.GridSpec(ncols=23, nrows=9)\n\n\nfor kk in range(len(hurricanes)):\n \n c=0\n rows=[]\n Times=[]\n Times=[]\n values=[]\n with open(dir_znt_eye[kk], mode='r') as csv_file:\n csv_reader = csv.DictReader(csv_file)\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n print(f'Column names are {\", \".join(row)}')\n Times.append(list(row.keys()))\n line_count += 1\n #print(row)\n rows.append(row)\n values.append(list(row.values()))\n line_count += 1\n print(f'Processed {line_count} lines.')\n \n Times0=Times[0]\n print(Times0)\n print(values[0])\n print(position[kk])\n \n \n ax = fig.add_subplot(spec[position2[kk][0]:position2[kk][1],\\\n position2[kk][2]:position2[kk][3]])\n ax.text(0.05, 0.85, '('+string.ascii_lowercase[kk]+')', transform=ax.transAxes, \n size=30, **csfont)\n\n\n for i in range(0,line_count-1):\n if i==0:\n #tmp=[float(i)*0.5144444 for i in values[i]]\n tmp=[float(i) for i in values[i]]\n # elif (i!=2 and i!=3):\n else:\n tmp=[float(i) for i in values[i]]\n # else:\n # continue\n \n if hurricanes[kk]=='Katrina':\n plt.plot( Times0[:5], tmp[:5], color = colors[c+1], \\\n linestyle=list(linestyles.values())[i+1],\\\n linewidth=5, markersize=sizes[c+1])\n plt.xticks(fontsize=25, **csfont)\n plt.yticks(fontsize=25, **csfont)\n plt.ylim([1e-5, 0.05])\n plt.yscale('log')\n elif hurricanes[kk]=='Dorian':\n plt.plot( Times0[:-2], tmp[:-2], color = colors[c+1], \\\n linestyle=list(linestyles.values())[i+1],\\\n linewidth=5, markersize=sizes[c+1])\n plt.xticks(fontsize=25, **csfont)\n plt.yticks(fontsize=25, **csfont)\n plt.ylim([1e-5, 0.05])\n plt.yscale('log')\n else:\n plt.plot( Times0, tmp, color = colors[c+1], \\\n linestyle=list(linestyles.values())[i+1],\\\n linewidth=5, markersize=sizes[c+1])\n plt.xticks(fontsize=25, **csfont)\n plt.yticks(fontsize=25, **csfont)\n plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.0f}'))\n plt.ylim([1e-5, 0.05])\n plt.yscale('log')\n\n c+=1\n \n for axis in ['top','bottom','left','right']:\n ax.spines[axis].set_linewidth(2)\n ax.tick_params(length=5, width=2)\n fig.legend(models, bbox_to_anchor=(0.87, 0.42), prop=font, \\\n frameon=False)\n \n\n if kk==0 or kk==3:\n plt.ylabel(r'$Z_0$ (m)', **csfont, fontsize=30)\n if kk==2 or kk==3 or kk==4:\n plt.xlabel(r\"Time Series (hr)\", fontsize=30, **csfont)\n plt.title(hurricanes[kk], {'size': 30}, **csfont)\n\n \n \nplt.savefig('C:/Users/limgr/Desktop/'+hurricanes[kk]+'_ZNT_eye.png', dpi=500)\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n########################\n# Plot ZNT time series #\n########################\n\n\nfig = plt.figure(figsize=(20,13))\nspec = mpl.gridspec.GridSpec(ncols=23, nrows=9)\n\n\nfor kk in range(len(hurricanes)):\n \n c=0\n rows=[]\n Times=[]\n Times=[]\n values=[]\n with open(dir_znt_eyewall[kk], mode='r') as csv_file:\n csv_reader = csv.DictReader(csv_file)\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n print(f'Column names are {\", \".join(row)}')\n Times.append(list(row.keys()))\n line_count += 1\n #print(row)\n rows.append(row)\n values.append(list(row.values()))\n line_count += 1\n print(f'Processed {line_count} lines.')\n \n Times0=Times[0]\n 
print(Times0)\n print(values[0])\n print(position[kk])\n \n \n ax = fig.add_subplot(spec[position2[kk][0]:position2[kk][1],\\\n position2[kk][2]:position2[kk][3]])\n ax.text(0.05, 0.85, '('+string.ascii_lowercase[kk]+')', transform=ax.transAxes, \n size=30, **csfont)\n\n\n for i in range(0,line_count-1):\n if i==0:\n #tmp=[float(i)*0.5144444 for i in values[i]]\n tmp=[float(i) for i in values[i]]\n # elif (i!=2 and i!=3):\n else:\n tmp=[float(i) for i in values[i]]\n # else:\n # continue\n \n if hurricanes[kk]=='Katrina':\n plt.plot( Times0[:5], tmp[:5], color = colors[c+1], \\\n linestyle=list(linestyles.values())[i+1],\\\n linewidth=5, markersize=sizes[c+1])\n plt.xticks(fontsize=25, **csfont)\n plt.yticks(fontsize=25, **csfont)\n plt.ylim([1e-5, 0.05])\n plt.yscale('log')\n elif hurricanes[kk]=='Dorian':\n plt.plot( Times0[:-2], tmp[:-2], color = colors[c+1], \\\n linestyle=list(linestyles.values())[i+1],\\\n linewidth=5, markersize=sizes[c+1])\n plt.xticks(fontsize=25, **csfont)\n plt.yticks(fontsize=25, **csfont)\n plt.ylim([1e-5, 0.05])\n plt.yscale('log')\n else:\n plt.plot( Times0, tmp, color = colors[c+1], \\\n linestyle=list(linestyles.values())[i+1],\\\n linewidth=5, markersize=sizes[c+1])\n plt.xticks(fontsize=25, **csfont)\n plt.yticks(fontsize=25, **csfont)\n plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.0f}'))\n plt.ylim([1e-5, 0.05])\n plt.yscale('log')\n\n c+=1\n \n for axis in ['top','bottom','left','right']:\n ax.spines[axis].set_linewidth(2)\n ax.tick_params(length=5, width=2)\n fig.legend(models, bbox_to_anchor=(0.87, 0.42), prop=font, \\\n frameon=False)\n \n\n if kk==0 or kk==3:\n plt.ylabel(r'$Z_0$ (m)', **csfont, fontsize=30)\n if kk==2 or kk==3 or kk==4:\n plt.xlabel(r\"Time Series (hr)\", fontsize=30, **csfont)\n plt.title(hurricanes[kk], {'size': 30}, **csfont)\n\n \n \nplt.savefig('C:/Users/limgr/Desktop/'+hurricanes[kk]+'_ZNT_eyewall.png', dpi=500)\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n########################\n# Plot hurricane track #\n########################\n\n\n\n\n\n\nfig = plt.figure(figsize=(15,10))\nspec = mpl.gridspec.GridSpec(ncols=6, nrows=2)\n\nfor kk in range(len(hurricanes)):\n \n \n if hurricanes[kk]=='Katrina':\n cons=6\n elif hurricanes[kk]=='Dorian':\n cons=8\n else:\n cons=10\n \n real1=[]\n oussama1=[]\n wrf1=[]\n simu1=[]\n\n with open( dir_wt[kk], 'r' ) as f :\n data0 = f.read()\n data = json.loads('[' + data0.replace('}{', '},{') + ']')\n for i in range(0,len(data)):\n data2 = list(data[i].values())\n data3 = [e for sl in data2 for e in sl]\n for j in range(len(data3)):\n data3[j].pop(0)\n if i==0:\n real1.append(data3)\n # elif i==1:\n # oussama1.append(data3)\n # elif i==2:\n # wrf1.append(data3)\n else:\n simu1.append(data3)\n real1 = np.array(real1, dtype=np.float32)\n simu1 = np.array(simu1, dtype=np.float32)\n real_r = np.radians(real1)\n simu_r = np.radians(simu1)\n\n\n term1=np.apply_along_axis(Calculate_Distance_Haversine1, 2, simu_r-real_r)\n term2=np.apply_along_axis(Calculate_Distance_Haversine2, 2, simu_r)* \\\n np.apply_along_axis(Calculate_Distance_Haversine2, 2, real_r)* \\\n np.apply_along_axis(Calculate_Distance_Haversine3, 2, simu_r-real_r)\n simu_error1=2*R*np.arcsin(np.sqrt(term1+term2))\n\n\n\n\n # ax = fig.add_subplot(spec[position[kk][0],position[kk][1]:position[kk][2]])\n # ax.text(0.05, 0.9, '('+string.ascii_lowercase[kk]+')', transform=ax.transAxes, \n # size=30)\n\n slp2D = pickle.load( open( dir_p[kk], \"rb\" ) )\n lats, lons = 
latlon_coords(slp2D)\n \n # Get the cartopy mapping object (use original data, rather than any processed data)\n cart_proj = get_cartopy(slp2D)\n\n # Set the GeoAxes to the projection used by WRF\n #ax = plt.axes(projection=cart_proj)\n ax = fig.add_subplot(spec[position[kk][0],position[kk][1]:position[kk][2]], projection=cart_proj)\n # ax.stock_img()\n \n\n # Download and add the states and coastlines\n states = NaturalEarthFeature(category=\"cultural\", scale=\"50m\",\n\t facecolor=\"none\",\n\t name=\"admin_1_states_provinces_shp\")\n ax.add_feature(states, linewidth=.5, edgecolor=\"black\")\n ax.coastlines('50m', linewidth=0.8)\n # Set the map bounds\n # ax.set_xlim(cartopy_xlim(slp2D))\n # ax.set_ylim(cartopy_ylim(slp2D))\n ax.set_extent(lat_log_bound[kk])\n ax.background_img(name='SR', resolution='high')\n\n # Show grid lines.\n gl = ax.gridlines(crs=crs.PlateCarree(), draw_labels=True,\n linewidth=1.5, color='gray', alpha=0.8, linestyle=':')\n gl.xlabel_style = {'size': 15, 'color': 'k','fontname':'Times New Roman'}\n gl.ylabel_style = {'size': 15, 'color': 'k','fontname':'Times New Roman'}\n gl.xlabels_top = False\n gl.ylabels_right = False\n\n c=0\n\n ll=[]\n rr=[]\n for i in range(real1.shape[0]):\n for j in range(real1.shape[1]):\n if j<cons:\n ll.append(real1[i][j][0])\n rr.append(real1[i][j][1])\n ax.plot( rr, ll, color = colors[c], marker=markers[c],linewidth=2, \\\n linestyle=list(linestyles.values())[0],\\\n markersize=sizes[c], transform=crs.PlateCarree())\n c+=1\n\n\n ll=[]\n rr=[]\n for i in range(simu1.shape[0]):\n for j in range(simu1.shape[1]):\n if j<cons:\n ll.append(simu1[i][j][0])\n rr.append(simu1[i][j][1])\n ax.plot( rr, ll, color = colors[c], marker=markers[c],linewidth=2, \\\n linestyle=list(linestyles.values())[i+1],\\\n markersize=sizes[c], transform=crs.PlateCarree())\n c+=1\n ll=[]\n rr=[]\n \n \n for axis in ['top','bottom','left','right']:\n ax.spines[axis].set_linewidth(15)\n fig.legend(options, bbox_to_anchor=(0.87, 0.42), prop=font_wt, \\\n frameon=False)\n \n plt.title(hurricanes[kk], {'size': 25}, **csfont)\n # plt.legend(['Real track','C0.0001', 'C0.01', 'C1', 'C100'],\\\n # loc = \"upper right\", prop={'size': 7})\n # plt.xlabel(\"Lon\", fontsize=135)\n # plt.ylabel(\"Lat\", fontsize=135)\n # plt.title(hurricanes[kk], {'size': 35}, **csfont)\n # plt.show()\n\nplt.savefig('C:/Users/limgr/Desktop/'+hurricanes[kk]+'_wt.png', dpi=500)\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# fig = plt.figure(figsize=(15,10))\n# spec = mpl.gridspec.GridSpec(ncols=6, nrows=2)\n\n# for kk in range(len(hurricanes)):\n \n# real1=[]\n# oussama1=[]\n# wrf1=[]\n# simu1=[]\n\n# with open( dir_wt[kk], 'r' ) as f :\n# data0 = f.read()\n# data = json.loads('[' + data0.replace('}{', '},{') + ']')\n# for i in range(0,len(data)):\n# data2 = list(data[i].values())\n# data3 = [e for sl in data2 for e in sl]\n# for j in range(len(data3)):\n# data3[j].pop(0)\n# if i==0:\n# real1.append(data3)\n# # elif i==1:\n# # oussama1.append(data3)\n# # elif i==2:\n# # wrf1.append(data3)\n# else:\n# simu1.append(data3)\n# real1 = np.array(real1, dtype=np.float32)\n# simu1 = np.array(simu1, dtype=np.float32)\n# real_r = np.radians(real1)\n# simu_r = np.radians(simu1)\n\n\n# term1=np.apply_along_axis(Calculate_Distance_Haversine1, 2, simu_r-real_r)\n# term2=np.apply_along_axis(Calculate_Distance_Haversine2, 2, simu_r)* \\\n# np.apply_along_axis(Calculate_Distance_Haversine2, 2, real_r)* \\\n# np.apply_along_axis(Calculate_Distance_Haversine3, 2, simu_r-real_r)\n# 
simu_error1=2*R*np.arcsin(np.sqrt(term1+term2))\n\n\n# m = Basemap(projection='merc', llcrnrlat=lat_log_bound[kk][2],\\\n# urcrnrlat=lat_log_bound[kk][3], \\\n# llcrnrlon=lat_log_bound[kk][0], \\\n# urcrnrlon=lat_log_bound[kk][1], resolution= 'f' )\n# m.drawstates()\n# m.drawmeridians([-100, -90, -80, -70, -60, -50, -40, ], color='k', textcolor='k', linewidth=1.5,\n# zorder=None, dashes=[6, 1000], labels=[1, 0, 0, 1], labelstyle=None, fmt='%g', xoffset=None, \n# yoffset=None, ax=None, latmax=None, fontsize=12)\n# m.drawparallels([10, 15, 20, 25, 30, 35], color='k', textcolor='k', linewidth=1.5, zorder=None, dashes=[6, 1000], \n# \tlabels=[1, 0, 0, 1], labelstyle=None, fmt='%g', xoffset=None, yoffset=None, ax=None, latmax=None, fontsize=12)\n# m.drawmapscale(-101, 8, -96, 8, 1000, barstyle='fancy', units='km', fontsize=8)\n# m.drawcoastlines(linewidth=0.7, linestyle='solid', color='grey')\n# m.drawcountries()\n# m.shadedrelief()\n# m.drawmapboundary()\n\n\n# # ax = fig.add_subplot(spec[position[kk][0],position[kk][1]:position[kk][2]])\n# # ax.text(0.05, 0.9, '('+string.ascii_lowercase[kk]+')', transform=ax.transAxes, \n# # size=30)\n\n# slp2D = pickle.load( open( dir_p[kk], \"rb\" ) )\n# lats, lons = latlon_coords(slp2D)\n \n# # Get the cartopy mapping object (use original data, rather than any processed data)\n# cart_proj = get_cartopy(slp2D)\n\n# # Set the GeoAxes to the projection used by WRF\n# #ax = plt.axes(projection=cart_proj)\n# ax = fig.add_subplot(spec[position[kk][0],position[kk][1]:position[kk][2]], projection=cart_proj)\n# ax.stock_img()\n \n\n# # Download and add the states and coastlines\n# states = NaturalEarthFeature(category=\"cultural\", scale=\"50m\",\n# \t facecolor=\"none\",\n# \t name=\"admin_1_states_provinces_shp\")\n# ax.add_feature(states, linewidth=.5, edgecolor=\"black\")\n# ax.coastlines('50m', linewidth=0.8)\n# # Set the map bounds\n# # ax.set_xlim(cartopy_xlim(slp2D))\n# # ax.set_ylim(cartopy_ylim(slp2D))\n# ax.set_extent(lat_log_bound[kk])\n\n\n# # Show grid lines.\n# gl = ax.gridlines(crs=crs.PlateCarree(), draw_labels=True,\n# linewidth=1.5, color='gray', alpha=0.8, linestyle=':')\n# gl.xlabel_style = {'size': 15, 'color': 'k'}\n# gl.ylabel_style = {'size': 15, 'color': 'k'}\n# gl.xlabels_top = False\n# gl.ylabels_right = False\n\n# c=0\n\n# ll=[]\n# rr=[]\n# for i in range(real1.shape[0]):\n# for j in range(real1.shape[1]):\n# if j<6:\n# ll.append(real1[i][j][0])\n# rr.append(real1[i][j][1])\n# ax.plot( rr, ll, color = colors[c], marker=markers[c],linewidth=2, linestyle=patterns[c],\\\n# markersize=sizes[c], transform=crs.PlateCarree())\n# c+=1\n\n\n# ll=[]\n# rr=[]\n# for i in range(simu1.shape[0]):\n# for j in range(simu1.shape[1]):\n# if j<6:\n# ll.append(simu1[i][j][0])\n# rr.append(simu1[i][j][1])\n# ax.plot( rr, ll, color = colors[c], marker=markers[c],linewidth=2, linestyle=patterns[c],\\\n# markersize=sizes[c], transform=crs.PlateCarree())\n# c+=1\n# ll=[]\n# rr=[]\n \n \n# for axis in ['top','bottom','left','right']:\n# ax.spines[axis].set_linewidth(15)\n# fig.legend(options, bbox_to_anchor=(0.87, 0.42), prop=font_wt, \\\n# frameon=False)\n \n# plt.title(hurricanes[kk], {'size': 25}, **csfont)\n# # plt.legend(['Real track','C0.0001', 'C0.01', 'C1', 'C100'],\\\n# # loc = \"upper right\", prop={'size': 7})\n# # plt.xlabel(\"Lon\", fontsize=135)\n# # plt.ylabel(\"Lat\", fontsize=135)\n# # plt.title(hurricanes[kk], {'size': 35}, **csfont)\n# # plt.show()\n\n# plt.savefig('C:/Users/limgr/Desktop/'+hurricanes[kk]+'_wt.png', dpi=500)\n# 
plt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n###################\n# Plot error bars #\n###################\n\n\n\nsimu_error = []\n\nfor kk in range(len(hurricanes)):\n\n rows1=[]\n Times1=[]\n Times1=[]\n values1=[]\n real1_track=[]\n\n\n with open(dir_wi[kk], mode='r') as csv_file:\n csv_reader = csv.DictReader(csv_file)\n line_count = 0\n sim_count = 0\n for row in csv_reader:\n if line_count == 0:\n print(f'Column names are {\", \".join(row)}')\n Times1.append(list(row.keys()))\n real1_track.append(list(row.values()))\n line_count += 1\n else:\n rows1.append(row)\n values1.append(list(row.values()))\n line_count += 1\n print('There is totally ',(line_count-1)*(len(row)),' data points')\n simu1=np.array(values1, dtype=np.float32)\n real1=np.array(real1_track, dtype=np.float32)\n real1=real1*0.5144444\n real1=real1\n simu_error1=abs(simu1-real1[:,None])/real1[:,None]#/((line_count-3)*(len(row)))\n print('absolute pressure error')\n print(abs(simu1-real1[:,None]))\n \n simu_error.append(simu_error1)\n\n\n\n\n\n\npar1_error_wi=np.zeros((4, 9))\npar2_error_wi=np.zeros((4, 9))\npar3_erro_wir=np.zeros((4, 9))\npar4_error_wi=np.zeros((4, 9))\npar5_error_wi=np.zeros((4, 9))\n\n\nsimu_error1 = simu_error[0]\nsimu_error2 = simu_error[1]\nsimu_error3 = simu_error[2]\nsimu_error4 = simu_error[3]\nsimu_error5 = simu_error[4]\n\n\npar1_error_wi=np.concatenate((simu_error1[0][0][0:5],simu_error2[0][0][:],\\\n simu_error3[0][0][:],simu_error4[0][0][:-2],simu_error5[0][0][:]))\npar1_error_wi=par1_error_wi.flatten()\npar1_error_wi_mean=np.mean(par1_error_wi)\npar1_error_wi_std=np.std(par1_error_wi)\npar1_error_wi_low=np.percentile(par1_error_wi, 20)\npar1_error_wi_hgh=np.percentile(par1_error_wi, 80)\n\n\npar2_error_wi=np.concatenate((simu_error1[0][1][0:5],simu_error2[0][1][:],\\\n simu_error3[0][1][:],simu_error4[0][1][:-2],simu_error5[0][1][:]))\npar2_error_wi=par2_error_wi.flatten()\npar2_error_wi_mean=np.mean(par2_error_wi)\npar2_error_wi_std=np.std(par2_error_wi)\npar2_error_wi_low=np.percentile(par2_error_wi, 20)\npar2_error_wi_hgh=np.percentile(par2_error_wi, 80)\n\n\npar3_error_wi=np.concatenate((simu_error1[0][2][0:5],simu_error2[0][2][:],\\\n simu_error3[0][2][:],simu_error4[0][2][:-2],simu_error5[0][2][:]))\npar3_error_wi=par3_error_wi.flatten()\npar3_error_wi_mean=np.mean(par3_error_wi)\npar3_error_wi_std=np.std(par3_error_wi)\npar3_error_wi_low=np.percentile(par3_error_wi, 20)\npar3_error_wi_hgh=np.percentile(par3_error_wi, 80)\n\n\n\npar4_error_wi=np.concatenate((simu_error1[0][3][0:5],simu_error2[0][3][:],\\\n simu_error3[0][3][:],simu_error4[0][3][:-2],simu_error5[0][3][:]))\npar4_error_wi=par4_error_wi.flatten()\npar4_error_wi_mean=np.mean(par4_error_wi)\npar4_error_wi_std=np.std(par4_error_wi)\npar4_error_wi_low=np.percentile(par4_error_wi, 20)\npar4_error_wi_hgh=np.percentile(par4_error_wi, 80)\n\n\npar5_error_wi=np.concatenate((simu_error1[0][4][0:5],simu_error2[0][4][:],\\\n simu_error3[0][4][:],simu_error4[0][4][:-2],simu_error5[0][4][:]))\npar5_error_wi=par5_error_wi.flatten()\npar5_error_wi_mean=np.mean(par5_error_wi)\npar5_error_wi_std=np.std(par5_error_wi)\npar5_error_wi_low=np.percentile(par5_error_wi, 20)\npar5_error_wi_hgh=np.percentile(par5_error_wi, 80)\n\n\n\n\nsimu_error = []\n\nfor kk in range(len(hurricanes)):\n \n real1=[]\n oussama1=[]\n wrf1=[]\n simu1=[]\n\n with open( dir_wt[kk], 'r' ) as f :\n data0 = f.read()\n data = json.loads('[' + data0.replace('}{', '},{') + ']')\n for i in range(0,len(data)):\n data2 = list(data[i].values())\n 
data3 = [e for sl in data2 for e in sl]\n for j in range(len(data3)):\n data3[j].pop(0)\n if i==0:\n real1.append(data3)\n # elif i==1:\n # oussama1.append(data3)\n # elif i==2:\n # wrf1.append(data3)\n else:\n simu1.append(data3)\n real1 = np.array(real1, dtype=np.float32)\n simu1 = np.array(simu1, dtype=np.float32)\n real_r = np.radians(real1)\n simu_r = np.radians(simu1)\n\n\n term1=np.apply_along_axis(Calculate_Distance_Haversine1, 2, simu_r-real_r)\n term2=np.apply_along_axis(Calculate_Distance_Haversine2, 2, simu_r)* \\\n np.apply_along_axis(Calculate_Distance_Haversine2, 2, real_r)* \\\n np.apply_along_axis(Calculate_Distance_Haversine3, 2, simu_r-real_r)\n simu_error1=2*R*np.arcsin(np.sqrt(term1+term2))\n simu_error.append(simu_error1)\n\n\npar1_error=np.zeros((4, 9))\npar2_error=np.zeros((4, 9))\npar3_error=np.zeros((4, 9))\npar4_error=np.zeros((4, 9))\npar5_error=np.zeros((4, 9))\n\nsimu_error1 = simu_error[0]\nsimu_error2 = simu_error[1]\nsimu_error3 = simu_error[2]\nsimu_error4 = simu_error[3]\nsimu_error5 = simu_error[4]\n\npar1_error_wt=np.concatenate((simu_error1[0][0:5],\\\n simu_error2[0][:],simu_error3[0][:],\\\n simu_error4[0][:-2],simu_error5[0][:]))\npar1_error_wt=par1_error_wt.flatten()\npar1_error_wt_mean=np.mean(par1_error_wt)\npar1_error_wt_std=np.std(par1_error_wt)\npar1_error_wt_low=np.percentile(par1_error_wt, 20)\npar1_error_wt_hgh=np.percentile(par1_error_wt, 80)\n\n\npar2_error_wt=np.concatenate((simu_error1[1][0:5],\\\n simu_error2[1][:],simu_error3[1][:],\\\n simu_error4[1][:-2],simu_error5[1][:]))\npar2_error_wt=par2_error_wt.flatten()\npar2_error_wt_mean=np.mean(par2_error_wt)\npar2_error_wt_std=np.std(par2_error_wt)\npar2_error_wt_low=np.percentile(par2_error_wt, 20)\npar2_error_wt_hgh=np.percentile(par2_error_wt, 80)\n\npar3_error_wt=np.concatenate((simu_error1[2][0:5],\\\n simu_error2[2][:],simu_error3[2][:],\\\n simu_error4[2][:-2],simu_error5[2][:]))\npar3_error_wt=par3_error_wt.flatten()\npar3_error_wt_mean=np.mean(par3_error_wt)\npar3_error_wt_std=np.std(par3_error_wt)\npar3_error_wt_low=np.percentile(par2_error_wt, 20)\npar3_error_wt_hgh=np.percentile(par2_error_wt, 80)\n\npar4_error_wt=np.concatenate((simu_error1[3][0:5],\\\n simu_error2[3][:],simu_error3[3][:],\\\n simu_error4[3][:-2],simu_error5[3][:]))\npar4_error_wt=par4_error_wt.flatten()\npar4_error_wt_mean=np.mean(par4_error_wt)\npar4_error_wt_std=np.std(par4_error_wt)\npar4_error_wt_low=np.percentile(par4_error_wt, 20)\npar4_error_wt_hgh=np.percentile(par4_error_wt, 80)\n\npar5_error_wt=np.concatenate((simu_error1[4][0:5],\\\n simu_error2[4][:],simu_error3[4][:],\\\n simu_error4[4][:-2],simu_error5[4][:]))\npar5_error_wt=par5_error_wt.flatten()\npar5_error_wt_mean=np.mean(par5_error_wt)\npar5_error_wt_std=np.std(par5_error_wt)\npar5_error_wt_low=np.percentile(par5_error_wt, 20)\npar5_error_wt_hgh=np.percentile(par5_error_wt, 80)\n\n\n\n\nx_pos = np.arange(len(models))\n\nCTEs_wi = [par1_error_wi_mean,\\\n par2_error_wi_mean,par3_error_wi_mean,par4_error_wi_mean,par5_error_wi_mean]\nerrors_wi = [par1_error_wi_std,\\\n par2_error_wi_std,par3_error_wi_std,par4_error_wi_std,par5_error_wi_std]\npercentile_10_wi = np.array([par1_error_wi_mean-par1_error_wi_low,\\\n par2_error_wi_mean-par2_error_wi_low,par3_error_wi_mean-par3_error_wi_low, \\\n par4_error_wi_mean-par4_error_wi_low,par5_error_wi_mean-par5_error_wi_low])\npercentile_90_wi = np.array([par1_error_wi_hgh-par1_error_wi_mean,\\\n par2_error_wi_hgh-par2_error_wi_mean,par3_error_wi_hgh-par3_error_wi_mean, \\\n 
par4_error_wi_hgh-par4_error_wi_mean,par5_error_wi_hgh-par5_error_wi_mean])\nerr_wi = np.vstack((percentile_10_wi, percentile_90_wi))\n\nCTEs_wt = [par1_error_wt_mean,\\\n par2_error_wt_mean,par3_error_wt_mean,par4_error_wt_mean,par5_error_wt_mean]\nerrors_wt = [par1_error_wt_std,\\\n par2_error_wt_std,par3_error_wt_std,par4_error_wt_std,par5_error_wt_std]\npercentile_10_wt = np.array([par1_error_wt_mean-par1_error_wt_low,\\\n par2_error_wt_mean-par2_error_wt_low,par3_error_wt_mean-par3_error_wt_low, \\\n par4_error_wt_mean-par4_error_wt_low,par5_error_wt_mean-par5_error_wt_low])\npercentile_90_wt = np.array([par1_error_wt_hgh-par1_error_wt_mean,\\\n par2_error_wt_hgh-par2_error_wt_mean,par3_error_wt_hgh-par3_error_wt_mean, \\\n par4_error_wt_hgh-par4_error_wt_mean,par5_error_wt_hgh-par5_error_wt_mean])\nprint(percentile_90_wt)\nerr_wt = np.vstack((percentile_10_wt, percentile_90_wt))\n\n\n\n# fig, ax = plt.subplots(1, 2, figsize=(40, 8), sharex=True)\nfig = plt.figure(figsize=(8,5))\nspec = mpl.gridspec.GridSpec(ncols=8, nrows=5)\n\n\nax = fig.add_subplot(spec[1:,0:4])\nax.text(0.7, 0.9, '('+string.ascii_lowercase[0]+')', transform=ax.transAxes, \n size=15, **csfont)\nbars = ax.bar(x_pos, CTEs_wi, yerr=err_wi, align='center', \\\n color=['green','purple','darkblue', 'deepskyblue', 'tomato'], alpha=0.8,\\\n ecolor='k', capsize=10, edgecolor='k', linewidth=3) \nfor i in range(len(x_pos)):\n bars[i].set(linestyle=list(linestyles.values())[0]) \nax.set_ylabel(r'Normalized Intensity', **csfont, fontsize=15)\nvals = ax.get_yticks()\nprint(vals)\nax.set_yticklabels(['{:,.0%}'.format(x) for x in vals])\nax.set_xticks(x_pos)\nax.set_xticklabels(models, **csfont, fontsize=10)\n#ax.set_title(r'COAWST', **csfont, fontsize=20)\nax.yaxis.grid(True)\n# ax.set_ylim([0, 0.5])\n\n\nax = fig.add_subplot(spec[1:,4:])\nax.text(0.7, 0.9, '('+string.ascii_lowercase[1]+')', transform=ax.transAxes, \n size=15, **csfont) \nbars = ax.bar(x_pos, CTEs_wt, yerr=err_wt, align='center', \\\n color=['green','purple','darkblue', 'deepskyblue', 'tomato'], alpha=0.8,\\\n ecolor='k', capsize=10, edgecolor='k', linewidth=3) \nfor i in range(len(x_pos)):\n bars[i].set(linestyle=list(linestyles.values())[0])\nax.set_ylabel(r'Track Error (km)', **csfont, fontsize=15)\nvals = ax.get_yticks()\nax.set_yticklabels(['{}'.format(x) for x in vals])\nax.set_xticks(x_pos)\nax.set_xticklabels(models, **csfont, fontsize=10)\n#ax.set_title(r'COAWST', **csfont, fontsize=20)\nax.yaxis.grid(True)\n# ax.set_ylim([0, 110])\n\n\nax = fig.add_subplot(spec[0,0:])\nhandles = [plt.Rectangle((0,0),1,1, facecolor=colors[i+1], \\\n linestyle=list(linestyles.values())[0], edgecolor = 'k', linewidth=1.5\\\n ) for i in range(len(models))]\nplt.legend(handles, models, ncol=3, bbox_to_anchor=(0.85, 0.8), prop=fontbar, \\\n frameon=False)\nax.axes.xaxis.set_visible(False)\nax.axes.yaxis.set_visible(False)\nax.set_yticks([])\nax.set_yticklabels([])\nax.set_xticks([])\nax.set_xticklabels([])\nfor axis in ['top','bottom','left','right']:\n ax.spines[axis].set_visible(False)\n\n\n\n# for i, v in enumerate(CTEs):\n# ax.text(i, v+0.02, str(round(v, 3)), color='red', fontweight='bold')\n\n# Save the figure and show\nfig.autofmt_xdate()\nplt.tight_layout()\n#plt.savefig('wind_intensity_bar_plot.png')\nplt.savefig('C:/Users/limgr/Desktop/wi_wt_bar_plots.png', dpi=500)\nplt.show()\n\n\n\n\n", "import csv\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom collections import OrderedDict\nimport matplotlib as mpl\n# import matplotlib.gridspec as gridspec\nfrom 
matplotlib.ticker import MaxNLocator\nfrom matplotlib.ticker import StrMethodFormatter\nimport matplotlib.font_manager as font_manager\nfrom matplotlib.patches import Patch\nimport string\nfrom netCDF4 import Dataset\nimport json\nfrom cartopy.feature import NaturalEarthFeature\nimport cartopy.crs as crs\nimport pickle\nfrom wrf import (to_np, getvar, smooth2d, get_cartopy, cartopy_xlim,\n cartopy_ylim, latlon_coords)\nimport cartopy\nimport os\nfrom PIL import Image\n\nImage.MAX_IMAGE_PIXELS = None\nmap_location = \"C:/Users/limgr/.spyder-py3/Map\"\nos.environ[\"CARTOPY_USER_BACKGROUNDS\"] = map_location\n\n\n\n\n# List the colors that will be used for tracing the track.\ncsfont = {'fontname':'Times New Roman'}\nfont = font_manager.FontProperties(family='Times New Roman', size=30)\nfontbar = font_manager.FontProperties(family='Times New Roman', size=12)\nfont_wt = font_manager.FontProperties(family='Times New Roman', size=20)\ncolors = ['k','blue','cyan','gray', 'red', \\\n 'blue', 'cyan', 'lightcoral', 'turquoise','red','blue','green','pink']\npatterns = ['-', '--','-.','-',':',':','--','--', ':','-', '--', ':','-', '--', ':',\\\n '-.', '-.', '-.', ':', '--', '-']\nmarkers = ['s','D','^','o','*','s','+','x','X','D','^','<','>','v'] \nsizes = [7, 7, 7, 7, 7, 3, 4, 3, 3, 3, 3, 3, 6,5,4,3,2,2]\n\n\n\n\noptions = [\"Best Track\",\\\n \"Clz=0.0001\",\\\n \"Clz=0.01\",\\\n \"Clz=1\",\\\n \"Clz=100\"]\n\n \n \nmodels = [\"Clz = 0.0001\",\\\n \"Clz = 0.01\",\\\n \"Clz = 1\",\\\n \"Clz = 100\"]\n\n\n \nhurricanes = [\"Katrina\",\\\n \"Maria\",\\\n \"Irma\",\\\n \"Dorian\",\\\n \"Lorenzo\"]\n \n# subplot positions \nposition = [[0,0,2],[0,2,4],[0,4,6],[1,0,2],[1,2,4]]\nposition2 = [[0,4,0,7],[0,4,8,15],[0,4,16,23],[5,9,0,7],[5,9,8,15]]\n\n\nlinestyles = OrderedDict(\n [('solid', (0, ())),\n ('dashdotted', (0, (3, 3, 1, 3))),\n ('dashdotdotted', (0, (3, 2, 1, 2, 1, 2))),\n ('dashed', (0, (3, 3))),\n ('dotted', (0, (1, 3))),\n ('dashed', (0, (3, 3))),\n ('loosely dotted', (0, (1, 10))),\n ('densely dotted', (0, (1, 1))),\n ('loosely dashed', (0, (5, 10))),\n ('densely dashed', (0, (5, 1))),\n ('loosely dashdotted', (0, (3, 10, 1, 10))),\n ('densely dashdotted', (0, (3, 1, 1, 1))),\n ('loosely dashdotdotted', (0, (3, 10, 1, 10, 1, 10))),\n ('densely dashdotdotted', (0, (3, 1, 1, 1, 1, 1)))])\n\n\n\nR = 6373.0 # approxiamte radius of earth in km\n\n\n\n\n# folder for wi and wt files\n\n\ndir_wi = ['C:/Users/limgr/Desktop/Katrina_wind_intensity_16km.csv',\\\n 'C:/Users/limgr/Desktop/Maria_wind_intensity_16km.csv',\\\n 'C:/Users/limgr/Desktop/Irma_wind_intensity_16km.csv',\\\n 'C:/Users/limgr/Desktop/Dorian_wind_intensity_16km.csv',\\\n 'C:/Users/limgr/Desktop/Lorenzo_wind_intensity_16km.csv']\n\n \ndir_wt = ['C:/Users/limgr/Desktop/Katrina_track_16km.txt',\\\n 'C:/Users/limgr/Desktop/Maria_track_16km.txt',\\\n 'C:/Users/limgr/Desktop/Irma_track_16km.txt',\\\n 'C:/Users/limgr/Desktop/Dorian_track_16km.txt',\\\n 'C:/Users/limgr/Desktop/Lorenzo_track_16km.txt'] \n\ndir_p = ['C:/Users/limgr/Desktop/Katrina_16km.p',\\\n 'C:/Users/limgr/Desktop/Maria_16km.p',\\\n 'C:/Users/limgr/Desktop/Irma_16km.p',\\\n 'C:/Users/limgr/Desktop/Dorian_16km.p',\\\n 'C:/Users/limgr/Desktop/Lorenzo_16km.p'] \n\ndir_znt_eye = ['C:/Users/limgr/Desktop/Katrina_ZNT_eye_16km.csv',\\\n 'C:/Users/limgr/Desktop/Maria_ZNT_eye_16km.csv',\\\n 'C:/Users/limgr/Desktop/Irma_ZNT_eye_16km.csv',\\\n 'C:/Users/limgr/Desktop/Dorian_ZNT_eye_16km.csv',\\\n 'C:/Users/limgr/Desktop/Lorenzo_ZNT_eye_16km.csv'] \n \ndir_znt_eyewall = 
['C:/Users/limgr/Desktop/Katrina_ZNT_eyewall_16km.csv',\\\n 'C:/Users/limgr/Desktop/Maria_ZNT_eyewall_16km.csv',\\\n 'C:/Users/limgr/Desktop/Irma_ZNT_eyewall_16km.csv',\\\n 'C:/Users/limgr/Desktop/Dorian_ZNT_eyewall_16km.csv',\\\n 'C:/Users/limgr/Desktop/Lorenzo_ZNT_eyewall_16km.csv'] \n\n\ndir_wp = ['C:/Users/limgr/Desktop/Katrina_avg_speed_16km.csv', \\\n 'C:/Users/limgr/Desktop/Maria_avg_speed_16km.csv', \\\n 'C:/Users/limgr/Desktop/Irma_avg_speed_16km.csv', \\\n 'C:/Users/limgr/Desktop/Dorian_avg_speed_16km.csv', \\\n 'C:/Users/limgr/Desktop/Lorenzo_avg_speed_16km.csv'] \n \nlat_log_bound = [[-90.5, -84.5, 23, 29],\\\n [-74, -68, 19.5, 25.5],\\\n [-47, -39, 14, 22],\\\n [-76.5, -70.5, 23, 29],\\\n [-45.5, -39.5, 16.5, 22.5]]\n \nlat_log_bound = [[-93, -83, 24, 34],\\\n [-77, -67, 19, 29],\\\n [-51, -39, 14, 22],\\\n [-80, -69, 23, 29],\\\n [-47, -40, 16.5, 25.5]] \n \nlat_log_bound = [[-91, -85, 24, 30],\\\n [-77, -67, 19, 29],\\\n [-51, -39, 14, 22],\\\n [-78, -70, 23, 29],\\\n [-47, -40, 16.5, 25.5]] \n \n \n# lat_log_bound = [[-92, -86, 25, 30],\\\n# [-74, -68, 21.5, 25.5],\\\n# [-46, -43.5, 17, 19.5],\\\n# [-76, -73.5, 25.5, 28],\\\n# [-46, -42, 19, 23]]\n\ndef Calculate_Distance_Haversine1(x):\n return (np.sin(x[0]/2))**2\ndef Calculate_Distance_Haversine2(x):\n return np.cos(x[0])\ndef Calculate_Distance_Haversine3(x):\n return (np.sin(x[1]/2))**2\n\n\n\n\n\n\n\n\n\n#########################################\n# Plot normalized intensity time series #\n#########################################\n\n\nfig = plt.figure(figsize=(20,13))\nspec = mpl.gridspec.GridSpec(ncols=23, nrows=9)\n\nfor kk in range(len(hurricanes)):\n \n c=0\n rows=[]\n Times=[]\n Times=[]\n values=[]\n with open(dir_wi[kk], mode='r') as csv_file:\n csv_reader = csv.DictReader(csv_file)\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n print(f'Column names are {\", \".join(row)}')\n Times.append(list(row.keys()))\n line_count += 1\n #print(row)\n rows.append(row)\n values.append(list(row.values()))\n line_count += 1\n print(f'Processed {line_count} lines.')\n \n Times0=Times[0]\n print(Times0)\n print(values[0])\n print(position[kk])\n \n \n ax = fig.add_subplot(spec[position2[kk][0]:position2[kk][1],\\\n position2[kk][2]:position2[kk][3]])\n ax.text(0.05, 0.85, '('+string.ascii_lowercase[kk]+')', transform=ax.transAxes, \n size=30, **csfont)\n\n\n for i in range(0,line_count-1):\n if i==0:\n tmp=[float(i)*0.5144444 for i in values[i]]\n #tmp=[float(i) for i in values[i]]\n # elif (i!=2 and i!=3):\n else:\n tmp=[float(i) for i in values[i]]\n # else:\n # continue\n \n if hurricanes[kk]=='Katrina':\n plt.plot( Times0[:5], tmp[:5], color = colors[c], \\\n linestyle=list(linestyles.values())[c],\\\n linewidth=5, markersize=sizes[c])\n plt.xticks(fontsize=25, **csfont)\n plt.yticks(fontsize=25, **csfont)\n plt.ylim([25, 80])\n elif hurricanes[kk]=='Dorian':\n plt.plot( Times0[:-2], tmp[:-2], color = colors[c], \\\n linestyle=list(linestyles.values())[c],\\\n linewidth=5, markersize=sizes[c])\n plt.xticks(fontsize=25, **csfont)\n plt.yticks(fontsize=25, **csfont)\n plt.ylim([25, 80])\n else:\n plt.plot( Times0, tmp, color = colors[c], \\\n linestyle=list(linestyles.values())[c],\\\n linewidth=5, markersize=sizes[c])\n plt.xticks(fontsize=25, **csfont)\n plt.yticks(fontsize=25, **csfont)\n plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.0f}'))\n plt.ylim([25, 80])\n\n c+=1\n \n for axis in ['top','bottom','left','right']:\n ax.spines[axis].set_linewidth(2)\n ax.tick_params(length=5, 
width=2)\n fig.legend(options, bbox_to_anchor=(0.87, 0.42), prop=font, \\\n frameon=False)\n \n\n if kk==0 or kk==3:\n plt.ylabel(r'Intensity (m/s)', **csfont, fontsize=35)\n if kk==2 or kk==3 or kk==4:\n plt.xlabel(r\"Time Series (hr)\", fontsize=30, **csfont)\n plt.title(hurricanes[kk], {'size': 30}, **csfont)\n\n \n \nplt.savefig('C:/Users/limgr/Desktop/'+hurricanes[kk]+'_wind_intensity_A.png', dpi=500)\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n########################\n# Plot ZNT time series #\n########################\n\n\nfig = plt.figure(figsize=(20,13))\nspec = mpl.gridspec.GridSpec(ncols=23, nrows=9)\n\n\nfor kk in range(len(hurricanes)):\n \n c=0\n rows=[]\n Times=[]\n Times=[]\n values=[]\n with open(dir_znt_eye[kk], mode='r') as csv_file:\n csv_reader = csv.DictReader(csv_file)\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n print(f'Column names are {\", \".join(row)}')\n Times.append(list(row.keys()))\n line_count += 1\n #print(row)\n rows.append(row)\n values.append(list(row.values()))\n line_count += 1\n print(f'Processed {line_count} lines.')\n \n Times0=Times[0]\n print(Times0)\n print(values[0])\n print(position[kk])\n \n \n ax = fig.add_subplot(spec[position2[kk][0]:position2[kk][1],\\\n position2[kk][2]:position2[kk][3]])\n ax.text(0.05, 0.85, '('+string.ascii_lowercase[kk]+')', transform=ax.transAxes, \n size=30, **csfont)\n\n\n for i in range(0,line_count-1):\n if i==0:\n #tmp=[float(i)*0.5144444 for i in values[i]]\n tmp=[float(i) for i in values[i]]\n # elif (i!=2 and i!=3):\n else:\n tmp=[float(i) for i in values[i]]\n # else:\n # continue\n \n if hurricanes[kk]=='Katrina':\n plt.plot( Times0[:5], tmp[:5], color = colors[c+1], \\\n linestyle=list(linestyles.values())[c+1],\\\n linewidth=5, markersize=sizes[c+1])\n plt.xticks(fontsize=25, **csfont)\n plt.yticks(fontsize=25, **csfont)\n plt.ylim([1e-11, 3.0])\n plt.yscale('log')\n elif hurricanes[kk]=='Dorian':\n plt.plot( Times0[:-2], tmp[:-2], color = colors[c+1], \\\n linestyle=list(linestyles.values())[c+1],\\\n linewidth=5, markersize=sizes[c+1])\n plt.xticks(fontsize=25, **csfont)\n plt.yticks(fontsize=25, **csfont)\n plt.ylim([1e-11, 3.0])\n plt.yscale('log')\n else:\n plt.plot( Times0, tmp, color = colors[c+1], \\\n linestyle=list(linestyles.values())[c+1],\\\n linewidth=5, markersize=sizes[c+1])\n plt.xticks(fontsize=25, **csfont)\n plt.yticks(fontsize=25, **csfont)\n plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.0f}'))\n plt.ylim([1e-11, 3.0])\n plt.yscale('log')\n\n c+=1\n \n for axis in ['top','bottom','left','right']:\n ax.spines[axis].set_linewidth(2)\n ax.tick_params(length=5, width=2)\n fig.legend(models, bbox_to_anchor=(0.87, 0.42), prop=font, \\\n frameon=False)\n \n\n if kk==0 or kk==3:\n plt.ylabel(r'$Z_0$ (m)', **csfont, fontsize=30)\n if kk==2 or kk==3 or kk==4:\n plt.xlabel(r\"Time Series (hr)\", fontsize=30, **csfont)\n plt.title(hurricanes[kk], {'size': 30}, **csfont)\n\n \n \nplt.savefig('C:/Users/limgr/Desktop/'+hurricanes[kk]+'_ZNT_eye.png', dpi=500)\nplt.show()\n\n\n\n\n\n\n\n\n########################\n# Plot ZNT time series #\n########################\n\n\nfig = plt.figure(figsize=(20,13))\nspec = mpl.gridspec.GridSpec(ncols=23, nrows=9)\n\n\nfor kk in range(len(hurricanes)):\n \n c=0\n rows=[]\n Times=[]\n Times=[]\n values=[]\n with open(dir_znt_eyewall[kk], mode='r') as csv_file:\n csv_reader = csv.DictReader(csv_file)\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n print(f'Column names are {\", \".join(row)}')\n 
Times.append(list(row.keys()))\n line_count += 1\n #print(row)\n rows.append(row)\n values.append(list(row.values()))\n line_count += 1\n print(f'Processed {line_count} lines.')\n \n Times0=Times[0]\n print(Times0)\n print(values[0])\n print(position[kk])\n \n \n ax = fig.add_subplot(spec[position2[kk][0]:position2[kk][1],\\\n position2[kk][2]:position2[kk][3]])\n ax.text(0.05, 0.85, '('+string.ascii_lowercase[kk]+')', transform=ax.transAxes, \n size=30, **csfont)\n\n\n for i in range(0,line_count-1):\n if i==0:\n #tmp=[float(i)*0.5144444 for i in values[i]]\n tmp=[float(i) for i in values[i]]\n # elif (i!=2 and i!=3):\n else:\n tmp=[float(i) for i in values[i]]\n # else:\n # continue\n \n if hurricanes[kk]=='Katrina':\n plt.plot( Times0[:5], tmp[:5], color = colors[c+1], \\\n linestyle=list(linestyles.values())[c+1],\\\n linewidth=5, markersize=sizes[c+1])\n plt.xticks(fontsize=25, **csfont)\n plt.yticks(fontsize=25, **csfont)\n plt.ylim([1e-11, 3.0])\n plt.yscale('log')\n elif hurricanes[kk]=='Dorian':\n plt.plot( Times0[:-2], tmp[:-2], color = colors[c+1], \\\n linestyle=list(linestyles.values())[c+1],\\\n linewidth=5, markersize=sizes[c+1])\n plt.xticks(fontsize=25, **csfont)\n plt.yticks(fontsize=25, **csfont)\n plt.ylim([1e-11, 3.0])\n plt.yscale('log')\n else:\n plt.plot( Times0, tmp, color = colors[c+1], \\\n linestyle=list(linestyles.values())[c+1],\\\n linewidth=5, markersize=sizes[c+1])\n plt.xticks(fontsize=25, **csfont)\n plt.yticks(fontsize=25, **csfont)\n plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.0f}'))\n plt.ylim([1e-11, 3.0])\n plt.yscale('log')\n\n c+=1\n \n for axis in ['top','bottom','left','right']:\n ax.spines[axis].set_linewidth(2)\n ax.tick_params(length=5, width=2)\n fig.legend(models, bbox_to_anchor=(0.87, 0.42), prop=font, \\\n frameon=False)\n \n\n if kk==0 or kk==3:\n plt.ylabel(r'$Z_0$ (m)', **csfont, fontsize=30)\n if kk==2 or kk==3 or kk==4:\n plt.xlabel(r\"Time Series (hr)\", fontsize=30, **csfont)\n plt.title(hurricanes[kk], {'size': 30}, **csfont)\n\n \n \nplt.savefig('C:/Users/limgr/Desktop/'+hurricanes[kk]+'_ZNT_eyewall.png', dpi=500)\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n########################\n# Plot hurricane track #\n########################\n\n\n\n\n\n\nfig = plt.figure(figsize=(15,10))\nspec = mpl.gridspec.GridSpec(ncols=6, nrows=2)\n\nfor kk in range(len(hurricanes)):\n \n if hurricanes[kk]=='Katrina':\n cons=6\n elif hurricanes[kk]=='Dorian':\n cons=8\n else:\n cons=10\n \n real1=[]\n oussama1=[]\n wrf1=[]\n simu1=[]\n\n with open( dir_wt[kk], 'r' ) as f :\n data0 = f.read()\n data = json.loads('[' + data0.replace('}{', '},{') + ']')\n for i in range(0,len(data)):\n data2 = list(data[i].values())\n data3 = [e for sl in data2 for e in sl]\n for j in range(len(data3)):\n data3[j].pop(0)\n if i==0:\n real1.append(data3)\n # elif i==1:\n # oussama1.append(data3)\n # elif i==2:\n # wrf1.append(data3)\n else:\n simu1.append(data3)\n real1 = np.array(real1, dtype=np.float32)\n simu1 = np.array(simu1, dtype=np.float32)\n real_r = np.radians(real1)\n simu_r = np.radians(simu1)\n\n\n term1=np.apply_along_axis(Calculate_Distance_Haversine1, 2, simu_r-real_r)\n term2=np.apply_along_axis(Calculate_Distance_Haversine2, 2, simu_r)* \\\n np.apply_along_axis(Calculate_Distance_Haversine2, 2, real_r)* \\\n np.apply_along_axis(Calculate_Distance_Haversine3, 2, simu_r-real_r)\n simu_error1=2*R*np.arcsin(np.sqrt(term1+term2))\n\n\n\n\n # ax = 
fig.add_subplot(spec[position[kk][0],position[kk][1]:position[kk][2]])\n # ax.text(0.05, 0.9, '('+string.ascii_lowercase[kk]+')', transform=ax.transAxes, \n # size=30)\n\n slp2D = pickle.load( open( dir_p[kk], \"rb\" ) )\n lats, lons = latlon_coords(slp2D)\n \n # Get the cartopy mapping object (use original data, rather than any processed data)\n cart_proj = get_cartopy(slp2D)\n\n # Set the GeoAxes to the projection used by WRF\n #ax = plt.axes(projection=cart_proj)\n ax = fig.add_subplot(spec[position[kk][0],position[kk][1]:position[kk][2]], projection=cart_proj)\n # ax.stock_img()\n \n\n # Download and add the states and coastlines\n states = NaturalEarthFeature(category=\"cultural\", scale=\"50m\",\n\t facecolor=\"none\",\n\t name=\"admin_1_states_provinces_shp\")\n ax.add_feature(states, linewidth=.5, edgecolor=\"black\")\n ax.coastlines('50m', linewidth=0.8)\n # Set the map bounds\n # ax.set_xlim(cartopy_xlim(slp2D))\n # ax.set_ylim(cartopy_ylim(slp2D))\n ax.set_extent(lat_log_bound[kk])\n ax.background_img(name='SR', resolution='high')\n\n # Show grid lines.\n gl = ax.gridlines(crs=crs.PlateCarree(), draw_labels=True,\n linewidth=1.5, color='gray', alpha=0.8, linestyle=':')\n gl.xlabel_style = {'size': 15, 'color': 'k','fontname':'Times New Roman'}\n gl.ylabel_style = {'size': 15, 'color': 'k','fontname':'Times New Roman'}\n gl.xlabels_top = False\n gl.ylabels_right = False\n\n c=0\n\n ll=[]\n rr=[]\n for i in range(real1.shape[0]):\n for j in range(real1.shape[1]):\n if j<cons:\n ll.append(real1[i][j][0])\n rr.append(real1[i][j][1])\n ax.plot( rr, ll, color = colors[c], marker=markers[c],linewidth=2, \\\n linestyle=list(linestyles.values())[c],\\\n markersize=sizes[c], transform=crs.PlateCarree())\n c+=1\n\n\n ll=[]\n rr=[]\n for i in range(simu1.shape[0]):\n for j in range(simu1.shape[1]):\n if j<cons:\n ll.append(simu1[i][j][0])\n rr.append(simu1[i][j][1])\n ax.plot( rr, ll, color = colors[c], marker=markers[c],linewidth=2, \\\n linestyle=list(linestyles.values())[c],\\\n markersize=sizes[c], transform=crs.PlateCarree())\n c+=1\n ll=[]\n rr=[]\n \n \n for axis in ['top','bottom','left','right']:\n ax.spines[axis].set_linewidth(15)\n fig.legend(options, bbox_to_anchor=(0.87, 0.42), prop=font_wt, \\\n frameon=False)\n \n plt.title(hurricanes[kk], {'size': 25}, **csfont)\n # plt.legend(['Real track','C0.0001', 'C0.01', 'C1', 'C100'],\\\n # loc = \"upper right\", prop={'size': 7})\n # plt.xlabel(\"Lon\", fontsize=135)\n # plt.ylabel(\"Lat\", fontsize=135)\n # plt.title(hurricanes[kk], {'size': 35}, **csfont)\n # plt.show()\n\nplt.savefig('C:/Users/limgr/Desktop/'+hurricanes[kk]+'_wt.png', dpi=500)\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# fig = plt.figure(figsize=(15,10))\n# spec = mpl.gridspec.GridSpec(ncols=6, nrows=2)\n\n# for kk in range(len(hurricanes)):\n \n# real1=[]\n# oussama1=[]\n# wrf1=[]\n# simu1=[]\n\n# with open( dir_wt[kk], 'r' ) as f :\n# data0 = f.read()\n# data = json.loads('[' + data0.replace('}{', '},{') + ']')\n# for i in range(0,len(data)):\n# data2 = list(data[i].values())\n# data3 = [e for sl in data2 for e in sl]\n# for j in range(len(data3)):\n# data3[j].pop(0)\n# if i==0:\n# real1.append(data3)\n# # elif i==1:\n# # oussama1.append(data3)\n# # elif i==2:\n# # wrf1.append(data3)\n# else:\n# simu1.append(data3)\n# real1 = np.array(real1, dtype=np.float32)\n# simu1 = np.array(simu1, dtype=np.float32)\n# real_r = np.radians(real1)\n# simu_r = np.radians(simu1)\n\n\n# term1=np.apply_along_axis(Calculate_Distance_Haversine1, 2, simu_r-real_r)\n# 
term2=np.apply_along_axis(Calculate_Distance_Haversine2, 2, simu_r)* \\\n# np.apply_along_axis(Calculate_Distance_Haversine2, 2, real_r)* \\\n# np.apply_along_axis(Calculate_Distance_Haversine3, 2, simu_r-real_r)\n# simu_error1=2*R*np.arcsin(np.sqrt(term1+term2))\n\n\n# m = Basemap(projection='merc', llcrnrlat=lat_log_bound[kk][2],\\\n# urcrnrlat=lat_log_bound[kk][3], \\\n# llcrnrlon=lat_log_bound[kk][0], \\\n# urcrnrlon=lat_log_bound[kk][1], resolution= 'f' )\n# m.drawstates()\n# m.drawmeridians([-100, -90, -80, -70, -60, -50, -40, ], color='k', textcolor='k', linewidth=1.5,\n# zorder=None, dashes=[6, 1000], labels=[1, 0, 0, 1], labelstyle=None, fmt='%g', xoffset=None, \n# yoffset=None, ax=None, latmax=None, fontsize=12)\n# m.drawparallels([10, 15, 20, 25, 30, 35], color='k', textcolor='k', linewidth=1.5, zorder=None, dashes=[6, 1000], \n# \tlabels=[1, 0, 0, 1], labelstyle=None, fmt='%g', xoffset=None, yoffset=None, ax=None, latmax=None, fontsize=12)\n# m.drawmapscale(-101, 8, -96, 8, 1000, barstyle='fancy', units='km', fontsize=8)\n# m.drawcoastlines(linewidth=0.7, linestyle='solid', color='grey')\n# m.drawcountries()\n# m.shadedrelief()\n# m.drawmapboundary()\n\n\n# # ax = fig.add_subplot(spec[position[kk][0],position[kk][1]:position[kk][2]])\n# # ax.text(0.05, 0.9, '('+string.ascii_lowercase[kk]+')', transform=ax.transAxes, \n# # size=30)\n\n# slp2D = pickle.load( open( dir_p[kk], \"rb\" ) )\n# lats, lons = latlon_coords(slp2D)\n \n# # Get the cartopy mapping object (use original data, rather than any processed data)\n# cart_proj = get_cartopy(slp2D)\n\n# # Set the GeoAxes to the projection used by WRF\n# #ax = plt.axes(projection=cart_proj)\n# ax = fig.add_subplot(spec[position[kk][0],position[kk][1]:position[kk][2]], projection=cart_proj)\n# ax.stock_img()\n \n\n# # Download and add the states and coastlines\n# states = NaturalEarthFeature(category=\"cultural\", scale=\"50m\",\n# \t facecolor=\"none\",\n# \t name=\"admin_1_states_provinces_shp\")\n# ax.add_feature(states, linewidth=.5, edgecolor=\"black\")\n# ax.coastlines('50m', linewidth=0.8)\n# # Set the map bounds\n# # ax.set_xlim(cartopy_xlim(slp2D))\n# # ax.set_ylim(cartopy_ylim(slp2D))\n# ax.set_extent(lat_log_bound[kk])\n\n\n# # Show grid lines.\n# gl = ax.gridlines(crs=crs.PlateCarree(), draw_labels=True,\n# linewidth=1.5, color='gray', alpha=0.8, linestyle=':')\n# gl.xlabel_style = {'size': 15, 'color': 'k'}\n# gl.ylabel_style = {'size': 15, 'color': 'k'}\n# gl.xlabels_top = False\n# gl.ylabels_right = False\n\n# c=0\n\n# ll=[]\n# rr=[]\n# for i in range(real1.shape[0]):\n# for j in range(real1.shape[1]):\n# if j<6:\n# ll.append(real1[i][j][0])\n# rr.append(real1[i][j][1])\n# ax.plot( rr, ll, color = colors[c], marker=markers[c],linewidth=2, linestyle=patterns[c],\\\n# markersize=sizes[c], transform=crs.PlateCarree())\n# c+=1\n\n\n# ll=[]\n# rr=[]\n# for i in range(simu1.shape[0]):\n# for j in range(simu1.shape[1]):\n# if j<6:\n# ll.append(simu1[i][j][0])\n# rr.append(simu1[i][j][1])\n# ax.plot( rr, ll, color = colors[c], marker=markers[c],linewidth=2, linestyle=patterns[c],\\\n# markersize=sizes[c], transform=crs.PlateCarree())\n# c+=1\n# ll=[]\n# rr=[]\n \n \n# for axis in ['top','bottom','left','right']:\n# ax.spines[axis].set_linewidth(15)\n# fig.legend(options, bbox_to_anchor=(0.87, 0.42), prop=font_wt, \\\n# frameon=False)\n \n# plt.title(hurricanes[kk], {'size': 25}, **csfont)\n# # plt.legend(['Real track','C0.0001', 'C0.01', 'C1', 'C100'],\\\n# # loc = \"upper right\", prop={'size': 7})\n# # plt.xlabel(\"Lon\", 
fontsize=135)\n# # plt.ylabel(\"Lat\", fontsize=135)\n# # plt.title(hurricanes[kk], {'size': 35}, **csfont)\n# # plt.show()\n\n# plt.savefig('C:/Users/limgr/Desktop/'+hurricanes[kk]+'_wt.png', dpi=500)\n# plt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# ###################\n# # Plot error bars #\n# ###################\n\n\n\n# simu_error = []\n\n# for kk in range(len(hurricanes)):\n\n# rows1=[]\n# Times1=[]\n# Times1=[]\n# values1=[]\n# real1_track=[]\n\n\n# with open(dir_wi[kk], mode='r') as csv_file:\n# csv_reader = csv.DictReader(csv_file)\n# line_count = 0\n# sim_count = 0\n# for row in csv_reader:\n# if line_count == 0:\n# print(f'Column names are {\", \".join(row)}')\n# Times1.append(list(row.keys()))\n# real1_track.append(list(row.values()))\n# line_count += 1\n# else:\n# rows1.append(row)\n# values1.append(list(row.values()))\n# line_count += 1\n# print('There is totally ',(line_count-1)*(len(row)),' data points')\n# simu1=np.array(values1, dtype=np.float32)\n# real1=np.array(real1_track, dtype=np.float32)\n# real1=real1*0.5144444\n# real1=real1\n# simu_error1=abs(simu1-real1[:,None])/real1[:,None]#/((line_count-3)*(len(row)))\n# print('absolute pressure error')\n# print(abs(simu1-real1[:,None]))\n \n# simu_error.append(simu_error1)\n\n\n\n\n\n\n# par1_error_wi=np.zeros((4, 9))\n# par2_error_wi=np.zeros((4, 9))\n# par3_erro_wir=np.zeros((4, 9))\n# par4_error_wi=np.zeros((4, 9))\n\n\n\n# simu_error1 = simu_error[0]\n# simu_error2 = simu_error[1]\n# simu_error3 = simu_error[2]\n# simu_error4 = simu_error[3]\n# simu_error5 = simu_error[4]\n\n\n# par1_error_wi=np.concatenate((simu_error1[0][0][0:5],simu_error2[0][0][:],\\\n# simu_error3[0][0][:],simu_error4[0][0][:-2],simu_error5[0][0][:]))\n# par1_error_wi=par1_error_wi.flatten()\n# par1_error_wi_mean=np.mean(par1_error_wi)\n# par1_error_wi_std=np.std(par1_error_wi)\n# par1_error_wi_low=np.percentile(par1_error_wi, 20)\n# par1_error_wi_hgh=np.percentile(par1_error_wi, 80)\n\n\n# par2_error_wi=np.concatenate((simu_error1[0][1][0:5],simu_error2[0][1][:],\\\n# simu_error3[0][1][:],simu_error4[0][1][:-2],simu_error5[0][1][:]))\n# par2_error_wi=par2_error_wi.flatten()\n# par2_error_wi_mean=np.mean(par2_error_wi)\n# par2_error_wi_std=np.std(par2_error_wi)\n# par2_error_wi_low=np.percentile(par2_error_wi, 20)\n# par2_error_wi_hgh=np.percentile(par2_error_wi, 80)\n\n\n# par3_error_wi=np.concatenate((simu_error1[0][2][0:5],simu_error2[0][2][:],\\\n# simu_error3[0][2][:],simu_error4[0][2][:-2],simu_error5[0][2][:]))\n# par3_error_wi=par3_error_wi.flatten()\n# par3_error_wi_mean=np.mean(par3_error_wi)\n# par3_error_wi_std=np.std(par3_error_wi)\n# par3_error_wi_low=np.percentile(par3_error_wi, 20)\n# par3_error_wi_hgh=np.percentile(par3_error_wi, 80)\n\n\n\n# par4_error_wi=np.concatenate((simu_error1[0][3][0:5],simu_error2[0][3][:],\\\n# simu_error3[0][3][:],simu_error4[0][3][:-2],simu_error5[0][3][:]))\n# par4_error_wi=par4_error_wi.flatten()\n# par4_error_wi_mean=np.mean(par4_error_wi)\n# par4_error_wi_std=np.std(par4_error_wi)\n# par4_error_wi_low=np.percentile(par4_error_wi, 20)\n# par4_error_wi_hgh=np.percentile(par4_error_wi, 80)\n\n\n\n\n# simu_error = []\n\n# for kk in range(len(hurricanes)):\n \n# real1=[]\n# oussama1=[]\n# wrf1=[]\n# simu1=[]\n\n# with open( dir_wt[kk], 'r' ) as f :\n# data0 = f.read()\n# data = json.loads('[' + data0.replace('}{', '},{') + ']')\n# for i in range(0,len(data)):\n# data2 = list(data[i].values())\n# data3 = [e for sl in data2 for e in sl]\n# for j in range(len(data3)):\n# 
data3[j].pop(0)\n# if i==0:\n# real1.append(data3)\n# # elif i==1:\n# # oussama1.append(data3)\n# # elif i==2:\n# # wrf1.append(data3)\n# else:\n# simu1.append(data3)\n# real1 = np.array(real1, dtype=np.float32)\n# simu1 = np.array(simu1, dtype=np.float32)\n# real_r = np.radians(real1)\n# simu_r = np.radians(simu1)\n\n\n# term1=np.apply_along_axis(Calculate_Distance_Haversine1, 2, simu_r-real_r)\n# term2=np.apply_along_axis(Calculate_Distance_Haversine2, 2, simu_r)* \\\n# np.apply_along_axis(Calculate_Distance_Haversine2, 2, real_r)* \\\n# np.apply_along_axis(Calculate_Distance_Haversine3, 2, simu_r-real_r)\n# simu_error1=2*R*np.arcsin(np.sqrt(term1+term2))\n# simu_error.append(simu_error1)\n\n\n# par1_error=np.zeros((4, 9))\n# par2_error=np.zeros((4, 9))\n# par3_error=np.zeros((4, 9))\n# par4_error=np.zeros((4, 9))\n\n\n# simu_error1 = simu_error[0]\n# simu_error2 = simu_error[1]\n# simu_error3 = simu_error[2]\n# simu_error4 = simu_error[3]\n# simu_error5 = simu_error[4]\n\n# par1_error_wt=np.concatenate((simu_error1[0][0:5],\\\n# simu_error2[0][:],simu_error3[0][:],\\\n# simu_error4[0][:-2],simu_error5[0][:]))\n# par1_error_wt=par1_error_wt.flatten()\n# par1_error_wt_mean=np.mean(par1_error_wt)\n# par1_error_wt_std=np.std(par1_error_wt)\n# par1_error_wt_low=np.percentile(par1_error_wt, 20)\n# par1_error_wt_hgh=np.percentile(par1_error_wt, 80)\n\n\n# par2_error_wt=np.concatenate((simu_error1[1][0:5],\\\n# simu_error2[1][:],simu_error3[1][:],\\\n# simu_error4[1][:-2],simu_error5[1][:]))\n# par2_error_wt=par2_error_wt.flatten()\n# par2_error_wt_mean=np.mean(par2_error_wt)\n# par2_error_wt_std=np.std(par2_error_wt)\n# par2_error_wt_low=np.percentile(par2_error_wt, 20)\n# par2_error_wt_hgh=np.percentile(par2_error_wt, 80)\n\n# par3_error_wt=np.concatenate((simu_error1[2][0:5],\\\n# simu_error2[2][:],simu_error3[2][:],\\\n# simu_error4[2][:-2],simu_error5[2][:]))\n# par3_error_wt=par3_error_wt.flatten()\n# par3_error_wt_mean=np.mean(par3_error_wt)\n# par3_error_wt_std=np.std(par3_error_wt)\n# par3_error_wt_low=np.percentile(par2_error_wt, 20)\n# par3_error_wt_hgh=np.percentile(par2_error_wt, 80)\n\n# par4_error_wt=np.concatenate((simu_error1[3][0:5],\\\n# simu_error2[3][:],simu_error3[3][:],\\\n# simu_error4[3][:-2],simu_error5[3][:]))\n# par4_error_wt=par4_error_wt.flatten()\n# par4_error_wt_mean=np.mean(par4_error_wt)\n# par4_error_wt_std=np.std(par4_error_wt)\n# par4_error_wt_low=np.percentile(par4_error_wt, 20)\n# par4_error_wt_hgh=np.percentile(par4_error_wt, 80)\n# print(par4_error_wt_low, par4_error_wt_mean, par4_error_wt_hgh)\n\n\n\n# x_pos = np.arange(len(models))\n\n# CTEs_wi = [par1_error_wi_mean,\\\n# par2_error_wi_mean,par3_error_wi_mean,par4_error_wi_mean]\n# errors_wi = [par1_error_wi_std,\\\n# par2_error_wi_std,par3_error_wi_std,par4_error_wi_std]\n# percentile_10_wi = np.array([par1_error_wi_mean-par1_error_wi_low,\\\n# par2_error_wi_mean-par2_error_wi_low,par3_error_wi_mean-par3_error_wi_low, \\\n# par4_error_wi_mean-par4_error_wi_low])\n# percentile_90_wi = np.array([par1_error_wi_hgh-par1_error_wi_mean,\\\n# par2_error_wi_hgh-par2_error_wi_mean,par3_error_wi_hgh-par3_error_wi_mean, \\\n# par4_error_wi_hgh-par4_error_wi_mean])\n# err_wi = np.vstack((percentile_10_wi, percentile_90_wi))\n\n# CTEs_wt = [par1_error_wt_mean,\\\n# par2_error_wt_mean,par3_error_wt_mean,par4_error_wt_mean]\n# errors_wt = [par1_error_wt_std,\\\n# par2_error_wt_std,par3_error_wt_std,par4_error_wt_std]\n# percentile_10_wt = np.array([par1_error_wt_mean-par1_error_wt_low,\\\n# 
par2_error_wt_mean-par2_error_wt_low,par3_error_wt_mean-par3_error_wt_low, \\\n# par4_error_wt_mean-par4_error_wt_low])\n# percentile_90_wt = np.array([par1_error_wt_hgh-par1_error_wt_mean,\\\n# par2_error_wt_hgh-par2_error_wt_mean,par3_error_wt_hgh-par3_error_wt_mean, \\\n# par4_error_wt_hgh-par4_error_wt_mean])\n# print(percentile_90_wt)\n# err_wt = np.vstack((percentile_10_wt, percentile_90_wt))\n\n\n\n# # fig, ax = plt.subplots(1, 2, figsize=(40, 8), sharex=True)\n# fig = plt.figure(figsize=(8,5))\n# spec = mpl.gridspec.GridSpec(ncols=8, nrows=5)\n\n\n# ax = fig.add_subplot(spec[1:,0:4])\n# ax.text(0.05, 0.9, '('+string.ascii_lowercase[0]+')', transform=ax.transAxes, \n# size=15, **csfont)\n# bars = ax.bar(x_pos, CTEs_wi, yerr=err_wi, align='center', \\\n# color=['blue','cyan','gray', 'red'], alpha=0.8,\\\n# ecolor='k', capsize=10, edgecolor='k', linewidth=3) \n# for i in range(len(x_pos)):\n# bars[i].set(linestyle=list(linestyles.values())[0]) \n# ax.set_ylabel(r'Normalized Intensity', **csfont, fontsize=15)\n# vals = ax.get_yticks()\n# ax.set_yticklabels(['{:,.0%}'.format(x) for x in vals])\n# ax.set_xticks(x_pos)\n# ax.set_xticklabels(models, **csfont, fontsize=10)\n# #ax.set_title(r'COAWST', **csfont, fontsize=20)\n# ax.yaxis.grid(True)\n\n\n\n# ax = fig.add_subplot(spec[1:,4:])\n# ax.text(0.05, 0.9, '('+string.ascii_lowercase[1]+')', transform=ax.transAxes, \n# size=15, **csfont) \n# bars = ax.bar(x_pos, CTEs_wt, yerr=err_wt, align='center', \\\n# color=['blue','cyan','gray', 'red'], alpha=0.8,\\\n# ecolor='k', capsize=10, edgecolor='k', linewidth=3) \n# for i in range(len(x_pos)):\n# bars[i].set(linestyle=list(linestyles.values())[0])\n# ax.set_ylabel(r'Track Error (km)', **csfont, fontsize=15)\n# vals = ax.get_yticks()\n# ax.set_yticklabels(['{}'.format(x) for x in vals])\n# ax.set_xticks(x_pos)\n# ax.set_xticklabels(models, **csfont, fontsize=10)\n# #ax.set_title(r'COAWST', **csfont, fontsize=20)\n# ax.yaxis.grid(True)\n\n\n\n# ax = fig.add_subplot(spec[0,0:])\n# handles = [plt.Rectangle((0,0),1,1, facecolor=colors[i+1], \\\n# linestyle=list(linestyles.values())[0], edgecolor = 'k', linewidth=1.5\\\n# ) for i in range(len(models))]\n# plt.legend(handles, models, ncol=4, bbox_to_anchor=(0.9, 0.8), prop=fontbar, \\\n# frameon=False)\n# ax.axes.xaxis.set_visible(False)\n# ax.axes.yaxis.set_visible(False)\n# ax.set_yticks([])\n# ax.set_yticklabels([])\n# ax.set_xticks([])\n# ax.set_xticklabels([])\n# for axis in ['top','bottom','left','right']:\n# ax.spines[axis].set_visible(False)\n\n\n\n# # for i, v in enumerate(CTEs):\n# # ax.text(i, v+0.02, str(round(v, 3)), color='red', fontweight='bold')\n\n# # Save the figure and show\n# fig.autofmt_xdate()\n# plt.tight_layout()\n# #plt.savefig('wind_intensity_bar_plot.png')\n# plt.savefig('C:/Users/limgr/Desktop/wi_wt_bar_plots.png', dpi=500)\n# plt.show()\n\n\n", "import os\nimport math\nimport numpy as np\nimport matplotlib as matplot\nimport matplotlib.pyplot as plt\nfrom netCDF4 import Dataset\nimport csv\nfrom wrf import (to_np, getvar, smooth2d, get_cartopy, cartopy_xlim,\n cartopy_ylim, latlon_coords)\n\n\n\n# List the colors that will be used for tracing the track.\ncolors = ['blue', 'orange', 'red', 'purple', 'brown', 'pink', 'gray', 'olive', 'cyan', 'black', 'green', 'gold', 'lightcoral', 'turquoise']\nc =0\n\n\nmainpath = '/project/momen/meng/COAWST/results/WRF_VS_WRFSWAN_2/'\nHurricaneall = ['Gert','Nicole','Joaquin','Cristobal','Ike']\nReal_Hurricane_Data = ['Gert_Real_Track_Time_NOAA.csv',\n 
'Nicole_Real_Track_Time_NOAA.csv',\n 'Joaquin_Real_Track_Time_NOAA.csv',\n 'Cristobal_Real_Track_Time_NOAA.csv',\n 'Ike_Real_Track_Time_NOAA.csv']\ngridsize = ['2km']\nDirall = ['_isftcflx_1_changeClz_0p0100',\n '_isftcflx_1_changeClz_1p0000',\n '_isftcflx_1_changeClz_100p0000']\noutputpath = '/project/momen/meng/COAWST/results/WRF_VS_WRFSWAN_2/postprocessing_WRFONLY/0_Paper_figures/section3_change_pars_for_weak_winds/source_code_outputs_change_Clz_isftcflx_1/'\n\n\n# This function returns a list of all wrf files in the directory.\ndef list_files(Dir, ncfiles):\n \tfor f in os.listdir(Dir):\n \t \tif f.startswith('wrfout'):\n \t \t \tncfiles.append(f)\n \treturn (ncfiles)\n \n\n\n\nfor grids in gridsize:\n count1=0\n for Hurricane in Hurricaneall:\n rows=[]\n \n \n for Dir in Dirall:\n\n print('Current folder is: ')\n Dir_local = mainpath+Hurricane+ '/' +grids+ '/' +'WRFONLY_NoTurb_'+grids+Dir\n print(Dir_local)\n #row.append(Hurricane+Dir)\n \n # Set the working space>\n os.chdir(Dir_local)\n # initiate the list that will contain all wrf files in Dir directory.\n ncfiles = []\n # Use the list_files function to list all the wrf files in the directory.\n ncfiles = list_files(Dir_local, ncfiles)\n ncfiles = sorted(ncfiles)\n print (ncfiles)\n # initiate the list that will contain the hurricane-track data.\n row = []\n # Identify the time step\n Time_Step = 6\n k = 0\n # initiate the list that will contain the times.\n Times = []\n for tt in range(1):\n for ncfile in ncfiles:\n ncfile = Dataset(ncfile)\n ttt = np.array(getvar(ncfile, \"times\", tt))\n print('!!!!!!',ttt)\n ZNT_2D = np.array(getvar(ncfile, \"ZNT\", tt))\n U10_2D = np.array(getvar(ncfile, \"U10\", tt))\n V10_2D = np.array(getvar(ncfile, \"V10\", tt))\n UV10_2D = np.square(U10_2D)+np.square(V10_2D)\n idx = np.where(UV10_2D == np.amax(UV10_2D))\n\n # List the maximum wind intensity for all time steps.\t\n print(idx)\n row.append(float(ZNT_2D[(np.amin(idx[0]),np.amin(idx[1]))]))\n # list all the time steps\n Times.append(Time_Step*k)\n k = k+1 \n\n print (row)\n print (Times)\n rows.append(row)\n fields = [time for time in Times]\n print (fields)\n print (rows)\n with open(outputpath+Hurricane+'_ZNT_eyewall_'+grids+'.csv', 'w') as csvfile:\n csvwriter = csv.writer(csvfile)\n csvwriter.writerow(fields) \n csvwriter.writerows(rows)\n \n\n \n count1=count1+1\n\n\n\n \n \n\n ", "import os\nimport math\nimport numpy as np\nimport matplotlib as matplot\nimport matplotlib.pyplot as plt\nfrom netCDF4 import Dataset\nimport csv\nfrom wrf import (to_np, getvar, smooth2d, get_cartopy, cartopy_xlim,\n cartopy_ylim, latlon_coords)\n\n\n\n# List the colors that will be used for tracing the track.\ncolors = ['blue', 'orange', 'red', 'purple', 'brown', 'pink', 'gray', 'olive', 'cyan', 'black', 'green', 'gold', 'lightcoral', 'turquoise']\nc =0\n\n\n\n\n\n\n\n\nmainpath = '/project/momen/meng/COAWST/results/WRF_VS_WRFSWAN_2/'\nHurricaneall = ['Dorian','Maria','Irma','Katrina','Lorenzo']\nReal_Hurricane_Data = ['Dorian_Real_Track_Time_NOAA.csv',\n 'Maria_Real_Track_Time_NOAA.csv',\n 'Irma_Real_Track_Time_NOAA.csv',\n 'Katrina_Real_Track_Time_NOAA.csv',\n 'Lorenzo_Real_Track_Time_NOAA.csv']\n# Hurricaneall = ['Dorian']\n# Real_Hurricane_Data = ['Dorian_Real_Track_Time_NOAA.csv']\ngridsize = ['8km','16km']\nswansize = ['swgr8p0', 'swgr16p0']\nprefix = 'WRFSWAN_NoTurb_swdt10_cpdt7200_'\nDirall = ['_swh8_swt14_A0p12B4p5C0P11',\n '_swh8_swt14_A12B4p5C0P11',\n '_swh8_swt14_A1200B4p5C0P11',\n '_swh8_swt14_A120000B4p5C0P11']\noutputpath = 
'/project/momen/meng/COAWST/results/WRF_VS_WRFSWAN_2/postprocessing_WRFONLY/0_Paper_figures/section2_change_pars_for_strong_winds/source_code_outputs_change_A/'\n\n\n# This function returns a list of all wrf files in the directory.\ndef list_files(Dir, ncfiles):\n \tfor f in os.listdir(Dir):\n \t \tif f.startswith('wrfout'):\n \t \t \tncfiles.append(f)\n \treturn (ncfiles)\n\n\n\nfor gk in range(len(gridsize)):\n count1=0\n\n for Hurricane in Hurricaneall:\n rows=[]\n\n #Initiate the lists that will contain the real data variables\n Real_Times = []\n Real_Wnd_Ints = []\n Real_Long =[]\n #Open the file that contains the real data and extract the necessary variables\n print('Real track: '+outputpath+Real_Hurricane_Data[count1])\n with open(outputpath+Real_Hurricane_Data[count1]) as f:\n \t reader = csv.reader(f)\n \t next (reader)\n \t row_header = next(reader)\n \t #print (row_header)\n \t for row in reader:\n \t\t YYYY = (row[row_header.index('Time - year')])\n \t\t MM = (row[row_header.index('Time - month')])\n \t\t if (len(MM) == 1):\n \t\t\t MM = '0' + MM\n \t\t DD = (row[row_header.index('Time - day')])\n \t\t if (len(DD) == 1):\n \t\t\t DD = '0' + DD\n \t\t HR = (row[row_header.index('Time - hour')])\n \t\t if (len(HR) == 1):\n \t\t\t HR = '0' + HR\n \t\t MN = (row[row_header.index('Time - min')])\n \t\t if (len(MN) == 1):\n \t\t\t MN = '0' + MN\n \t\t Time = YYYY + '-' + MM + '-' + DD + '_' + HR + '_' + MN\n \t\t Real_Wnd_Ints.append(float(row[row_header.index('Wind Speed(kt)')]))\n \t\t Real_Times.append(Time)\n\t\t\n print (Real_Wnd_Ints) \n print (Real_Times) \n rows.append(Real_Wnd_Ints)\n count1=count1+1 \n \n for Dir in Dirall:\n\n print('Current folder is: ')\n Dir_local = mainpath+Hurricane+ '/' +gridsize[gk]+ '/' +prefix+swansize[gk]+Dir\n print(Dir_local)\n #row.append(Hurricane+Dir)\n \n # Set the working space>\n os.chdir(Dir_local)\n # initiate the list that will contain all wrf files in Dir directory.\n ncfiles = []\n # Use the list_files function to list all the wrf files in the directory.\n ncfiles = list_files(Dir_local, ncfiles)\n ncfiles = sorted(ncfiles)\n print (ncfiles)\n # initiate the list that will contain the hurricane-track data.\n row = []\n # Identify the time step\n Time_Step = 6\n k = 0\n # initiate the list that will contain the times.\n Times = []\n for tt in range(1):\n for ncfile in ncfiles:\n ncfile = Dataset(ncfile)\n ttt = np.array(getvar(ncfile, \"times\", tt))\n print('!!!!!!',ttt)\n # Get U and V components of wind intensity at 10m of altitude.\n U10_2D = np.array(getvar(ncfile, \"U10\", tt))\n #print (U10_2D.shape)\n V10_2D = np.array(getvar(ncfile, \"V10\", tt))\n #print (V10_2D.shape)\n slp_2D = np.array(getvar(ncfile, \"slp\", tt))\n slp_2D = slp_2D.flatten()\n # Reshape the U and V into a 1D array.\n U10_1D = U10_2D.flatten()\n #print (U10_1D.shape)\n V10_1D = V10_2D.flatten()\n #print (V10_1D.shape)\n WND_SPD_10 = U10_1D\n # Calculate the wind intensity at each point of the map.\n for i in range (WND_SPD_10.size - 1):\n WND_SPD_10[i] = math.sqrt((U10_1D[i]**2)+(V10_1D[i]**2))\n # Search for the maximum wind intensity at aspecific time step.\t\n WND_SPD_10_max = np.amax(WND_SPD_10)\n slp_min = np.amin(slp_2D)\t\n # List the maximum wind intensity for all time steps.\t\n row.append(WND_SPD_10_max)\n # list all the time steps\n Times.append(Time_Step*k)\n k = k+1 \n \n \n\n\n print (row)\n print (Times)\n rows.append(row)\n fields = [time for time in Times]\n print (fields)\n print (rows)\n with 
open(outputpath+Hurricane+'_wind_intensity_'+gridsize[gk]+'.csv', 'w') as csvfile:\n csvwriter = csv.writer(csvfile)\n csvwriter.writerow(fields) \n csvwriter.writerows(rows)\n \n\n \n\n\n\n\n\n\n\n\n \n" ]
[ [ "numpy.amin", "numpy.array" ], [ "matplotlib.pyplot.legend", "numpy.radians", "numpy.sqrt", "numpy.concatenate", "numpy.mean", "matplotlib.pyplot.gca", "matplotlib.pyplot.tight_layout", "matplotlib.ticker.StrMethodFormatter", "numpy.sin", "numpy.std", "numpy.apply_along_axis", "matplotlib.gridspec.GridSpec", "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "matplotlib.pyplot.ylim", "matplotlib.font_manager.FontProperties", "matplotlib.pyplot.savefig", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.yticks", "matplotlib.pyplot.yscale", "numpy.cos", "numpy.percentile", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "numpy.vstack" ], [ "numpy.radians", "numpy.sqrt", "matplotlib.pyplot.gca", "matplotlib.ticker.StrMethodFormatter", "numpy.sin", "numpy.apply_along_axis", "matplotlib.gridspec.GridSpec", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "matplotlib.pyplot.ylim", "matplotlib.font_manager.FontProperties", "matplotlib.pyplot.savefig", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.xticks", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.yscale", "numpy.cos", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.yticks" ], [ "numpy.square", "numpy.amax", "numpy.amin" ], [ "numpy.amin", "numpy.amax" ] ]
ZurMaD/DeblurGANv2
[ "bf8ab7d178ecf32db7eba588ede3f3f121d17470" ]
[ "predict.py" ]
[ "import os\nfrom glob import glob\nfrom typing import Optional\n\nimport cv2\nimport numpy as np\nimport torch\nimport yaml\nfrom fire import Fire\nfrom tqdm import tqdm\n\nfrom aug import get_normalize\nfrom models.networks import get_generator\n\n\nclass Predictor:\n def __init__(self, weights_path: str, model_name: str = ''):\n with open('/content/DeblurGANv2/config/config.yaml') as cfg:\n config = yaml.load(cfg)\n model = get_generator(model_name or config['model'])\n v1=torch.load(weights_path)\n print(v1)\n v2=torch.load(weights_path)['model']\n print(v2)\n model.load_state_dict(torch.load(weights_path)['model'])\n self.model = model.cuda()\n self.model.train(True)\n # GAN inference should be in train mode to use actual stats in norm layers,\n # it's not a bug\n self.normalize_fn = get_normalize()\n\n @staticmethod\n def _array_to_batch(x):\n x = np.transpose(x, (2, 0, 1))\n x = np.expand_dims(x, 0)\n return torch.from_numpy(x)\n\n def _preprocess(self, x: np.ndarray, mask: Optional[np.ndarray]):\n x, _ = self.normalize_fn(x, x)\n if mask is None:\n mask = np.ones_like(x, dtype=np.float32)\n else:\n mask = np.round(mask.astype('float32') / 255)\n\n h, w, _ = x.shape\n block_size = 32\n min_height = (h // block_size + 1) * block_size\n min_width = (w // block_size + 1) * block_size\n\n pad_params = {'mode': 'constant',\n 'constant_values': 0,\n 'pad_width': ((0, min_height - h), (0, min_width - w), (0, 0))\n }\n x = np.pad(x, **pad_params)\n mask = np.pad(mask, **pad_params)\n\n return map(self._array_to_batch, (x, mask)), h, w\n\n @staticmethod\n def _postprocess(x: torch.Tensor) -> np.ndarray:\n x, = x\n x = x.detach().cpu().float().numpy()\n x = (np.transpose(x, (1, 2, 0)) + 1) / 2.0 * 255.0\n return x.astype('uint8')\n\n def __call__(self, img: np.ndarray, mask: Optional[np.ndarray], ignore_mask=True) -> np.ndarray:\n (img, mask), h, w = self._preprocess(img, mask)\n with torch.no_grad():\n inputs = [img.cuda()]\n if not ignore_mask:\n inputs += [mask]\n pred = self.model(*inputs)\n return self._postprocess(pred)[:h, :w, :]\n\ndef process_video(pairs, predictor, output_dir):\n for video_filepath, mask in tqdm(pairs):\n video_filename = os.path.basename(video_filepath)\n output_filepath = os.path.join(output_dir, os.path.splitext(video_filename)[0]+'_deblur.mp4')\n video_in = cv2.VideoCapture(video_filepath)\n fps = video_in.get(cv2.CAP_PROP_FPS)\n width = int(video_in.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(video_in.get(cv2.CAP_PROP_FRAME_HEIGHT))\n total_frame_num = int(video_in.get(cv2.CAP_PROP_FRAME_COUNT))\n video_out = cv2.VideoWriter(output_filepath, cv2.VideoWriter_fourcc(*'MP4V'), fps, (width, height))\n tqdm.write(f'process {video_filepath} to {output_filepath}, {fps}fps, resolution: {width}x{height}')\n for frame_num in tqdm(range(total_frame_num), desc=video_filename):\n res, img = video_in.read()\n if not res:\n break\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n pred = predictor(img, mask)\n pred = cv2.cvtColor(pred, cv2.COLOR_RGB2BGR)\n video_out.write(pred)\n\ndef main(img_pattern: str,\n mask_pattern: Optional[str] = None,\n weights_path='/content/best_fpn.h5',\n out_dir='/content/submit/',\n side_by_side: bool = False,\n video: bool = False):\n def sorted_glob(pattern):\n return sorted(glob(pattern))\n\n imgs = sorted_glob(img_pattern)\n masks = sorted_glob(mask_pattern) if mask_pattern is not None else [None for _ in imgs]\n pairs = zip(imgs, masks)\n names = sorted([os.path.basename(x) for x in glob(img_pattern)])\n print(weights_path)\n predictor = 
Predictor(weights_path=weights_path)\n\n os.makedirs(out_dir, exist_ok=True)\n if not video:\n for name, pair in tqdm(zip(names, pairs), total=len(names)):\n f_img, f_mask = pair\n img, mask = map(cv2.imread, (f_img, f_mask))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n pred = predictor(img, mask)\n if side_by_side:\n pred = np.hstack((img, pred))\n pred = cv2.cvtColor(pred, cv2.COLOR_RGB2BGR)\n cv2.imwrite(os.path.join(out_dir, name),\n pred)\n else:\n process_video(pairs, predictor, out_dir)\n\n\nif __name__ == '__main__':\n Fire(main)\n" ]
[ [ "numpy.hstack", "numpy.expand_dims", "numpy.ones_like", "numpy.pad", "torch.load", "torch.from_numpy", "torch.no_grad", "numpy.transpose" ] ]
NCcoco/kaggle-project
[ "bff565bcfa8395c87920068557678566631b8d99" ]
[ "Bird-Species/transformer/vision-transformer3.py" ]
[ "import tensorflow as tf\nimport tensorflow_hub as hub\nimport tensorflow.keras as keras\nimport tensorflow.keras.layers as layers\n\nfrom PIL import Image\nfrom io import BytesIO\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport requests\nimport os\nimport platform\nimport pathlib\nimport random\nimport math\n\n\nbase_path = os.path.abspath(\".\")\ndir_separator = \"/\"\nif platform.system().lower() == 'windows':\n dir_separator = \"\\\\\"\n base_path = base_path[:(base_path.index('Bird-Species'))]\n\n\n# 超参数设置\nnum_classes = 325\nimage_size = 224\npatch_size = 32\nepochs = 30\nbatch_size = 128\nlearning_rate = keras.optimizers.schedules.InverseTimeDecay(\n initial_learning_rate=0.02,\n decay_steps=100,\n decay_rate=0.7\n)\nlearning_rate = 0.002\n\n\n# 准备数据集\ndef load_dataset(batch_size=128):\n train_path = ['Bird-Species', 'datasets', 'train']\n # 获取所有图片地址\n train_dir = base_path + dir_separator.join(train_path)\n # 下面的方式获得一个Path类型的训练图片根路径\n train_root = pathlib.Path(train_dir)\n # # Path类型提供一个glob方法将保存的根路径下所有的文件地址分割为list\n # all_image_paths = list(train_root.glob(\"*/*\"))\n # all_image_paths = [str(path) for path in all_image_paths]\n #\n # random.shuffle(all_image_paths)\n\n train_ds = keras.utils.image_dataset_from_directory(\n train_root,\n image_size=(image_size, image_size),\n batch_size=batch_size\n )\n return train_ds\n\n\n# 加载验证集\ndef load_valid_dataset():\n valid_dir = ['Bird-Species', 'datasets', 'valid']\n valid_dir = base_path + dir_separator.join(valid_dir)\n return __load_dataset(valid_dir)\n\n\ndef __load_dataset(dir, batch_size=64, image_size=(224, 224)):\n data_root = pathlib.Path(dir)\n # 获取所有的图片路径\n all_image_paths = list(data_root.glob('*/*'))\n all_image_paths = [str(path) for path in all_image_paths]\n # 打乱路径list\n random.shuffle(all_image_paths)\n image_count = len(all_image_paths)\n # print(all_image_paths[:10])\n\n # c = np.array(imageio.imread(all_image_paths[0]))\n # plt.imshow(c)\n # plt.show()\n\n train_ds = tf.keras.utils.image_dataset_from_directory(\n data_root,\n image_size=image_size,\n batch_size=batch_size)\n # print(train_ds)\n class_names = train_ds.class_names\n # print(class_names)\n # plt.figure(figsize=(10, 10))\n # for images, labels in train_ds.take(1):\n # for i in range(9):\n # ax = plt.subplot(3, 3, i + 1)\n # plt.imshow(images[i].numpy().astype(\"uint8\"))\n # plt.title(class_names[labels[i]])\n # plt.axis(\"off\")\n # plt.show()\n\n normalization_layer = tf.keras.layers.Rescaling(1. 
/ 255)\n normalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))\n\n # train_ds = normalized_ds.cache().prefetch(buffer_size=AUTOTUNE)\n return normalized_ds\n\n\ndef norm_img(image, label):\n image = tf.image.resize(image, size=(224, 224))\n return tf.cast(image, tf.float32) / 255., label\n\n\nAUTOTUNE = tf.data.experimental.AUTOTUNE\ntrain_dataset = load_dataset(batch_size)\ntrain_dataset = train_dataset.map(norm_img, num_parallel_calls=AUTOTUNE)\ntrain_dataset = train_dataset.cache()\ntrain_dataset = train_dataset.prefetch(AUTOTUNE)\n\nvalid_dataset = load_valid_dataset()\n\nloss_object = tf.keras.losses.SparseCategoricalCrossentropy()\n\n\n\n\nmodel = tf.keras.Sequential([\n # layers.InputLayer((image_size, image_size, 3)),\n hub.KerasLayer(r\"models\", trainable=False),\n keras.layers.Dense(num_classes, activation=\"softmax\")\n])\n\nmodel.build(input_shape=(None, 224, 224, 3))\nprint(model.summary())\n# model.compile(optimizer='adam',\n# loss=keras.losses.SparseCategoricalCrossentropy(),\n# metrics=['accuracy'])\n\n# model.fit(ds_train, batch_size, epochs)\n\n\ntrain_loss = tf.keras.metrics.Mean(name='train_loss')\ntrain_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')\n\noptimizer = keras.optimizers.Adam(learning_rate=learning_rate)\n\nvalid_loss = tf.keras.metrics.Mean(name='valid_loss')\nvalid_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='valid_accuracy')\n\n\n# tf.config.experimental_run_functions_eagerly(True)\[email protected]\ndef train_step(images, labels, optimizer):\n with tf.GradientTape() as tape:\n predictions = model(images, training=True)\n loss_aux = loss_object(y_true=labels, y_pred=predictions)\n loss = 0.5 * loss_aux + 0.5 * loss_object(y_true=labels, y_pred=predictions)\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(grads_and_vars=zip(gradients, model.trainable_variables))\n\n train_loss(loss)\n train_accuracy(labels, predictions)\n\n\[email protected]\ndef valid_step(images, labels):\n predictions = model(images, training=False)\n v_loss = loss_object(labels, predictions)\n\n valid_loss(v_loss)\n valid_accuracy(labels, predictions)\n\n\n# start training\nfor epoch in range(epochs):\n train_loss.reset_states()\n train_accuracy.reset_states()\n valid_loss.reset_states()\n valid_accuracy.reset_states()\n step = 0\n for images, labels in train_dataset:\n step += 1\n\n train_step(images, labels, optimizer)\n print(f\"Epoch: {epoch + 1}/{epochs}, \"\n f\"step: {step}/{math.ceil(47332 / batch_size)},\"\n f\"learning_rate: {optimizer.lr.numpy():.7f}\"\n f\" loss: {train_loss.result():.5f},\"\n f\" accuracy: { train_accuracy.result():.5f}\")\n\n for valid_images, valid_labels in valid_dataset:\n valid_step(valid_images, valid_labels)\n\n print(f\"Epoch: {epoch + 1}/{epochs}, \"\n f\"valid loss: {valid_loss.result():.5f}, \"\n f\"valid accuracy: {valid_accuracy.result():.5f}, \")\n\n # 每训练一轮就降低80%\n learning_rate = learning_rate * 0.2\n optimizer.lr = learning_rate\n\n\n# def preprocess_image(image):\n# image = np.array(image)\n# image_resized = tf.image.resize(image, (224, 224))\n# image_resized = tf.cast(image_resized, tf.float32)\n# image_resized = (image_resized - 127.5) / 127.5\n# return tf.expand_dims(image_resized, 0).numpy()\n#\n#\n# def load_image_from_url(url):\n# response = requests.get(url)\n# image = Image.open(BytesIO(response.content))\n# image = preprocess_image(image)\n# return image\n#\n#\n# img_url = 
\"https://p0.pikrepo.com/preview/853/907/close-up-photo-of-gray-elephant.jpg\"\n# image = load_image_from_url(img_url)\n# #\n# # plt.imshow((image[0] + 1) / 2)\n# # plt.show()\n# predictions = model.predict(image)\n# print(predictions)\n\n# with open(\"models/ilsvrc2012_wordnet_lemmas.txt\", \"r\") as f:\n# lines = f.readlines()\n# imagenet_int_to_str = [line.rstrip() for line in lines]\n#\n# predicted_label = imagenet_int_to_str[int(np.argmax(predictions))]\n# print(predicted_label)\n\n\n" ]
[ [ "tensorflow.keras.utils.image_dataset_from_directory", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.keras.layers.Dense", "tensorflow.cast", "tensorflow.keras.optimizers.schedules.InverseTimeDecay", "tensorflow.keras.optimizers.Adam", "tensorflow.image.resize", "tensorflow.keras.layers.Rescaling", "tensorflow.keras.metrics.SparseCategoricalAccuracy", "tensorflow.keras.metrics.Mean", "tensorflow.GradientTape" ] ]
kineticengines/text-to-text-transfer-transformer
[ "97cdc174f138e1aa5c189593ed2be77236dcb323" ]
[ "t5/data/preprocessors_test.py" ]
[ "# Copyright 2020 The T5 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for from t5.preprocessors.\"\"\"\n\nimport functools\n\nfrom absl.testing import absltest\nimport gin\nfrom t5.data import preprocessors as prep\nfrom t5.data import test_utils\nfrom t5.data import utils\nfrom t5.data.dataset_providers import Feature\nimport tensorflow as tf\n\nmock = absltest.mock\nassert_dataset = test_utils.assert_dataset\n\n\nclass PreprocessorsTest(tf.test.TestCase):\n def test_regular_noise_mask(self):\n length = 800\n span_length = 2\n noise_density = 0.25\n noise_mask = prep.regular_noise_mask(length=length,\n noise_density=noise_density,\n seeds=[(0, 1), (2, 3)],\n min_span_length=span_length,\n max_span_length=span_length)\n num_masked = tf.reduce_sum(tf.cast(noise_mask, tf.int32))\n self.assertEqual(self.evaluate(num_masked), length * noise_density)\n\n def test_random_prefix_noise_mask(self):\n for _ in range(100):\n length = 10\n noise_density = 0.5\n noise_mask = prep.random_prefix_noise_mask(\n length=length, noise_density=noise_density, seeds=[(0, 1)])\n first = noise_mask[0]\n last = noise_mask[-1]\n self.assertTrue(self.evaluate(first))\n self.assertFalse(self.evaluate(last))\n\n def test_random_spans_noise_mask(self):\n length = 32\n noise_density = 0.25\n mean_noise_span_length = 2.0\n # there should be 4 noise spans with a total length of 8.\n noise_mask = prep.random_spans_noise_mask(length, noise_density,\n [(1, 2), (3, 4)],\n mean_noise_span_length)\n output = self.evaluate(tf.cast(noise_mask, tf.int32))\n expected_output = [\n 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 1, 0, 0, 1\n ]\n self.assertAllEqual(output, expected_output)\n\n def test_noise_token_to_sentinel(self):\n vocabulary = test_utils.MockVocabulary({'foo': [10]}, vocab_size=1000)\n tokens = tf.constant([10, 11, 12, 13, 14, 15])\n noise_mask = tf.constant([True, True, False, False, True, False])\n expected_output = [999, 999, 12, 13, 999, 15]\n output = self.evaluate(\n prep.noise_token_to_sentinel(tokens, noise_mask, vocabulary, ()))\n self.assertAllEqual(output, expected_output)\n\n def test_noise_span_to_sentinel(self):\n vocabulary = test_utils.MockVocabulary({'foo': [10]}, vocab_size=1000)\n tokens = tf.constant([10, 11, 12, 13, 14, 15])\n noise_mask = tf.constant([True, True, False, False, True, False])\n expected_output = [999, 12, 13, 999, 15]\n output = self.evaluate(\n prep.noise_span_to_sentinel(tokens, noise_mask, vocabulary, ()))\n self.assertAllEqual(output, expected_output)\n\n def test_nonnoise_span_to_sentinel(self):\n vocabulary = test_utils.MockVocabulary({'foo': [10]}, vocab_size=1000)\n tokens = tf.constant([10, 11, 12, 13, 14, 15])\n noise_mask = tf.constant([True, True, False, False, True, False])\n expected_output = [10, 11, 999, 14, 999]\n output = self.evaluate(\n prep.nonnoise_span_to_sentinel(tokens, noise_mask, vocabulary, ()))\n self.assertAllEqual(output, expected_output)\n\n def 
test_noise_span_to_unique_sentinel(self):\n vocabulary = test_utils.MockVocabulary({'foo': [10]}, vocab_size=1000)\n tokens = tf.constant([10, 11, 12, 13, 14, 15])\n noise_mask = tf.constant([True, True, False, False, True, False])\n expected_output = [999, 12, 13, 998, 15]\n output = self.evaluate(\n prep.noise_span_to_unique_sentinel(tokens, noise_mask, vocabulary,\n ()))\n self.assertAllEqual(output, expected_output)\n\n def test_drop_noise_tokens(self):\n vocabulary = test_utils.MockVocabulary({'foo': [10]}, vocab_size=1000)\n tokens = tf.constant([10, 11, 12, 13, 14, 15])\n noise_mask = tf.constant([True, True, False, False, True, False])\n expected_output = [12, 13, 15]\n output = self.evaluate(\n prep.drop_noise_tokens(tokens, noise_mask, vocabulary, ()))\n self.assertAllEqual(output, expected_output)\n\n def test_drop_nonnoise_tokens(self):\n vocabulary = test_utils.MockVocabulary({'foo': [10]}, vocab_size=1000)\n tokens = tf.constant([10, 11, 12, 13, 14, 15])\n noise_mask = tf.constant([True, True, False, False, True, False])\n expected_output = [10, 11, 14]\n output = self.evaluate(\n prep.drop_nonnoise_tokens(tokens, noise_mask, vocabulary, ()))\n self.assertAllEqual(output, expected_output)\n\n def test_permute_noise_tokens(self):\n tf.random.set_seed(55)\n vocabulary = test_utils.MockVocabulary({'foo': [10]}, vocab_size=1000)\n tokens = tf.constant([10, 11, 12, 13, 14, 15])\n noise_mask = tf.constant([True, True, False, False, True, False])\n expected_output = [10, 14, 12, 13, 11, 15]\n output = self.evaluate(\n prep.permute_noise_tokens(tokens, noise_mask, vocabulary,\n [(0, 1)]))\n self.assertAllEqual(output, expected_output)\n\n def test_noise_token_to_gathered_token(self):\n vocabulary = test_utils.MockVocabulary({'foo': [10]}, vocab_size=1000)\n tokens = tf.constant([10, 11, 12, 13, 14, 15])\n noise_mask = tf.constant([True, True, False, False, True, False])\n expected_output = [13, 14, 12, 13, 10, 15]\n output = self.evaluate(\n prep.noise_token_to_gathered_token(tokens, noise_mask, vocabulary,\n [(55, 56)]))\n self.assertAllEqual(output, expected_output)\n\n def test_noise_token_to_random_token(self):\n vocabulary = test_utils.MockVocabulary({'foo': [10]}, vocab_size=1000)\n tokens = tf.constant([10, 11, 12, 13, 14, 15])\n noise_mask = tf.constant([True, True, False, False, True, False])\n expected_output = [961, 553, 12, 13, 60, 15]\n\n output = self.evaluate(\n prep.noise_token_to_random_token(tokens,\n noise_mask,\n vocabulary,\n seeds=[(55, 56)]))\n self.assertAllEqual(output, expected_output)\n\n def test_noise_token_to_random_token_or_sentinel(self):\n vocabulary = test_utils.MockVocabulary({'foo': [10]}, vocab_size=1000)\n tokens = tf.constant(list(range(10)))\n noise_mask = tf.constant(\n [True, True, False, False, True, False, True, True, True, True])\n expected_output = [999, 348, 2, 3, 108, 5, 999, 999, 999, 999]\n output = self.evaluate(\n prep.noise_token_to_random_token_or_sentinel(tokens,\n noise_mask,\n vocabulary,\n seeds=[(55, 56),\n (57, 58)],\n random_prob=0.2))\n self.assertAllEqual(output, expected_output)\n\n def test_rekey(self):\n og_dataset = tf.data.Dataset.from_tensors({\n 'text': 'That is good.',\n 'other': 'That is bad.'\n })\n dataset = prep.rekey(og_dataset, {\n 'inputs': 'other',\n 'targets': 'text'\n })\n assert_dataset(dataset, {\n 'inputs': 'That is bad.',\n 'targets': 'That is good.'\n })\n\n dataset = prep.rekey(og_dataset, {'targets': 'text'})\n assert_dataset(dataset, {'targets': 'That is good.'})\n\n dataset = 
prep.rekey(og_dataset, {'inputs': 'text'})\n assert_dataset(dataset, {'inputs': 'That is good.'})\n\n dataset = prep.rekey(og_dataset)\n assert_dataset(dataset, {\n 'text': 'That is good.',\n 'other': 'That is bad.'\n })\n\n dataset = prep.rekey(og_dataset, {'inputs': 'text', 'targets': None})\n assert_dataset(dataset, {'inputs': 'That is good.', 'targets': ''})\n\n def test_translate(self):\n og_dataset = tf.data.Dataset.from_tensor_slices({\n 'en': ['That is good.'],\n 'de': ['Das ist gut.']\n })\n\n dataset = prep.translate(og_dataset, 'en', 'de')\n assert_dataset(\n dataset, {\n 'inputs': 'translate English to German: That is good.',\n 'targets': 'Das ist gut.',\n })\n\n def test_summarize(self):\n og_dataset = tf.data.Dataset.from_tensor_slices({\n 'article': ['An article.'],\n 'highlights': ['A summary.']\n })\n\n dataset = prep.summarize(og_dataset, 'article', 'highlights')\n assert_dataset(\n dataset,\n {\n 'inputs': 'summarize: An article.',\n 'targets': 'A summary.'\n },\n )\n\n def assertStringEqual(self, a, b):\n self.assertTrue(tf.equal(a, b), '%s != %s' % (a, b))\n\n def test_pad_punctuation(self):\n self.assertStringEqual(\n ' \" This is a string with \" punctuation ( 1845 - 1986 ) \" . ',\n prep._pad_punctuation(\n '\"This is a string with \"punctuation (1845-1986) \".'))\n\n def test_span_answer(self):\n self.assertStringEqual(\n 'start: 2 end: 3',\n prep._span_answer(tf.constant('Called the Denver Broncos.'),\n tf.constant('Denver Broncos')))\n # Not found.\n self.assertStringEqual(\n '',\n prep._span_answer(tf.constant('Called the Denver Broncos.'),\n tf.constant('Denver Bronscos')))\n\n def test_squad(self):\n og_dataset = tf.data.Dataset.from_tensors({\n 'id': 'testid',\n 'context': 'Some context.',\n 'question': 'A question?',\n 'answers': {\n 'text': ['The answer.', 'Another answer.'],\n }\n })\n\n dataset = prep.squad(og_dataset)\n assert_dataset(\n dataset, {\n 'id': 'testid',\n 'inputs': 'question: A question ? context: Some context . ',\n 'targets': 'The answer . ',\n 'context': 'Some context . ',\n 'question': 'A question ? ',\n 'answers': ['The answer . ', 'Another answer . '],\n })\n\n def test_pad_nonspaced_languages(self):\n dataset = tf.data.Dataset.from_tensor_slices(\n {'text': ['Hello there. 你好吗?']})\n dataset = prep.pad_nonspaced_languages(dataset)\n assert_dataset(dataset, {\n 'text': 'Hello there. 你 好 吗 ?',\n })\n\n def test_triviaqa(self):\n answers = ['key', 'keys']\n contexts = [\n 'The answer to all questions is the key.',\n 'The answer to all questions are the keys.'\n ]\n og_dataset = tf.data.Dataset.from_tensors({\n 'question': 'What is the answer?',\n 'entity_pages': {\n 'wiki_context': contexts\n },\n 'answer': {\n 'normalized_aliases': answers,\n 'normalized_value': 'key'\n }\n })\n\n dataset = prep.trivia_qa(og_dataset)\n assert_dataset(dataset, [{\n 'inputs':\n 'question: What is the answer ? context: The answer to all questions is the key . ',\n 'targets': 'key'\n }, {\n 'inputs':\n 'question: What is the answer ? context: The answer to all questions are the keys . ',\n 'targets': 'key'\n }, {\n 'inputs':\n 'question: What is the answer ? context: The answer to all questions are the keys . 
',\n 'targets': 'keys'\n }])\n\n def test_squad_span_space_tokenized(self):\n answers = ['the answer', 'answer']\n d = tf.data.Dataset.from_tensors(\n {\n 'id': 'a',\n 'context': 'context with the answer.',\n 'question': 'Say what?',\n 'answers': {\n 'text': answers,\n },\n }, )\n og_dataset = d.concatenate(\n tf.data.Dataset.from_tensors(\n { # Filter this out because answer is not in context.\n 'id': 'b',\n 'context': 'context without answers.',\n 'question': 'Say what?',\n 'answers': {\n 'text': answers,\n }\n }))\n\n dataset = prep.squad_span_space_tokenized(og_dataset)\n assert_dataset(\n dataset, {\n 'id': 'a',\n 'inputs':\n 'question: Say what ? context: context with the answer . ',\n 'targets': 'start: 2 end: 3',\n 'context': 'context with the answer . ',\n 'question': 'Say what ? ',\n 'answers': answers,\n })\n\n def test_glue(self):\n test_idx = 10\n input_data = {\n 'q1': 'How so?',\n 'q2': 'Why not?',\n 'q3': 'Who?',\n 'idx': test_idx,\n 'label': 0,\n }\n og_dataset = tf.data.Dataset.from_tensors(input_data)\n benchmark_name = 'qqp'\n label_names = ['not_duplicate', 'duplicate']\n\n dataset = prep.glue(og_dataset, benchmark_name, label_names)\n assert_dataset(\n dataset,\n {\n 'inputs': 'qqp q1: How so? q2: Why not? q3: Who?',\n 'targets': 'not_duplicate',\n 'idx': test_idx,\n },\n )\n\n # Test `feature_names` argument.\n dataset = prep.glue(og_dataset,\n benchmark_name,\n label_names,\n feature_names=['q3', 'q1'])\n assert_dataset(\n dataset,\n {\n 'inputs': 'qqp q3: Who? q1: How so?',\n 'targets': 'not_duplicate',\n 'idx': test_idx,\n },\n )\n\n # Test target is <unk> when label is -1\n input_data['label'] = -1\n og_dataset = tf.data.Dataset.from_tensors(input_data)\n dataset = prep.glue(og_dataset, benchmark_name, label_names)\n assert_dataset(\n dataset,\n {\n 'inputs': 'qqp q1: How so? q2: Why not? q3: Who?',\n 'targets': '<unk>',\n 'idx': test_idx,\n },\n )\n\n # Test id_key argument\n input_data = {\n 'q1': 'How so?',\n 'q2': 'Why not?',\n 'q3': 'Who?',\n 'uid': test_idx,\n 'label': 0,\n }\n og_dataset = tf.data.Dataset.from_tensors(input_data)\n dataset = prep.glue(og_dataset,\n benchmark_name,\n label_names,\n feature_names=['q1', 'q2', 'q3'],\n id_key='uid')\n assert_dataset(\n dataset,\n {\n 'inputs': 'qqp q1: How so? q2: Why not? q3: Who?',\n 'targets': 'not_duplicate',\n 'idx': test_idx,\n },\n )\n\n def test_multirc(self):\n og_dataset = tf.data.Dataset.from_tensors({\n 'paragraph':\n '<b>Sent 1: </b>Once upon a time, there was a squirrel named Joey.<br><b>Sent 2: </b>Joey loved to go outside and play with his cousin Jimmy.',\n 'question':\n 'Why was Joey surprised the morning he woke up for breakfast?',\n 'answer': 'There was only pie to eat',\n 'label': 1,\n 'idx': {\n 'paragraph': 5,\n 'question': 1,\n 'answer': 3\n }\n })\n\n dataset = prep.glue(\n og_dataset,\n 'multirc',\n label_names=['False', 'True'],\n feature_names=('question', 'answer', 'paragraph'),\n )\n assert_dataset(\n dataset,\n {\n 'inputs':\n 'multirc question: Why was Joey surprised the morning he woke up for breakfast? answer: There was only pie to eat paragraph: Sent 1: Once upon a time, there was a squirrel named Joey. 
Sent 2: Joey loved to go outside and play with his cousin Jimmy.',\n 'targets': 'True',\n 'idx/paragraph': 5,\n 'idx/question': 1,\n 'idx/answer': 3,\n },\n )\n\n def test_stsb(self):\n test_idx = 10\n og_dataset = tf.data.Dataset.from_tensors(\n {\n 'sentence1': ['Big news.'],\n 'sentence2': ['No idea.'],\n 'label': [2.8],\n 'idx': test_idx,\n }, )\n\n dataset = prep.stsb(og_dataset)\n assert_dataset(\n dataset,\n {\n 'inputs': 'stsb sentence1: Big news. sentence2: No idea.',\n 'targets': '2.8',\n 'idx': test_idx,\n },\n )\n\n # Test when floating point label is not in [0., 0.2, ..., 4.8, 5.0]\n og_dataset = tf.data.Dataset.from_tensor_slices({\n 'sentence1': ['Big news.'],\n 'sentence2': ['No idea.'],\n 'label': [1.66],\n 'idx': [test_idx],\n })\n dataset = prep.stsb(og_dataset)\n assert_dataset(\n dataset,\n {\n 'inputs': 'stsb sentence1: Big news. sentence2: No idea.',\n 'targets': '1.6',\n 'idx': test_idx,\n },\n )\n\n def test_multi_translate(self):\n languages = ['en', 'de', 'fr']\n translations = ['That is good.', 'Das ist gut.', 'Ca c\\'est bon.']\n og_dataset = tf.data.Dataset.from_tensors({\n 'translations': {\n 'language': languages,\n 'translation': translations\n }\n })\n\n dataset = prep.multi_translate(og_dataset, 'en', 'de')\n assert_dataset(\n dataset, {\n 'inputs': 'translate English to German: That is good.',\n 'targets': 'Das ist gut.',\n })\n\n # Test that it skips over the whole (single-entry) dataset when we ask for\n # a language which is not in the language list\n dataset = prep.multi_translate(og_dataset, 'en', 'sk')\n assert_dataset(dataset, [])\n\n def test_fill_in_the_blank(self):\n num_tries = 1000\n original = 'This is a long test with lots of words to see if it works ok.'\n dataset = tf.data.Dataset.from_tensor_slices(\n {'text': [original] * num_tries})\n dataset = prep.fill_in_the_blank(dataset)\n for data in test_utils.dataset_as_text(dataset):\n # Remove the prefix from the start of the input string\n self.assertTrue(data['inputs'].startswith('fill: '))\n inp = data['inputs'].replace('fill: ', '')\n # Split output into chunks according to X locations.\n out_split = data['targets'].split('X')\n # Make sure that there is at least one blank\n self.assertGreater(len(out_split), 1)\n # Remove leading/trailing whitespace and any empty chunks\n out_split = [o.strip() for o in out_split if o]\n # Replace 'X' with entries from out_split by popping from the front\n reconstructed = ''.join(\n [i if i != 'X' else out_split.pop(0) for i in inp])\n self.assertEqual(reconstructed, original)\n\n def test_fill_in_the_blank_sized(self):\n def _validate_data(data, valid_bins, og_length=15):\n # Remove the prefix from the start of the input string\n self.assertTrue(data['inputs'].startswith('fill: '))\n inp = data['inputs'].replace('fill: ', '')\n # Split input into chunks according to blank locations.\n inp_split = inp.split('_')\n # Make sure that there is exactly one blank (could be at beginning/end).\n self.assertLen(inp_split, 3)\n # Make sure reconstruction is accurate.\n reconstructed = ''.join([inp_split[0], data['targets']] +\n inp_split[2:])\n self.assertEqual(reconstructed, original)\n # Make sure blank size is correctly chosen.\n blank_bin = int(inp_split[1])\n self.assertIn(blank_bin, valid_bins)\n blank_size = len(data['targets'].split())\n self.assertGreaterEqual(blank_size, min(og_length, valid_bins[0]))\n self.assertLessEqual(blank_size, valid_bins[-1])\n return blank_size, blank_bin\n\n num_tries = 250\n original = 'This is a long test with lots of words 
to see if it works ok.'\n dataset = tf.data.Dataset.from_tensor_slices(\n {'text': [original] * num_tries})\n dataset = prep.fill_in_the_blank_sized(dataset, [1, 4])\n num_outputs = 0\n for data in test_utils.dataset_as_text(dataset):\n blank_size, blank_bin = _validate_data(data, [1, 4])\n if blank_size <= 2:\n self.assertEqual(blank_bin, 1)\n else:\n self.assertEqual(blank_bin, 4)\n num_outputs += 1\n self.assertEqual(num_tries, num_outputs)\n\n # Check case where bin size is larger than text.\n dataset = tf.data.Dataset.from_tensor_slices(\n {'text': [original] * num_tries})\n dataset = prep.fill_in_the_blank_sized(dataset, [1024])\n self.assertEmpty(list(test_utils.dataset_as_text(dataset)))\n\n def test_random_split_text(self):\n num_tries = 10\n original = '%s' % list(range(100))\n dataset = tf.data.Dataset.from_tensor_slices(\n {'text': [original] * num_tries})\n dataset = prep.random_split_text(dataset)\n out = []\n for data in test_utils.dataset_as_text(dataset):\n out.append(data['text'])\n reconstructed = ' '.join(out)\n ref = ' '.join([original] * num_tries)\n self.assertEqual(reconstructed, ref)\n\n def test_split_tokens(self):\n original = list(range(2, 102))\n og_dataset = tf.data.Dataset.from_tensors({'targets': original})\n\n # Verify splits with no max segments.\n def _verify_split(length, n_expected_outputs):\n ds = prep.split_tokens(og_dataset,\n unused_vocabulary=None,\n max_tokens_per_segment=length)\n outputs = list(test_utils.dataset_as_text(ds))\n self.assertLen(outputs, n_expected_outputs)\n reconstructed = []\n for ex in outputs[:-1]:\n t = ex['targets']\n self.assertLen(t, length)\n reconstructed.extend(t)\n final_t = outputs[-1]['targets']\n self.assertLessEqual(len(final_t), length)\n reconstructed.extend(final_t)\n self.assertEqual(reconstructed, original)\n\n _verify_split(25, 4)\n _verify_split(30, 4)\n _verify_split(100, 1)\n _verify_split(1000, 1)\n\n def test_split_tokens_additional_features_passthrough(self):\n original = list(range(2, 102))\n original_aux = list(range(4, 104))\n original_passthrough = list(range(20))\n og_dataset = tf.data.Dataset.from_tensors({\n 'targets':\n original,\n 'aux':\n original_aux,\n 'passthrough':\n original_passthrough\n })\n\n # Verify splits with no max segments.\n def _verify_split(length, n_expected_outputs):\n ds = prep.split_tokens(og_dataset,\n unused_vocabulary=None,\n max_tokens_per_segment=length,\n additional_feature_keys=['aux'],\n passthrough_feature_keys=['passthrough'])\n outputs = list(test_utils.dataset_as_text(ds))\n self.assertLen(outputs, n_expected_outputs)\n reconstructed = []\n reconstructed_aux = []\n for ex in outputs[:-1]:\n t = ex['targets']\n self.assertLen(t, length)\n reconstructed.extend(t)\n\n a = ex['aux']\n self.assertLen(a, length)\n reconstructed_aux.extend(a)\n final_t = outputs[-1]['targets']\n self.assertLessEqual(len(final_t), length)\n reconstructed.extend(final_t)\n self.assertEqual(reconstructed, original)\n\n final_a = outputs[-1]['aux']\n self.assertLessEqual(len(final_a), length)\n reconstructed_aux.extend(final_a)\n self.assertEqual(reconstructed_aux, original_aux)\n\n for ex in outputs:\n self.assertAllEqual(original_passthrough, ex['passthrough'])\n\n _verify_split(25, 4)\n _verify_split(30, 4)\n _verify_split(100, 1)\n _verify_split(1000, 1)\n\n def test_trim_tokens_at_front(self):\n sequence_length = {'inputs': 4}\n inputs = tf.data.Dataset.from_tensors(\n {'inputs': tf.constant([10, 11, 12, 13, 14, 15])})\n output = prep.trim_tokens_at_front(inputs,\n 
sequence_length=sequence_length)\n\n expected_output = [{'inputs': tf.constant([13, 14, 15])}]\n test_utils.assert_dataset(output, expected_output)\n\n def test_split_text_to_words(self):\n dataset = tf.data.Dataset.from_tensor_slices(\n {'text': ['That good.', 'That.']})\n dataset = prep._split_text_to_words(dataset)\n assert_dataset(dataset, {\n 'text': 'That good.',\n 'words': ['That', 'good.']\n })\n\n def test_definite_pronoun_resolution_simple(self):\n # Test where the pronoun is in the middle of the sentence. Also test the\n # case where the string pronoun is a substring of another word in the\n # sentence.\n og_dataset = tf.data.Dataset.from_tensors({\n 'sentence':\n 'Mitchell asked Tom if he could lend some money.',\n 'pronoun':\n 'he',\n 'candidates': ['Mitchell', 'Tom'],\n 'label':\n 1,\n })\n dataset = prep.definite_pronoun_resolution_simple(og_dataset)\n assert_dataset(\n dataset, {\n 'inputs':\n 'wsc: Mitchell asked Tom if *he* could lend some money.',\n 'targets': 'Tom',\n })\n\n # Test multiple word pronouns. The Definite Pronoun Resolution Dataset is\n # weird.\n og_dataset = tf.data.Dataset.from_tensors({\n 'sentence':\n 'Bill beat Tom at Scrabble because that newbie had all the luck.',\n 'pronoun': 'that newbie',\n 'candidates': ['Bill', 'Tom'],\n 'label': 0,\n })\n dataset = prep.definite_pronoun_resolution_simple(og_dataset)\n assert_dataset(\n dataset, {\n 'inputs':\n 'wsc: Bill beat Tom at Scrabble because *that newbie* had all the luck.',\n 'targets': 'Bill',\n })\n\n # Test pronoun at end of sentence.\n og_dataset = tf.data.Dataset.from_tensors({\n 'sentence':\n 'Carl borrowed a book from Richard, but the book was unreadable to him.',\n 'pronoun':\n 'him',\n 'candidates': ['Carl', 'Richard'],\n 'label':\n 0,\n })\n dataset = prep.definite_pronoun_resolution_simple(og_dataset)\n assert_dataset(\n dataset, {\n 'inputs':\n 'wsc: Carl borrowed a book from Richard, but the book was unreadable to *him*.',\n 'targets': 'Carl',\n })\n\n def test_wsc_simple(self):\n og_dataset = tf.data.Dataset.from_tensors({\n 'text': 'Mitchell asked Tom if he could lend some money.',\n 'span1_text': 'Tom',\n 'span2_text': 'he',\n 'span2_index': 4,\n 'idx': 1,\n })\n\n dataset = prep.wsc_simple(og_dataset, correct_referent_only=False)\n assert_dataset(\n dataset, {\n 'inputs':\n 'wsc: Mitchell asked Tom if *he* could lend some money.',\n 'targets': 'Tom',\n 'label': 0,\n 'idx': 1,\n })\n\n # Test including only examples with the correct referent.\n og_dataset = tf.data.Dataset.from_tensor_slices({\n 'text': [\n 'Mitchell asked Tom if he could lend some money.',\n 'Mitchell asked Tom if he could lend some money.',\n ],\n 'span1_text': [\n 'Tom',\n 'Mitchell',\n ],\n 'span2_text': [\n 'he',\n 'he',\n ],\n 'span2_index': [4, 4],\n 'label': [1, 0],\n 'idx': [1, 2]\n })\n dataset = prep.wsc_simple(og_dataset, correct_referent_only=True)\n assert_dataset(dataset, [{\n 'inputs': 'wsc: Mitchell asked Tom if *he* could lend some money.',\n 'targets': 'Tom',\n 'label': True,\n 'idx': 1,\n }])\n\n def test_wnli_simple(self):\n og_dataset = tf.data.Dataset.from_tensor_slices({\n 'sentence1': [\n 'Lily spoke to Donna breaking her silence.',\n 'The fish ate the worm. 
It was tasty.',\n 'Edward dropped adhesive tape onto his window sill, and when he pulled the tape off, some of the glue was stuck on it.',\n \"Al stole Bob's wallet and car, and then he was driving it really fast to get away.\",\n ],\n 'sentence2': [\n \"Lily spoke to Donna breaking Donna's silence.\",\n 'The worm was tasty.',\n 'Some of the glue was stuck on the window sill.',\n 'He was driving the car really fast to get away.',\n ],\n 'idx': [1, 2, 3, 4],\n 'label': [1, 0, 0, 1],\n })\n dataset = prep.wnli_simple(og_dataset)\n assert_dataset(dataset, [\n {\n 'inputs': 'wsc: Lily spoke to Donna breaking *her* silence.',\n 'targets': 'Donna',\n 'premise': 'Lily spoke to Donna breaking her silence.',\n 'hypothesis': \"Lily spoke to Donna breaking Donna's silence.\",\n 'label': 1,\n 'idx': 1,\n },\n {\n 'inputs': 'wsc: The fish ate the worm. *It* was tasty.',\n 'targets': 'The worm',\n 'premise': 'The fish ate the worm. It was tasty.',\n 'hypothesis': 'The worm was tasty.',\n 'label': 0,\n 'idx': 2,\n },\n {\n 'inputs':\n 'wsc: Edward dropped adhesive tape onto his window sill, and when he pulled the tape off, some of the glue was stuck on *it* .',\n 'targets': 'the window sill',\n 'premise':\n 'Edward dropped adhesive tape onto his window sill, and when he pulled the tape off, some of the glue was stuck on it.',\n 'hypothesis': 'Some of the glue was stuck on the window sill.',\n 'label': 0,\n 'idx': 3,\n },\n {\n 'inputs':\n \"wsc: Al stole Bob's wallet and car, and then he was driving *it* really fast to get away.\",\n 'targets': 'the car',\n 'premise':\n \"Al stole Bob's wallet and car, and then he was driving it really fast to get away.\",\n 'hypothesis':\n 'He was driving the car really fast to get away.',\n 'label': 1,\n 'idx': 4,\n },\n ])\n\n def test_next_sentence_prediction(self):\n\n og_dataset = tf.data.Dataset.from_tensor_slices({\n 'text': [\n 'This is the first sentence. This is the second sentence.',\n 'This is the third sentence. This is the fourth sentence.',\n ]\n })\n\n # Test neighboring sentences.\n dataset = prep.next_sentence_prediction(og_dataset,\n label_sentences=False,\n p_neighbors=1,\n buffer_size=1)\n assert_dataset(\n dataset,\n [\n {\n 'inputs':\n 'nsp: This is the first sentence. This is the second sentence.',\n 'targets': 'next',\n },\n {\n 'inputs':\n 'nsp: This is the third sentence. This is the fourth sentence.',\n 'targets': 'next',\n },\n ],\n )\n\n # Test non-neighboring sentences.\n dataset = prep.next_sentence_prediction(og_dataset,\n label_sentences=False,\n p_neighbors=0,\n buffer_size=1)\n assert_dataset(\n dataset,\n [\n {\n 'inputs':\n 'nsp: This is the first sentence. This is the fourth sentence.',\n 'targets': 'not_next',\n },\n {\n 'inputs':\n 'nsp: This is the third sentence. This is the second sentence.',\n 'targets': 'not_next',\n },\n ],\n )\n\n # Test labeling sentences.\n dataset = prep.next_sentence_prediction(og_dataset,\n label_sentences=True,\n p_neighbors=1,\n buffer_size=1)\n assert_dataset(\n dataset,\n [\n {\n 'inputs':\n 'nsp: sentence1: This is the first sentence. sentence2: This is the second sentence.',\n 'targets': 'next',\n },\n {\n 'inputs':\n 'nsp: sentence1: This is the third sentence. 
sentence2: This is the fourth sentence.',\n 'targets': 'next',\n },\n ],\n )\n\n def test_lm(self):\n dataset = tf.data.Dataset.from_tensor_slices(\n {'text': ['That is good.']})\n dataset = prep.lm(dataset)\n assert_dataset(dataset, {'inputs': '', 'targets': 'That is good.'})\n\n def test_triviaqa_truncate_text(self):\n\n vocab = test_utils.sentencepiece_vocab()\n\n def tokenize_and_prepare_dataset(inputs, targets):\n tokenized_inputs = vocab.encode(inputs)\n tokenized_targets = vocab.encode(targets)\n\n dataset = tf.data.Dataset.from_tensors({\n 'inputs':\n tokenized_inputs,\n 'targets':\n tokenized_targets,\n })\n\n return dataset, tokenized_targets\n\n inputs = 'This is a very very long string which must contain the answer.'\n targets = 'long string'\n\n og_dataset, tokenized_targets = tokenize_and_prepare_dataset(\n inputs, targets)\n\n for _ in range(0, 10):\n dataset = prep.trivia_qa_truncate_inputs(\n og_dataset,\n output_features=None,\n sequence_length={'inputs': 20})\n\n for data in test_utils.dataset_as_text(dataset):\n self.assertLen(data['inputs'], 20)\n self.assertContainsSubset(tokenized_targets, data['inputs'])\n\n # Dummy input which exists in the vocab to be able to compare strings after\n # decoding.\n inputs = 'w h d n r t v'\n targets = 'h d'\n\n og_dataset, _ = tokenize_and_prepare_dataset(inputs, targets)\n\n for _ in range(0, 5):\n dataset = prep.trivia_qa_truncate_inputs(\n og_dataset,\n output_features=None,\n sequence_length={'inputs': 5})\n\n for data in test_utils.dataset_as_text(dataset):\n self.assertLen(data['inputs'], 5)\n truncated_inputs = vocab.decode(data['inputs'].tolist())\n new_targets = vocab.decode(data['targets'].tolist())\n self.assertRegex(truncated_inputs, '.*' + targets + '.*')\n self.assertEqual(targets, new_targets)\n\n def test_triviaqa_truncate(self):\n\n sequence_length = {\n 'inputs': 10,\n }\n\n # Answer starts from the 0th position of the inputs.\n dataset = tf.data.Dataset.from_tensors({\n 'inputs': tf.range(0, 30),\n 'targets': tf.range(0, 5)\n })\n\n dataset = prep.trivia_qa_truncate_inputs(\n dataset, output_features=None, sequence_length=sequence_length)\n\n assert_dataset(dataset, {\n 'inputs': tf.range(0, 10),\n 'targets': tf.range(0, 5)\n })\n\n # Answer is in the last n elements of the targets.\n dataset = tf.data.Dataset.from_tensors({\n 'inputs': tf.range(0, 30),\n 'targets': tf.range(27, 30)\n })\n\n dataset = prep.trivia_qa_truncate_inputs(\n dataset, output_features=None, sequence_length=sequence_length)\n\n assert_dataset(dataset, {\n 'inputs': tf.range(20, 30),\n 'targets': tf.range(27, 30)\n })\n\n # Answer is not in inputs. 
Example is droped from the dataset.\n no_overlap_dataset = tf.data.Dataset.from_tensors({\n 'inputs':\n tf.range(0, 30),\n 'targets':\n tf.range(27, 32)\n })\n\n dataset = prep.trivia_qa_truncate_inputs(\n no_overlap_dataset,\n output_features=None,\n sequence_length=sequence_length)\n\n i = 0\n for data in test_utils.dataset_as_text(dataset):\n i = i + 1\n\n self.assertEqual(i, 0)\n\n # Answer is in the middle of the inputs.\n for _ in range(0, 10):\n og_dataset = tf.data.Dataset.from_tensors({\n 'inputs':\n tf.range(0, 30),\n 'targets':\n tf.range(10, 15),\n })\n\n dataset = prep.trivia_qa_truncate_inputs(\n og_dataset,\n output_features=None,\n sequence_length=sequence_length)\n for data in test_utils.dataset_as_text(dataset):\n self.assertContainsSubset(data['targets'], data['inputs'])\n self.assertLen(data['inputs'], 10)\n\n def test_record(self):\n og_dataset = tf.data.Dataset.from_tensors({\n 'query':\n 'It was @placeholder.',\n 'entities': ['A', 'B', 'C'],\n 'passage': [\n 'This is the passage\\n@highlight\\nAnother sentence.\\n@highlight\\nThird sentence.'\n ],\n 'answers': ['A', 'C'],\n 'idx': {\n 'passage': 1,\n 'query': 2,\n },\n })\n\n dataset = prep.record(og_dataset)\n assert_dataset(dataset, [\n {\n 'inputs':\n 'record query: It was @placeholder. entities: A, B, C passage: This is the passage. Another sentence. Third sentence.',\n 'targets': 'A',\n 'idx/passage': 1,\n 'idx/query': 2,\n 'answers': ['A', 'C'],\n },\n {\n 'inputs':\n 'record query: It was @placeholder. entities: A, B, C passage: This is the passage. Another sentence. Third sentence.',\n 'targets': 'C',\n 'idx/passage': 1,\n 'idx/query': 2,\n 'answers': ['A', 'C'],\n },\n ])\n\n # Test a dataset without answers, as would appear in the test set\n og_test_dataset = tf.data.Dataset.from_tensors({\n 'query':\n 'It was @placeholder.',\n 'entities': ['A', 'B', 'C'],\n 'passage': [\n 'This is the passage\\n@highlight\\nAnother sentence.\\n@highlight\\nThird sentence.'\n ],\n 'answers':\n tf.constant([], dtype=tf.string),\n 'idx': {\n 'passage': 1,\n 'query': 2,\n },\n })\n\n # Test all answers.\n dataset = prep.record(og_test_dataset)\n assert_dataset(dataset, [\n {\n 'inputs':\n 'record query: It was @placeholder. entities: A, B, C passage: This is the passage. Another sentence. 
Third sentence.',\n 'targets': '<unk>',\n 'idx/passage': 1,\n 'idx/query': 2,\n 'answers': []\n },\n ])\n\n def test_take(self):\n og_dataset = tf.data.Dataset.from_tensor_slices({'inputs': [1] * 100})\n dataset = prep.take(og_dataset, 5)\n assert_dataset(dataset, [{'inputs': 1} for _ in range(5)])\n dataset = prep.take(og_dataset, -1)\n assert_dataset(dataset, [{'inputs': 1} for _ in range(100)])\n\n def test_parse_tsv(self):\n og_dataset = tf.data.Dataset.from_tensor_slices(['a\\tb', 'c\\td'])\n dataset = prep.parse_tsv(og_dataset, field_names=['f1', 'f2'])\n assert_dataset(dataset, [{\n 'f1': 'a',\n 'f2': 'b'\n }, {\n 'f1': 'c',\n 'f2': 'd'\n }])\n\n def test_tokenize(self):\n og_dataset = tf.data.Dataset.from_tensors({\n 'prefix': 'This is',\n 'suffix': 'a test.'\n })\n output_features = {\n 'prefix': Feature(test_utils.MockVocabulary({'This is': [0, 1]})),\n 'suffix': Feature(test_utils.MockVocabulary({'a test.': [2, 3]})),\n }\n\n assert_dataset(\n prep.tokenize(og_dataset, output_features=output_features), {\n 'prefix': [0, 1],\n 'prefix_plaintext': 'This is',\n 'suffix': [2, 3],\n 'suffix_plaintext': 'a test.'\n })\n assert_dataset(\n prep.tokenize(og_dataset,\n output_features=output_features,\n copy_plaintext=False), {\n 'prefix': [0, 1],\n 'suffix': [2, 3]\n })\n\n def test_denoise(self):\n vocab = test_utils.sentencepiece_vocab()\n target_tokens = vocab.encode('The quick brown fox.')\n\n # This is what it encodes to.\n self.assertEqual(\n target_tokens,\n [3, 2, 20, 4, 3, 2, 8, 13, 2, 3, 2, 23, 7, 19, 22, 3, 2, 7, 2])\n\n og_dataset = tf.data.Dataset.from_tensor_slices({\n 'targets': [target_tokens],\n })\n\n output_features = {\n 'targets': Feature(vocab),\n }\n\n # These are the parameters of denoise in the operative config of 'base'.\n # Except noise_density, bumped up from 0.15 to 0.3 in order to demonstrate\n # multiple corrupted spans.\n with utils.map_seed_manager(42):\n denoised_dataset = prep.denoise(\n og_dataset,\n output_features,\n noise_density=0.3,\n noise_mask_fn=prep.random_spans_noise_mask,\n inputs_fn=prep.noise_span_to_unique_sentinel,\n targets_fn=prep.nonnoise_span_to_unique_sentinel)\n\n # Two spans corrupted, [2] and [22, 3, 2, 7, 2], replaced by unique\n # sentinels 25 and 24 respectively.\n assert_dataset(denoised_dataset, [\n {\n 'inputs': [3, 2, 20, 4, 25, 2, 8, 13, 2, 3, 2, 23, 7, 19, 24],\n 'targets': [25, 3, 24, 22, 3, 2, 7, 2],\n },\n ])\n\n def test_denoise_nested_decorators(self):\n \"\"\"Test whether gin and utils.map_over_dataset decorators are compatible.\"\"\"\n bindings = \"\"\"\n preprocessors.unsupervised.preprocessors = [@preprocessors.denoise]\n preprocessors.denoise.noise_density = 0.15\n preprocessors.denoise.noise_mask_fn = @preprocessors.iid_noise_mask\n preprocessors.denoise.inputs_fn = @noise_token_to_sentinel\n \"\"\"\n gin.parse_config(bindings)\n og_dataset = tf.data.Dataset.from_tensor_slices({'targets': [1, 2, 3]})\n output_features = {\n 'targets': Feature(test_utils.sentencepiece_vocab())\n }\n # Test denoise function when it is used as a gin-configurable of another\n # gin-configurable, prep.unsupervised.\n dataset = prep.unsupervised(og_dataset,\n output_features=output_features)\n self.assertIsInstance(dataset, tf.data.Dataset)\n\n def test_prefix_lm(self):\n vocab = test_utils.sentencepiece_vocab()\n inp = list(range(1, 101))\n og_dataset = tf.data.Dataset.from_tensor_slices({'targets': [inp]})\n og_dataset = og_dataset.repeat(100)\n output_features = {'targets': Feature(vocab)}\n output_dataset = prep.prefix_lm(\n 
og_dataset,\n {\n 'inputs': 100,\n 'targets': 100\n },\n output_features,\n )\n input_lengths = set()\n for ex in output_dataset.as_numpy_iterator():\n self.assertListEqual(\n ex['inputs'].tolist() + ex['targets'].tolist(), inp)\n input_lengths.add(len(ex['inputs']))\n self.assertGreater(len(input_lengths), 1)\n\n def test_rank_classification(self):\n dataset = tf.data.Dataset.from_tensors({\n 'left': 'the sky is blue',\n 'right': 'cats are so cute',\n 'label_idx': 1,\n })\n preprocessor = functools.partial(\n prep.rank_classification,\n dataset,\n inputs_fn=lambda features: [features['right'], features['left']],\n targets_fn=lambda features: ['class 0', 'class 1'],\n label_key='label_idx')\n\n test_utils.assert_dataset(preprocessor(mode='train'),\n [{\n 'idx': 0,\n 'inputs': 'the sky is blue',\n 'targets': 'class 1',\n 'is_correct': True,\n }])\n\n test_utils.assert_dataset(preprocessor(mode='eval'),\n [{\n 'idx': 0,\n 'inputs': 'cats are so cute',\n 'targets': 'class 0',\n 'is_correct': False,\n }, {\n 'idx': 0,\n 'inputs': 'the sky is blue',\n 'targets': 'class 1',\n 'is_correct': True,\n }])\n\n test_utils.assert_dataset(preprocessor(mode='fewshot_eval'), [\n {\n 'idx': [0, 0],\n 'inputs': ['cats are so cute', 'the sky is blue'],\n 'targets': ['class 0', 'class 1'],\n 'is_correct': [False, True]\n },\n ])\n\n def test_rank_classification_multilabel(self):\n dataset = tf.data.Dataset.from_tensors({\n 'left': 'the sky is blue',\n 'right': 'cats are so cute',\n 'label_idx': [1, 2],\n })\n\n preprocessor = functools.partial(\n prep.rank_classification,\n dataset,\n inputs_fn=lambda features:\n [features['right'], features['left'], 'X'],\n targets_fn=lambda features: ['class 0', 'class 1', 'class 2'],\n label_key='label_idx')\n\n test_utils.assert_dataset(preprocessor(mode='train'), [\n {\n 'idx': 0,\n 'inputs': 'the sky is blue',\n 'targets': 'class 1',\n 'is_correct': True,\n },\n {\n 'idx': 0,\n 'inputs': 'X',\n 'targets': 'class 2',\n 'is_correct': True,\n },\n ])\n\n test_utils.assert_dataset(preprocessor(mode='eval'), [\n {\n 'idx': 0,\n 'inputs': 'cats are so cute',\n 'targets': 'class 0',\n 'is_correct': False,\n },\n {\n 'idx': 0,\n 'inputs': 'the sky is blue',\n 'targets': 'class 1',\n 'is_correct': True,\n },\n {\n 'idx': 0,\n 'inputs': 'X',\n 'targets': 'class 2',\n 'is_correct': True,\n },\n ])\n\n test_utils.assert_dataset(preprocessor(mode='fewshot_eval'), [\n {\n 'idx': [0, 0, 0],\n 'inputs': ['cats are so cute', 'the sky is blue', 'X'],\n 'targets': ['class 0', 'class 1', 'class 2'],\n 'is_correct': [False, True, True]\n },\n ])\n\n def test_rank_classification_errors(self):\n dataset = tf.data.Dataset.from_tensors({\n 'left': 'the sky is blue',\n 'right': 'cats are so cute',\n 'label': [0, 2],\n })\n\n with self.assertRaisesRegex(\n tf.errors.InvalidArgumentError,\n '.*`inputs_fn` and `targets_fn` must return the same size tensors.*'\n ):\n list(\n prep.rank_classification(\n dataset,\n inputs_fn=lambda features: tf.stack([features['right']]),\n targets_fn=lambda features: tf.stack(\n ['class 0', 'class 1'])))\n\n with self.assertRaisesRegex(\n tf.errors.InvalidArgumentError,\n 'Label values must be less than the number of classes.'):\n list(\n prep.rank_classification(\n dataset,\n inputs_fn=lambda features: tf.stack(\n [features['right'], features['left']]),\n targets_fn=lambda features: tf.stack(\n ['class 0', 'class 1'])))\n\n def test_rank_classification_formatter(self):\n input_examples = [\n {\n 'premise': 'The farmland needed irrigation.',\n 'question': 'effect',\n 
'choice1': 'a canal was constructed',\n 'choice2': 'the crops grew tall',\n 'label': 0,\n },\n {\n 'premise': 'I decided to stay home last night.',\n 'question': 'cause',\n 'choice1': 'I wanted to see people',\n 'choice2': 'I was too tired',\n 'label': 1,\n },\n ]\n\n input_ds = tf.data.Dataset.from_generator(lambda:\n (x for x in input_examples),\n output_types={\n 'premise': tf.string,\n 'question': tf.string,\n 'choice1': tf.string,\n 'choice2': tf.string,\n 'label': tf.int32,\n },\n output_shapes={\n 'premise': [],\n 'question': [],\n 'choice1': [],\n 'choice2': [],\n 'label': [],\n })\n\n # all options\n dataset = prep.rank_classification_formatter(\n input_ds,\n inputs_formats='{premise} What is the {question}? X',\n targets_formats=['I think {choice1}.', 'I think {choice2}.'],\n mode='eval')\n\n test_utils.assert_dataset(dataset, [\n {\n 'idx': 0,\n 'inputs':\n 'The farmland needed irrigation. What is the effect? X',\n 'targets': 'I think a canal was constructed.',\n 'is_correct': True\n },\n {\n 'idx': 0,\n 'inputs':\n 'The farmland needed irrigation. What is the effect? X',\n 'targets': 'I think the crops grew tall.',\n 'is_correct': False\n },\n {\n 'idx': 1,\n 'inputs':\n 'I decided to stay home last night. What is the cause? X',\n 'targets': 'I think I wanted to see people.',\n 'is_correct': False\n },\n {\n 'idx': 1,\n 'inputs':\n 'I decided to stay home last night. What is the cause? X',\n 'targets': 'I think I was too tired.',\n 'is_correct': True\n },\n ])\n\n # Reverse inputs and targets for supporting the use case when there is\n # one target, but multiple inputs to select from.\n dataset = prep.rank_classification_formatter(\n input_ds,\n inputs_formats=['I think {choice1}.', 'I think {choice2}.'],\n targets_formats='{premise} What is the {question}? X',\n mode='eval')\n\n test_utils.assert_dataset(dataset, [\n {\n 'idx': 0,\n 'targets':\n 'The farmland needed irrigation. What is the effect? X',\n 'inputs': 'I think a canal was constructed.',\n 'is_correct': True\n },\n {\n 'idx': 0,\n 'targets':\n 'The farmland needed irrigation. What is the effect? X',\n 'inputs': 'I think the crops grew tall.',\n 'is_correct': False\n },\n {\n 'idx': 1,\n 'targets':\n 'I decided to stay home last night. What is the cause? X',\n 'inputs': 'I think I wanted to see people.',\n 'is_correct': False\n },\n {\n 'idx': 1,\n 'targets':\n 'I decided to stay home last night. What is the cause? X',\n 'inputs': 'I think I was too tired.',\n 'is_correct': True\n },\n ])\n\n # train mode\n dataset = prep.rank_classification_formatter(\n input_ds,\n inputs_formats='{premise} What is the {question}? X',\n targets_formats=['I think {choice1}.', 'I think {choice2}.'],\n mode='train')\n\n test_utils.assert_dataset(dataset, [\n {\n 'idx': 0,\n 'inputs':\n 'The farmland needed irrigation. What is the effect? X',\n 'targets': 'I think a canal was constructed.',\n 'is_correct': True\n },\n {\n 'idx': 1,\n 'inputs':\n 'I decided to stay home last night. What is the cause? X',\n 'targets': 'I think I was too tired.',\n 'is_correct': True\n },\n ])\n\n # fewshot_eval mode\n dataset = prep.rank_classification_formatter(\n input_ds,\n inputs_formats='{premise} What is the {question}? X',\n targets_formats=['I think {choice1}.', 'I think {choice2}.'],\n mode='fewshot_eval')\n\n test_utils.assert_dataset(dataset, [\n {\n 'idx': [0, 0],\n 'inputs': [\n 'The farmland needed irrigation. What is the effect? X',\n 'The farmland needed irrigation. What is the effect? 
X',\n ],\n 'targets': [\n 'I think a canal was constructed.',\n 'I think the crops grew tall.',\n ],\n 'is_correct': [True, False]\n },\n {\n 'idx': [1, 1],\n 'inputs': [\n 'I decided to stay home last night. What is the cause? X',\n 'I decided to stay home last night. What is the cause? X',\n ],\n 'targets': [\n 'I think I wanted to see people.',\n 'I think I was too tired.',\n ],\n 'is_correct': [False, True]\n },\n ])\n\n def test_nested_key_rank_classification_formatter(self):\n input_ds = tf.data.Dataset.from_tensors({\n 'answerKey': 0,\n 'fact1': 'creating paper requires cutting down trees',\n 'question': {\n 'choice_A': 'forests',\n 'choice_B': 'canyons',\n 'sub_question': {\n 'stem': 'What is the ultimate source of greeting cards?'\n }\n }\n })\n\n dataset = prep.rank_classification_formatter(\n input_ds,\n inputs_formats='{fact1}. {question/sub_question/stem} X 0',\n targets_formats=[\n 'Correct Answer: {question/choice_A} X 1 Incorrect Answer: '\n '{question/choice_B} X 1',\n 'Correct Answer: {question/choice_B} X 1 Incorrect Answer: '\n '{question/choice_A} X 1',\n ],\n mode='eval',\n label_key='answerKey')\n\n test_utils.assert_dataset(dataset, [\n {\n 'idx':\n 0,\n 'inputs':\n 'creating paper requires cutting down trees. What is the '\n 'ultimate source of greeting cards? X 0',\n 'targets':\n 'Correct Answer: forests X 1 Incorrect Answer: canyons X 1',\n 'is_correct':\n True,\n },\n {\n 'idx':\n 0,\n 'inputs':\n 'creating paper requires cutting down trees. What is the '\n 'ultimate source of greeting cards? X 0',\n 'targets':\n 'Correct Answer: canyons X 1 Incorrect Answer: forests X 1',\n 'is_correct':\n False,\n },\n ])\n\n with self.assertRaisesRegex(\n ValueError,\n 'Final value of key \\'question/sub_question\\' must be a tf.string. '\n 'Got: dict'):\n prep.rank_classification_formatter(\n input_ds,\n inputs_formats='{fact1}. {question/sub_question} X 0',\n targets_formats=['test1', 'test2'],\n mode='eval',\n label_key='answerKey')\n\n with self.assertRaises(TypeError):\n prep.rank_classification_formatter(\n input_ds,\n inputs_formats='{fact1}. 
{answerKey} X 0',\n targets_formats=['test1', 'test2'],\n mode='eval',\n label_key='answerKey')\n\n def test_select_random_chunk(self):\n dataset = tf.data.Dataset.from_tensors({\n 'targets': [0, 1, 2, 3],\n 'inputs': [4, 5, 6, 7]\n })\n dataset = prep.select_random_chunk(dataset,\n feature_key='targets',\n max_length=4)\n output = list(dataset.as_numpy_iterator())\n self.assertEqual(1, len(output))\n output = output[0]\n self.assertSequenceEqual(['targets'], list(output.keys()))\n self.assertGreater(len(output['targets']), 0)\n\n def test_select_random_chunk_uniform_start(self):\n dataset = tf.data.Dataset.from_tensors({\n 'targets': [0, 1, 2, 3],\n 'inputs': [4, 5, 6, 7]\n })\n dataset = prep.select_random_chunk(dataset,\n feature_key='targets',\n max_length=4,\n uniform_random_start=True)\n output = list(dataset.as_numpy_iterator())\n self.assertEqual(1, len(output))\n output = output[0]\n self.assertSequenceEqual(['targets'], list(output.keys()))\n self.assertGreater(len(output['targets']), 0)\n\n def test_select_random_chunk_additional_features(self):\n dataset = tf.data.Dataset.from_tensors({\n 'targets': [0, 1, 2, 3],\n 'inputs': [4, 5, 6, 7]\n })\n dataset = prep.select_random_chunk(dataset,\n feature_key='targets',\n additional_feature_keys=['inputs'],\n max_length=3)\n output = list(dataset.as_numpy_iterator())\n self.assertEqual(1, len(output))\n output = output[0]\n self.assertSequenceEqual(['inputs', 'targets'],\n sorted(list(output.keys())))\n self.assertAllEqual(output['inputs'] - 4, output['targets'])\n\n def test_select_random_chunk_different_sizes(self):\n dataset = tf.data.Dataset.from_tensors({\n 'targets': [0, 1, 2, 3],\n 'inputs': [4, 5]\n })\n with self.assertRaises(tf.errors.InvalidArgumentError):\n prep.select_random_chunk(dataset,\n feature_key='targets',\n additional_feature_keys=['inputs'],\n max_length=4)\n\n\nif __name__ == '__main__':\n absltest.main()\n" ]
[ [ "tensorflow.data.Dataset.from_tensors", "tensorflow.constant", "tensorflow.range", "tensorflow.stack", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.cast", "tensorflow.equal", "tensorflow.data.Dataset.from_generator", "tensorflow.random.set_seed" ] ]
Xiaoxiong-Liu/gluon-ts
[ "097c492769258dd70b7f223f826b17b0051ceee9", "097c492769258dd70b7f223f826b17b0051ceee9", "097c492769258dd70b7f223f826b17b0051ceee9", "097c492769258dd70b7f223f826b17b0051ceee9" ]
[ "src/gluonts/nursery/spliced_binned_pareto/tcn.py", "test/distribution/test_mixture.py", "src/gluonts/dataset/stat.py", "src/gluonts/model/deepvar_hierarchical/_network.py" ]
[ "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\n# Implementation taken and modified from\n# https://github.com/White-Link/UnsupervisedScalableRepresentationLearningTimeSeries, which was created\n# with the following license.\n\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\n# Implementation of causal CNNs partly taken and modified from\n# https://github.com/locuslab/TCN/blob/master/TCN/tcn.py, originally created\n# with the following license.\n\n# MIT License\n\n# Copyright (c) 2018 CMU Locus Lab\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\nimport torch\n\n\nclass Chomp1d(torch.nn.Module):\n \"\"\"Removes leading or trailing elements of a time series.\n\n Takes as input a three-dimensional tensor (`B`, `C`, `L`) where `B` is the\n batch size, `C` is the number of input channels, and `L` is the length of\n the input. 
Outputs a three-dimensional tensor (`B`, `C`, `L - s`) where `s`\n is the number of elements to remove.\n\n Args:\n chomp_size : Number of elements to remove.\n last : If True, removes the last elements in the time dimension,\n If False, removes the fist elements.\n \"\"\"\n\n def __init__(self, chomp_size: int, last: bool = True):\n super(Chomp1d, self).__init__()\n self.chomp_size = chomp_size\n self.last = last\n\n def forward(self, x):\n if self.last:\n x_chomped = x[:, :, : -self.chomp_size]\n else:\n x_chomped = x[:, :, self.chomp_size :]\n\n return x_chomped\n\n\nclass TCNBlock(torch.nn.Module):\n \"\"\"Temporal Convolutional Network block.\n\n Composed sequentially of two causal convolutions (with leaky ReLU activation functions),\n and a parallel residual connection.\n\n Takes as input a three-dimensional tensor (`B`, `C`, `L`) where `B` is the\n batch size, `C` is the number of input channels, and `L` is the length of\n the input. Outputs a three-dimensional tensor (`B`, `C`, `L`).\n\n Args:\n in_channels : Number of input channels.\n out_channels : Number of output channels.\n kernel_size : Kernel size of the applied non-residual convolutions.\n dilation : Dilation parameter of non-residual convolutions.\n bias : If True, adds a learnable bias to the convolutions.\n fwd_time : If True, the network \"causal\" direction is from past to future (forward),\n if False, the relation is from future to past (backward).\n final : If True, the last activation function is disabled.\n \"\"\"\n\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n kernel_size: int,\n dilation: int,\n bias: bool = True,\n fwd_time: bool = True,\n final: bool = False,\n ):\n\n super(TCNBlock, self).__init__()\n\n in_channels = int(in_channels)\n kernel_size = int(kernel_size)\n out_channels = int(out_channels)\n dilation = int(dilation)\n\n # Computes left padding so that the applied convolutions are causal\n padding = int((kernel_size - 1) * dilation)\n\n # First causal convolution\n conv1_pre = torch.nn.Conv1d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n padding=padding,\n dilation=dilation,\n bias=bias,\n )\n conv1 = torch.nn.utils.weight_norm(conv1_pre)\n\n # The truncation makes the convolution causal\n chomp1 = Chomp1d(chomp_size=padding, last=fwd_time)\n\n relu1 = torch.nn.LeakyReLU()\n\n # Second causal convolution\n conv2_pre = torch.nn.Conv1d(\n in_channels=out_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n padding=padding,\n dilation=dilation,\n bias=bias,\n )\n conv2 = torch.nn.utils.weight_norm(conv2_pre)\n chomp2 = Chomp1d(padding)\n relu2 = torch.nn.LeakyReLU()\n\n # Causal network\n self.causal = torch.nn.Sequential(\n conv1, chomp1, relu1, conv2, chomp2, relu2\n )\n\n # Residual connection\n self.upordownsample = (\n torch.nn.Conv1d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=1,\n )\n if in_channels != out_channels\n else None\n )\n\n # Final activation function\n self.activation = torch.nn.LeakyReLU() if final else None\n\n def forward(self, x):\n out_causal = self.causal(x)\n res = x if self.upordownsample is None else self.upordownsample(x)\n if self.activation is None:\n return out_causal + res\n else:\n return self.activation(out_causal + res)\n\n\nclass TCN(torch.nn.Module):\n \"\"\"Temporal Convolutional Network.\n\n Composed of a sequence of causal convolution blocks.\n\n Takes as input a three-dimensional tensor (`B`, `C`, `L`) where `B` is the\n batch size, `C` is the number of input 
channels, and `L` is the length of\n the input. Outputs a three-dimensional tensor (`B`, `C_out`, `L`).\n\n Args:\n in_channels : Number of input channels.\n out_channels : Number of output channels.\n kernel_size : Kernel size of the applied non-residual convolutions.\n channels : Number of channels processed in the network and of output\n channels.\n layers : Depth of the network.\n bias : If True, adds a learnable bias to the convolutions.\n fwd_time : If True the network is the relation relation if from past to future (forward),\n if False, the relation from future to past (backward).\n \"\"\"\n\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n kernel_size: int,\n channels: int,\n layers: int,\n bias: bool = True,\n fwd_time: bool = True,\n ):\n\n super(TCN, self).__init__()\n\n layers = int(layers)\n\n net_layers = [] # List of sequential TCN blocks\n dilation_size = 1 # Initial dilation size\n\n for i in range(layers):\n in_channels_block = in_channels if i == 0 else channels\n net_layers.append(\n TCNBlock(\n in_channels=in_channels_block,\n out_channels=channels,\n kernel_size=kernel_size,\n dilation=dilation_size,\n bias=bias,\n fwd_time=fwd_time,\n final=False,\n )\n )\n dilation_size *= 2 # Doubles the dilation size at each step\n\n # Last layer\n net_layers.append(\n TCNBlock(\n in_channels=channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n dilation=dilation_size,\n bias=bias,\n fwd_time=fwd_time,\n final=True,\n )\n )\n\n self.network = torch.nn.Sequential(*net_layers)\n\n def forward(self, x):\n return self.network(x)\n", "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. 
See the License for the specific language governing\n# permissions and limitations under the License.\n\nimport mxnet as mx\nimport numpy as np\nimport pytest\n\nfrom gluonts.core.serde import dump_json, load_json\n\nfrom gluonts.gluonts_tqdm import tqdm\nfrom gluonts.model.common import NPArrayLike\nfrom gluonts.mx import Tensor\nfrom gluonts.mx.distribution import (\n Gamma,\n GammaOutput,\n Gaussian,\n GaussianOutput,\n GenPareto,\n GenParetoOutput,\n LaplaceOutput,\n MixtureDistribution,\n MixtureDistributionOutput,\n MultivariateGaussianOutput,\n StudentT,\n StudentTOutput,\n)\nfrom gluonts.mx.distribution.distribution import Distribution\nfrom gluonts.mx.distribution.distribution_output import DistributionOutput\nfrom gluonts.testutil import empirical_cdf\n\nserialize_fn_list = [lambda x: x, lambda x: load_json(dump_json(x))]\n\n\ndef plot_samples(s: Tensor, bins: int = 100) -> None:\n from matplotlib import pyplot as plt\n\n s = s.asnumpy()\n plt.hist(s, bins=bins)\n plt.show()\n\n\nBINS = np.linspace(-5, 5, 100)\n\n\ndef histogram(samples: NPArrayLike) -> np.ndarray:\n h, _ = np.histogram(samples, bins=BINS, density=True)\n return h\n\n\ndef diff(x: NPArrayLike, y: NPArrayLike) -> np.ndarray:\n return np.mean(np.abs(x - y))\n\n\nNUM_SAMPLES = 1_000\nNUM_SAMPLES_LARGE = 1_000_000\n\n\nSHAPE = (2, 1, 3)\n\nnp.random.seed(35120171)\nmx.random.seed(35120171)\n\n\[email protected](\n \"distr1, distr2, p\",\n [\n (\n Gaussian(\n mu=mx.nd.zeros(shape=SHAPE),\n sigma=1e-3 + 0.2 * mx.nd.ones(shape=SHAPE),\n ),\n Gaussian(\n mu=mx.nd.ones(shape=SHAPE),\n sigma=1e-3 + 0.1 * mx.nd.ones(shape=SHAPE),\n ),\n 0.2 * mx.nd.ones(shape=SHAPE),\n ),\n (\n StudentT(\n mu=mx.nd.ones(shape=SHAPE),\n sigma=1e-1 + mx.nd.zeros(shape=SHAPE),\n nu=mx.nd.zeros(shape=SHAPE) + 2.2,\n ),\n Gaussian(\n mu=-mx.nd.ones(shape=SHAPE),\n sigma=1e-1 + mx.nd.zeros(shape=SHAPE),\n ),\n mx.nd.random_uniform(shape=SHAPE),\n ),\n (\n Gaussian(\n mu=mx.nd.array([0.0]),\n sigma=mx.nd.array([1e-3 + 0.2]),\n ),\n Gaussian(\n mu=mx.nd.array([1.0]),\n sigma=mx.nd.array([1e-3 + 0.1]),\n ),\n mx.nd.array([0.2]),\n ),\n # TODO: add a multivariate case here\n ],\n)\[email protected](\"serialize_fn\", serialize_fn_list)\ndef test_mixture(\n distr1: Distribution, distr2: Distribution, p: Tensor, serialize_fn\n) -> None:\n # sample from component distributions, and select samples\n samples1 = distr1.sample(num_samples=NUM_SAMPLES_LARGE)\n samples2 = distr2.sample(num_samples=NUM_SAMPLES_LARGE)\n\n # TODO: for multivariate case, test should not sample elements from different components in the event_dim dimension\n rand = mx.nd.random.uniform(shape=(NUM_SAMPLES_LARGE, *p.shape))\n choice = (rand < p.expand_dims(axis=0)).broadcast_like(samples1)\n samples_ref = mx.nd.where(choice, samples1, samples2)\n\n # construct mixture distribution and sample from it\n\n mixture_probs = mx.nd.stack(p, 1.0 - p, axis=-1)\n\n mixture = MixtureDistribution(\n mixture_probs=mixture_probs, components=[distr1, distr2]\n )\n mixture = serialize_fn(mixture)\n\n samples_mix = mixture.sample(num_samples=NUM_SAMPLES_LARGE)\n\n # check that shapes are right\n\n assert (\n samples1.shape\n == samples2.shape\n == samples_mix.shape\n == samples_ref.shape\n )\n\n # check mean and stddev\n calc_mean = mixture.mean.asnumpy()\n calc_std = mixture.stddev.asnumpy()\n sample_mean = samples_mix.asnumpy().mean(axis=0)\n sample_std = samples_mix.asnumpy().std(axis=0)\n\n assert np.allclose(calc_mean, sample_mean, atol=1e-1)\n assert np.allclose(calc_std, sample_std, 
atol=2e-1)\n\n # check that histograms are close\n assert (\n diff(\n histogram(samples_mix.asnumpy()), histogram(samples_ref.asnumpy())\n )\n < 0.05\n )\n\n # can only calculated cdf for gaussians currently\n if isinstance(distr1, Gaussian) and isinstance(distr2, Gaussian):\n emp_cdf, edges = empirical_cdf(samples_mix.asnumpy())\n calc_cdf = mixture.cdf(mx.nd.array(edges)).asnumpy()\n assert np.allclose(calc_cdf[1:, :], emp_cdf, atol=1e-2)\n\n\[email protected](\n \"distribution_outputs\",\n [\n ((GaussianOutput(), GaussianOutput()),),\n ((GaussianOutput(), StudentTOutput(), LaplaceOutput()),),\n ((MultivariateGaussianOutput(3), MultivariateGaussianOutput(3)),),\n ],\n)\[email protected](\"serialize_fn\", serialize_fn_list)\ndef test_mixture_output(distribution_outputs, serialize_fn) -> None:\n mdo = MixtureDistributionOutput(*distribution_outputs)\n\n args_proj = mdo.get_args_proj()\n args_proj.initialize()\n\n input = mx.nd.ones(shape=(512, 30))\n\n distr_args = args_proj(input)\n d = mdo.distribution(distr_args)\n d = serialize_fn(d)\n\n samples = d.sample(num_samples=NUM_SAMPLES)\n\n sample = d.sample()\n\n assert samples.shape == (NUM_SAMPLES, *sample.shape)\n\n log_prob = d.log_prob(sample)\n\n assert log_prob.shape == d.batch_shape\n\n\nBATCH_SIZE = 10000\n\nzeros = mx.nd.zeros((BATCH_SIZE, 1))\nones = mx.nd.ones((BATCH_SIZE, 1))\n\nmu1 = 0.0\nmu2 = 1.0\nsigma1 = 0.2\nsigma2 = 0.1\n\np1 = 0.2\np2 = 1.0 - p1\n\nsamples1 = np.random.normal(mu1, scale=sigma1, size=(BATCH_SIZE, 1))\nsamples2 = np.random.normal(mu2, scale=sigma2, size=(BATCH_SIZE, 1))\nnp_samples = np.where(\n np.random.uniform(size=(BATCH_SIZE, 1)) > p1, samples2, samples1\n)\n\nEXPECTED_HIST = histogram(np_samples)\n\n\[email protected](\"Skip test that takes long time to run\")\ndef test_mixture_inference() -> None:\n mdo = MixtureDistributionOutput([GaussianOutput(), GaussianOutput()])\n\n args_proj = mdo.get_args_proj()\n args_proj.initialize()\n args_proj.hybridize()\n\n input = mx.nd.ones((BATCH_SIZE, 1))\n\n distr_args = args_proj(input)\n d = mdo.distribution(distr_args)\n\n # plot_samples(d.sample())\n\n trainer = mx.gluon.Trainer(\n args_proj.collect_params(), \"sgd\", {\"learning_rate\": 0.02}\n )\n\n mixture_samples = mx.nd.array(np_samples)\n\n N = 1000\n t = tqdm(list(range(N)))\n for i in t:\n with mx.autograd.record():\n distr_args = args_proj(input)\n d = mdo.distribution(distr_args)\n loss = d.loss(mixture_samples)\n loss.backward()\n loss_value = loss.mean().asnumpy()\n t.set_postfix({\"loss\": loss_value})\n trainer.step(BATCH_SIZE)\n\n distr_args = args_proj(input)\n d = mdo.distribution(distr_args)\n\n obtained_hist = histogram(d.sample().asnumpy())\n\n # uncomment to see histograms\n # pl.plot(obtained_hist)\n # pl.plot(EXPECTED_HIST)\n # pl.show()\n assert diff(obtained_hist, EXPECTED_HIST) < 0.5\n\n\ndef fit_mixture_distribution(\n x: Tensor,\n mdo: MixtureDistributionOutput,\n variate_dimensionality: int = 1,\n epochs: int = 1_000,\n):\n args_proj = mdo.get_args_proj()\n args_proj.initialize()\n args_proj.hybridize()\n\n input = mx.nd.ones((variate_dimensionality, 1))\n\n trainer = mx.gluon.Trainer(\n args_proj.collect_params(), \"sgd\", {\"learning_rate\": 0.02}\n )\n\n t = tqdm(list(range(epochs)))\n for _ in t:\n with mx.autograd.record():\n distr_args = args_proj(input)\n d = mdo.distribution(distr_args)\n loss = d.loss(x).mean()\n loss.backward()\n loss_value = loss.asnumpy()\n t.set_postfix({\"loss\": loss_value})\n trainer.step(1)\n\n distr_args = args_proj(input)\n d = 
mdo.distribution(distr_args)\n return d\n\n\[email protected](\n \"mixture_distribution, mixture_distribution_output, epochs\",\n [\n (\n MixtureDistribution(\n mixture_probs=mx.nd.array([[0.6, 0.4]]),\n components=[\n Gaussian(mu=mx.nd.array([-1.0]), sigma=mx.nd.array([0.2])),\n Gamma(alpha=mx.nd.array([2.0]), beta=mx.nd.array([0.5])),\n ],\n ),\n MixtureDistributionOutput([GaussianOutput(), GammaOutput()]),\n 2_000,\n ),\n (\n MixtureDistribution(\n mixture_probs=mx.nd.array([[0.7, 0.3]]),\n components=[\n Gaussian(mu=mx.nd.array([-1.0]), sigma=mx.nd.array([0.2])),\n GenPareto(xi=mx.nd.array([0.6]), beta=mx.nd.array([1.0])),\n ],\n ),\n MixtureDistributionOutput([GaussianOutput(), GenParetoOutput()]),\n 2_000,\n ),\n ],\n)\[email protected](\"serialize_fn\", serialize_fn_list)\[email protected](\"Skip test that takes long time to run\")\ndef test_inference_mixture_different_families(\n mixture_distribution: MixtureDistribution,\n mixture_distribution_output: MixtureDistributionOutput,\n epochs: int,\n serialize_fn,\n) -> None:\n # First sample from mixture distribution and then confirm the MLE are close to true parameters\n num_samples = 10_000\n samples = mixture_distribution.sample(num_samples=num_samples)\n variate_dimensionality = (\n mixture_distribution.components[0].args[0].shape[0]\n )\n fitted_dist = fit_mixture_distribution(\n samples,\n mixture_distribution_output,\n variate_dimensionality,\n epochs=epochs,\n )\n\n assert np.allclose(\n fitted_dist.mixture_probs.asnumpy(),\n mixture_distribution.mixture_probs.asnumpy(),\n atol=1e-1,\n ), f\"Mixing probability estimates {fitted_dist.mixture_probs.asnumpy()} too far from {mixture_distribution.mixture_probs.asnumpy()}\"\n for ci, c in enumerate(mixture_distribution.components):\n for ai, a in enumerate(c.args):\n assert np.allclose(\n fitted_dist.components[ci].args[ai].asnumpy(),\n a.asnumpy(),\n atol=1e-1,\n ), f\"Parameter {ai} estimate {fitted_dist.components[ci].args[ai].asnumpy()} too far from {c}\"\n\n\[email protected](\n \"distribution, values_outside_support, distribution_output\",\n [\n (\n Gamma(alpha=mx.nd.array([0.9]), beta=mx.nd.array([2.0])),\n mx.nd.array([-1.0]),\n GammaOutput(),\n ),\n (\n Gamma(alpha=mx.nd.array([0.9]), beta=mx.nd.array([2.0])),\n mx.nd.array([0.0]),\n GammaOutput(),\n ),\n (\n GenPareto(xi=mx.nd.array([1 / 3.0]), beta=mx.nd.array([1.0])),\n mx.nd.array([-1.0]),\n GenParetoOutput(),\n ),\n ],\n)\ndef test_mixture_logprob(\n distribution: Distribution,\n values_outside_support: Tensor,\n distribution_output: DistributionOutput,\n) -> None:\n\n assert np.all(\n ~np.isnan(distribution.log_prob(values_outside_support).asnumpy())\n ), f\"{distribution} should return -inf log_probs instead of NaNs\"\n\n p = 0.5\n gaussian = Gaussian(mu=mx.nd.array([0]), sigma=mx.nd.array([2.0]))\n mixture = MixtureDistribution(\n mixture_probs=mx.nd.array([[p, 1 - p]]),\n components=[gaussian, distribution],\n )\n lp = mixture.log_prob(values_outside_support)\n assert np.allclose(\n lp.asnumpy(),\n np.log(p) + gaussian.log_prob(values_outside_support).asnumpy(),\n atol=1e-6,\n ), f\"log_prob(x) should be equal to log(p)+gaussian.log_prob(x)\"\n\n fit_mixture = fit_mixture_distribution(\n values_outside_support,\n MixtureDistributionOutput([GaussianOutput(), distribution_output]),\n variate_dimensionality=1,\n epochs=3,\n )\n for ci, c in enumerate(fit_mixture.components):\n for ai, a in enumerate(c.args):\n assert ~np.isnan(a.asnumpy()), f\"NaN gradients led to {c}\"\n", "# Copyright 2018 Amazon.com, Inc. 
or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\nimport math\nfrom collections import defaultdict\nfrom typing import Any, List, NamedTuple, Optional, Set\n\nimport numpy as np\n\nfrom gluonts.core.component import validated\nfrom gluonts.dataset.field_names import FieldName\nfrom gluonts.exceptions import assert_data_error\nfrom gluonts.gluonts_tqdm import tqdm\n\n\nclass ScaleHistogram:\n \"\"\"\n Scale histogram of a timeseries dataset\n\n This counts the number of timeseries whose mean of absolute values is in\n the `[base ** i, base ** (i+1)]` range for all possible `i`.\n The number of entries with empty target is counted separately.\n\n Parameters\n ----------\n base\n Log-width of the histogram's buckets.\n bin_counts\n empty_target_count\n \"\"\"\n\n @validated()\n def __init__(\n self,\n base: float = 2.0,\n bin_counts: Optional[dict] = None,\n empty_target_count: int = 0,\n ) -> None:\n self._base = base\n self.bin_counts = defaultdict(\n int, {} if bin_counts is None else bin_counts\n )\n self.empty_target_count = empty_target_count\n self.__init_args__ = dict(\n base=self._base,\n bin_counts=self.bin_counts,\n empty_target_count=empty_target_count,\n )\n\n def bucket_index(self, target_values):\n assert len(target_values) > 0\n scale = np.mean(np.abs(target_values))\n scale_bin = int(math.log(scale + 1.0, self._base))\n return scale_bin\n\n def add(self, target_values):\n if len(target_values) > 0:\n bucket = self.bucket_index(target_values)\n self.bin_counts[bucket] = self.bin_counts[bucket] + 1\n else:\n self.empty_target_count = self.empty_target_count + 1\n\n def count(self, target):\n if len(target) > 0:\n return self.bin_counts[self.bucket_index(target)]\n else:\n return self.empty_target_count\n\n def __len__(self):\n return self.empty_target_count + sum(self.bin_counts.values())\n\n def __eq__(self, other):\n return (\n isinstance(other, ScaleHistogram)\n and self.bin_counts == other.bin_counts\n and self.empty_target_count == other.empty_target_count\n and self._base == other._base\n )\n\n def __str__(self):\n string_repr = [\n \"count of scales in {min}-{max}:{count}\".format(\n min=self._base ** base_index - 1,\n max=self._base ** (base_index + 1) - 1,\n count=count,\n )\n for base_index, count in sorted(\n self.bin_counts.items(), key=lambda x: x[0]\n )\n ]\n return \"\\n\".join(string_repr)\n\n\nclass DatasetStatistics(NamedTuple):\n \"\"\"\n A NamedTuple to store the statistics of a Dataset.\n \"\"\"\n\n integer_dataset: bool\n max_target: float\n mean_abs_target: float\n mean_target: float\n mean_target_length: float\n max_target_length: int\n min_target: float\n feat_static_real: List[Set[float]]\n feat_static_cat: List[Set[int]]\n num_past_feat_dynamic_real: Optional[int]\n num_feat_dynamic_real: Optional[int]\n num_feat_dynamic_cat: Optional[int]\n num_missing_values: int\n num_time_observations: int\n num_time_series: int\n scale_histogram: ScaleHistogram\n\n # DO NOT override the __str__ method, since we rely that we can load\n # DatasetStatistics again; i.e. 
stats == eval(str(stats))\n\n def __eq__(self, other):\n for x, y in zip(self._asdict().values(), other._asdict().values()):\n if isinstance(x, float):\n if abs(x - y) > abs(0.0001 * x):\n return False\n elif x != y:\n return False\n return True\n\n\n# TODO: reorganize modules to avoid circular dependency\n# TODO: and substitute Any with Dataset\ndef calculate_dataset_statistics(ts_dataset: Any) -> DatasetStatistics:\n \"\"\"\n Computes the statistics of a given Dataset.\n\n Parameters\n ----------\n ts_dataset\n Dataset of which to compute the statistics.\n\n Returns\n -------\n DatasetStatistics\n NamedTuple containing the statistics.\n \"\"\"\n num_time_observations = 0\n num_time_series = 0\n min_target = 1e20\n max_target = -1e20\n sum_target = 0.0\n sum_abs_target = 0.0\n integer_dataset = True\n observed_feat_static_cat: Optional[List[Set[int]]] = None\n observed_feat_static_real: Optional[List[Set[float]]] = None\n num_feat_static_real: Optional[int] = None\n num_feat_static_cat: Optional[int] = None\n num_past_feat_dynamic_real: Optional[int] = None\n num_feat_dynamic_real: Optional[int] = None\n num_feat_dynamic_cat: Optional[int] = None\n num_missing_values = 0\n\n scale_histogram = ScaleHistogram()\n\n with tqdm(enumerate(ts_dataset, start=1), total=len(ts_dataset)) as it:\n max_target_length = 0\n for num_time_series, ts in it:\n\n # TARGET\n target = ts[FieldName.TARGET]\n observed_target = target[~np.isnan(target)]\n num_observations = len(observed_target)\n\n if num_observations > 0:\n # 'nan' is handled in observed_target definition\n assert_data_error(\n np.all(np.isfinite(observed_target)),\n \"Target values have to be finite (e.g., not inf, -inf, \"\n \"or None) and cannot exceed single precision floating \"\n \"point range.\",\n )\n\n num_time_observations += num_observations\n max_target_length = max(num_observations, max_target_length)\n min_target = float(min(min_target, observed_target.min()))\n max_target = float(max(max_target, observed_target.max()))\n num_missing_values += int(np.isnan(target).sum())\n sum_target += float(observed_target.sum())\n sum_abs_target += float(np.abs(observed_target).sum())\n integer_dataset = integer_dataset and bool(\n np.all(np.mod(observed_target, 1) == 0)\n )\n\n scale_histogram.add(\n observed_target\n ) # after checks for inf and None\n\n # FEAT_STATIC_CAT\n feat_static_cat = (\n ts[FieldName.FEAT_STATIC_CAT]\n if FieldName.FEAT_STATIC_CAT in ts\n else []\n )\n\n if num_feat_static_cat is None:\n num_feat_static_cat = len(feat_static_cat)\n observed_feat_static_cat = [\n set() for _ in range(num_feat_static_cat)\n ]\n\n # needed to type check\n assert num_feat_static_cat is not None\n assert observed_feat_static_cat is not None\n\n assert_data_error(\n num_feat_static_cat == len(feat_static_cat),\n \"Not all feat_static_cat vectors have the same length {} != {}.\",\n num_feat_static_cat,\n len(feat_static_cat),\n )\n for i, c in enumerate(feat_static_cat):\n observed_feat_static_cat[i].add(c)\n\n # FEAT_STATIC_REAL\n feat_static_real = (\n ts[FieldName.FEAT_STATIC_REAL]\n if FieldName.FEAT_STATIC_REAL in ts\n else []\n )\n\n if num_feat_static_real is None:\n num_feat_static_real = len(feat_static_real)\n observed_feat_static_real = [\n set() for _ in range(num_feat_static_real)\n ]\n\n # needed to type check\n assert num_feat_static_real is not None\n assert observed_feat_static_real is not None\n\n assert_data_error(\n num_feat_static_real == len(feat_static_real),\n \"Not all feat_static_real vectors have the same length {} 
!= {}.\",\n num_feat_static_real,\n len(feat_static_real),\n )\n for i, c in enumerate(feat_static_real):\n observed_feat_static_real[i].add(c)\n\n # FEAT_DYNAMIC_CAT\n feat_dynamic_cat = (\n ts[FieldName.FEAT_DYNAMIC_CAT]\n if FieldName.FEAT_DYNAMIC_CAT in ts\n else None\n )\n\n if feat_dynamic_cat is None:\n # feat_dynamic_cat not found, check it was the first ts we encounter or\n # that feat_dynamic_cat were seen before\n assert_data_error(\n num_feat_dynamic_cat is None or num_feat_dynamic_cat == 0,\n \"feat_dynamic_cat was found for some instances but not others.\",\n )\n num_feat_dynamic_cat = 0\n else:\n if num_feat_dynamic_cat is None:\n # first num_feat_dynamic_cat found\n num_feat_dynamic_cat = len(feat_dynamic_cat)\n else:\n assert_data_error(\n num_feat_dynamic_cat == len(feat_dynamic_cat),\n \"Found instances with different number of features in \"\n \"feat_dynamic_cat, found one with {} and another with {}.\",\n num_feat_dynamic_cat,\n len(feat_dynamic_cat),\n )\n\n assert_data_error(\n np.all(np.isfinite(feat_dynamic_cat)),\n \"Features values have to be finite and cannot exceed single \"\n \"precision floating point range.\",\n )\n num_feat_dynamic_cat_time_steps = len(feat_dynamic_cat[0])\n assert_data_error(\n num_feat_dynamic_cat_time_steps == len(target),\n \"Each feature in feat_dynamic_cat has to have the same length as \"\n \"the target. Found an instance with feat_dynamic_cat of length {} \"\n \"and a target of length {}.\",\n num_feat_dynamic_cat_time_steps,\n len(target),\n )\n\n # FEAT_DYNAMIC_REAL\n feat_dynamic_real = None\n if FieldName.FEAT_DYNAMIC_REAL in ts:\n feat_dynamic_real = ts[FieldName.FEAT_DYNAMIC_REAL]\n elif FieldName.FEAT_DYNAMIC_REAL_LEGACY in ts:\n feat_dynamic_real = ts[FieldName.FEAT_DYNAMIC_REAL_LEGACY]\n\n if feat_dynamic_real is None:\n # feat_dynamic_real not found, check it was the first ts we encounter or\n # that feat_dynamic_real were seen before\n assert_data_error(\n num_feat_dynamic_real is None\n or num_feat_dynamic_real == 0,\n \"feat_dynamic_real was found for some instances but not others.\",\n )\n num_feat_dynamic_real = 0\n else:\n if num_feat_dynamic_real is None:\n # first num_feat_dynamic_real found\n num_feat_dynamic_real = len(feat_dynamic_real)\n else:\n assert_data_error(\n num_feat_dynamic_real == len(feat_dynamic_real),\n \"Found instances with different number of features in \"\n \"feat_dynamic_real, found one with {} and another with {}.\",\n num_feat_dynamic_real,\n len(feat_dynamic_real),\n )\n\n assert_data_error(\n np.all(np.isfinite(feat_dynamic_real)),\n \"Features values have to be finite and cannot exceed single \"\n \"precision floating point range.\",\n )\n num_feat_dynamic_real_time_steps = len(feat_dynamic_real[0])\n assert_data_error(\n num_feat_dynamic_real_time_steps == len(target),\n \"Each feature in feat_dynamic_real has to have the same length as \"\n \"the target. 
Found an instance with feat_dynamic_real of length {} \"\n \"and a target of length {}.\",\n num_feat_dynamic_real_time_steps,\n len(target),\n )\n\n # PAST_FEAT_DYNAMIC_REAL\n past_feat_dynamic_real = None\n if FieldName.PAST_FEAT_DYNAMIC_REAL in ts:\n past_feat_dynamic_real = ts[FieldName.PAST_FEAT_DYNAMIC_REAL]\n\n if past_feat_dynamic_real is None:\n # past_feat_dynamic_real not found, check it was the first ts we encounter or\n # that past_feat_dynamic_real were seen before\n assert_data_error(\n num_past_feat_dynamic_real is None\n or num_past_feat_dynamic_real == 0,\n \"past_feat_dynamic_real was found for some instances but not others.\",\n )\n num_past_feat_dynamic_real = 0\n else:\n if num_past_feat_dynamic_real is None:\n # first num_past_feat_dynamic_real found\n num_past_feat_dynamic_real = len(past_feat_dynamic_real)\n else:\n assert_data_error(\n num_past_feat_dynamic_real\n == len(past_feat_dynamic_real),\n \"Found instances with different number of features in \"\n \"past_feat_dynamic_real, found one with {} and another with {}.\",\n num_past_feat_dynamic_real,\n len(past_feat_dynamic_real),\n )\n\n assert_data_error(\n np.all(np.isfinite(past_feat_dynamic_real)),\n \"Features values have to be finite and cannot exceed single \"\n \"precision floating point range.\",\n )\n\n assert_data_error(num_time_series > 0, \"Time series dataset is empty!\")\n assert_data_error(\n num_time_observations > 0,\n \"Only empty time series found in the dataset!\",\n )\n\n # note this require the above assumption to avoid a division by zero\n # runtime error\n mean_target_length = num_time_observations / num_time_series\n\n # note this require the above assumption to avoid a division by zero\n # runtime error\n mean_target = sum_target / num_time_observations\n mean_abs_target = sum_abs_target / num_time_observations\n\n integer_dataset = integer_dataset and min_target >= 0.0\n\n assert len(scale_histogram) == num_time_series\n\n return DatasetStatistics(\n integer_dataset=integer_dataset,\n max_target=max_target,\n mean_abs_target=mean_abs_target,\n mean_target=mean_target,\n mean_target_length=mean_target_length,\n max_target_length=max_target_length,\n min_target=min_target,\n num_missing_values=num_missing_values,\n feat_static_real=observed_feat_static_real\n if observed_feat_static_real\n else [],\n feat_static_cat=observed_feat_static_cat\n if observed_feat_static_cat\n else [],\n num_past_feat_dynamic_real=num_past_feat_dynamic_real,\n num_feat_dynamic_real=num_feat_dynamic_real,\n num_feat_dynamic_cat=num_feat_dynamic_cat,\n num_time_observations=num_time_observations,\n num_time_series=num_time_series,\n scale_histogram=scale_histogram,\n )\n", "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. 
See the License for the specific language governing\n# permissions and limitations under the License.\n\n# Standard library imports\nfrom typing import List, Optional\nfrom itertools import product\n\n# Third-party imports\nimport mxnet as mx\nimport numpy as np\n\n# First-party imports\nfrom gluonts.core.component import validated\nfrom gluonts.mx import Tensor\nfrom gluonts.mx.distribution import Distribution, DistributionOutput\nfrom gluonts.mx.distribution import EmpiricalDistribution\nfrom gluonts.mx.util import assert_shape\nfrom gluonts.mx.distribution import LowrankMultivariateGaussian\nfrom gluonts.model.deepvar._network import (\n DeepVARNetwork,\n DeepVARTrainingNetwork,\n DeepVARPredictionNetwork,\n)\n\n\ndef reconcile_samples(\n reconciliation_mat: Tensor,\n samples: Tensor,\n seq_axis: Optional[List] = None,\n) -> Tensor:\n \"\"\"\n Computes coherent samples by multiplying unconstrained `samples` with `reconciliation_mat`.\n\n Parameters\n ----------\n reconciliation_mat\n Shape: (target_dim, target_dim)\n samples\n Unconstrained samples\n Shape: `(*batch_shape, target_dim)`\n During training: (num_samples, batch_size, seq_len, target_dim)\n During prediction: (num_parallel_samples x batch_size, seq_len, target_dim)\n seq_axis\n Specifies the list of axes that should be reconciled sequentially.\n By default, all axes are processeed in parallel.\n\n Returns\n -------\n Tensor, shape same as that of `samples`\n Coherent samples\n\n\n \"\"\"\n if not seq_axis:\n return mx.nd.dot(samples, reconciliation_mat, transpose_b=True)\n else:\n num_dims = len(samples.shape)\n\n last_dim_in_seq_axis = num_dims - 1 in seq_axis or -1 in seq_axis\n assert (\n not last_dim_in_seq_axis\n ), f\"The last dimension cannot be processed iteratively. Remove axis {num_dims - 1} (or -1) from `seq_axis`.\"\n\n # In this case, reconcile samples by going over each index in `seq_axis` iteratively.\n # Note that `seq_axis` can be more than one dimension.\n num_seq_axes = len(seq_axis)\n\n # bring the axes to iterate in the beginning\n samples = mx.nd.moveaxis(samples, seq_axis, list(range(num_seq_axes)))\n\n seq_axes_sizes = samples.shape[:num_seq_axes]\n out = [\n mx.nd.dot(samples[idx], reconciliation_mat, transpose_b=True)\n # get the sequential index from the cross-product of their sizes.\n for idx in product(*[range(size) for size in seq_axes_sizes])\n ]\n\n # put the axis in the correct order again\n out = mx.nd.concat(*out, dim=0).reshape(samples.shape)\n out = mx.nd.moveaxis(out, list(range(len(seq_axis))), seq_axis)\n return out\n\n\ndef reconciliation_error(A: Tensor, samples: Tensor) -> float:\n r\"\"\"\n Computes the maximum relative reconciliation error among all the aggregated time series\n\n .. math::\n\n \\max_i \\frac{|y_i - s_i|} {|y_i|},\n\n where :math:`i` refers to the aggregated time series index, :math:`y_i` is the (direct) forecast obtained for\n the :math:`i^{th}` time series and :math:`s_i` is its aggregated forecast obtained by summing the corresponding\n bottom-level forecasts. If :math:`y_i` is zero, then the absolute difference, :math:`|s_i|`, is used instead.\n\n This can be comupted as follows given the constraint matrix A:\n\n .. math::\n\n \\max \\frac{|A \\times samples|} {|samples[:r]|},\n\n where :math:`r` is the number aggregated time series.\n\n Parameters\n ----------\n A\n The constraint matrix A in the equation: Ay = 0 (y being the values/forecasts of all time series in the\n hierarchy).\n samples\n Samples. 
Shape: `(*batch_shape, target_dim)`.\n\n Returns\n -------\n Float\n Reconciliation error\n\n\n \"\"\"\n\n num_agg_ts = A.shape[0]\n forecasts_agg_ts = samples.slice_axis(\n axis=-1, begin=0, end=num_agg_ts\n ).asnumpy()\n\n abs_err = mx.nd.abs(mx.nd.dot(samples, A, transpose_b=True)).asnumpy()\n rel_err = np.where(\n forecasts_agg_ts == 0,\n abs_err,\n abs_err / np.abs(forecasts_agg_ts),\n )\n\n return np.max(rel_err)\n\n\nclass DeepVARHierarchicalNetwork(DeepVARNetwork):\n @validated()\n def __init__(\n self,\n M,\n A,\n num_layers: int,\n num_cells: int,\n cell_type: str,\n history_length: int,\n context_length: int,\n prediction_length: int,\n distr_output: DistributionOutput,\n dropout_rate: float,\n lags_seq: List[int],\n target_dim: int,\n cardinality: List[int] = [1],\n embedding_dimension: int = 1,\n scaling: bool = True,\n seq_axis: List[int] = None,\n **kwargs,\n ) -> None:\n super().__init__(\n num_layers=num_layers,\n num_cells=num_cells,\n cell_type=cell_type,\n history_length=history_length,\n context_length=context_length,\n prediction_length=prediction_length,\n distr_output=distr_output,\n dropout_rate=dropout_rate,\n lags_seq=lags_seq,\n target_dim=target_dim,\n cardinality=cardinality,\n embedding_dimension=embedding_dimension,\n scaling=scaling,\n **kwargs,\n )\n\n self.M = M\n self.A = A\n self.seq_axis = seq_axis\n\n def get_samples_for_loss(self, distr: Distribution) -> Tensor:\n \"\"\"\n Get samples to compute the final loss. These are samples directly drawn from the given `distr` if coherence is\n not enforced yet; otherwise the drawn samples are reconciled.\n\n Parameters\n ----------\n distr\n Distribution instances\n\n Returns\n -------\n samples\n Tensor with shape (num_samples, batch_size, seq_len, target_dim)\n\n \"\"\"\n samples = distr.sample_rep(\n num_samples=self.num_samples_for_loss, dtype=\"float32\"\n )\n\n # Determine which epoch we are currently in.\n self.batch_no += 1\n epoch_no = self.batch_no // self.num_batches_per_epoch + 1\n epoch_frac = epoch_no / self.epochs\n\n if (\n self.coherent_train_samples\n and epoch_frac > self.warmstart_epoch_frac\n ):\n coherent_samples = reconcile_samples(\n reconciliation_mat=self.M,\n samples=samples,\n seq_axis=self.seq_axis,\n )\n assert_shape(coherent_samples, samples.shape)\n return coherent_samples\n else:\n return samples\n\n def loss(self, F, target: Tensor, distr: Distribution) -> Tensor:\n \"\"\"\n Computes loss given the output of the network in the form of distribution.\n The loss is given by:\n\n `self.CRPS_weight` * `loss_CRPS` + `self.likelihood_weight` * `neg_likelihoods`,\n\n where\n * `loss_CRPS` is computed on the samples drawn from the predicted `distr` (optionally after reconciling them),\n * `neg_likelihoods` are either computed directly using the predicted `distr` or from the estimated\n distribution based on (coherent) samples, depending on the `sample_LH` flag.\n\n Parameters\n ----------\n F\n target\n Tensor with shape (batch_size, seq_len, target_dim)\n distr\n Distribution instances\n\n Returns\n -------\n Loss\n Tensor with shape (batch_size, seq_length, 1)\n\n \"\"\"\n\n # Sample from the predicted distribution if we are computing CRPS loss or likelihood using the distribution\n # based on (coherent) samples.\n # Samples shape: (num_samples, batch_size, seq_len, target_dim)\n if self.sample_LH or (self.CRPS_weight > 0.0):\n samples = self.get_samples_for_loss(distr=distr)\n\n if self.sample_LH:\n # Estimate the distribution based on (coherent) samples.\n distr = 
LowrankMultivariateGaussian.fit(F, samples=samples, rank=0)\n\n neg_likelihoods = -distr.log_prob(target).expand_dims(axis=-1)\n\n loss_CRPS = F.zeros_like(neg_likelihoods)\n if self.CRPS_weight > 0.0:\n loss_CRPS = (\n EmpiricalDistribution(samples=samples, event_dim=1)\n .crps_univariate(x=target)\n .expand_dims(axis=-1)\n )\n\n return (\n self.CRPS_weight * loss_CRPS\n + self.likelihood_weight * neg_likelihoods\n )\n\n def post_process_samples(self, samples: Tensor) -> Tensor:\n \"\"\"\n Reconcile samples if `coherent_pred_samples` is True.\n\n Parameters\n ----------\n samples\n Tensor of shape (num_parallel_samples*batch_size, 1, target_dim)\n\n Returns\n -------\n Tensor of coherent samples.\n\n \"\"\"\n if not self.coherent_pred_samples:\n return samples\n else:\n coherent_samples = reconcile_samples(\n reconciliation_mat=self.M,\n samples=samples,\n seq_axis=self.seq_axis,\n )\n assert_shape(coherent_samples, samples.shape)\n\n # assert that A*X_proj ~ 0\n if self.assert_reconciliation:\n assert (\n reconciliation_error(self.A, samples=coherent_samples)\n < self.reconciliation_tol\n )\n\n return coherent_samples\n\n\nclass DeepVARHierarchicalTrainingNetwork(\n DeepVARHierarchicalNetwork, DeepVARTrainingNetwork\n):\n def __init__(\n self,\n num_samples_for_loss: int,\n likelihood_weight: float,\n CRPS_weight: float,\n coherent_train_samples: bool,\n warmstart_epoch_frac: float,\n epochs: float,\n num_batches_per_epoch: float,\n sample_LH: bool,\n **kwargs,\n ) -> None:\n super().__init__(**kwargs)\n self.num_samples_for_loss = num_samples_for_loss\n self.likelihood_weight = likelihood_weight\n self.CRPS_weight = CRPS_weight\n self.coherent_train_samples = coherent_train_samples\n self.warmstart_epoch_frac = warmstart_epoch_frac\n self.epochs = epochs\n self.num_batches_per_epoch = num_batches_per_epoch\n self.batch_no = 0\n self.sample_LH = sample_LH\n\n # Assert CRPS_weight, likelihood_weight, and coherent_train_samples have harmonious values\n assert self.CRPS_weight >= 0.0, \"CRPS weight must be non-negative\"\n assert (\n self.likelihood_weight >= 0.0\n ), \"Likelihood weight must be non-negative!\"\n assert (\n self.likelihood_weight + self.CRPS_weight > 0.0\n ), \"At least one of CRPS or likelihood weights must be non-zero\"\n if self.CRPS_weight == 0.0 and self.coherent_train_samples:\n assert \"No sampling being performed. coherent_train_samples flag is ignored\"\n if not self.sample_LH == 0.0 and self.coherent_train_samples:\n assert \"No sampling being performed. coherent_train_samples flag is ignored\"\n if self.likelihood_weight == 0.0 and self.sample_LH:\n assert (\n \"likelihood_weight is 0 but sample likelihoods are still being calculated. \"\n \"Set sample_LH=0 when likelihood_weight=0\"\n )\n\n\nclass DeepVARHierarchicalPredictionNetwork(\n DeepVARHierarchicalNetwork, DeepVARPredictionNetwork\n):\n @validated()\n def __init__(\n self,\n num_parallel_samples: int,\n assert_reconciliation: bool,\n coherent_pred_samples: bool,\n reconciliation_tol: float,\n **kwargs,\n ) -> None:\n super().__init__(num_parallel_samples=num_parallel_samples, **kwargs)\n self.coherent_pred_samples = coherent_pred_samples\n self.assert_reconciliation = assert_reconciliation\n self.reconciliation_tol = reconciliation_tol\n" ]
[ [ "torch.nn.utils.weight_norm", "torch.nn.Sequential", "torch.nn.LeakyReLU", "torch.nn.Conv1d" ], [ "numpy.log", "numpy.allclose", "numpy.random.seed", "numpy.linspace", "numpy.abs", "numpy.random.normal", "numpy.random.uniform", "matplotlib.pyplot.show", "numpy.histogram", "matplotlib.pyplot.hist" ], [ "numpy.isnan", "numpy.mod", "numpy.abs", "numpy.isfinite" ], [ "numpy.max", "numpy.abs" ] ]
marcbue/spikeinterface
[ "d3462eeabcb9f0b9816004dd47355e40f4de1ac5", "d3462eeabcb9f0b9816004dd47355e40f4de1ac5", "d3462eeabcb9f0b9816004dd47355e40f4de1ac5" ]
[ "spikeinterface/comparison/groundtruthstudy.py", "spikeinterface/core/tests/test_baserecording.py", "spikeinterface/extractors/mdaextractors.py" ]
[ "from pathlib import Path\nimport os\nimport shutil\nimport numpy as np\nimport pandas as pd\n\nfrom spikeinterface.core import load_extractor\nfrom spikeinterface.extractors import NpzSortingExtractor\nfrom spikeinterface.sorters import sorter_dict, run_sorters\n\nfrom spikeinterface import WaveformExtractor\nfrom spikeinterface.toolkit import compute_quality_metrics\n\nfrom .comparisontools import _perf_keys\nfrom .groundtruthcomparison import compare_sorter_to_ground_truth\n\nfrom .studytools import (setup_comparison_study, get_rec_names, get_recordings,\n iter_output_folders, iter_computed_names, iter_computed_sorting, collect_run_times)\n\n\nclass GroundTruthStudy:\n def __init__(self, study_folder=None):\n self.study_folder = Path(study_folder)\n self._is_scanned = False\n self.computed_names = None\n self.rec_names = None\n self.sorter_names = None\n\n self.scan_folder()\n\n self.comparisons = None\n self.exhaustive_gt = None\n\n def __repr__(self):\n t = 'Groud truth study\\n'\n t += ' ' + str(self.study_folder) + '\\n'\n t += ' recordings: {} {}\\n'.format(len(self.rec_names), self.rec_names)\n if len(self.sorter_names):\n t += ' sorters: {} {}\\n'.format(len(self.sorter_names), self.sorter_names)\n\n return t\n\n def scan_folder(self):\n self.rec_names = get_rec_names(self.study_folder)\n # scan computed names\n self.computed_names = list(iter_computed_names(self.study_folder)) # list of pair (rec_name, sorter_name)\n self.sorter_names = np.unique([e for _, e in iter_computed_names(self.study_folder)]).tolist()\n self._is_scanned = True\n\n @classmethod\n def create(cls, study_folder, gt_dict, **job_kwargs):\n setup_comparison_study(study_folder, gt_dict, **job_kwargs)\n return cls(study_folder)\n\n def run_sorters(self, sorter_list, mode_if_folder_exists='keep', **kwargs):\n\n sorter_folders = self.study_folder / 'sorter_folders'\n recording_dict = get_recordings(self.study_folder)\n\n run_sorters(sorter_list, recording_dict, sorter_folders,\n with_output=False, mode_if_folder_exists=mode_if_folder_exists, **kwargs)\n\n # results are copied so the heavy sorter_folders can be removed\n self.copy_sortings()\n\n def _check_rec_name(self, rec_name):\n if not self._is_scanned:\n self.scan_folder()\n if len(self.rec_names) > 1 and rec_name is None:\n raise Exception(\"Pass 'rec_name' parameter to select which recording to use.\")\n elif len(self.rec_names) == 1:\n rec_name = self.rec_names[0]\n else:\n rec_name = self.rec_names[self.rec_names.index(rec_name)]\n return rec_name\n\n def get_ground_truth(self, rec_name=None):\n rec_name = self._check_rec_name(rec_name)\n sorting = load_extractor(self.study_folder / 'ground_truth' / rec_name)\n return sorting\n\n def get_recording(self, rec_name=None):\n rec_name = self._check_rec_name(rec_name)\n rec = load_extractor(self.study_folder / 'raw_files' / rec_name)\n return rec\n\n def get_sorting(self, sort_name, rec_name=None):\n rec_name = self._check_rec_name(rec_name)\n\n selected_sorting = None\n if sort_name in self.sorter_names:\n for r_name, sorter_name, sorting in iter_computed_sorting(self.study_folder):\n if sort_name == sorter_name and r_name == rec_name:\n selected_sorting = sorting\n return selected_sorting\n\n def copy_sortings(self):\n\n sorter_folders = self.study_folder / 'sorter_folders'\n sorting_folders = self.study_folder / 'sortings'\n log_olders = self.study_folder / 'sortings' / 'run_log'\n\n log_olders.mkdir(parents=True, exist_ok=True)\n\n for rec_name, sorter_name, output_folder in 
iter_output_folders(sorter_folders):\n SorterClass = sorter_dict[sorter_name]\n fname = rec_name + '[#]' + sorter_name\n npz_filename = sorting_folders / (fname + '.npz')\n\n sorting = SorterClass.get_result_from_folder(output_folder)\n try:\n sorting = SorterClass.get_result_from_folder(output_folder)\n NpzSortingExtractor.write_sorting(sorting, npz_filename)\n except:\n if npz_filename.is_file():\n npz_filename.unlink()\n if (output_folder / 'spikeinterface_log.json').is_file():\n shutil.copyfile(output_folder / 'spikeinterface_log.json',\n sorting_folders / 'run_log' / (fname + '.json'))\n\n self.scan_folder()\n\n def run_comparisons(self, exhaustive_gt=False, **kwargs):\n self.comparisons = {}\n for rec_name, sorter_name, sorting in iter_computed_sorting(self.study_folder):\n gt_sorting = self.get_ground_truth(rec_name)\n sc = compare_sorter_to_ground_truth(gt_sorting, sorting, exhaustive_gt=exhaustive_gt, **kwargs)\n self.comparisons[(rec_name, sorter_name)] = sc\n self.exhaustive_gt = exhaustive_gt\n\n def aggregate_run_times(self):\n return collect_run_times(self.study_folder)\n\n def aggregate_performance_by_unit(self):\n assert self.comparisons is not None, 'run_comparisons first'\n\n perf_by_unit = []\n for rec_name, sorter_name, sorting in iter_computed_sorting(self.study_folder):\n comp = self.comparisons[(rec_name, sorter_name)]\n\n perf = comp.get_performance(method='by_unit', output='pandas')\n perf['rec_name'] = rec_name\n perf['sorter_name'] = sorter_name\n perf = perf.reset_index()\n perf_by_unit.append(perf)\n\n perf_by_unit = pd.concat(perf_by_unit)\n perf_by_unit = perf_by_unit.set_index(['rec_name', 'sorter_name', 'gt_unit_id'])\n\n return perf_by_unit\n\n def aggregate_count_units(self, well_detected_score=None, redundant_score=None, overmerged_score=None):\n assert self.comparisons is not None, 'run_comparisons first'\n\n index = pd.MultiIndex.from_tuples(self.computed_names, names=['rec_name', 'sorter_name'])\n\n count_units = pd.DataFrame(index=index, columns=['num_gt', 'num_sorter', 'num_well_detected', 'num_redundant',\n 'num_overmerged'])\n\n if self.exhaustive_gt:\n count_units['num_false_positive'] = None\n count_units['num_bad'] = None\n\n for rec_name, sorter_name, sorting in iter_computed_sorting(self.study_folder):\n gt_sorting = self.get_ground_truth(rec_name)\n comp = self.comparisons[(rec_name, sorter_name)]\n\n count_units.loc[(rec_name, sorter_name), 'num_gt'] = len(gt_sorting.get_unit_ids())\n count_units.loc[(rec_name, sorter_name), 'num_sorter'] = len(sorting.get_unit_ids())\n count_units.loc[(rec_name, sorter_name), 'num_well_detected'] = \\\n comp.count_well_detected_units(well_detected_score)\n if self.exhaustive_gt:\n count_units.loc[(rec_name, sorter_name), 'num_overmerged'] = \\\n comp.count_overmerged_units(overmerged_score)\n count_units.loc[(rec_name, sorter_name), 'num_redundant'] = \\\n comp.count_redundant_units(redundant_score)\n count_units.loc[(rec_name, sorter_name), 'num_false_positive'] = \\\n comp.count_false_positive_units(redundant_score)\n count_units.loc[(rec_name, sorter_name), 'num_bad'] = comp.count_bad_units()\n\n return count_units\n\n def aggregate_dataframes(self, copy_into_folder=True, **karg_thresh):\n dataframes = {}\n dataframes['run_times'] = self.aggregate_run_times().reset_index()\n perfs = self.aggregate_performance_by_unit()\n\n dataframes['perf_by_unit'] = perfs.reset_index()\n dataframes['count_units'] = self.aggregate_count_units(**karg_thresh).reset_index()\n\n if copy_into_folder:\n tables_folder = 
self.study_folder / 'tables'\n tables_folder.mkdir(parents=True, exist_ok=True)\n\n for name, df in dataframes.items():\n df.to_csv(str(tables_folder / (name + '.csv')), sep='\\t', index=False)\n\n return dataframes\n\n def compute_metrics(self, rec_name, metric_names=['snr'],\n ms_before=3., ms_after=4., max_spikes_per_unit=500,\n n_jobs=-1, total_memory='1G', **snr_kwargs):\n\n rec = self.get_recording(rec_name)\n gt_sorting = self.get_ground_truth(rec_name)\n\n # waveform extractor\n waveform_folder = self.study_folder / 'metrics' / f'waveforms_{rec_name}'\n if waveform_folder.is_dir():\n shutil.rmtree(waveform_folder)\n we = WaveformExtractor.create(rec, gt_sorting, waveform_folder)\n we.set_params(ms_before=ms_before, ms_after=ms_after, max_spikes_per_unit=max_spikes_per_unit)\n we.run(n_jobs=n_jobs, total_memory=total_memory)\n\n # metrics\n metrics = compute_quality_metrics(we, metric_names=metric_names)\n filename = self.study_folder / 'metrics' / f'metrics _{rec_name}.txt'\n metrics.to_csv(filename, sep='\\t', index=True)\n\n return metrics\n\n def get_metrics(self, rec_name=None, **metric_kwargs):\n \"\"\"\n Load or compute units metrics for a given recording.\n \"\"\"\n rec_name = self._check_rec_name(rec_name)\n metrics_folder = self.study_folder / 'metrics'\n metrics_folder.mkdir(parents=True, exist_ok=True)\n\n filename = self.study_folder / 'metrics' / f'metrics _{rec_name}.txt'\n if filename.is_file():\n metrics = pd.read_csv(filename, sep='\\t', index_col=0)\n gt_sorting = self.get_ground_truth(rec_name)\n metrics.index = gt_sorting.unit_ids\n else:\n metrics = self.compute_metrics(rec_name, **metric_kwargs)\n\n metrics.index.name = 'unit_id'\n # add rec name columns \n metrics['rec_name'] = rec_name\n\n return metrics\n\n def get_units_snr(self, rec_name=None, **metric_kwargs):\n \"\"\"\n \n \"\"\"\n metric = self.get_metrics(rec_name=rec_name, **metric_kwargs)\n return metric['snr']\n\n def concat_all_snr(self):\n snr = []\n for rec_name in self.rec_names:\n df = self.get_units_snr(rec_name)\n df = df.reset_index()\n snr.append(df)\n snr = pd.concat(snr)\n snr = snr.set_index(['rec_name', 'gt_unit_id'])\n return snr\n", "\"\"\"\ntest for BaseRecording are done with BinaryRecordingExtractor.\nbut check only for BaseRecording general methods.\n\"\"\"\nimport shutil\nfrom pathlib import Path\nimport pytest\nimport numpy as np\n\nfrom probeinterface import Probe\n\nfrom spikeinterface.core import BinaryRecordingExtractor, NumpyRecording, load_extractor\nfrom spikeinterface.core.base import BaseExtractor\n\n\n# file and folder created\n\n\ndef _clean_all():\n cache_folder = './my_cache_folder'\n if Path(cache_folder).exists():\n shutil.rmtree(cache_folder)\n\n\ndef setup_module():\n _clean_all()\n\n\ndef teardown_module():\n _clean_all()\n\n\ndef test_BaseRecording():\n num_seg = 2\n num_chan = 3\n num_samples = 30\n sampling_frequency = 10000\n dtype = 'int16'\n\n file_paths = [f'test_base_recording_{i}.raw' for i in range(num_seg)]\n for i in range(num_seg):\n a = np.memmap(file_paths[i], dtype=dtype, mode='w+', shape=(num_samples, num_chan))\n a[:] = np.random.randn(*a.shape).astype(dtype)\n rec = BinaryRecordingExtractor(file_paths, sampling_frequency, num_chan, dtype)\n\n assert rec.get_num_segments() == 2\n assert rec.get_num_channels() == 3\n\n assert np.all(rec.ids_to_indices([0, 1, 2]) == [0, 1, 2])\n assert np.all(rec.ids_to_indices([0, 1, 2], prefer_slice=True) == slice(0, 3, None))\n\n # annotations / properties\n rec.annotate(yep='yop')\n assert 
rec.get_annotation('yep') == 'yop'\n\n rec.set_channel_groups([0, 0, 1])\n\n rec.set_property('quality', [1., 3.3, np.nan])\n values = rec.get_property('quality')\n assert np.all(values[:2] == [1., 3.3, ])\n\n # dump/load dict\n d = rec.to_dict()\n rec2 = BaseExtractor.from_dict(d)\n rec3 = load_extractor(d)\n\n # dump/load json\n rec.dump_to_json('test_BaseRecording.json')\n rec2 = BaseExtractor.load('test_BaseRecording.json')\n rec3 = load_extractor('test_BaseRecording.json')\n\n # dump/load pickle\n rec.dump_to_pickle('test_BaseRecording.pkl')\n rec2 = BaseExtractor.load('test_BaseRecording.pkl')\n rec3 = load_extractor('test_BaseRecording.pkl')\n\n # dump/load dict - relative\n d = rec.to_dict(relative_to=\".\")\n rec2 = BaseExtractor.from_dict(d, base_folder=\".\")\n rec3 = load_extractor(d, base_folder=\".\")\n\n # dump/load json\n rec.dump_to_json('test_BaseRecording_rel.json', relative_to=\".\")\n rec2 = BaseExtractor.load('test_BaseRecording_rel.json', base_folder=\".\")\n rec3 = load_extractor('test_BaseRecording_rel.json', base_folder=\".\")\n\n # cache to binary\n cache_folder = Path('./my_cache_folder')\n folder = cache_folder / 'simple_recording'\n rec.save(format='binary', folder=folder)\n rec2 = BaseExtractor.load_from_folder(folder)\n assert 'quality' in rec2.get_property_keys()\n values = rec2.get_property('quality')\n assert values[0] == 1.\n assert values[1] == 3.3\n assert np.isnan(values[2])\n\n groups = rec2.get_channel_groups()\n assert np.array_equal(groups, [0, 0, 1])\n\n # but also possible\n rec3 = BaseExtractor.load('./my_cache_folder/simple_recording')\n\n # cache to memory\n rec4 = rec3.save(format='memory')\n\n traces4 = rec4.get_traces(segment_index=0)\n traces = rec.get_traces(segment_index=0)\n assert np.array_equal(traces4, traces)\n\n # cache joblib several jobs\n folder = cache_folder / 'simple_recording2'\n rec2 = rec.save(folder=folder, chunk_size=10, n_jobs=4)\n traces2 = rec2.get_traces(segment_index=0)\n\n # set/get Probe only 2 channels\n probe = Probe(ndim=2)\n positions = [[0., 0.], [0., 15.], [0, 30.]]\n probe.set_contacts(positions=positions, shapes='circle', shape_params={'radius': 5})\n probe.set_device_channel_indices([2, -1, 0])\n probe.create_auto_shape()\n\n rec_p = rec.set_probe(probe, group_mode='by_shank')\n rec_p = rec.set_probe(probe, group_mode='by_probe')\n positions2 = rec_p.get_channel_locations()\n assert np.array_equal(positions2, [[0, 30.], [0., 0.]])\n\n probe2 = rec_p.get_probe()\n positions3 = probe2.contact_positions\n assert np.array_equal(positions2, positions3)\n\n assert np.array_equal(probe2.device_channel_indices, [0, 1])\n\n # test save with probe\n folder = cache_folder / 'simple_recording3'\n rec2 = rec_p.save(folder=folder, chunk_size=10, n_jobs=2)\n rec2 = load_extractor(folder)\n probe2 = rec2.get_probe()\n assert np.array_equal(probe2.contact_positions, [[0, 30.], [0., 0.]])\n positions2 = rec_p.get_channel_locations()\n assert np.array_equal(positions2, [[0, 30.], [0., 0.]])\n traces2 = rec2.get_traces(segment_index=0)\n assert np.array_equal(traces2, rec_p.get_traces(segment_index=0))\n\n # from probeinterface.plotting import plot_probe_group, plot_probe\n # import matplotlib.pyplot as plt\n # plot_probe(probe)\n # plot_probe(probe2)\n # plt.show()\n\n # test return_scale\n sampling_frequency = 30000\n traces = np.zeros((1000, 5), dtype='int16')\n rec_int16 = NumpyRecording([traces], sampling_frequency)\n assert rec_int16.get_dtype() == 'int16'\n\n traces_int16 = rec_int16.get_traces()\n assert 
traces_int16.dtype == 'int16'\n # return_scaled raise error when no gain_to_uV/offset_to_uV properties\n with pytest.raises(ValueError):\n traces_float32 = rec_int16.get_traces(return_scaled=True)\n rec_int16.set_property('gain_to_uV', [.195] * 5)\n rec_int16.set_property('offset_to_uV', [0.] * 5)\n traces_float32 = rec_int16.get_traces(return_scaled=True)\n assert traces_float32.dtype == 'float32'\n\n\nif __name__ == '__main__':\n _clean_all()\n test_BaseRecording()\n", "from spikeinterface.core import BaseRecording, BaseRecordingSegment, BaseSorting, BaseSortingSegment\nfrom spikeinterface.core.core_tools import write_binary_recording\n\nfrom typing import Union, List\nimport json\nimport numpy as np\nfrom pathlib import Path\nimport struct\nimport os\nimport tempfile\nimport traceback\n\n\nclass MdaRecordingExtractor(BaseRecording):\n extractor_name = 'MdaRecording'\n has_default_locations = True\n has_unscaled = False\n installed = True # check at class level if installed or not\n is_writable = True\n mode = 'folder'\n installation_mesg = \"\" # error message when not installed\n\n def __init__(self, folder_path, raw_fname='raw.mda', params_fname='params.json', geom_fname='geom.csv'):\n folder_path = Path(folder_path)\n self._folder_path = folder_path\n self._dataset_params = read_dataset_params(self._folder_path, params_fname)\n self._timeseries_path = self._folder_path / raw_fname\n geom = np.loadtxt(self._folder_path / geom_fname, delimiter=',', ndmin=2)\n self._diskreadmda = DiskReadMda(str(self._timeseries_path))\n dtype = self._diskreadmda.dt()\n num_channels = self._diskreadmda.N1()\n assert geom.shape[0] == self._diskreadmda.N1(), f'Incompatible dimensions between geom.csv and timeseries ' \\\n f'file: {geom.shape[0]} <> {self._diskreadmda.N1()}'\n BaseRecording.__init__(self, sampling_frequency=self._dataset_params['samplerate'] * 1.0,\n channel_ids=np.arange(num_channels), dtype=dtype)\n rec_segment = MdaRecordingSegment(self._diskreadmda)\n self.add_recording_segment(rec_segment)\n self.set_dummy_probe_from_locations(geom)\n self._kwargs = {'folder_path': str(Path(folder_path).absolute()),\n 'raw_fname': raw_fname, 'params_fname': params_fname,\n 'geom_fname': geom_fname}\n\n @staticmethod\n def write_recording(recording, save_path, params=dict(), raw_fname='raw.mda', params_fname='params.json',\n geom_fname='geom.csv', verbose=True, dtype=None, **job_kwargs):\n \"\"\"\n Writes recording to file in MDA format.\n\n Parameters\n ----------\n recording: RecordingExtractor\n The recording extractor to be saved\n save_path: str or Path\n The folder in which the Mda files are saved\n params: dictionary\n Dictionary with optional parameters to save metadata. Sampling frequency is appended to this dictionary.\n raw_fname: str\n File name of raw file (default raw.mda)\n params_fname: str\n File name of params file (default params.json)\n geom_fname: str\n File name of geom file (default geom.csv)\n dtype: dtype\n dtype to be used. 
If None dtype is same as recording traces.\n verbose: bool\n If True, output is verbose\n **job_kwargs:\n Use by job_tools modules to set:\n * chunk_size or chunk_memory, or total_memory\n * n_jobs\n * progress_bar\n \"\"\"\n assert recording.get_num_segments() == 1, \"MdaRecording.write_recording() can only write a single segment \" \\\n \"recording\"\n save_path = Path(save_path)\n save_path.mkdir(parents=True, exist_ok=True)\n save_file_path = save_path / raw_fname\n parent_dir = save_path\n num_chan = recording.get_num_channels()\n num_frames = recording.get_num_frames(0)\n\n geom = recording.get_channel_locations()\n\n if dtype is None:\n dtype = recording.get_dtype()\n\n if dtype == 'float':\n dtype = 'float32'\n if dtype == 'int':\n dtype = 'int16'\n\n header = MdaHeader(dt0=dtype, dims0=(num_chan, num_frames))\n header_size = header.header_size\n\n write_binary_recording(recording, file_paths=save_file_path, dtype=dtype,\n byte_offset=header_size, verbose=verbose, add_file_extension=False, **job_kwargs)\n\n with save_file_path.open('rb+') as f:\n header.write(f)\n\n params[\"samplerate\"] = float(recording.get_sampling_frequency())\n with (parent_dir / params_fname).open('w') as f:\n json.dump(params, f)\n np.savetxt(str(parent_dir / geom_fname), geom, delimiter=',')\n\n\nclass MdaRecordingSegment(BaseRecordingSegment):\n def __init__(self, diskreadmda):\n self._diskreadmda = diskreadmda\n BaseRecordingSegment.__init__(self)\n self._num_samples = self._diskreadmda.N2()\n\n def get_num_samples(self):\n \"\"\"Returns the number of samples in this signal block\n\n Returns:\n SampleIndex: Number of samples in the signal block\n \"\"\"\n return self._num_samples\n\n def get_traces(self,\n start_frame: Union[int, None] = None,\n end_frame: Union[int, None] = None,\n channel_indices: Union[List, None] = None,\n ) -> np.ndarray:\n if start_frame is None:\n start_frame = 0\n if end_frame is None:\n end_frame = self.get_num_samples()\n recordings = self._diskreadmda.readChunk(i1=0, i2=start_frame, N1=self._diskreadmda.N1(),\n N2=end_frame - start_frame)\n recordings = recordings[channel_indices, :].T\n return recordings\n\n\nclass MdaSortingExtractor(BaseSorting):\n extractor_name = 'MdaSorting'\n installed = True # check at class level if installed or not\n is_writable = True\n mode = 'file'\n installation_mesg = \"\" # error message when not installed\n\n def __init__(self, file_path, sampling_frequency):\n firings = readmda(str(file_path))\n labels = firings[2, :]\n unit_ids = np.unique(labels).astype(int)\n BaseSorting.__init__(self, unit_ids=unit_ids, sampling_frequency=sampling_frequency)\n\n sorting_segment = MdaSortingSegment(firings)\n self.add_sorting_segment(sorting_segment)\n\n self._kwargs = {'file_path': str(Path(file_path).absolute()), 'sampling_frequency': sampling_frequency}\n\n @staticmethod\n def write_sorting(sorting, save_path, write_primary_channels=False):\n assert sorting.get_num_segments() == 1, \"MdaSorting.write_sorting() can only write a single segment \" \\\n \"sorting\"\n unit_ids = sorting.get_unit_ids()\n times_list = []\n labels_list = []\n primary_channels_list = []\n for unit_id in unit_ids:\n times = sorting.get_unit_spike_train(unit_id=unit_id)\n times_list.append(times)\n labels_list.append(np.ones(times.shape) * unit_id)\n if write_primary_channels:\n if 'max_channel' in sorting.get_unit_property_names(unit_id):\n primary_channels_list.append([sorting.get_unit_property(unit_id, 'max_channel')] * times.shape[0])\n else:\n raise ValueError(\n \"Unable to 
write primary channels because 'max_channel' spike feature not set in unit \" + str(\n unit_id))\n else:\n primary_channels_list.append(np.zeros(times.shape))\n all_times = _concatenate(times_list)\n all_labels = _concatenate(labels_list)\n all_primary_channels = _concatenate(primary_channels_list)\n sort_inds = np.argsort(all_times)\n all_times = all_times[sort_inds]\n all_labels = all_labels[sort_inds]\n all_primary_channels = all_primary_channels[sort_inds]\n L = len(all_times)\n firings = np.zeros((3, L))\n firings[0, :] = all_primary_channels\n firings[1, :] = all_times\n firings[2, :] = all_labels\n\n writemda64(firings, save_path)\n\n\nclass MdaSortingSegment(BaseSortingSegment):\n def __init__(self, firings):\n self._firings = firings\n self._max_channels = self._firings[0, :]\n self._spike_times = self._firings[1, :]\n self._labels = self._firings[2, :]\n BaseSortingSegment.__init__(self)\n\n def get_unit_spike_train(self,\n unit_id,\n start_frame: Union[int, None] = None,\n end_frame: Union[int, None] = None,\n ) -> np.ndarray:\n # must be implemented in subclass\n if start_frame is None:\n start_frame = 0\n if end_frame is None:\n end_frame = np.inf\n inds = np.where(\n (self._labels == unit_id) & (start_frame <= self._spike_times) & (self._spike_times < end_frame))\n return np.rint(self._spike_times[inds]).astype(int)\n\n\ndef read_mda_recording(folder_path, **kwargs):\n recording = MdaRecordingExtractor(folder_path, **kwargs)\n return recording\n\n\ndef read_mda_sorting(file_path, **kwargs):\n sorting = MdaSortingExtractor(file_path, **kwargs)\n return sorting\n\n\ndef _concatenate(list):\n if len(list) == 0:\n return np.array([])\n return np.concatenate(list)\n\n\ndef read_dataset_params(dsdir, params_fname):\n fname1 = dsdir / params_fname\n if not fname1.is_file():\n raise Exception('Dataset parameter file does not exist: ' + fname1)\n with open(fname1) as f:\n return json.load(f)\n\n\n######### MDAIO ###########\nclass MdaHeader:\n def __init__(self, dt0, dims0):\n uses64bitdims = (max(dims0) > 2e9)\n self.uses64bitdims = uses64bitdims\n self.dt_code = _dt_code_from_dt(dt0)\n self.dt = dt0\n self.num_bytes_per_entry = get_num_bytes_per_entry_from_dt(dt0)\n self.num_dims = len(dims0)\n self.dimprod = np.prod(dims0)\n self.dims = dims0\n if uses64bitdims:\n self.header_size = 3 * 4 + self.num_dims * 8\n else:\n self.header_size = (3 + self.num_dims) * 4\n\n def write(self, f):\n H = self\n _write_int32(f, H.dt_code)\n _write_int32(f, H.num_bytes_per_entry)\n if H.uses64bitdims:\n _write_int32(f, -H.num_dims)\n for j in range(0, H.num_dims):\n _write_int64(f, H.dims[j])\n else:\n _write_int32(f, H.num_dims)\n for j in range(0, H.num_dims):\n _write_int32(f, H.dims[j])\n\n\ndef npy_dtype_to_string(dt):\n str = dt.str[1:]\n map = {\n \"f2\": 'float16',\n \"f4\": 'float32',\n \"f8\": 'float64',\n \"i1\": 'int8',\n \"i2\": 'int16',\n \"i4\": 'int32',\n \"u2\": 'uint16',\n \"u4\": 'uint32'\n }\n return map[str]\n\n\nclass DiskReadMda:\n def __init__(self, path, header=None):\n self._npy_mode = False\n self._path = path\n if file_extension(path) == '.npy':\n raise Exception('DiskReadMda implementation has not been tested for npy files')\n self._npy_mode = True\n if header:\n raise Exception('header not allowed in npy mode for DiskReadMda')\n if header:\n self._header = header\n self._header.header_size = 0\n else:\n self._header = _read_header(self._path)\n\n def dims(self):\n if self._npy_mode:\n A = np.load(self._path, mmap_mode='r')\n return A.shape\n return 
self._header.dims\n\n def N1(self):\n return self.dims()[0]\n\n def N2(self):\n return self.dims()[1]\n\n def N3(self):\n return self.dims()[2]\n\n def dt(self):\n if self._npy_mode:\n A = np.load(self._path, mmap_mode='r')\n return npy_dtype_to_string(A.dtype)\n return self._header.dt\n\n def numBytesPerEntry(self):\n if self._npy_mode:\n A = np.load(self._path, mmap_mode='r')\n return A.itemsize\n return self._header.num_bytes_per_entry\n\n def readChunk(self, i1=-1, i2=-1, i3=-1, N1=1, N2=1, N3=1):\n # print(\"Reading chunk {} {} {} {} {} {}\".format(i1,i2,i3,N1,N2,N3))\n if i2 < 0:\n if self._npy_mode:\n A = np.load(self._path, mmap_mode='r')\n return A[:, :, i1:i1 + N1]\n return self._read_chunk_1d(i1, N1)\n elif i3 < 0:\n if N1 != self.N1():\n print(\"Unable to support N1 {} != {}\".format(N1, self.N1()))\n return None\n X = self._read_chunk_1d(i1 + N1 * i2, N1 * N2)\n\n if X is None:\n print('Problem reading chunk from file: ' + self._path)\n return None\n if self._npy_mode:\n A = np.load(self._path, mmap_mode='r')\n return A[:, i2:i2 + N2]\n return np.reshape(X, (N1, N2), order='F')\n else:\n if N1 != self.N1():\n print(\"Unable to support N1 {} != {}\".format(N1, self.N1()))\n return None\n if N2 != self.N2():\n print(\"Unable to support N2 {} != {}\".format(N2, self.N2()))\n return None\n if self._npy_mode:\n A = np.load(self._path, mmap_mode='r')\n return A[:, :, i3:i3 + N3]\n X = self._read_chunk_1d(i1 + N1 * i2 + N1 * N2 * i3, N1 * N2 * N3)\n return np.reshape(X, (N1, N2, N3), order='F')\n\n def _read_chunk_1d(self, i, N):\n offset = self._header.header_size + self._header.num_bytes_per_entry * i\n if is_url(self._path):\n tmp_fname = _download_bytes_to_tmpfile(self._path, offset, offset + self._header.num_bytes_per_entry * N)\n try:\n ret = self._read_chunk_1d_helper(tmp_fname, N, offset=0)\n except:\n ret = None\n return ret\n return self._read_chunk_1d_helper(self._path, N, offset=offset)\n\n def _read_chunk_1d_helper(self, path0, N, *, offset):\n f = open(path0, \"rb\")\n try:\n f.seek(offset)\n ret = np.fromfile(f, dtype=self._header.dt, count=N)\n f.close()\n return ret\n except Exception as e: # catch *all* exceptions\n print(e)\n f.close()\n return None\n\n\ndef is_url(path):\n return path.startswith('http://') or path.startswith('https://')\n\n\ndef _download_bytes_to_tmpfile(url, start, end):\n try:\n import requests\n except:\n raise Exception('Unable to import module: requests')\n headers = {\"Range\": \"bytes={}-{}\".format(start, end - 1)}\n r = requests.get(url, headers=headers, stream=True)\n fd, tmp_fname = tempfile.mkstemp()\n with open(tmp_fname, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk:\n f.write(chunk)\n return tmp_fname\n\n\ndef _read_header(path):\n if is_url(path):\n tmp_fname = _download_bytes_to_tmpfile(path, 0, 200)\n if not tmp_fname:\n raise Exception('Problem downloading bytes from ' + path)\n try:\n ret = _read_header(tmp_fname)\n except:\n ret = None\n Path(tmp_fname).unlink()\n return ret\n\n f = open(path, \"rb\")\n try:\n dt_code = _read_int32(f)\n num_bytes_per_entry = _read_int32(f)\n num_dims = _read_int32(f)\n uses64bitdims = False\n if num_dims < 0:\n uses64bitdims = True\n num_dims = -num_dims\n if num_dims < 1 or num_dims > 6: # allow single dimension as of 12/6/17\n print(\"Invalid number of dimensions: {}\".format(num_dims))\n f.close()\n return None\n dims = []\n dimprod = 1\n if uses64bitdims:\n for j in range(0, num_dims):\n tmp0 = _read_int64(f)\n dimprod = dimprod * tmp0\n dims.append(tmp0)\n 
else:\n for j in range(0, num_dims):\n tmp0 = _read_int32(f)\n dimprod = dimprod * tmp0\n dims.append(tmp0)\n dt = _dt_from_dt_code(dt_code)\n if dt is None:\n print(\"Invalid data type code: {}\".format(dt_code))\n f.close()\n return None\n H = MdaHeader(dt, dims)\n if uses64bitdims:\n H.uses64bitdims = True\n H.header_size = 3 * 4 + H.num_dims * 8\n f.close()\n return H\n except Exception as e: # catch *all* exceptions\n print(e)\n f.close()\n return None\n\n\ndef _dt_from_dt_code(dt_code):\n if dt_code == -2:\n dt = 'uint8'\n elif dt_code == -3:\n dt = 'float32'\n elif dt_code == -4:\n dt = 'int16'\n elif dt_code == -5:\n dt = 'int32'\n elif dt_code == -6:\n dt = 'uint16'\n elif dt_code == -7:\n dt = 'float64'\n elif dt_code == -8:\n dt = 'uint32'\n else:\n dt = None\n return dt\n\n\ndef _dt_code_from_dt(dt):\n if dt == 'uint8':\n return -2\n if dt == 'float32':\n return -3\n if dt == 'int16':\n return -4\n if dt == 'int32':\n return -5\n if dt == 'uint16':\n return -6\n if dt == 'float64':\n return -7\n if dt == 'uint32':\n return -8\n return None\n\n\ndef get_num_bytes_per_entry_from_dt(dt):\n if dt == 'uint8':\n return 1\n if dt == 'float32':\n return 4\n if dt == 'int16':\n return 2\n if dt == 'int32':\n return 4\n if dt == 'uint16':\n return 2\n if dt == 'float64':\n return 8\n if dt == 'uint32':\n return 4\n return None\n\n\ndef readmda_header(path):\n if file_extension(path) == '.npy':\n raise Exception('Cannot read mda header for .npy file.')\n return _read_header(path)\n\n\ndef _write_header(path, H, rewrite=False):\n if rewrite:\n f = open(path, \"r+b\")\n else:\n f = open(path, \"wb\")\n try:\n _write_int32(f, H.dt_code)\n _write_int32(f, H.num_bytes_per_entry)\n if H.uses64bitdims:\n _write_int32(f, -H.num_dims)\n for j in range(0, H.num_dims):\n _write_int64(f, H.dims[j])\n else:\n _write_int32(f, H.num_dims)\n for j in range(0, H.num_dims):\n _write_int32(f, H.dims[j])\n f.close()\n return True\n except Exception as e: # catch *all* exceptions\n print(e)\n f.close()\n return False\n\n\ndef readmda(path):\n if file_extension(path) == '.npy':\n return readnpy(path);\n H = _read_header(path)\n if H is None:\n print(\"Problem reading header of: {}\".format(path))\n return None\n f = open(path, \"rb\")\n try:\n f.seek(H.header_size)\n # This is how I do the column-major order\n ret = np.fromfile(f, dtype=H.dt, count=H.dimprod)\n ret = np.reshape(ret, H.dims, order='F')\n f.close()\n return ret\n except Exception as e: # catch *all* exceptions\n print(e)\n f.close()\n return None\n\n\ndef writemda32(X, fname):\n if file_extension(fname) == '.npy':\n return writenpy32(X, fname)\n return _writemda(X, fname, 'float32')\n\n\ndef writemda64(X, fname):\n if file_extension(fname) == '.npy':\n return writenpy64(X, fname)\n return _writemda(X, fname, 'float64')\n\n\ndef writemda8(X, fname):\n if file_extension(fname) == '.npy':\n return writenpy8(X, fname)\n return _writemda(X, fname, 'uint8')\n\n\ndef writemda32i(X, fname):\n if file_extension(fname) == '.npy':\n return writenpy32i(X, fname)\n return _writemda(X, fname, 'int32')\n\n\ndef writemda32ui(X, fname):\n if file_extension(fname) == '.npy':\n return writenpy32ui(X, fname)\n return _writemda(X, fname, 'uint32')\n\n\ndef writemda16i(X, fname):\n if file_extension(fname) == '.npy':\n return writenpy16i(X, fname)\n return _writemda(X, fname, 'int16')\n\n\ndef writemda16ui(X, fname):\n if file_extension(fname) == '.npy':\n return writenpy16ui(X, fname)\n return _writemda(X, fname, 'uint16')\n\n\ndef writemda(X, fname, *, dtype):\n 
return _writemda(X, fname, dtype)\n\n\ndef _writemda(X, fname, dt):\n num_bytes_per_entry = get_num_bytes_per_entry_from_dt(dt)\n dt_code = _dt_code_from_dt(dt)\n if dt_code is None:\n print(\"Unexpected data type: {}\".format(dt))\n return False\n\n if type(fname) == str:\n f = open(fname, 'wb')\n else:\n f = fname\n try:\n _write_int32(f, dt_code)\n _write_int32(f, num_bytes_per_entry)\n _write_int32(f, X.ndim)\n for j in range(0, X.ndim):\n _write_int32(f, X.shape[j])\n # This is how I do column-major order\n # A=np.reshape(X,X.size,order='F').astype(dt)\n # A.tofile(f)\n\n bytes0 = X.astype(dt).tobytes(order='F')\n f.write(bytes0)\n\n if type(fname) == str:\n f.close()\n return True\n except Exception as e: # catch *all* exceptions\n traceback.print_exc()\n print(e)\n if type(fname) == str:\n f.close()\n return False\n\n\ndef readnpy(path):\n return np.load(path)\n\n\ndef writenpy8(X, path):\n return _writenpy(X, path, dtype='int8')\n\n\ndef writenpy32(X, path):\n return _writenpy(X, path, dtype='float32')\n\n\ndef writenpy64(X, path):\n return _writenpy(X, path, dtype='float64')\n\n\ndef writenpy16i(X, path):\n return _writenpy(X, path, dtype='int16')\n\n\ndef writenpy16ui(X, path):\n return _writenpy(X, path, dtype='uint16')\n\n\ndef writenpy32i(X, path):\n return _writenpy(X, path, dtype='int32')\n\n\ndef writenpy32ui(X, path):\n return _writenpy(X, path, dtype='uint32')\n\n\ndef writenpy(X, path, *, dtype):\n return _writenpy(X, path, dtype=dtype)\n\n\ndef _writenpy(X, path, *, dtype):\n np.save(path, X.astype(dtype=dtype, copy=False)) # astype will always create copy if dtype does not match\n # apparently allowing pickling is a security issue. (according to the docs) ??\n # np.save(path,X.astype(dtype=dtype,copy=False),allow_pickle=False) # astype will always create copy if dtype does not match\n return True\n\n\ndef appendmda(X, path):\n if file_extension(path) == '.npy':\n raise Exception('appendmda not yet implemented for .npy files')\n H = _read_header(path)\n if H is None:\n print(\"Problem reading header of: {}\".format(path))\n return None\n if len(H.dims) != len(X.shape):\n print(\"Incompatible number of dimensions in appendmda\", H.dims, X.shape)\n return None\n num_entries_old = np.product(H.dims)\n num_dims = len(H.dims)\n for j in range(num_dims - 1):\n if X.shape[j] != X.shape[j]:\n print(\"Incompatible dimensions in appendmda\", H.dims, X.shape)\n return None\n H.dims[num_dims - 1] = H.dims[num_dims - 1] + X.shape[num_dims - 1]\n try:\n _write_header(path, H, rewrite=True)\n f = open(path, \"r+b\")\n f.seek(H.header_size + H.num_bytes_per_entry * num_entries_old)\n A = np.reshape(X, X.size, order='F').astype(H.dt)\n A.tofile(f)\n f.close()\n except Exception as e: # catch *all* exceptions\n print(e)\n f.close()\n return False\n\n\ndef file_extension(fname):\n if type(fname) == str:\n filename, ext = os.path.splitext(fname)\n return ext\n else:\n return None\n\n\ndef _read_int32(f):\n return struct.unpack('<i', f.read(4))[0]\n\n\ndef _read_int64(f):\n return struct.unpack('<q', f.read(8))[0]\n\n\ndef _write_int32(f, val):\n f.write(struct.pack('<i', val))\n\n\ndef _write_int64(f, val):\n f.write(struct.pack('<q', val))\n\n\ndef _header_from_file(f):\n try:\n dt_code = _read_int32(f)\n num_bytes_per_entry = _read_int32(f)\n num_dims = _read_int32(f)\n uses64bitdims = False\n if num_dims < 0:\n uses64bitdims = True\n num_dims = -num_dims\n if num_dims < 1 or num_dims > 6: # allow single dimension as of 12/6/17\n print(\"Invalid number of dimensions: 
{}\".format(num_dims))\n return None\n dims = []\n dimprod = 1\n if uses64bitdims:\n for j in range(0, num_dims):\n tmp0 = _read_int64(f)\n dimprod = dimprod * tmp0\n dims.append(tmp0)\n else:\n for j in range(0, num_dims):\n tmp0 = _read_int32(f)\n dimprod = dimprod * tmp0\n dims.append(tmp0)\n dt = _dt_from_dt_code(dt_code)\n if dt is None:\n print(\"Invalid data type code: {}\".format(dt_code))\n return None\n H = MdaHeader(dt, dims)\n if uses64bitdims:\n H.uses64bitdims = True\n H.header_size = 3 * 4 + H.num_dims * 8\n return H\n except Exception as e: # catch *all* exceptions\n print(e)\n return None\n" ]
[ [ "pandas.concat", "pandas.read_csv", "pandas.MultiIndex.from_tuples", "pandas.DataFrame" ], [ "numpy.array_equal", "numpy.isnan", "numpy.memmap", "numpy.all", "numpy.random.randn", "numpy.zeros" ], [ "numpy.product", "numpy.fromfile", "numpy.unique", "numpy.reshape", "numpy.arange", "numpy.rint", "numpy.ones", "numpy.concatenate", "numpy.prod", "numpy.argsort", "numpy.load", "numpy.array", "numpy.zeros", "numpy.where", "numpy.loadtxt" ] ]
mrjavoman/Image-Super-Resolution-via-Iterative-Refinement
[ "d353bbcbc667e7ad5da739c7d1b343a44afb88c9" ]
[ "sr.py" ]
[ "import torch\nimport data as Data\nimport model as Model\nimport argparse\nimport logging\nimport core.logger as Logger\nimport core.metrics as Metrics\nfrom tensorboardX import SummaryWriter\nimport os\nimport numpy as np\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--config', type=str, default='config/sr_sr3_16_128.json',\n help='JSON file for configuration')\n parser.add_argument('-p', '--phase', type=str, choices=['train', 'val'],\n help='Run either train(training) or val(generation)', default='train')\n parser.add_argument('-gpu', '--gpu_ids', type=str, default=None)\n parser.add_argument('-debug', '-d', action='store_true')\n\n # parse configs\n args = parser.parse_args()\n opt = Logger.parse(args)\n # Convert to NoneDict, which return None for missing key.\n opt = Logger.dict_to_nonedict(opt)\n\n # logging\n torch.backends.cudnn.enabled = True\n torch.backends.cudnn.benchmark = True\n\n Logger.setup_logger(None, opt['path']['log'],\n 'train', level=logging.INFO, screen=True)\n Logger.setup_logger('val', opt['path']['log'], 'val', level=logging.INFO)\n logger = logging.getLogger('base')\n logger.info(Logger.dict2str(opt))\n tb_logger = SummaryWriter(log_dir=opt['path']['tb_logger'])\n\n # dataset\n for phase, dataset_opt in opt['datasets'].items():\n if phase == 'train' and args.phase != 'val':\n train_set = Data.create_dataset(dataset_opt, phase)\n train_loader = Data.create_dataloader(\n train_set, dataset_opt, phase)\n elif phase == 'val':\n val_set = Data.create_dataset(dataset_opt, phase)\n val_loader = Data.create_dataloader(\n val_set, dataset_opt, phase)\n logger.info('Initial Dataset Finished')\n\n # model\n diffusion = Model.create_model(opt)\n logger.info('Initial Model Finished')\n\n # Train\n current_step = diffusion.begin_step\n current_epoch = diffusion.begin_epoch\n n_iter = opt['train']['n_iter']\n\n if opt['path']['resume_state']:\n logger.info('Resuming training from epoch: {}, iter: {}.'.format(\n current_epoch, current_step))\n\n diffusion.set_new_noise_schedule(\n opt['model']['beta_schedule'][opt['phase']], schedule_phase=opt['phase'])\n if opt['phase'] == 'train':\n while current_step < n_iter:\n current_epoch += 1\n for _, train_data in enumerate(train_loader):\n current_step += 1\n if current_step > n_iter:\n break\n diffusion.feed_data(train_data)\n diffusion.optimize_parameters()\n # log\n if current_step % opt['train']['print_freq'] == 0:\n logs = diffusion.get_current_log()\n message = '<epoch:{:3d}, iter:{:8,d}> '.format(\n current_epoch, current_step)\n for k, v in logs.items():\n message += '{:s}: {:.4e} '.format(k, v)\n tb_logger.add_scalar(k, v, current_step)\n logger.info(message)\n\n # validation\n if current_step % opt['train']['val_freq'] == 0:\n avg_psnr = 0.0\n idx = 0\n result_path = '{}/{}'.format(opt['path']\n ['results'], current_epoch)\n os.makedirs(result_path, exist_ok=True)\n\n diffusion.set_new_noise_schedule(\n opt['model']['beta_schedule']['val'], schedule_phase='val')\n for _, val_data in enumerate(val_loader):\n idx += 1\n diffusion.feed_data(val_data)\n diffusion.test(continous=False)\n visuals = diffusion.get_current_visuals()\n sr_img = Metrics.tensor2img(visuals['SR']) # uint8\n hr_img = Metrics.tensor2img(visuals['HR']) # uint8\n lr_img = Metrics.tensor2img(visuals['LR']) # uint8\n fake_img = Metrics.tensor2img(visuals['INF']) # uint8\n\n # generation\n Metrics.save_img(\n hr_img, '{}/{}_{}_hr.png'.format(result_path, current_step, idx))\n Metrics.save_img(\n sr_img, 
'{}/{}_{}_sr.png'.format(result_path, current_step, idx))\n Metrics.save_img(\n lr_img, '{}/{}_{}_lr.png'.format(result_path, current_step, idx))\n Metrics.save_img(\n fake_img, '{}/{}_{}_inf.png'.format(result_path, current_step, idx))\n tb_logger.add_image(\n 'Iter_{}'.format(current_step),\n np.transpose(np.concatenate(\n (fake_img, sr_img, hr_img), axis=1), [2, 0, 1]),\n idx)\n avg_psnr += Metrics.calculate_psnr(\n sr_img, hr_img)\n\n avg_psnr = avg_psnr / idx\n diffusion.set_new_noise_schedule(\n opt['model']['beta_schedule']['train'], schedule_phase='train')\n # log\n logger.info('# Validation # PSNR: {:.4e}'.format(avg_psnr))\n logger_val = logging.getLogger('val') # validation logger\n logger_val.info('<epoch:{:3d}, iter:{:8,d}> psnr: {:.4e}'.format(\n current_epoch, current_step, avg_psnr))\n # tensorboard logger\n tb_logger.add_scalar('psnr', avg_psnr, current_step)\n\n if current_step % opt['train']['save_checkpoint_freq'] == 0:\n logger.info('Saving models and training states.')\n diffusion.save_network(current_epoch, current_step)\n # save model\n logger.info('End of training.')\n else:\n logger.info('Begin Model Evaluation.')\n avg_psnr = 0.0\n avg_ssim = 0.0\n idx = 0\n result_path = '{}'.format(opt['path']['results'])\n os.makedirs(result_path, exist_ok=True)\n for _, val_data in enumerate(val_loader):\n idx += 1\n diffusion.feed_data(val_data)\n diffusion.test(continous=True)\n visuals = diffusion.get_current_visuals()\n\n hr_img = Metrics.tensor2img(visuals['HR']) # uint8\n lr_img = Metrics.tensor2img(visuals['LR']) # uint8\n fake_img = Metrics.tensor2img(visuals['INF']) # uint8\n\n sr_img_mode = 'grid'\n if sr_img_mode == 'single':\n # single img series\n sr_img = visuals['SR'] # uint8\n sample_num = sr_img.shape[0]\n for iter in range(0, sample_num):\n Metrics.save_img(\n Metrics.tensor2img(sr_img[iter]), '{}/{}_{}_sr_{}.png'.format(result_path, current_step, idx, iter))\n else:\n # grid img\n sr_img = Metrics.tensor2img(visuals['SR']) # uint8\n Metrics.save_img(\n sr_img, '{}/{}_{}_sr_process.png'.format(result_path, current_step, idx))\n Metrics.save_img(\n Metrics.tensor2img(visuals['SR'][-1]), '{}/{}_{}_sr.png'.format(result_path, current_step, idx))\n\n Metrics.save_img(\n hr_img, '{}/{}_{}_hr.png'.format(result_path, current_step, idx))\n Metrics.save_img(\n lr_img, '{}/{}_{}_lr.png'.format(result_path, current_step, idx))\n Metrics.save_img(\n fake_img, '{}/{}_{}_inf.png'.format(result_path, current_step, idx))\n\n # generation\n avg_psnr += Metrics.calculate_psnr(\n Metrics.tensor2img(visuals['SR'][-1]), hr_img)\n avg_ssim += Metrics.calculate_ssim(\n Metrics.tensor2img(visuals['SR'][-1]), hr_img)\n avg_psnr = avg_psnr / idx\n avg_ssim = avg_ssim / idx\n\n # log\n logger.info('# Validation # PSNR: {:.4e}'.format(avg_psnr))\n logger.info('# Validation # SSIM: {:.4e}'.format(avg_ssim))\n logger_val = logging.getLogger('val') # validation logger\n logger_val.info('<epoch:{:3d}, iter:{:8,d}> psnr: {:.4e}, ssim:{:.4e}'.format(\n current_epoch, current_step, avg_psnr, avg_ssim))\n" ]
[ [ "numpy.concatenate" ] ]
ShiKaiWi/python-practice
[ "2ce82bd778b9a4022bdd26d0a3e1bee2ebec6f51", "2ce82bd778b9a4022bdd26d0a3e1bee2ebec6f51" ]
[ "CVlib/GaussianFilter.py", "tf_mnist_tutorial_cnn.py" ]
[ "import numpy as np\nimport pylab as plt\nimport mahotas as mh\n\nclass GaussianFilter:\n def __init__(self,img,sigma = 1,windsize = 3):\n self.img = mh.imread(img)\n self.M,self.N = self.img.shape\n self.windsize = windsize \n self.sigma = sigma\n self.gaussian_kernel = self.kernel()\n self.halfsize = self.windsize // 2\n\n def convolution(self,window):\n size = self.gaussian_kernel.size\n if size != window.size:\n return None\n return np.sum(self.gaussian_kernel * window)\n\n def kernel(self):\n N = self.windsize // 2\n x = np.linspace(-N,N,2*N+1)\n y = np.linspace(-N,N,2*N+1)\n xv,yv = np.meshgrid(x,y,indexing='xy')\n H = np.exp(-(np.square(xv)+np.square(yv))/(2*self.sigma*self.sigma))\n H = H / H.sum()\n return np.reshape(H,(self.windsize*self.windsize,1))\n\n def filter(self):\n imgnew = np.zeros((self.M,self.N))\n w = self.halfsize\n for i in range(0,self.M):\n for j in range(0,self.N):\n if i<w or j<w or i>self.M-1-w or j>self.N-1-w:\n imgnew[i][j] = self.img[i][j]\n continue\n imgnew[i][j]= self.convolution(np.reshape(self.img[i-w:i+1+w,j-w:j+1+w],(self.windsize*self.windsize,1)))\n return imgnew \n\n def demo(self):\n plt.imshow(self.filter())\n plt.gray()\n plt.show()\n\n", "import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape,stddev=0.1)\n return tf.Variable(initial)\n\ndef bias_variable(shape):\n initial = tf.constant(0.1,shape=shape)\n return tf.Variable(initial)\n\ndef conv2d(x,W):\n return tf.nn.conv2d(x,W,strides=[1,1,1,1],padding='SAME')\n\ndef max_pool_2x2(x):\n return tf.nn.max_pool(x,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')\n\n\nmnist = input_data.read_data_sets('MNIST_data',one_hot=True)\n\nsess = tf.InteractiveSession();\nx = tf.placeholder(tf.float32,shape=[None,784])\ny_ = tf.placeholder(tf.float32,shape=[None,10])\n\nW = tf.Variable(tf.zeros([784,10]))\nb = tf.Variable(tf.zeros([10]))\n\n\ny = tf.matmul(x,W) + b\n\n\n\nW_conv1 = weight_variable([5,5,1,32])\nb_conv1 = bias_variable([32])\n\nx_image = tf.reshape(x,[-1,28,28,1])\n\n\nh_conv1 = tf.nn.relu(conv2d(x_image,W_conv1) + b_conv1)\nprint(h_conv1.get_shape())\nh_pool1 = max_pool_2x2(h_conv1)\n\n\nW_conv2 = weight_variable([5,5,32,64])\nb_conv2 = bias_variable([64])\n\nh_conv2 = tf.nn.relu(conv2d(h_pool1,W_conv2) + b_conv2)\nprint(h_conv2.get_shape())\nh_pool2 = max_pool_2x2(h_conv2)\n\nW_fc1 = weight_variable([7*7*64,1024])\nb_fc1 = bias_variable([1024])\n\nh_pool2_flat = tf.reshape(h_pool2,[-1,7*7*64])\nh_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat,W_fc1)+b_fc1)\n\n\nkeep_prob = tf.placeholder(tf.float32)\nh_fc1_drop = tf.nn.dropout(h_fc1,keep_prob)\n\n\nW_fc2 = weight_variable([1024,10])\nb_fc2 = bias_variable([10])\n\ny = tf.matmul(h_fc1_drop,W_fc2) + b_fc2\n\ncross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y,y_))\ntrain_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\ncorrect_predication = tf.equal(tf.argmax(y,1),tf.argmax(y_,1))\naccuracy = tf.reduce_mean(tf.cast(correct_predication,tf.float32))\n\nsess.run(tf.initialize_all_variables())\n\nfor i in range(20000):\n batch = mnist.train.next_batch(100)\n if i%100 == 0:\n train_accuracy = accuracy.eval(feed_dict={x:batch[0],y_:batch[1],keep_prob:1.0})\n print(\"step %d, trainning accuracy %g\" % (i,train_accuracy))\n\n train_step.run(feed_dict={x:batch[0],y_:batch[1],keep_prob:0.5})\n\n\n\nprint(\"test accurary %g\" % accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels,keep_prob:1.0}))\n\n\n\n\n\n\n" ]
[ [ "numpy.square", "numpy.linspace", "numpy.reshape", "numpy.meshgrid", "numpy.zeros", "numpy.sum" ], [ "tensorflow.matmul", "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.truncated_normal", "tensorflow.InteractiveSession", "tensorflow.Variable", "tensorflow.constant", "tensorflow.nn.max_pool", "tensorflow.zeros", "tensorflow.reshape", "tensorflow.cast", "tensorflow.placeholder", "tensorflow.initialize_all_variables", "tensorflow.train.AdamOptimizer", "tensorflow.argmax", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "tensorflow.nn.conv2d", "tensorflow.nn.dropout" ] ]
mihirp1998/EmbLang
[ "169b0468ccda554896973bcc226afb3e762a70e7", "169b0468ccda554896973bcc226afb3e762a70e7", "169b0468ccda554896973bcc226afb3e762a70e7" ]
[ "vis_imagine_static_voxels/lib_classes/modules/embnet2.py", "vis_imagine_static_voxels/test_voxel.py", "vis_imagine_static_voxels/utils/pointcloud.py" ]
[ "\nfrom lib_classes.modules.utils_basic import *\nfrom lib_classes.modules import utils_improc\nimport constants as const\nimport ipdb\nst = ipdb.set_trace\nfrom sklearn.decomposition import PCA\n\n\nclass SimpleNetBlock(tf.keras.Model):\n def __init__(self,out_chans, blk_num,istrain):\n super(SimpleNetBlock, self).__init__()\n\n self.out_chans = out_chans\n self.istrain = istrain\n self.blk_num = blk_num\n\n \n self.conv2d = tf.keras.layers.Conv2D(out_chans*(2**self.blk_num) ,kernel_size=3, strides=2, activation=tf.nn.leaky_relu,\\\n padding='VALID',kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=1e-3))\n self.batchnorm = tf.keras.layers.BatchNormalization()\n \n self.conv2d_1 = tf.keras.layers.Conv2D(out_chans*(2**self.blk_num) ,kernel_size=3, dilation_rate=2, activation=tf.nn.leaky_relu,\\\n padding='VALID',kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=1e-3))\n self.batchnorm_1 = tf.keras.layers.BatchNormalization()\n\n self.conv2d_transpose = tf.keras.layers.Conv2DTranspose(out_chans, kernel_size=[4,4], strides=2,padding='SAME',\\\n activation=tf.nn.leaky_relu,kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=1e-3))\n self.batchnorm_2 = tf.keras.layers.BatchNormalization()\n\n def call(self,feat,blk_num):\n feat = tf.pad(tensor=feat, paddings=[[0,0],[1,1],[1,1],[0,0]], mode='SYMMETRIC')\n feat = self.conv2d(feat)\n print_shape(feat)\n feat = self.batchnorm(feat, self.istrain)\n \n feat = tf.pad(tensor=feat, paddings=[[0,0],[2,2],[2,2],[0,0]], mode='SYMMETRIC')\n feat = self.conv2d_1(feat)\n print_shape(feat)\n feat = self.batchnorm_1(feat, self.istrain)\n if blk_num > 0:\n upfeat = self.conv2d_transpose(feat)\n print_shape(upfeat)\n upfeat = self.batchnorm_2(upfeat, self.istrain)\n else:\n upfeat = feat\n return feat, upfeat\n\nclass SimpleNet(tf.keras.Model):\n # slim = tf.contrib.slim\n def __init__(self,out_chans,istrain):\n super(SimpleNet, self).__init__()\n nblocks = 2\n \n self.out_chans = out_chans\n self.nblocks = nblocks\n self.SimpleNetBlocks = []\n self.istrain = istrain\n self.conv2d = tf.keras.layers.Conv2D( out_chans ,kernel_size=5, activation=None)\n for blk_num in range(self.nblocks):\n self.SimpleNetBlocks.append(SimpleNetBlock(out_chans,blk_num, self.istrain))\n\n def call(self,input):\n print(\"rgb\")\n print_shape(input)\n B, H, W, C = input.shape.as_list()\n normalizer_fn = None\n weights_initializer = tf.compat.v1.initializers.truncated_normal(stddev=1e-3)\n\n upfeats = list()\n feat = input\n # tf.compat.v1.summary.histogram(feat.name, feat)\n for blk_num in range(self.nblocks):\n feat, upfeat = self.SimpleNetBlocks[blk_num](feat, blk_num)\n upfeats.append(upfeat)\n upfeat = tf.concat(upfeats, axis = 3)\n # st()\n upfeat = tf.pad(tensor=upfeat, paddings=[[0,0],[2,2],[2,2],[0,0]], mode='SYMMETRIC')\n emb = self.conv2d(upfeat)\n # emb = slim.conv2d(upfeat, out_chans, kernel_size=1, activation_fn=None,\n # normalizer_fn=None, scope='conv_final')\n print_shape(emb)\n print(\"rgb_trans\")\n return emb\n\n\nclass embnet2(tf.keras.Model):\n def __init__(self,istrain):\n super(embnet2, self).__init__()\n self.simpleNet = SimpleNet(const.emb_dim,istrain=istrain)\n self.beta = tf.Variable(1.2, dtype=tf.float32, name='margin_beta')\n\n def batch_norm(x, istrain):\n # return tf.identity(x)\n # decay of 0.99 can take ~1k steps to learn (according to my plots)\n return self.batchnorm(x, decay=0.9, \n is_training=istrain,\n # updates_collections=None,\n center=True,\n scale=True,\n reuse=False)\n def get_distance(self,x):\n n = 
x.shape.as_list()[0]\n square = tf.reduce_sum(input_tensor=x**2, axis=1, keepdims=True)\n dis_square = square + tf.transpose(a=square) - 2.0 * tf.matmul(x, tf.transpose(a=x)) + EPS \n # st()\n return tf.sqrt(dis_square + tf.eye(n))\n\n def reduce_emb(self,emb, inbound=None, together=False):\n ## emb -- [S,H/2,W/2,C], inbound -- [S,H/2,W/2,1]\n ## Reduce number of chans to 3 with PCA. For vis.\n S,H,W,C = emb.shape.as_list()\n keep = 3\n if together:\n # emb = tf.py_function(self.pca_embed_together, [emb,keep], tf.float32)\n emb = tf.convert_to_tensor(self.pca_embed_together(emb,keep))\n\n else:\n emb = tf.py_function(self.pca_embed, [emb,keep], tf.float32)\n emb.set_shape([S,H,W,keep])\n emb = normalize(emb) - 0.5\n if inbound is not None:\n emb_inbound = emb*inbound\n else:\n emb_inbound = None\n return emb, emb_inbound\n\n def pca_embed_together(self,emb, keep):\n ## emb -- [S,H/2,W/2,C]\n ## keep is the number of principal components to keep\n ## Helper function for reduce_emb.\n S, H, W, K = np.shape(emb)\n if np.isnan(emb).any():\n out_img = np.zeros([S,H,W,keep], dtype=emb.dtype)\n pixelskd = np.reshape(emb, (S*H*W, K))\n P = PCA(keep)\n P.fit(pixelskd)\n pixels3d = P.transform(pixelskd)\n out_img = np.reshape(pixels3d, [S,H,W,keep]).astype(np.float32)\n if np.isnan(out_img).any():\n out_img = np.zeros([S,H,W,keep], dtype=np.float32)\n return out_img\n def distance_sampling(self,x, cutoff, nonzero_loss_cutoff, n_split):\n n, d = x.shape.as_list()\n split = n/n_split\n # st()\n distance = tf.maximum(self.get_distance(x), cutoff)\n log_weights = ((2.0 - float(d)) * tf.math.log(distance)\n - (float(d-3)/2) * tf.math.log(1.0 - 0.25*(distance**2)))\n # st()\n weights = tf.exp(log_weights - tf.reduce_max(input_tensor=log_weights))\n\n mask = np.ones(weights.shape)\n for i in range(0, n):\n for idx_split in range(n_split):\n #mask[i,i] = 0\n # st()\n mask[i,int((i+split*idx_split)%n)] = 0\n # st()\n mask = tf.constant(mask, tf.float32)\n weights = weights * mask * tf.cast((distance < nonzero_loss_cutoff), tf.float32)\n weights = weights / tf.reduce_sum(input_tensor=weights, axis=1, keepdims=True)\n #a_indices = tf.random.uniform([n, 1], maxval=n, dtype=tf.int32)\n a_indices = tf.random.shuffle(tf.range(start=0, limit=n, delta=1, dtype=tf.int32))\n a_indices = tf.reshape(a_indices, [n, 1])\n #positive samples: interval equals to split\n # st()\n split_indices =int(split)*tf.random.uniform([n,1], minval=1, maxval=n_split, dtype=tf.int32)\n p_indices = tf.floormod((a_indices + split_indices), tf.constant(n, dtype=tf.int32))\n weights_sampled = tf.gather_nd(weights, a_indices)\n n_indices = tf.random.categorical(tf.math.log(weights_sampled), 1)\n n_indices = tf.reshape(n_indices, [n, 1])\n #print(a_indices.shape.as_list(), p_indices.shape.as_list(), n_indices.shape.as_list())\n return a_indices, p_indices, n_indices #shape: [n, 1]\n\n\n # def SimpleNetBlock(feat, blk_num, out_chans, istrain):\n # from tensorflow.contrib.slim import conv2d, conv2d_transpose\n\n # with tf.compat.v1.variable_scope('Block%d' % blk_num):\n # feat = tf.pad(tensor=feat, paddings=[[0,0],[1,1],[1,1],[0,0]], mode='SYMMETRIC')\n # feat = conv2d(feat, out_chans*(2**blk_num), stride=2, scope='conv')\n # print_shape(feat)\n # feat = batch_norm(feat, istrain)\n \n # feat = tf.pad(tensor=feat, paddings=[[0,0],[2,2],[2,2],[0,0]], mode='SYMMETRIC')\n # feat = conv2d(feat, out_chans*(2**blk_num), rate=2, scope='dilconv')\n # print_shape(feat)\n # feat = batch_norm(feat, istrain)\n # if blk_num > 0:\n # upfeat = 
conv2d_transpose(feat, out_chans, kernel_size=[4,4], stride=2,\n # padding='SAME', scope='deconv')\n # print_shape(upfeat)\n # upfeat = batch_norm(upfeat, istrain)\n # else:\n # upfeat = feat\n # return feat, upfeat\n def margin_loss(self,emb, n_sampling, n_split):\n alpha = 0.2\n cutoff = 0.5\n nonzero_loss_cutoff = 1.4\n a_indices, p_indices, n_indices = self.distance_sampling(emb, cutoff, nonzero_loss_cutoff, n_split)\n emb_a = tf.gather_nd(emb, a_indices)\n emb_p = tf.gather_nd(emb, p_indices)\n emb_n = tf.gather_nd(emb, n_indices)\n d_ap = tf.sqrt(tf.reduce_sum(input_tensor=(emb_p - emb_a)**2, axis=1) + 1e-8)\n d_an = tf.sqrt(tf.reduce_sum(input_tensor=(emb_n - emb_a)**2, axis=1) + 1e-8)\n\n loss_p = tf.maximum(d_ap - self.beta + alpha, 0.0)\n loss_n = tf.maximum(self.beta - d_an + alpha, 0.0)\n\n pair_num = tf.reduce_sum(input_tensor=tf.cast(loss_p > 0.0, tf.float32)+tf.cast(loss_n > 0.0, tf.float32))\n loss = tf.reduce_sum(input_tensor=loss_p + loss_n)/pair_num\n\n return loss\n def emb_vis(self,rgb, emb, emb_pred, inbound):\n ## emb,emb_pred -- [S,H/2,W/2,C] where C is length of emb vector per pixel.\n ## rgb -- [S,H/2,W/2,3], inbound -- [S,H/2,W/2,1]\n S,H,W,C = emb.shape.as_list()\n embs = tf.concat([emb, emb_pred], axis=0)\n inbounds = tf.concat([inbound, inbound], axis=0)\n # emb, emb_inbound = reduce_emb(emb, inbound)\n # emb_pred, emb_pred_inbound = reduce_emb(emb_pred, inbound)\n \n embs, embs_inbound = self.reduce_emb(embs, inbounds, together=True)\n # emb_inbound, emb_pred_inbound = tf.split(embs_inbound, 2, axis=0)\n emb, emb_pred = tf.split(embs, 2, axis=0)\n rgb_emb_vis = tf.concat([rgb, emb, emb_pred], axis=2)\n # utils_improc.summ_rgb('rgb_emb_embpred', rgb_emb_vis)\n # return emb_inbound, emb_pred_inbound\n return emb, emb_pred\n\n\n # def EmbNet3D(emb_pred, emb, istrain):\n # total_loss = 0.0\n\n # with tf.variable_scope('emb3D'):\n # print 'EmbNet3D...'\n\n # B, H, W, D, C = emb_pred.shape.as_list()\n # # assert(C==hyp.emb_dim)\n \n # loss = margin_loss_3D(emb, emb_pred)\n # emb_pca, emb_pred_pca = emb_vis(rgb, emb, emb_pred)\n # total_loss = utils_misc.add_loss(total_loss, loss,\n # hyp.emb_coeff, 'margin_3D')\n\n # # smooth_loss = edge_aware_smooth_loss(emb, rgb)\n # # smooth_loss += edge_aware_smooth_loss(emb_pred, rgb)\n # # total_loss = utils_misc.add_loss(total_loss, smooth_loss,\n # # hyp.emb_smooth_coeff, 'smooth')\n\n # # l1_loss = l1_on_axis(emb-emb_pred)\n # # utils_improc.summ_oned('l1_loss', l1_loss)\n # # # l1_loss = reduce_masked_mean(l1_loss, inbound)\n # # total_loss = utils_misc.add_loss(total_loss, l1_loss,\n # # hyp.emb_l1_coeff, 'l1')\n\n # # # emb = emb / l2_on_axis(emb, axis=3)\n # # # emb_pred = emb_pred / l2_on_axis(emb_pred, axis=3)\n # # return total_loss, emb, emb_pred, emb_pca, emb_pred_pca\n # # # return total_loss\n # return total_loss\n\n\n # def margin_loss_3D(emb0, emb1):\n # # emb0 and emb1 are B x H x W x D x C\n # B,H,W,D,C = emb0.shape.as_list()\n # loss = 0.0\n # emb0_all = []\n # emb1_all = []\n # for s in range(B):\n # n_sampling = 960\n # sample_indicies = tf.random.uniform([n_sampling, 1], maxval=H*W*D, dtype=tf.int32)\n # emb0_s_ = tf.reshape(emb0[s], [H*W*D, C])\n # emb1_s_ = tf.reshape(emb1[s], [H*W*D, C])\n # emb0_s_ = tf.gather_nd(emb0_s_, sample_indicies)\n # emb1_s_ = tf.gather_nd(emb1_s_, sample_indicies)\n # # these are N x D\n # emb0_all.append(emb0_s_)\n # emb1_all.append(emb1_s_)\n # emb0_all = tf.concat(emb0_all, axis=0)\n # emb1_all = tf.concat(emb1_all, axis=0)\n # emb_all = tf.concat([emb0_all, emb1_all], 
axis=0)\n # n_split = 2\n # loss = margin_loss(emb_all, n_sampling, n_split) / float(B)\n # return loss\n def margin_loss_2D(self,emb, emb_pred):\n ## emb,emb_pred,emb_aug -- [S,H/2,W/2,C]\n ## Use lifted_struct_loss between emb,emb_pred,emb_aug treating\n ## every s in S as a separate loss.\n\n # losstype = hyp.emb_loss\n # assert losstype in {'lifted', 'npairs'}\n # losstype = 'lifted'\n B,H,W,C = emb.shape.as_list()\n losstype = 'margin'\n # S,H,W,C = emb.shape.as_list()\n loss = 0.0\n emb_all = []\n emb_pred_all = []\n for s in range(B):\n n_sampling = 960\n sample_indicies = tf.random.uniform([n_sampling, 1], maxval=H*W, dtype=tf.int32)\n emb_s_ = tf.reshape(emb[s], [H*W, C])\n emb_s_ = tf.gather_nd(emb_s_, sample_indicies)\n emb_pred_s_ = tf.reshape(emb_pred[s], [H*W, C])\n emb_pred_s_ = tf.gather_nd(emb_pred_s_, sample_indicies)\n emb_all.append(emb_s_)\n emb_pred_all.append(emb_pred_s_)\n\n emb_all = tf.concat(emb_all, axis=0)\n emb_pred_all = tf.concat(emb_pred_all, axis=0)\n emb_all = tf.concat([emb_all, emb_pred_all], axis=0)\n n_split = 2\n loss = self.margin_loss(emb_all, n_sampling, n_split) / float(B)\n return loss\n\n\n\n @tf.function\n def call(self,rgb, emb_pred):\n # rgb is [S,H,W,3]\n # inbound is [S,H,W,1]\n # emb_pred -- [S,H/2,W/2,C] where C is length of emb vector per pixel.\n\n ## Compute embs for `rgb` using EmbNet(SimpleNet) and\n ## compare/loss against `emb_pred`. Use loss only within\n ## the mask `inbound`.\n\n total_loss = 0.0\n # st()\n with tf.compat.v1.name_scope('emb'):\n # print 'EmbNet...'\n\n B, H, W, C = emb_pred.shape.as_list()\n assert(C==const.emb_dim)\n \n # inbound = tf.image.resize_nearest_neighbor(inbound, [H, W])\n inbound = tf.ones([B,H,W,1])\n\n # if hyp.emb_use_aug:\n # # ignore/replace emb_pred\n # rgb_aug = random_color_augs(rgb)\n # rgb_all = tf.concat([rgb, rgb_aug], axis=0)\n # emb_all = SimpleNet(rgb_all, istrain, C)\n # emb, emb_pred = tf.split(emb_all, 2, axis=0)\n # inbound = tf.ones_like(inbound)\n # emb_aug = None # support old code that used BOTH aug and pred\n # else:\n emb = self.simpleNet(rgb)\n \n emb = emb / (EPS + l2_on_axis(emb, axis=3))\n emb_pred = emb_pred / (EPS + l2_on_axis(emb_pred, axis=3))\n # st()\n emb_aug = None # support old code that used BOTH aug and pred\n \n rgb = tf.image.resize(rgb, [H, W], method=tf.image.ResizeMethod.BILINEAR)\n\n loss = self.margin_loss_2D(emb, emb_pred)\n # emb_pca, emb_pred_pca = self.emb_vis(rgb, emb, emb_pred, inbound)\n\n total_loss = add_loss(total_loss, loss,\n const.emb_coeff, 'metric')\n\n # loss = metric_loss(rgb, emb, emb_pred, emb_aug, inbound)\n # emb_pca, emb_pred_pca = emb_vis(rgb, emb, emb_pred, inbound)\n # total_loss = utils_misc.add_loss(total_loss, loss,\n # hyp.emb_coeff, 'metric')\n\n # smooth_loss = edge_aware_smooth_loss(emb, rgb)\n # smooth_loss += edge_aware_smooth_loss(emb_pred, rgb)\n # total_loss = utils_misc.add_loss(total_loss, smooth_loss,\n # hyp.emb_smooth_coeff, 'smooth')\n\n l1_loss_im = l1_on_chans(emb-emb_pred)\n # utils_improc.summ_oned('l1_loss', l1_loss_im*inbound)\n l1_loss = reduce_masked_mean(l1_loss_im, inbound)\n total_loss = add_loss(total_loss, l1_loss,\n const.emb_l1_coeff, 'l1')\n\n # loss_3D = margin_loss_3D(emb3D_g, emb3D_e)\n # total_loss = utils_misc.add_loss(total_loss, loss_3D,\n # hyp.emb_3D_coeff, '3D')\n\n # dx, dy, dz = gradient3D(emb3D_e, absolute=True)\n # smooth_vox = tf.reduce_mean(dx+dy+dx, axis=4, keepdims=True)\n # smooth_loss = tf.reduce_mean(smooth_vox)\n # total_loss = utils_misc.add_loss(total_loss, smooth_loss, 
hyp.emb_smooth3D_coeff, 'smooth3D')\n # total_loss, emb, emb_pred, inbound, emb_pca, emb_pred_pca\n # emb = emb / l2_on_axis(emb, axis=3)\n # emb_pred = emb_pred / l2_on_axis(emb_pred, axis=3)\n return total_loss,rgb,emb,emb_pred\n # return total_loss", "import tensorflow as tf\n# tf.enable_eager_execution()\nfrom utils import binvox_rw\nfrom utils import voxel\nimport constants as const\nimport numpy as np\nfrom scipy.misc import imsave\nimport ipdb\nimport imageio\n\nst = ipdb.set_trace\n\ndef save_voxel(voxel_, filename, THRESHOLD=0.5):\n\tS1 = voxel_.shape[2]\n\tS2 = voxel_.shape[1]\n\tS3 = voxel_.shape[0]\n\t# st()\n\tbinvox_obj = binvox_rw.Voxels(\n\t\tnp.transpose(voxel_, [2, 1, 0]) >= THRESHOLD,\n\t\tdims = [S1, S2, S3],\n\t\ttranslate = [0.0, 0.0, 0.0],\n\t\tscale = 1.0,\n\t\taxis_order = 'xyz'\n\t) \n\n\twith open(filename, \"wb\") as f:\n\t\tbinvox_obj.write(f)\n\ndef rotate_voxels(rep,angle):\n\ta = binvox_rw.read_as_3d_array(open(\"tmpTest/outline_scale_47.0.binvox\",\"rb\"))\n\tval = a.data\n\n\tval = tf.convert_to_tensor(np.expand_dims(np.expand_dims(val,0),-1))\n\tphi,theta = angle\n\trot_mat = voxel.get_transform_matrix_tf([theta], [phi])\n\n\tproj_val = voxel.rotate_voxel(val,rot_mat)\n\tnum = np.where(proj_val>0.5)[0]\n\n\tproj_val = np.squeeze(proj_val)\n\tproj_val = proj_val >0.5\n\t# st()\n\tproj_imgZ = np.mean(proj_val,0)\n\n\timsave('{}/valRotate_phi_{}_theta_{}_fov_{}_Z.png'.format(rep,phi,theta,const.fov), proj_imgZ)\n\n\t# st()\n\tsave_voxel(np.squeeze(proj_val),\"{}/valRotate_THETA_{}_PHI_{}_fov_{}_.binvox\".format(rep,theta,phi,const.fov))\n# rotate_voxels\n# rotate_voxels(\"tmpTest\",[-20.0,0.0])\n\ndef project_voxel(rep):\n\ta = binvox_rw.read_as_3d_array(open(\"/Users/ashar/work/visual_imagination/prob_scene_gen/3dProbNeuralProgNet/data/CLEVR/clevr-dataset-gen/image_generation/output_90_20.binvox\",\"rb\"))\n\tval = a.data\n\tval = tf.convert_to_tensor(np.expand_dims(np.expand_dims(val,0),-1))\n\tproj_val = voxel.project_voxel(val)\n\tnum = np.where(proj_val>0.5)[0]\n\t# if len(num) > 0:\n\t# \tprint(\"found\")\n\t# \tfovs_working[fov] = len(num)\n\tproj_val = np.squeeze(proj_val)\n\tproj_val = proj_val >0.5\n\tproj_imgZ = np.mean(proj_val,0)\n\tproj_imgY = np.mean(proj_val,1)\n\tproj_imgX = np.mean(proj_val,2)\n\timsave('{}/valProject_fov_{}_Z.png'.format(rep,const.fov), proj_imgZ)\n\timsave('{}/valProject_fov_{}_Y.png'.format(rep,const.fov), proj_imgY)\n\timsave('{}/valProject_fov_{}_X.png'.format(rep,const.fov), proj_imgX)\n\n\tsave_voxel(proj_val,\"{}/valProject_fov_{}.binvox\".format(rep,const.fov))\n\n# project_voxel(\"tmpTest\")\n\n# unprojected_depth = (tf.expand_dims(inputs[:,:,:,:,1], 4) - const.radius) * (1/const.SCENE_SIZE)\ndef unproject(resize = False):\n # st()\n depth = np.array(imageio.imread(\"/Users/ashar/work/visual_imagination/prob_scene_gen/3dProbNeuralProgNet/data/CLEVR/clevr-dataset-gen/image_generation/rendered_depth_90_20.exr\", format='EXR-FI'))[:,:,0]\n depth = np.array(imageio.imread(\"/Users/ashar/work/visual_imagination/prob_scene_gen/3dProbNeuralProgNet/data/CLEVR/clevr-dataset-gen/output/CLEVR_64_36_MAYHEM_AGAIN/depth/train/CLEVR_new_000000/CLEVR_new_000000_180_40.exr\", format='EXR-FI'))[:,:,0]\n # depth =np.transpose(depth, [1, 0])\n inputs = depth * (100 - 0) + 0\n inputs.astype(np.float32)\n # st()\n if resize:\n inputs = tf.image.resize(inputs, (const.S, const.S))\n size = int(inputs.shape[1])\n inputs = np.expand_dims(np.expand_dims(inputs,axis=-1),0)\n #now unproject, to get our starting point\n inputs = 
voxel.unproject_image(inputs)\n\n #in addition, add on a z-map, and a local bias\n #copied from components.py\n meshgridz = tf.range(size, dtype = tf.float32)\n meshgridz = tf.reshape(meshgridz, (1, size, 1, 1))\n meshgridz = tf.tile(meshgridz, (1, 1, size, size))\n meshgridz = tf.expand_dims(meshgridz, axis = 4) \n meshgridz = (meshgridz + 0.5) / (size/2) - 1.0 #now (-1,1)\n # st()\n #get the rough outline\n # unprojected_mask = tf.expand_dims(inputs[:,:,:,:,0], 4)\n # unprojected_depth = tf.expand_dims(inputs[:,:,:,:,0], 4)\n unprojected_depth = (tf.expand_dims(inputs[:,:,:,:,0], 4) - const.radius) * (1/const.SCENE_SIZE)\n # return unprojected_depth\n if const.H > 32:\n outline_thickness = 0.1\n else:\n outline_thickness = 0.2\n # depth shell\n outline = tf.cast(tf.logical_and(\n unprojected_depth <= meshgridz,\n unprojected_depth + outline_thickness > meshgridz\n ), tf.float32)\n # outline *= unprojected_mask\n if True:\n #return tf.expand_dims(inputs[:,:,:,:,0], 4) #this is the unprojected mask\n unprojected_depth = np.squeeze(unprojected_depth)\n val = np.squeeze(outline)\n save_voxel(val, \"tmpTest/outline_scale_{}_{}.binvox\".format(const.fov, 180))\n save_voxel(unprojected_depth, \"tmpTest/unproj_depths_{}_{}.binvox\".format(const.fov, 180))\n return outline,unprojected_depth\n\n inputs_ = [inputs]\n if const.USE_MESHGRID:\n inputs_.append(meshgridz)\n if const.USE_OUTLINE:\n inputs_.append(outline)\n inputs = tf.concat(inputs_, axis = 4)\n return inputs\n\nunproject()\n\n\n", "import tensorflow as tf\nimport constants as const\nimport numpy as np\n\nfrom . import tfutil\nfrom . import camera\n\n\ndef Z_to_PC(Z):\n #BS x H x W x 1 -> BS x H*W x 3\n shape = Z.get_shape()\n bs = int(shape[0])\n h = int(shape[1])\n w = int(shape[2])\n Z = tf.reshape(Z, (bs, h, w))\n [grid_x1, grid_y1] = tfutil.meshgrid2D(bs, h, w)\n XYZ = camera.Camera2World(grid_x1, grid_y1, Z)\n return XYZ\n\n\ndef Z_to_PC_dxdy(Z, dx, dy):\n #BS x H x W x 1 -> BS x H*W x 3\n shape = Z.get_shape()\n bs = int(shape[0])\n h = int(shape[1])\n w = int(shape[2])\n Z = tf.reshape(Z, (bs, h, w))\n [grid_x1, grid_y1] = tfutil.meshgrid2D(bs, h, w)\n if len(dx.get_shape()) == 4:\n dx = tf.squeeze(dx, axis=3)\n dy = tf.squeeze(dy, axis=3)\n grid_x1 += dx\n grid_y1 += dy\n XYZ = camera.Camera2World(grid_x1, grid_y1, Z)\n return XYZ\n\n\ndef normalize_pc(pc, i):\n #remove offset\n X, Y, Z = tf.split(pc, 3, axis=2)\n Z -= 4.0\n pc = tf.concat([X, Y, Z], axis=2)\n\n r = camera.rotate_matrix(i * 20, -10 * const.PHI_IDX, order='phi').astype(np.float32)\n r = tf.expand_dims(r, axis=0)\n pc_t = tf.transpose(a=pc, perm=(0, 2, 1))\n pc = tf.transpose(a=tf.matmul(r, pc_t), perm=(0, 2, 1))\n return pc\n\n\ndef Zs_to_PC(Zs, dxs=None, dys=None):\n #in: BS x H x W x V x 1\n #out: BS x H*W*V x 3\n Zs = tf.unstack(Zs, axis=3)\n if dxs is not None:\n dxs_ = tf.unstack(dxs, axis=3)\n dys_ = tf.unstack(dys, axis=3)\n else:\n dxs_ = list(range(len(Zs)))\n dys_ = list(range(len(Zs)))\n\n pcs = []\n for i, (Z, dx, dy) in enumerate(zip(Zs, dxs_, dys_)):\n #BS x H x W x 1\n\n if dxs is None and dys is None:\n pc = Z_to_PC(Z)\n else:\n assert (dxs is not None) and (dys is not None)\n pc = Z_to_PC_dxdy(Z, dx, dy)\n\n #remove offset\n X, Y, Z = tf.split(pc, 3, axis=2)\n Z -= 4.0\n pc = tf.concat([X, Y, Z], axis=2)\n\n r = camera.rotate_matrix(i * 20, -10 * const.PHI_IDX, order='phi').astype(np.float32)\n r = tf.expand_dims(r, axis=0)\n pc_t = tf.transpose(a=pc, perm=(0, 2, 1))\n pc = tf.transpose(a=tf.matmul(r, pc_t), perm=(0, 2, 1))\n\n pcs.append(pc)\n 
return tf.concat(pcs, axis=1)\n\n\ndef RGBAZs_to_CPC(RGBs, Zs, As, dx=None, dy=None):\n #in: BS x H x W x V x 1, BS x H x W x V x 3, BS x H x W x V x 1\n #out: BS x H*W*V x (3+3+1)\n PC = Zs_to_PC(Zs, dx, dy)\n f = lambda x, d: tf.reshape(tf.transpose(a=x, perm=(0, 3, 1, 2, 4)), (const.BS, -1, d))\n RGBs = f(RGBs, 3)\n As = f(As, 1)\n CPC = tf.concat([PC, RGBs, As], axis=2)\n return CPC\n\n\ndef normalize_point_coords(pts, theta, phi):\n coords = pts[:, :3]\n tail = pts[:, 3:]\n\n rot_mat = camera.tf_rotate_matrix(-theta, phi)\n coords = tf.transpose(a=tf.matmul(rot_mat, tf.transpose(a=coords)))\n coords = coords + tf.constant([0.0, 0.0, 4.0])\n\n pts = tf.concat([coords, tail], axis=1)\n return pts\n\n\ndef preprocess_threshold_sparsity(pts):\n raise Exception('percentile() not implemented in tf 2.0. Replace threshold variable later')\n nb = 10000\n sparsity = 0.1\n\n alpha = pts[:, 6]\n\n #_, keep_top = tf.nn.top_k(alpha, k=int(nb/sparsity), sorted=False)\n #pts = tf.gather(pts, keep_top)\n #alpha = pts[:,6]\n \n threshold = 0.5\n # threshold = tf.contrib.distributions.percentile(alpha, 50.0)\n\n keep_nz = tf.squeeze(tf.where(alpha >= threshold), axis=1)\n pts = tf.gather(pts, keep_nz)\n alpha = pts[:, 6]\n\n n = tf.cast(tf.shape(input=alpha)[0], tf.float32)\n sparsity = nb / n\n\n keep_rnd = tf.squeeze(tf.where(tf.random.uniform(tf.shape(input=alpha)) < sparsity), axis=1)\n pts = tf.gather(pts, keep_rnd)\n\n return pts\n" ]
[ [ "sklearn.decomposition.PCA" ], [ "numpy.expand_dims", "tensorflow.concat", "tensorflow.range", "numpy.squeeze", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.image.resize", "numpy.mean", "numpy.transpose", "numpy.where", "tensorflow.tile", "tensorflow.logical_and" ], [ "tensorflow.matmul", "tensorflow.transpose", "tensorflow.concat", "tensorflow.unstack", "tensorflow.constant", "tensorflow.shape", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.squeeze", "tensorflow.gather", "tensorflow.where", "tensorflow.split" ] ]
jsikyoon/dreamer
[ "c422d14bba523083c69a862d8c16b41d686c5028" ]
[ "models.py" ]
[ "import numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras import layers as tfkl\nfrom tensorflow_probability import distributions as tfd\nfrom tensorflow.keras.mixed_precision import experimental as prec\n\nimport tools\nfrom trxls import TrXL\n\n\nclass RSSM(tools.Module):\n\n def __init__(self, stoch=30, deter=200, hidden=200, act=tf.nn.elu,\n model='trxl',\n pre_lnorm=False, gate='plus',\n n_layer=2, n_head=10, mem_len=64):\n super().__init__()\n self._activation = act\n self._stoch_size = stoch\n self._deter_size = deter\n self._hidden_size = hidden\n\n # memory module\n self._model = model\n self._deter = deter\n self._n_layer = n_layer\n self._mem_len = mem_len\n self._num_var = n_layer * mem_len * deter\n\n assert model in ['gru', 'trxl']\n\n if self._model=='gru':\n self._cell = tfkl.GRUCell(self._deter_size)\n else:\n self._cell = TrXL(pre_lnorm=pre_lnorm,\n gate=gate,\n n_layer=n_layer,\n d_model=deter,\n n_head=n_head,\n d_head=deter//n_head,\n d_inner=deter,\n mem_len=mem_len)\n\n def initial(self, batch_size):\n dtype = prec.global_policy().compute_dtype\n if self._model=='gru':\n deter = self._cell.get_initial_state(None, batch_size, dtype)\n else:\n deter = tf.zeros([self._n_layer,\n self._mem_len,\n batch_size,\n self._deter], dtype)\n deter = tf.transpose(deter, perm=[2,1,0,3])\n deter = tf.reshape(deter, [deter.shape[0], -1])\n deter = tf.concat([tf.zeros([batch_size, self._deter], dtype),\n deter],\n axis=-1)\n return dict(\n mean=tf.zeros([batch_size, self._stoch_size], dtype),\n std=tf.zeros([batch_size, self._stoch_size], dtype),\n stoch=tf.zeros([batch_size, self._stoch_size], dtype),\n deter=deter)\n\n @tf.function\n def observe(self, embed, action, state=None):\n if state is None:\n state = self.initial(tf.shape(action)[0])\n embed = tf.transpose(embed, [1, 0, 2])\n action = tf.transpose(action, [1, 0, 2])\n post, prior = tools.static_scan(\n lambda prev, inputs: self.obs_step(prev[0], *inputs),\n (action, embed), (state, state))\n post = {k: tf.transpose(v, [1, 0, 2]) for k, v in post.items()}\n prior = {k: tf.transpose(v, [1, 0, 2]) for k, v in prior.items()}\n return post, prior\n\n @tf.function\n def imagine(self, action, state=None):\n if state is None:\n state = self.initial(tf.shape(action)[0])\n assert isinstance(state, dict), state\n action = tf.transpose(action, [1, 0, 2])\n prior = tools.static_scan(self.img_step, action, state)\n prior = {k: tf.transpose(v, [1, 0, 2]) for k, v in prior.items()}\n return prior\n\n def get_feat(self, state):\n if self._model=='gru':\n return tf.concat([state['stoch'], state['deter']], -1)\n else:\n deter = tf.split(state['deter'],\n [self._deter, self._num_var], axis=-1)[0]\n return tf.concat([state['stoch'], deter], -1)\n\n def get_dist(self, state):\n return tfd.MultivariateNormalDiag(state['mean'], state['std'])\n\n @tf.function\n def obs_step(self, prev_state, prev_action, embed):\n prior = self.img_step(prev_state, prev_action)\n if self._model=='gru':\n x = tf.concat([prior['deter'], embed], -1)\n else:\n deter = tf.split(prior['deter'],\n [self._deter, self._num_var], axis=-1)[0]\n x = tf.concat([deter, embed], -1)\n x = self.get('obs1', tfkl.Dense, self._hidden_size, self._activation)(x)\n x = self.get('obs2', tfkl.Dense, 2 * self._stoch_size, None)(x)\n mean, std = tf.split(x, 2, -1)\n std = tf.nn.softplus(std) + 0.1\n stoch = self.get_dist({'mean': mean, 'std': std}).sample()\n post = {'mean': mean, 'std': std, 'stoch': stoch, 'deter': prior['deter']}\n return post, prior\n\n @tf.function\n def 
img_step(self, prev_state, prev_action):\n x = tf.concat([prev_state['stoch'], prev_action], -1)\n x = self.get('img1', tfkl.Dense, self._hidden_size, self._activation)(x)\n if self._model=='gru':\n x, deter = self._cell(x, [prev_state['deter']])\n deter = deter[0] # Keras wraps the state in a list.\n else:\n deter = tf.split(prev_state['deter'],\n [self._deter, self._num_var], axis=-1)[1]\n deter = tf.reshape(deter, [deter.shape[0], self._mem_len,\n self._n_layer, self._deter])\n deter = tf.transpose(deter, perm=[2,1,0,3])\n x, deter = self._cell(dec_inp=tf.expand_dims(x, axis=0),\n mems=deter)\n deter = tf.transpose(deter, perm=[2,1,0,3])\n deter = tf.reshape(deter, [deter.shape[0], -1])\n deter = tf.concat([x, deter], axis=-1)\n x = self.get('img2', tfkl.Dense, self._hidden_size, self._activation)(x)\n x = self.get('img3', tfkl.Dense, 2 * self._stoch_size, None)(x)\n mean, std = tf.split(x, 2, -1)\n std = tf.nn.softplus(std) + 0.1\n stoch = self.get_dist({'mean': mean, 'std': std}).sample()\n prior = {'mean': mean, 'std': std, 'stoch': stoch, 'deter': deter}\n return prior\n\n\nclass ConvEncoder(tools.Module):\n\n def __init__(self, depth=32, act=tf.nn.relu):\n self._act = act\n self._depth = depth\n\n def __call__(self, obs):\n kwargs = dict(strides=2, activation=self._act)\n x = tf.reshape(obs['image'], (-1,) + tuple(obs['image'].shape[-3:]))\n x = self.get('h1', tfkl.Conv2D, 1 * self._depth, 4, **kwargs)(x)\n x = self.get('h2', tfkl.Conv2D, 2 * self._depth, 4, **kwargs)(x)\n x = self.get('h3', tfkl.Conv2D, 4 * self._depth, 4, **kwargs)(x)\n x = self.get('h4', tfkl.Conv2D, 8 * self._depth, 4, **kwargs)(x)\n shape = tf.concat([tf.shape(obs['image'])[:-3], [32 * self._depth]], 0)\n return tf.reshape(x, shape)\n\n\nclass ConvDecoder(tools.Module):\n\n def __init__(self, depth=32, act=tf.nn.relu, shape=(64, 64, 3)):\n self._act = act\n self._depth = depth\n self._shape = shape\n\n def __call__(self, features):\n kwargs = dict(strides=2, activation=self._act)\n x = self.get('h1', tfkl.Dense, 32 * self._depth, None)(features)\n x = tf.reshape(x, [-1, 1, 1, 32 * self._depth])\n x = self.get('h2', tfkl.Conv2DTranspose, 4 * self._depth, 5, **kwargs)(x)\n x = self.get('h3', tfkl.Conv2DTranspose, 2 * self._depth, 5, **kwargs)(x)\n x = self.get('h4', tfkl.Conv2DTranspose, 1 * self._depth, 6, **kwargs)(x)\n x = self.get('h5', tfkl.Conv2DTranspose, self._shape[-1], 6, strides=2)(x)\n mean = tf.reshape(x, tf.concat([tf.shape(features)[:-1], self._shape], 0))\n return tfd.Independent(tfd.Normal(mean, 1), len(self._shape))\n\n\nclass DenseDecoder(tools.Module):\n\n def __init__(self, shape, layers, units, dist='normal', act=tf.nn.elu):\n self._shape = shape\n self._layers = layers\n self._units = units\n self._dist = dist\n self._act = act\n\n def __call__(self, features):\n x = features\n for index in range(self._layers):\n x = self.get(f'h{index}', tfkl.Dense, self._units, self._act)(x)\n x = self.get(f'hout', tfkl.Dense, np.prod(self._shape))(x)\n x = tf.reshape(x, tf.concat([tf.shape(features)[:-1], self._shape], 0))\n if self._dist == 'normal':\n return tfd.Independent(tfd.Normal(x, 1), len(self._shape))\n if self._dist == 'binary':\n return tfd.Independent(tfd.Bernoulli(x), len(self._shape))\n raise NotImplementedError(self._dist)\n\n\nclass ActionDecoder(tools.Module):\n\n def __init__(\n self, size, layers, units, dist='tanh_normal', act=tf.nn.elu,\n min_std=1e-4, init_std=5, mean_scale=5):\n self._size = size\n self._layers = layers\n self._units = units\n self._dist = dist\n self._act = 
act\n self._min_std = min_std\n self._init_std = init_std\n self._mean_scale = mean_scale\n\n def __call__(self, features):\n raw_init_std = np.log(np.exp(self._init_std) - 1)\n x = features\n for index in range(self._layers):\n x = self.get(f'h{index}', tfkl.Dense, self._units, self._act)(x)\n if self._dist == 'tanh_normal':\n # https://www.desmos.com/calculator/rcmcf5jwe7\n x = self.get(f'hout', tfkl.Dense, 2 * self._size)(x)\n mean, std = tf.split(x, 2, -1)\n mean = self._mean_scale * tf.tanh(mean / self._mean_scale)\n std = tf.nn.softplus(std + raw_init_std) + self._min_std\n dist = tfd.Normal(mean, std)\n dist = tfd.TransformedDistribution(dist, tools.TanhBijector())\n dist = tfd.Independent(dist, 1)\n dist = tools.SampleDist(dist)\n elif self._dist == 'onehot':\n x = self.get(f'hout', tfkl.Dense, self._size)(x)\n dist = tools.OneHotDist(x)\n else:\n raise NotImplementedError(dist)\n return dist\n" ]
[ [ "tensorflow.keras.layers.GRUCell", "tensorflow.concat", "tensorflow.transpose", "tensorflow.zeros", "tensorflow.shape", "tensorflow.reshape", "tensorflow.keras.mixed_precision.experimental.global_policy", "tensorflow.expand_dims", "tensorflow.tanh", "numpy.prod", "tensorflow.split", "numpy.exp", "tensorflow.nn.softplus" ] ]
JE-Chen/je_old_repo
[ "a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5", "a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5", "a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5", "a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5", "a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5" ]
[ "LogSystem_JE/venv/Lib/site-packages/tqdm/gui.py", "DatabaseControlWrapper_JE/venv/Lib/site-packages/setuptools/msvc.py", "Python_OCR_JE/venv/Lib/site-packages/numpy/ctypeslib.py", "Python_OCR_JE/venv/Lib/site-packages/numpy/typing/tests/data/pass/ufunc_config.py", "Python_OCR_JE/venv/Lib/site-packages/numpy/polynomial/polyutils.py" ]
[ "\"\"\"\r\nGUI progressbar decorator for iterators.\r\nIncludes a default `range` iterator printing to `stderr`.\r\n\r\nUsage:\r\n>>> from tqdm.gui import trange, tqdm\r\n>>> for i in trange(10):\r\n... ...\r\n\"\"\"\r\n# future division is important to divide integers and get as\r\n# a result precise floating numbers (instead of truncated int)\r\nfrom __future__ import division, absolute_import\r\n# import compatibility functions and utilities\r\nfrom .utils import _range\r\n# to inherit from the tqdm class\r\nfrom .std import tqdm as std_tqdm\r\nfrom .std import TqdmExperimentalWarning\r\nfrom warnings import warn\r\n\r\n\r\n__author__ = {\"github.com/\": [\"casperdcl\", \"lrq3000\"]}\r\n__all__ = ['tqdm_gui', 'tgrange', 'tqdm', 'trange']\r\n\r\n\r\nclass tqdm_gui(std_tqdm): # pragma: no cover\r\n \"\"\"\r\n Experimental GUI version of tqdm!\r\n \"\"\"\r\n\r\n # TODO: @classmethod: write() on GUI?\r\n\r\n def __init__(self, *args, **kwargs):\r\n import matplotlib as mpl\r\n import matplotlib.pyplot as plt\r\n from collections import deque\r\n kwargs['gui'] = True\r\n\r\n super(tqdm_gui, self).__init__(*args, **kwargs)\r\n\r\n # Initialize the GUI display\r\n if self.disable or not kwargs['gui']:\r\n return\r\n\r\n warn('GUI is experimental/alpha', TqdmExperimentalWarning, stacklevel=2)\r\n self.mpl = mpl\r\n self.plt = plt\r\n self.sp = None\r\n\r\n # Remember if external environment uses toolbars\r\n self.toolbar = self.mpl.rcParams['toolbar']\r\n self.mpl.rcParams['toolbar'] = 'None'\r\n\r\n self.mininterval = max(self.mininterval, 0.5)\r\n self.fig, ax = plt.subplots(figsize=(9, 2.2))\r\n # self.fig.subplots_adjust(bottom=0.2)\r\n total = self.__len__() # avoids TypeError on None #971\r\n if total is not None:\r\n self.xdata = []\r\n self.ydata = []\r\n self.zdata = []\r\n else:\r\n self.xdata = deque([])\r\n self.ydata = deque([])\r\n self.zdata = deque([])\r\n self.line1, = ax.plot(self.xdata, self.ydata, color='b')\r\n self.line2, = ax.plot(self.xdata, self.zdata, color='k')\r\n ax.set_ylim(0, 0.001)\r\n if total is not None:\r\n ax.set_xlim(0, 100)\r\n ax.set_xlabel('percent')\r\n self.fig.legend((self.line1, self.line2), ('cur', 'est'),\r\n loc='center right')\r\n # progressbar\r\n self.hspan = plt.axhspan(0, 0.001,\r\n xmin=0, xmax=0, color='g')\r\n else:\r\n # ax.set_xlim(-60, 0)\r\n ax.set_xlim(0, 60)\r\n ax.invert_xaxis()\r\n ax.set_xlabel('seconds')\r\n ax.legend(('cur', 'est'), loc='lower left')\r\n ax.grid()\r\n # ax.set_xlabel('seconds')\r\n ax.set_ylabel((self.unit if self.unit else 'it') + '/s')\r\n if self.unit_scale:\r\n plt.ticklabel_format(style='sci', axis='y',\r\n scilimits=(0, 0))\r\n ax.yaxis.get_offset_text().set_x(-0.15)\r\n\r\n # Remember if external environment is interactive\r\n self.wasion = plt.isinteractive()\r\n plt.ion()\r\n self.ax = ax\r\n\r\n def __iter__(self):\r\n # TODO: somehow allow the following:\r\n # if not self.gui:\r\n # return super(tqdm_gui, self).__iter__()\r\n iterable = self.iterable\r\n if self.disable:\r\n for obj in iterable:\r\n yield obj\r\n return\r\n\r\n # ncols = self.ncols\r\n mininterval = self.mininterval\r\n maxinterval = self.maxinterval\r\n miniters = self.miniters\r\n dynamic_miniters = self.dynamic_miniters\r\n last_print_t = self.last_print_t\r\n last_print_n = self.last_print_n\r\n n = self.n\r\n # dynamic_ncols = self.dynamic_ncols\r\n smoothing = self.smoothing\r\n avg_time = self.avg_time\r\n time = self._time\r\n\r\n for obj in iterable:\r\n yield obj\r\n # Update and possibly print the progressbar.\r\n # Note: 
does not call self.update(1) for speed optimisation.\r\n n += 1\r\n # check counter first to avoid calls to time()\r\n if n - last_print_n >= self.miniters:\r\n miniters = self.miniters # watch monitoring thread changes\r\n delta_t = time() - last_print_t\r\n if delta_t >= mininterval:\r\n cur_t = time()\r\n delta_it = n - last_print_n\r\n # EMA (not just overall average)\r\n if smoothing and delta_t and delta_it:\r\n rate = delta_t / delta_it\r\n avg_time = self.ema(rate, avg_time, smoothing)\r\n self.avg_time = avg_time\r\n\r\n self.n = n\r\n self.display()\r\n\r\n # If no `miniters` was specified, adjust automatically\r\n # to the max iteration rate seen so far between 2 prints\r\n if dynamic_miniters:\r\n if maxinterval and delta_t >= maxinterval:\r\n # Adjust miniters to time interval by rule of 3\r\n if mininterval:\r\n # Set miniters to correspond to mininterval\r\n miniters = delta_it * mininterval / delta_t\r\n else:\r\n # Set miniters to correspond to maxinterval\r\n miniters = delta_it * maxinterval / delta_t\r\n elif smoothing:\r\n # EMA-weight miniters to converge\r\n # towards the timeframe of mininterval\r\n rate = delta_it\r\n if mininterval and delta_t:\r\n rate *= mininterval / delta_t\r\n miniters = self.ema(rate, miniters, smoothing)\r\n else:\r\n # Maximum nb of iterations between 2 prints\r\n miniters = max(miniters, delta_it)\r\n\r\n # Store old values for next call\r\n self.n = self.last_print_n = last_print_n = n\r\n self.last_print_t = last_print_t = cur_t\r\n self.miniters = miniters\r\n\r\n # Closing the progress bar.\r\n # Update some internal variables for close().\r\n self.last_print_n = last_print_n\r\n self.n = n\r\n self.miniters = miniters\r\n self.close()\r\n\r\n def update(self, n=1):\r\n # if not self.gui:\r\n # return super(tqdm_gui, self).close()\r\n if self.disable:\r\n return\r\n\r\n if n < 0:\r\n self.last_print_n += n # for auto-refresh logic to work\r\n self.n += n\r\n\r\n # check counter first to reduce calls to time()\r\n if self.n - self.last_print_n >= self.miniters:\r\n delta_t = self._time() - self.last_print_t\r\n if delta_t >= self.mininterval:\r\n cur_t = self._time()\r\n delta_it = self.n - self.last_print_n # >= n\r\n # elapsed = cur_t - self.start_t\r\n # EMA (not just overall average)\r\n if self.smoothing and delta_t and delta_it:\r\n rate = delta_t / delta_it\r\n self.avg_time = self.ema(\r\n rate, self.avg_time, self.smoothing)\r\n\r\n self.display()\r\n\r\n # If no `miniters` was specified, adjust automatically to the\r\n # maximum iteration rate seen so far between two prints.\r\n # e.g.: After running `tqdm.update(5)`, subsequent\r\n # calls to `tqdm.update()` will only cause an update after\r\n # at least 5 more iterations.\r\n if self.dynamic_miniters:\r\n if self.maxinterval and delta_t >= self.maxinterval:\r\n if self.mininterval:\r\n self.miniters = delta_it * self.mininterval \\\r\n / delta_t\r\n else:\r\n self.miniters = delta_it * self.maxinterval \\\r\n / delta_t\r\n elif self.smoothing:\r\n self.miniters = self.smoothing * delta_it * \\\r\n (self.mininterval / delta_t\r\n if self.mininterval and delta_t\r\n else 1) + \\\r\n (1 - self.smoothing) * self.miniters\r\n else:\r\n self.miniters = max(self.miniters, delta_it)\r\n\r\n # Store old values for next call\r\n self.last_print_n = self.n\r\n self.last_print_t = cur_t\r\n return True\r\n\r\n def close(self):\r\n # if not self.gui:\r\n # return super(tqdm_gui, self).close()\r\n if self.disable:\r\n return\r\n\r\n self.disable = True\r\n\r\n with self.get_lock():\r\n 
self._instances.remove(self)\r\n\r\n # Restore toolbars\r\n self.mpl.rcParams['toolbar'] = self.toolbar\r\n # Return to non-interactive mode\r\n if not self.wasion:\r\n self.plt.ioff()\r\n if not self.leave:\r\n self.plt.close(self.fig)\r\n\r\n def display(self):\r\n n = self.n\r\n cur_t = self._time()\r\n elapsed = cur_t - self.start_t\r\n delta_it = n - self.last_print_n\r\n delta_t = cur_t - self.last_print_t\r\n\r\n # Inline due to multiple calls\r\n total = self.total\r\n xdata = self.xdata\r\n ydata = self.ydata\r\n zdata = self.zdata\r\n ax = self.ax\r\n line1 = self.line1\r\n line2 = self.line2\r\n # instantaneous rate\r\n y = delta_it / delta_t\r\n # overall rate\r\n z = n / elapsed\r\n # update line data\r\n xdata.append(n * 100.0 / total if total else cur_t)\r\n ydata.append(y)\r\n zdata.append(z)\r\n\r\n # Discard old values\r\n # xmin, xmax = ax.get_xlim()\r\n # if (not total) and elapsed > xmin * 1.1:\r\n if (not total) and elapsed > 66:\r\n xdata.popleft()\r\n ydata.popleft()\r\n zdata.popleft()\r\n\r\n ymin, ymax = ax.get_ylim()\r\n if y > ymax or z > ymax:\r\n ymax = 1.1 * y\r\n ax.set_ylim(ymin, ymax)\r\n ax.figure.canvas.draw()\r\n\r\n if total:\r\n line1.set_data(xdata, ydata)\r\n line2.set_data(xdata, zdata)\r\n try:\r\n poly_lims = self.hspan.get_xy()\r\n except AttributeError:\r\n self.hspan = self.plt.axhspan(\r\n 0, 0.001, xmin=0, xmax=0, color='g')\r\n poly_lims = self.hspan.get_xy()\r\n poly_lims[0, 1] = ymin\r\n poly_lims[1, 1] = ymax\r\n poly_lims[2] = [n / total, ymax]\r\n poly_lims[3] = [poly_lims[2, 0], ymin]\r\n if len(poly_lims) > 4:\r\n poly_lims[4, 1] = ymin\r\n self.hspan.set_xy(poly_lims)\r\n else:\r\n t_ago = [cur_t - i for i in xdata]\r\n line1.set_data(t_ago, ydata)\r\n line2.set_data(t_ago, zdata)\r\n\r\n ax.set_title(self.format_meter(\r\n n, total, elapsed, 0,\r\n self.desc, self.ascii, self.unit, self.unit_scale,\r\n 1 / self.avg_time if self.avg_time else None, self.bar_format,\r\n self.postfix, self.unit_divisor),\r\n fontname=\"DejaVu Sans Mono\", fontsize=11)\r\n self.plt.pause(1e-9)\r\n\r\n\r\ndef tgrange(*args, **kwargs):\r\n \"\"\"\r\n A shortcut for `tqdm.gui.tqdm(xrange(*args), **kwargs)`.\r\n On Python3+, `range` is used instead of `xrange`.\r\n \"\"\"\r\n return tqdm_gui(_range(*args), **kwargs)\r\n\r\n\r\n# Aliases\r\ntqdm = tqdm_gui\r\ntrange = tgrange\r\n", "\"\"\"\r\nImproved support for Microsoft Visual C++ compilers.\r\n\r\nKnown supported compilers:\r\n--------------------------\r\nMicrosoft Visual C++ 9.0:\r\n Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64)\r\n Microsoft Windows SDK 6.1 (x86, x64, ia64)\r\n Microsoft Windows SDK 7.0 (x86, x64, ia64)\r\n\r\nMicrosoft Visual C++ 10.0:\r\n Microsoft Windows SDK 7.1 (x86, x64, ia64)\r\n\r\nMicrosoft Visual C++ 14.X:\r\n Microsoft Visual C++ Build Tools 2015 (x86, x64, arm)\r\n Microsoft Visual Studio Build Tools 2017 (x86, x64, arm, arm64)\r\n Microsoft Visual Studio Build Tools 2019 (x86, x64, arm, arm64)\r\n\r\nThis may also support compilers shipped with compatible Visual Studio versions.\r\n\"\"\"\r\n\r\nimport json\r\nfrom io import open\r\nfrom os import listdir, pathsep\r\nfrom os.path import join, isfile, isdir, dirname\r\nimport sys\r\nimport contextlib\r\nimport platform\r\nimport itertools\r\nimport subprocess\r\nimport distutils.errors\r\nfrom setuptools.extern.packaging.version import LegacyVersion\r\nfrom setuptools.extern.more_itertools import unique_everseen\r\n\r\nfrom .monkey import get_unpatched\r\n\r\nif platform.system() == 'Windows':\r\n import 
winreg\r\n from os import environ\r\nelse:\r\n # Mock winreg and environ so the module can be imported on this platform.\r\n\r\n class winreg:\r\n HKEY_USERS = None\r\n HKEY_CURRENT_USER = None\r\n HKEY_LOCAL_MACHINE = None\r\n HKEY_CLASSES_ROOT = None\r\n\r\n environ = dict()\r\n\r\n_msvc9_suppress_errors = (\r\n # msvc9compiler isn't available on some platforms\r\n ImportError,\r\n\r\n # msvc9compiler raises DistutilsPlatformError in some\r\n # environments. See #1118.\r\n distutils.errors.DistutilsPlatformError,\r\n)\r\n\r\ntry:\r\n from distutils.msvc9compiler import Reg\r\nexcept _msvc9_suppress_errors:\r\n pass\r\n\r\n\r\ndef msvc9_find_vcvarsall(version):\r\n \"\"\"\r\n Patched \"distutils.msvc9compiler.find_vcvarsall\" to use the standalone\r\n compiler build for Python\r\n (VCForPython / Microsoft Visual C++ Compiler for Python 2.7).\r\n\r\n Fall back to original behavior when the standalone compiler is not\r\n available.\r\n\r\n Redirect the path of \"vcvarsall.bat\".\r\n\r\n Parameters\r\n ----------\r\n version: float\r\n Required Microsoft Visual C++ version.\r\n\r\n Return\r\n ------\r\n str\r\n vcvarsall.bat path\r\n \"\"\"\r\n vc_base = r'Software\\%sMicrosoft\\DevDiv\\VCForPython\\%0.1f'\r\n key = vc_base % ('', version)\r\n try:\r\n # Per-user installs register the compiler path here\r\n productdir = Reg.get_value(key, \"installdir\")\r\n except KeyError:\r\n try:\r\n # All-user installs on a 64-bit system register here\r\n key = vc_base % ('Wow6432Node\\\\', version)\r\n productdir = Reg.get_value(key, \"installdir\")\r\n except KeyError:\r\n productdir = None\r\n\r\n if productdir:\r\n vcvarsall = join(productdir, \"vcvarsall.bat\")\r\n if isfile(vcvarsall):\r\n return vcvarsall\r\n\r\n return get_unpatched(msvc9_find_vcvarsall)(version)\r\n\r\n\r\ndef msvc9_query_vcvarsall(ver, arch='x86', *args, **kwargs):\r\n \"\"\"\r\n Patched \"distutils.msvc9compiler.query_vcvarsall\" for support extra\r\n Microsoft Visual C++ 9.0 and 10.0 compilers.\r\n\r\n Set environment without use of \"vcvarsall.bat\".\r\n\r\n Parameters\r\n ----------\r\n ver: float\r\n Required Microsoft Visual C++ version.\r\n arch: str\r\n Target architecture.\r\n\r\n Return\r\n ------\r\n dict\r\n environment\r\n \"\"\"\r\n # Try to get environment from vcvarsall.bat (Classical way)\r\n try:\r\n orig = get_unpatched(msvc9_query_vcvarsall)\r\n return orig(ver, arch, *args, **kwargs)\r\n except distutils.errors.DistutilsPlatformError:\r\n # Pass error if Vcvarsall.bat is missing\r\n pass\r\n except ValueError:\r\n # Pass error if environment not set after executing vcvarsall.bat\r\n pass\r\n\r\n # If error, try to set environment directly\r\n try:\r\n return EnvironmentInfo(arch, ver).return_env()\r\n except distutils.errors.DistutilsPlatformError as exc:\r\n _augment_exception(exc, ver, arch)\r\n raise\r\n\r\n\r\ndef _msvc14_find_vc2015():\r\n \"\"\"Python 3.8 \"distutils/_msvccompiler.py\" backport\"\"\"\r\n try:\r\n key = winreg.OpenKey(\r\n winreg.HKEY_LOCAL_MACHINE,\r\n r\"Software\\Microsoft\\VisualStudio\\SxS\\VC7\",\r\n 0,\r\n winreg.KEY_READ | winreg.KEY_WOW64_32KEY\r\n )\r\n except OSError:\r\n return None, None\r\n\r\n best_version = 0\r\n best_dir = None\r\n with key:\r\n for i in itertools.count():\r\n try:\r\n v, vc_dir, vt = winreg.EnumValue(key, i)\r\n except OSError:\r\n break\r\n if v and vt == winreg.REG_SZ and isdir(vc_dir):\r\n try:\r\n version = int(float(v))\r\n except (ValueError, TypeError):\r\n continue\r\n if version >= 14 and version > best_version:\r\n best_version, best_dir = 
version, vc_dir\r\n return best_version, best_dir\r\n\r\n\r\ndef _msvc14_find_vc2017():\r\n \"\"\"Python 3.8 \"distutils/_msvccompiler.py\" backport\r\n\r\n Returns \"15, path\" based on the result of invoking vswhere.exe\r\n If no install is found, returns \"None, None\"\r\n\r\n The version is returned to avoid unnecessarily changing the function\r\n result. It may be ignored when the path is not None.\r\n\r\n If vswhere.exe is not available, by definition, VS 2017 is not\r\n installed.\r\n \"\"\"\r\n root = environ.get(\"ProgramFiles(x86)\") or environ.get(\"ProgramFiles\")\r\n if not root:\r\n return None, None\r\n\r\n try:\r\n path = subprocess.check_output([\r\n join(root, \"Microsoft Visual Studio\", \"Installer\", \"vswhere.exe\"),\r\n \"-latest\",\r\n \"-prerelease\",\r\n \"-requiresAny\",\r\n \"-requires\", \"Microsoft.VisualStudio.Component.VC.Tools.x86.x64\",\r\n \"-requires\", \"Microsoft.VisualStudio.Workload.WDExpress\",\r\n \"-property\", \"installationPath\",\r\n \"-products\", \"*\",\r\n ]).decode(encoding=\"mbcs\", errors=\"strict\").strip()\r\n except (subprocess.CalledProcessError, OSError, UnicodeDecodeError):\r\n return None, None\r\n\r\n path = join(path, \"VC\", \"Auxiliary\", \"Build\")\r\n if isdir(path):\r\n return 15, path\r\n\r\n return None, None\r\n\r\n\r\nPLAT_SPEC_TO_RUNTIME = {\r\n 'x86': 'x86',\r\n 'x86_amd64': 'x64',\r\n 'x86_arm': 'arm',\r\n 'x86_arm64': 'arm64'\r\n}\r\n\r\n\r\ndef _msvc14_find_vcvarsall(plat_spec):\r\n \"\"\"Python 3.8 \"distutils/_msvccompiler.py\" backport\"\"\"\r\n _, best_dir = _msvc14_find_vc2017()\r\n vcruntime = None\r\n\r\n if plat_spec in PLAT_SPEC_TO_RUNTIME:\r\n vcruntime_plat = PLAT_SPEC_TO_RUNTIME[plat_spec]\r\n else:\r\n vcruntime_plat = 'x64' if 'amd64' in plat_spec else 'x86'\r\n\r\n if best_dir:\r\n vcredist = join(best_dir, \"..\", \"..\", \"redist\", \"MSVC\", \"**\",\r\n vcruntime_plat, \"Microsoft.VC14*.CRT\",\r\n \"vcruntime140.dll\")\r\n try:\r\n import glob\r\n vcruntime = glob.glob(vcredist, recursive=True)[-1]\r\n except (ImportError, OSError, LookupError):\r\n vcruntime = None\r\n\r\n if not best_dir:\r\n best_version, best_dir = _msvc14_find_vc2015()\r\n if best_version:\r\n vcruntime = join(best_dir, 'redist', vcruntime_plat,\r\n \"Microsoft.VC140.CRT\", \"vcruntime140.dll\")\r\n\r\n if not best_dir:\r\n return None, None\r\n\r\n vcvarsall = join(best_dir, \"vcvarsall.bat\")\r\n if not isfile(vcvarsall):\r\n return None, None\r\n\r\n if not vcruntime or not isfile(vcruntime):\r\n vcruntime = None\r\n\r\n return vcvarsall, vcruntime\r\n\r\n\r\ndef _msvc14_get_vc_env(plat_spec):\r\n \"\"\"Python 3.8 \"distutils/_msvccompiler.py\" backport\"\"\"\r\n if \"DISTUTILS_USE_SDK\" in environ:\r\n return {\r\n key.lower(): value\r\n for key, value in environ.items()\r\n }\r\n\r\n vcvarsall, vcruntime = _msvc14_find_vcvarsall(plat_spec)\r\n if not vcvarsall:\r\n raise distutils.errors.DistutilsPlatformError(\r\n \"Unable to find vcvarsall.bat\"\r\n )\r\n\r\n try:\r\n out = subprocess.check_output(\r\n 'cmd /u /c \"{}\" {} && set'.format(vcvarsall, plat_spec),\r\n stderr=subprocess.STDOUT,\r\n ).decode('utf-16le', errors='replace')\r\n except subprocess.CalledProcessError as exc:\r\n raise distutils.errors.DistutilsPlatformError(\r\n \"Error executing {}\".format(exc.cmd)\r\n ) from exc\r\n\r\n env = {\r\n key.lower(): value\r\n for key, _, value in\r\n (line.partition('=') for line in out.splitlines())\r\n if key and value\r\n }\r\n\r\n if vcruntime:\r\n env['py_vcruntime_redist'] = vcruntime\r\n return 
env\r\n\r\n\r\ndef msvc14_get_vc_env(plat_spec):\r\n \"\"\"\r\n Patched \"distutils._msvccompiler._get_vc_env\" for support extra\r\n Microsoft Visual C++ 14.X compilers.\r\n\r\n Set environment without use of \"vcvarsall.bat\".\r\n\r\n Parameters\r\n ----------\r\n plat_spec: str\r\n Target architecture.\r\n\r\n Return\r\n ------\r\n dict\r\n environment\r\n \"\"\"\r\n\r\n # Always use backport from CPython 3.8\r\n try:\r\n return _msvc14_get_vc_env(plat_spec)\r\n except distutils.errors.DistutilsPlatformError as exc:\r\n _augment_exception(exc, 14.0)\r\n raise\r\n\r\n\r\ndef msvc14_gen_lib_options(*args, **kwargs):\r\n \"\"\"\r\n Patched \"distutils._msvccompiler.gen_lib_options\" for fix\r\n compatibility between \"numpy.distutils\" and \"distutils._msvccompiler\"\r\n (for Numpy < 1.11.2)\r\n \"\"\"\r\n if \"numpy.distutils\" in sys.modules:\r\n import numpy as np\r\n if LegacyVersion(np.__version__) < LegacyVersion('1.11.2'):\r\n return np.distutils.ccompiler.gen_lib_options(*args, **kwargs)\r\n return get_unpatched(msvc14_gen_lib_options)(*args, **kwargs)\r\n\r\n\r\ndef _augment_exception(exc, version, arch=''):\r\n \"\"\"\r\n Add details to the exception message to help guide the user\r\n as to what action will resolve it.\r\n \"\"\"\r\n # Error if MSVC++ directory not found or environment not set\r\n message = exc.args[0]\r\n\r\n if \"vcvarsall\" in message.lower() or \"visual c\" in message.lower():\r\n # Special error message if MSVC++ not installed\r\n tmpl = 'Microsoft Visual C++ {version:0.1f} or greater is required.'\r\n message = tmpl.format(**locals())\r\n msdownload = 'www.microsoft.com/download/details.aspx?id=%d'\r\n if version == 9.0:\r\n if arch.lower().find('ia64') > -1:\r\n # For VC++ 9.0, if IA64 support is needed, redirect user\r\n # to Windows SDK 7.0.\r\n # Note: No download link available from Microsoft.\r\n message += ' Get it with \"Microsoft Windows SDK 7.0\"'\r\n else:\r\n # For VC++ 9.0 redirect user to Vc++ for Python 2.7 :\r\n # This redirection link is maintained by Microsoft.\r\n # Contact [email protected] if it needs updating.\r\n message += ' Get it from http://aka.ms/vcpython27'\r\n elif version == 10.0:\r\n # For VC++ 10.0 Redirect user to Windows SDK 7.1\r\n message += ' Get it with \"Microsoft Windows SDK 7.1\": '\r\n message += msdownload % 8279\r\n elif version >= 14.0:\r\n # For VC++ 14.X Redirect user to latest Visual C++ Build Tools\r\n message += (' Get it with \"Microsoft C++ Build Tools\": '\r\n r'https://visualstudio.microsoft.com'\r\n r'/visual-cpp-build-tools/')\r\n\r\n exc.args = (message, )\r\n\r\n\r\nclass PlatformInfo:\r\n \"\"\"\r\n Current and Target Architectures information.\r\n\r\n Parameters\r\n ----------\r\n arch: str\r\n Target architecture.\r\n \"\"\"\r\n current_cpu = environ.get('processor_architecture', '').lower()\r\n\r\n def __init__(self, arch):\r\n self.arch = arch.lower().replace('x64', 'amd64')\r\n\r\n @property\r\n def target_cpu(self):\r\n \"\"\"\r\n Return Target CPU architecture.\r\n\r\n Return\r\n ------\r\n str\r\n Target CPU\r\n \"\"\"\r\n return self.arch[self.arch.find('_') + 1:]\r\n\r\n def target_is_x86(self):\r\n \"\"\"\r\n Return True if target CPU is x86 32 bits..\r\n\r\n Return\r\n ------\r\n bool\r\n CPU is x86 32 bits\r\n \"\"\"\r\n return self.target_cpu == 'x86'\r\n\r\n def current_is_x86(self):\r\n \"\"\"\r\n Return True if current CPU is x86 32 bits..\r\n\r\n Return\r\n ------\r\n bool\r\n CPU is x86 32 bits\r\n \"\"\"\r\n return self.current_cpu == 'x86'\r\n\r\n def current_dir(self, 
hidex86=False, x64=False):\r\n \"\"\"\r\n Current platform specific subfolder.\r\n\r\n Parameters\r\n ----------\r\n hidex86: bool\r\n return '' and not '\\x86' if architecture is x86.\r\n x64: bool\r\n return '\\x64' and not '\\amd64' if architecture is amd64.\r\n\r\n Return\r\n ------\r\n str\r\n subfolder: '\\target', or '' (see hidex86 parameter)\r\n \"\"\"\r\n return (\r\n '' if (self.current_cpu == 'x86' and hidex86) else\r\n r'\\x64' if (self.current_cpu == 'amd64' and x64) else\r\n r'\\%s' % self.current_cpu\r\n )\r\n\r\n def target_dir(self, hidex86=False, x64=False):\r\n r\"\"\"\r\n Target platform specific subfolder.\r\n\r\n Parameters\r\n ----------\r\n hidex86: bool\r\n return '' and not '\\x86' if architecture is x86.\r\n x64: bool\r\n return '\\x64' and not '\\amd64' if architecture is amd64.\r\n\r\n Return\r\n ------\r\n str\r\n subfolder: '\\current', or '' (see hidex86 parameter)\r\n \"\"\"\r\n return (\r\n '' if (self.target_cpu == 'x86' and hidex86) else\r\n r'\\x64' if (self.target_cpu == 'amd64' and x64) else\r\n r'\\%s' % self.target_cpu\r\n )\r\n\r\n def cross_dir(self, forcex86=False):\r\n r\"\"\"\r\n Cross platform specific subfolder.\r\n\r\n Parameters\r\n ----------\r\n forcex86: bool\r\n Use 'x86' as current architecture even if current architecture is\r\n not x86.\r\n\r\n Return\r\n ------\r\n str\r\n subfolder: '' if target architecture is current architecture,\r\n '\\current_target' if not.\r\n \"\"\"\r\n current = 'x86' if forcex86 else self.current_cpu\r\n return (\r\n '' if self.target_cpu == current else\r\n self.target_dir().replace('\\\\', '\\\\%s_' % current)\r\n )\r\n\r\n\r\nclass RegistryInfo:\r\n \"\"\"\r\n Microsoft Visual Studio related registry information.\r\n\r\n Parameters\r\n ----------\r\n platform_info: PlatformInfo\r\n \"PlatformInfo\" instance.\r\n \"\"\"\r\n HKEYS = (winreg.HKEY_USERS,\r\n winreg.HKEY_CURRENT_USER,\r\n winreg.HKEY_LOCAL_MACHINE,\r\n winreg.HKEY_CLASSES_ROOT)\r\n\r\n def __init__(self, platform_info):\r\n self.pi = platform_info\r\n\r\n @property\r\n def visualstudio(self):\r\n \"\"\"\r\n Microsoft Visual Studio root registry key.\r\n\r\n Return\r\n ------\r\n str\r\n Registry key\r\n \"\"\"\r\n return 'VisualStudio'\r\n\r\n @property\r\n def sxs(self):\r\n \"\"\"\r\n Microsoft Visual Studio SxS registry key.\r\n\r\n Return\r\n ------\r\n str\r\n Registry key\r\n \"\"\"\r\n return join(self.visualstudio, 'SxS')\r\n\r\n @property\r\n def vc(self):\r\n \"\"\"\r\n Microsoft Visual C++ VC7 registry key.\r\n\r\n Return\r\n ------\r\n str\r\n Registry key\r\n \"\"\"\r\n return join(self.sxs, 'VC7')\r\n\r\n @property\r\n def vs(self):\r\n \"\"\"\r\n Microsoft Visual Studio VS7 registry key.\r\n\r\n Return\r\n ------\r\n str\r\n Registry key\r\n \"\"\"\r\n return join(self.sxs, 'VS7')\r\n\r\n @property\r\n def vc_for_python(self):\r\n \"\"\"\r\n Microsoft Visual C++ for Python registry key.\r\n\r\n Return\r\n ------\r\n str\r\n Registry key\r\n \"\"\"\r\n return r'DevDiv\\VCForPython'\r\n\r\n @property\r\n def microsoft_sdk(self):\r\n \"\"\"\r\n Microsoft SDK registry key.\r\n\r\n Return\r\n ------\r\n str\r\n Registry key\r\n \"\"\"\r\n return 'Microsoft SDKs'\r\n\r\n @property\r\n def windows_sdk(self):\r\n \"\"\"\r\n Microsoft Windows/Platform SDK registry key.\r\n\r\n Return\r\n ------\r\n str\r\n Registry key\r\n \"\"\"\r\n return join(self.microsoft_sdk, 'Windows')\r\n\r\n @property\r\n def netfx_sdk(self):\r\n \"\"\"\r\n Microsoft .NET Framework SDK registry key.\r\n\r\n Return\r\n ------\r\n str\r\n Registry key\r\n 
\"\"\"\r\n return join(self.microsoft_sdk, 'NETFXSDK')\r\n\r\n @property\r\n def windows_kits_roots(self):\r\n \"\"\"\r\n Microsoft Windows Kits Roots registry key.\r\n\r\n Return\r\n ------\r\n str\r\n Registry key\r\n \"\"\"\r\n return r'Windows Kits\\Installed Roots'\r\n\r\n def microsoft(self, key, x86=False):\r\n \"\"\"\r\n Return key in Microsoft software registry.\r\n\r\n Parameters\r\n ----------\r\n key: str\r\n Registry key path where look.\r\n x86: str\r\n Force x86 software registry.\r\n\r\n Return\r\n ------\r\n str\r\n Registry key\r\n \"\"\"\r\n node64 = '' if self.pi.current_is_x86() or x86 else 'Wow6432Node'\r\n return join('Software', node64, 'Microsoft', key)\r\n\r\n def lookup(self, key, name):\r\n \"\"\"\r\n Look for values in registry in Microsoft software registry.\r\n\r\n Parameters\r\n ----------\r\n key: str\r\n Registry key path where look.\r\n name: str\r\n Value name to find.\r\n\r\n Return\r\n ------\r\n str\r\n value\r\n \"\"\"\r\n key_read = winreg.KEY_READ\r\n openkey = winreg.OpenKey\r\n closekey = winreg.CloseKey\r\n ms = self.microsoft\r\n for hkey in self.HKEYS:\r\n bkey = None\r\n try:\r\n bkey = openkey(hkey, ms(key), 0, key_read)\r\n except (OSError, IOError):\r\n if not self.pi.current_is_x86():\r\n try:\r\n bkey = openkey(hkey, ms(key, True), 0, key_read)\r\n except (OSError, IOError):\r\n continue\r\n else:\r\n continue\r\n try:\r\n return winreg.QueryValueEx(bkey, name)[0]\r\n except (OSError, IOError):\r\n pass\r\n finally:\r\n if bkey:\r\n closekey(bkey)\r\n\r\n\r\nclass SystemInfo:\r\n \"\"\"\r\n Microsoft Windows and Visual Studio related system information.\r\n\r\n Parameters\r\n ----------\r\n registry_info: RegistryInfo\r\n \"RegistryInfo\" instance.\r\n vc_ver: float\r\n Required Microsoft Visual C++ version.\r\n \"\"\"\r\n\r\n # Variables and properties in this class use originals CamelCase variables\r\n # names from Microsoft source files for more easy comparison.\r\n WinDir = environ.get('WinDir', '')\r\n ProgramFiles = environ.get('ProgramFiles', '')\r\n ProgramFilesx86 = environ.get('ProgramFiles(x86)', ProgramFiles)\r\n\r\n def __init__(self, registry_info, vc_ver=None):\r\n self.ri = registry_info\r\n self.pi = self.ri.pi\r\n\r\n self.known_vs_paths = self.find_programdata_vs_vers()\r\n\r\n # Except for VS15+, VC version is aligned with VS version\r\n self.vs_ver = self.vc_ver = (\r\n vc_ver or self._find_latest_available_vs_ver())\r\n\r\n def _find_latest_available_vs_ver(self):\r\n \"\"\"\r\n Find the latest VC version\r\n\r\n Return\r\n ------\r\n float\r\n version\r\n \"\"\"\r\n reg_vc_vers = self.find_reg_vs_vers()\r\n\r\n if not (reg_vc_vers or self.known_vs_paths):\r\n raise distutils.errors.DistutilsPlatformError(\r\n 'No Microsoft Visual C++ version found')\r\n\r\n vc_vers = set(reg_vc_vers)\r\n vc_vers.update(self.known_vs_paths)\r\n return sorted(vc_vers)[-1]\r\n\r\n def find_reg_vs_vers(self):\r\n \"\"\"\r\n Find Microsoft Visual Studio versions available in registry.\r\n\r\n Return\r\n ------\r\n list of float\r\n Versions\r\n \"\"\"\r\n ms = self.ri.microsoft\r\n vckeys = (self.ri.vc, self.ri.vc_for_python, self.ri.vs)\r\n vs_vers = []\r\n for hkey, key in itertools.product(self.ri.HKEYS, vckeys):\r\n try:\r\n bkey = winreg.OpenKey(hkey, ms(key), 0, winreg.KEY_READ)\r\n except (OSError, IOError):\r\n continue\r\n with bkey:\r\n subkeys, values, _ = winreg.QueryInfoKey(bkey)\r\n for i in range(values):\r\n with contextlib.suppress(ValueError):\r\n ver = float(winreg.EnumValue(bkey, i)[0])\r\n if ver not in vs_vers:\r\n 
vs_vers.append(ver)\r\n for i in range(subkeys):\r\n with contextlib.suppress(ValueError):\r\n ver = float(winreg.EnumKey(bkey, i))\r\n if ver not in vs_vers:\r\n vs_vers.append(ver)\r\n return sorted(vs_vers)\r\n\r\n def find_programdata_vs_vers(self):\r\n r\"\"\"\r\n Find Visual studio 2017+ versions from information in\r\n \"C:\\ProgramData\\Microsoft\\VisualStudio\\Packages\\_Instances\".\r\n\r\n Return\r\n ------\r\n dict\r\n float version as key, path as value.\r\n \"\"\"\r\n vs_versions = {}\r\n instances_dir = \\\r\n r'C:\\ProgramData\\Microsoft\\VisualStudio\\Packages\\_Instances'\r\n\r\n try:\r\n hashed_names = listdir(instances_dir)\r\n\r\n except (OSError, IOError):\r\n # Directory not exists with all Visual Studio versions\r\n return vs_versions\r\n\r\n for name in hashed_names:\r\n try:\r\n # Get VS installation path from \"state.json\" file\r\n state_path = join(instances_dir, name, 'state.json')\r\n with open(state_path, 'rt', encoding='utf-8') as state_file:\r\n state = json.load(state_file)\r\n vs_path = state['installationPath']\r\n\r\n # Raises OSError if this VS installation does not contain VC\r\n listdir(join(vs_path, r'VC\\Tools\\MSVC'))\r\n\r\n # Store version and path\r\n vs_versions[self._as_float_version(\r\n state['installationVersion'])] = vs_path\r\n\r\n except (OSError, IOError, KeyError):\r\n # Skip if \"state.json\" file is missing or bad format\r\n continue\r\n\r\n return vs_versions\r\n\r\n @staticmethod\r\n def _as_float_version(version):\r\n \"\"\"\r\n Return a string version as a simplified float version (major.minor)\r\n\r\n Parameters\r\n ----------\r\n version: str\r\n Version.\r\n\r\n Return\r\n ------\r\n float\r\n version\r\n \"\"\"\r\n return float('.'.join(version.split('.')[:2]))\r\n\r\n @property\r\n def VSInstallDir(self):\r\n \"\"\"\r\n Microsoft Visual Studio directory.\r\n\r\n Return\r\n ------\r\n str\r\n path\r\n \"\"\"\r\n # Default path\r\n default = join(self.ProgramFilesx86,\r\n 'Microsoft Visual Studio %0.1f' % self.vs_ver)\r\n\r\n # Try to get path from registry, if fail use default path\r\n return self.ri.lookup(self.ri.vs, '%0.1f' % self.vs_ver) or default\r\n\r\n @property\r\n def VCInstallDir(self):\r\n \"\"\"\r\n Microsoft Visual C++ directory.\r\n\r\n Return\r\n ------\r\n str\r\n path\r\n \"\"\"\r\n path = self._guess_vc() or self._guess_vc_legacy()\r\n\r\n if not isdir(path):\r\n msg = 'Microsoft Visual C++ directory not found'\r\n raise distutils.errors.DistutilsPlatformError(msg)\r\n\r\n return path\r\n\r\n def _guess_vc(self):\r\n \"\"\"\r\n Locate Visual C++ for VS2017+.\r\n\r\n Return\r\n ------\r\n str\r\n path\r\n \"\"\"\r\n if self.vs_ver <= 14.0:\r\n return ''\r\n\r\n try:\r\n # First search in known VS paths\r\n vs_dir = self.known_vs_paths[self.vs_ver]\r\n except KeyError:\r\n # Else, search with path from registry\r\n vs_dir = self.VSInstallDir\r\n\r\n guess_vc = join(vs_dir, r'VC\\Tools\\MSVC')\r\n\r\n # Subdir with VC exact version as name\r\n try:\r\n # Update the VC version with real one instead of VS version\r\n vc_ver = listdir(guess_vc)[-1]\r\n self.vc_ver = self._as_float_version(vc_ver)\r\n return join(guess_vc, vc_ver)\r\n except (OSError, IOError, IndexError):\r\n return ''\r\n\r\n def _guess_vc_legacy(self):\r\n \"\"\"\r\n Locate Visual C++ for versions prior to 2017.\r\n\r\n Return\r\n ------\r\n str\r\n path\r\n \"\"\"\r\n default = join(self.ProgramFilesx86,\r\n r'Microsoft Visual Studio %0.1f\\VC' % self.vs_ver)\r\n\r\n # Try to get \"VC++ for Python\" path from registry as default path\r\n 
reg_path = join(self.ri.vc_for_python, '%0.1f' % self.vs_ver)\r\n python_vc = self.ri.lookup(reg_path, 'installdir')\r\n default_vc = join(python_vc, 'VC') if python_vc else default\r\n\r\n # Try to get path from registry, if fail use default path\r\n return self.ri.lookup(self.ri.vc, '%0.1f' % self.vs_ver) or default_vc\r\n\r\n @property\r\n def WindowsSdkVersion(self):\r\n \"\"\"\r\n Microsoft Windows SDK versions for specified MSVC++ version.\r\n\r\n Return\r\n ------\r\n tuple of str\r\n versions\r\n \"\"\"\r\n if self.vs_ver <= 9.0:\r\n return '7.0', '6.1', '6.0a'\r\n elif self.vs_ver == 10.0:\r\n return '7.1', '7.0a'\r\n elif self.vs_ver == 11.0:\r\n return '8.0', '8.0a'\r\n elif self.vs_ver == 12.0:\r\n return '8.1', '8.1a'\r\n elif self.vs_ver >= 14.0:\r\n return '10.0', '8.1'\r\n\r\n @property\r\n def WindowsSdkLastVersion(self):\r\n \"\"\"\r\n Microsoft Windows SDK last version.\r\n\r\n Return\r\n ------\r\n str\r\n version\r\n \"\"\"\r\n return self._use_last_dir_name(join(self.WindowsSdkDir, 'lib'))\r\n\r\n @property # noqa: C901\r\n def WindowsSdkDir(self): # noqa: C901 # is too complex (12) # FIXME\r\n \"\"\"\r\n Microsoft Windows SDK directory.\r\n\r\n Return\r\n ------\r\n str\r\n path\r\n \"\"\"\r\n sdkdir = ''\r\n for ver in self.WindowsSdkVersion:\r\n # Try to get it from registry\r\n loc = join(self.ri.windows_sdk, 'v%s' % ver)\r\n sdkdir = self.ri.lookup(loc, 'installationfolder')\r\n if sdkdir:\r\n break\r\n if not sdkdir or not isdir(sdkdir):\r\n # Try to get \"VC++ for Python\" version from registry\r\n path = join(self.ri.vc_for_python, '%0.1f' % self.vc_ver)\r\n install_base = self.ri.lookup(path, 'installdir')\r\n if install_base:\r\n sdkdir = join(install_base, 'WinSDK')\r\n if not sdkdir or not isdir(sdkdir):\r\n # If fail, use default new path\r\n for ver in self.WindowsSdkVersion:\r\n intver = ver[:ver.rfind('.')]\r\n path = r'Microsoft SDKs\\Windows Kits\\%s' % intver\r\n d = join(self.ProgramFiles, path)\r\n if isdir(d):\r\n sdkdir = d\r\n if not sdkdir or not isdir(sdkdir):\r\n # If fail, use default old path\r\n for ver in self.WindowsSdkVersion:\r\n path = r'Microsoft SDKs\\Windows\\v%s' % ver\r\n d = join(self.ProgramFiles, path)\r\n if isdir(d):\r\n sdkdir = d\r\n if not sdkdir:\r\n # If fail, use Platform SDK\r\n sdkdir = join(self.VCInstallDir, 'PlatformSDK')\r\n return sdkdir\r\n\r\n @property\r\n def WindowsSDKExecutablePath(self):\r\n \"\"\"\r\n Microsoft Windows SDK executable directory.\r\n\r\n Return\r\n ------\r\n str\r\n path\r\n \"\"\"\r\n # Find WinSDK NetFx Tools registry dir name\r\n if self.vs_ver <= 11.0:\r\n netfxver = 35\r\n arch = ''\r\n else:\r\n netfxver = 40\r\n hidex86 = True if self.vs_ver <= 12.0 else False\r\n arch = self.pi.current_dir(x64=True, hidex86=hidex86)\r\n fx = 'WinSDK-NetFx%dTools%s' % (netfxver, arch.replace('\\\\', '-'))\r\n\r\n # list all possibles registry paths\r\n regpaths = []\r\n if self.vs_ver >= 14.0:\r\n for ver in self.NetFxSdkVersion:\r\n regpaths += [join(self.ri.netfx_sdk, ver, fx)]\r\n\r\n for ver in self.WindowsSdkVersion:\r\n regpaths += [join(self.ri.windows_sdk, 'v%sA' % ver, fx)]\r\n\r\n # Return installation folder from the more recent path\r\n for path in regpaths:\r\n execpath = self.ri.lookup(path, 'installationfolder')\r\n if execpath:\r\n return execpath\r\n\r\n @property\r\n def FSharpInstallDir(self):\r\n \"\"\"\r\n Microsoft Visual F# directory.\r\n\r\n Return\r\n ------\r\n str\r\n path\r\n \"\"\"\r\n path = join(self.ri.visualstudio, r'%0.1f\\Setup\\F#' % self.vs_ver)\r\n return 
self.ri.lookup(path, 'productdir') or ''\r\n\r\n @property\r\n def UniversalCRTSdkDir(self):\r\n \"\"\"\r\n Microsoft Universal CRT SDK directory.\r\n\r\n Return\r\n ------\r\n str\r\n path\r\n \"\"\"\r\n # Set Kit Roots versions for specified MSVC++ version\r\n vers = ('10', '81') if self.vs_ver >= 14.0 else ()\r\n\r\n # Find path of the more recent Kit\r\n for ver in vers:\r\n sdkdir = self.ri.lookup(self.ri.windows_kits_roots,\r\n 'kitsroot%s' % ver)\r\n if sdkdir:\r\n return sdkdir or ''\r\n\r\n @property\r\n def UniversalCRTSdkLastVersion(self):\r\n \"\"\"\r\n Microsoft Universal C Runtime SDK last version.\r\n\r\n Return\r\n ------\r\n str\r\n version\r\n \"\"\"\r\n return self._use_last_dir_name(join(self.UniversalCRTSdkDir, 'lib'))\r\n\r\n @property\r\n def NetFxSdkVersion(self):\r\n \"\"\"\r\n Microsoft .NET Framework SDK versions.\r\n\r\n Return\r\n ------\r\n tuple of str\r\n versions\r\n \"\"\"\r\n # Set FxSdk versions for specified VS version\r\n return (('4.7.2', '4.7.1', '4.7',\r\n '4.6.2', '4.6.1', '4.6',\r\n '4.5.2', '4.5.1', '4.5')\r\n if self.vs_ver >= 14.0 else ())\r\n\r\n @property\r\n def NetFxSdkDir(self):\r\n \"\"\"\r\n Microsoft .NET Framework SDK directory.\r\n\r\n Return\r\n ------\r\n str\r\n path\r\n \"\"\"\r\n sdkdir = ''\r\n for ver in self.NetFxSdkVersion:\r\n loc = join(self.ri.netfx_sdk, ver)\r\n sdkdir = self.ri.lookup(loc, 'kitsinstallationfolder')\r\n if sdkdir:\r\n break\r\n return sdkdir\r\n\r\n @property\r\n def FrameworkDir32(self):\r\n \"\"\"\r\n Microsoft .NET Framework 32bit directory.\r\n\r\n Return\r\n ------\r\n str\r\n path\r\n \"\"\"\r\n # Default path\r\n guess_fw = join(self.WinDir, r'Microsoft.NET\\Framework')\r\n\r\n # Try to get path from registry, if fail use default path\r\n return self.ri.lookup(self.ri.vc, 'frameworkdir32') or guess_fw\r\n\r\n @property\r\n def FrameworkDir64(self):\r\n \"\"\"\r\n Microsoft .NET Framework 64bit directory.\r\n\r\n Return\r\n ------\r\n str\r\n path\r\n \"\"\"\r\n # Default path\r\n guess_fw = join(self.WinDir, r'Microsoft.NET\\Framework64')\r\n\r\n # Try to get path from registry, if fail use default path\r\n return self.ri.lookup(self.ri.vc, 'frameworkdir64') or guess_fw\r\n\r\n @property\r\n def FrameworkVersion32(self):\r\n \"\"\"\r\n Microsoft .NET Framework 32bit versions.\r\n\r\n Return\r\n ------\r\n tuple of str\r\n versions\r\n \"\"\"\r\n return self._find_dot_net_versions(32)\r\n\r\n @property\r\n def FrameworkVersion64(self):\r\n \"\"\"\r\n Microsoft .NET Framework 64bit versions.\r\n\r\n Return\r\n ------\r\n tuple of str\r\n versions\r\n \"\"\"\r\n return self._find_dot_net_versions(64)\r\n\r\n def _find_dot_net_versions(self, bits):\r\n \"\"\"\r\n Find Microsoft .NET Framework versions.\r\n\r\n Parameters\r\n ----------\r\n bits: int\r\n Platform number of bits: 32 or 64.\r\n\r\n Return\r\n ------\r\n tuple of str\r\n versions\r\n \"\"\"\r\n # Find actual .NET version in registry\r\n reg_ver = self.ri.lookup(self.ri.vc, 'frameworkver%d' % bits)\r\n dot_net_dir = getattr(self, 'FrameworkDir%d' % bits)\r\n ver = reg_ver or self._use_last_dir_name(dot_net_dir, 'v') or ''\r\n\r\n # Set .NET versions for specified MSVC++ version\r\n if self.vs_ver >= 12.0:\r\n return ver, 'v4.0'\r\n elif self.vs_ver >= 10.0:\r\n return 'v4.0.30319' if ver.lower()[:2] != 'v4' else ver, 'v3.5'\r\n elif self.vs_ver == 9.0:\r\n return 'v3.5', 'v2.0.50727'\r\n elif self.vs_ver == 8.0:\r\n return 'v3.0', 'v2.0.50727'\r\n\r\n @staticmethod\r\n def _use_last_dir_name(path, prefix=''):\r\n \"\"\"\r\n Return name of 
the last dir in path or '' if no dir found.\r\n\r\n Parameters\r\n ----------\r\n path: str\r\n Use dirs in this path\r\n prefix: str\r\n Use only dirs starting by this prefix\r\n\r\n Return\r\n ------\r\n str\r\n name\r\n \"\"\"\r\n matching_dirs = (\r\n dir_name\r\n for dir_name in reversed(listdir(path))\r\n if isdir(join(path, dir_name)) and\r\n dir_name.startswith(prefix)\r\n )\r\n return next(matching_dirs, None) or ''\r\n\r\n\r\nclass EnvironmentInfo:\r\n \"\"\"\r\n Return environment variables for specified Microsoft Visual C++ version\r\n and platform : Lib, Include, Path and libpath.\r\n\r\n This function is compatible with Microsoft Visual C++ 9.0 to 14.X.\r\n\r\n Script created by analysing Microsoft environment configuration files like\r\n \"vcvars[...].bat\", \"SetEnv.Cmd\", \"vcbuildtools.bat\", ...\r\n\r\n Parameters\r\n ----------\r\n arch: str\r\n Target architecture.\r\n vc_ver: float\r\n Required Microsoft Visual C++ version. If not set, autodetect the last\r\n version.\r\n vc_min_ver: float\r\n Minimum Microsoft Visual C++ version.\r\n \"\"\"\r\n\r\n # Variables and properties in this class use originals CamelCase variables\r\n # names from Microsoft source files for more easy comparison.\r\n\r\n def __init__(self, arch, vc_ver=None, vc_min_ver=0):\r\n self.pi = PlatformInfo(arch)\r\n self.ri = RegistryInfo(self.pi)\r\n self.si = SystemInfo(self.ri, vc_ver)\r\n\r\n if self.vc_ver < vc_min_ver:\r\n err = 'No suitable Microsoft Visual C++ version found'\r\n raise distutils.errors.DistutilsPlatformError(err)\r\n\r\n @property\r\n def vs_ver(self):\r\n \"\"\"\r\n Microsoft Visual Studio.\r\n\r\n Return\r\n ------\r\n float\r\n version\r\n \"\"\"\r\n return self.si.vs_ver\r\n\r\n @property\r\n def vc_ver(self):\r\n \"\"\"\r\n Microsoft Visual C++ version.\r\n\r\n Return\r\n ------\r\n float\r\n version\r\n \"\"\"\r\n return self.si.vc_ver\r\n\r\n @property\r\n def VSTools(self):\r\n \"\"\"\r\n Microsoft Visual Studio Tools.\r\n\r\n Return\r\n ------\r\n list of str\r\n paths\r\n \"\"\"\r\n paths = [r'Common7\\IDE', r'Common7\\Tools']\r\n\r\n if self.vs_ver >= 14.0:\r\n arch_subdir = self.pi.current_dir(hidex86=True, x64=True)\r\n paths += [r'Common7\\IDE\\CommonExtensions\\Microsoft\\TestWindow']\r\n paths += [r'Team Tools\\Performance Tools']\r\n paths += [r'Team Tools\\Performance Tools%s' % arch_subdir]\r\n\r\n return [join(self.si.VSInstallDir, path) for path in paths]\r\n\r\n @property\r\n def VCIncludes(self):\r\n \"\"\"\r\n Microsoft Visual C++ & Microsoft Foundation Class Includes.\r\n\r\n Return\r\n ------\r\n list of str\r\n paths\r\n \"\"\"\r\n return [join(self.si.VCInstallDir, 'Include'),\r\n join(self.si.VCInstallDir, r'ATLMFC\\Include')]\r\n\r\n @property\r\n def VCLibraries(self):\r\n \"\"\"\r\n Microsoft Visual C++ & Microsoft Foundation Class Libraries.\r\n\r\n Return\r\n ------\r\n list of str\r\n paths\r\n \"\"\"\r\n if self.vs_ver >= 15.0:\r\n arch_subdir = self.pi.target_dir(x64=True)\r\n else:\r\n arch_subdir = self.pi.target_dir(hidex86=True)\r\n paths = ['Lib%s' % arch_subdir, r'ATLMFC\\Lib%s' % arch_subdir]\r\n\r\n if self.vs_ver >= 14.0:\r\n paths += [r'Lib\\store%s' % arch_subdir]\r\n\r\n return [join(self.si.VCInstallDir, path) for path in paths]\r\n\r\n @property\r\n def VCStoreRefs(self):\r\n \"\"\"\r\n Microsoft Visual C++ store references Libraries.\r\n\r\n Return\r\n ------\r\n list of str\r\n paths\r\n \"\"\"\r\n if self.vs_ver < 14.0:\r\n return []\r\n return [join(self.si.VCInstallDir, r'Lib\\store\\references')]\r\n\r\n @property\r\n 
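    # A minimal usage sketch for EnvironmentInfo (illustrative: the
    # architecture string is whatever PlatformInfo accepts, e.g. 'x86_amd64'
    # for a 32-bit host targeting 64-bit, and the calls only succeed on a
    # machine with a matching Visual C++ installation):
    #
    #     >>> env = EnvironmentInfo('x86_amd64', vc_min_ver=14.0)  # doctest: +SKIP
    #     >>> vc_env = env.return_env()                            # doctest: +SKIP
    #     >>> sorted(vc_env)                                       # doctest: +SKIP
    #     ['include', 'lib', 'libpath', 'path']
    #
    # Each value is a pathsep-joined string ready for os.environ; a
    # 'py_vcruntime_redist' entry is added when the VC runtime DLL is found
    # (see return_env further down for the exact composition).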
def VCTools(self):\r\n \"\"\"\r\n Microsoft Visual C++ Tools.\r\n\r\n Return\r\n ------\r\n list of str\r\n paths\r\n \"\"\"\r\n si = self.si\r\n tools = [join(si.VCInstallDir, 'VCPackages')]\r\n\r\n forcex86 = True if self.vs_ver <= 10.0 else False\r\n arch_subdir = self.pi.cross_dir(forcex86)\r\n if arch_subdir:\r\n tools += [join(si.VCInstallDir, 'Bin%s' % arch_subdir)]\r\n\r\n if self.vs_ver == 14.0:\r\n path = 'Bin%s' % self.pi.current_dir(hidex86=True)\r\n tools += [join(si.VCInstallDir, path)]\r\n\r\n elif self.vs_ver >= 15.0:\r\n host_dir = (r'bin\\HostX86%s' if self.pi.current_is_x86() else\r\n r'bin\\HostX64%s')\r\n tools += [join(\r\n si.VCInstallDir, host_dir % self.pi.target_dir(x64=True))]\r\n\r\n if self.pi.current_cpu != self.pi.target_cpu:\r\n tools += [join(\r\n si.VCInstallDir, host_dir % self.pi.current_dir(x64=True))]\r\n\r\n else:\r\n tools += [join(si.VCInstallDir, 'Bin')]\r\n\r\n return tools\r\n\r\n @property\r\n def OSLibraries(self):\r\n \"\"\"\r\n Microsoft Windows SDK Libraries.\r\n\r\n Return\r\n ------\r\n list of str\r\n paths\r\n \"\"\"\r\n if self.vs_ver <= 10.0:\r\n arch_subdir = self.pi.target_dir(hidex86=True, x64=True)\r\n return [join(self.si.WindowsSdkDir, 'Lib%s' % arch_subdir)]\r\n\r\n else:\r\n arch_subdir = self.pi.target_dir(x64=True)\r\n lib = join(self.si.WindowsSdkDir, 'lib')\r\n libver = self._sdk_subdir\r\n return [join(lib, '%sum%s' % (libver, arch_subdir))]\r\n\r\n @property\r\n def OSIncludes(self):\r\n \"\"\"\r\n Microsoft Windows SDK Include.\r\n\r\n Return\r\n ------\r\n list of str\r\n paths\r\n \"\"\"\r\n include = join(self.si.WindowsSdkDir, 'include')\r\n\r\n if self.vs_ver <= 10.0:\r\n return [include, join(include, 'gl')]\r\n\r\n else:\r\n if self.vs_ver >= 14.0:\r\n sdkver = self._sdk_subdir\r\n else:\r\n sdkver = ''\r\n return [join(include, '%sshared' % sdkver),\r\n join(include, '%sum' % sdkver),\r\n join(include, '%swinrt' % sdkver)]\r\n\r\n @property\r\n def OSLibpath(self):\r\n \"\"\"\r\n Microsoft Windows SDK Libraries Paths.\r\n\r\n Return\r\n ------\r\n list of str\r\n paths\r\n \"\"\"\r\n ref = join(self.si.WindowsSdkDir, 'References')\r\n libpath = []\r\n\r\n if self.vs_ver <= 9.0:\r\n libpath += self.OSLibraries\r\n\r\n if self.vs_ver >= 11.0:\r\n libpath += [join(ref, r'CommonConfiguration\\Neutral')]\r\n\r\n if self.vs_ver >= 14.0:\r\n libpath += [\r\n ref,\r\n join(self.si.WindowsSdkDir, 'UnionMetadata'),\r\n join(\r\n ref, 'Windows.Foundation.UniversalApiContract', '1.0.0.0'),\r\n join(ref, 'Windows.Foundation.FoundationContract', '1.0.0.0'),\r\n join(\r\n ref, 'Windows.Networking.Connectivity.WwanContract',\r\n '1.0.0.0'),\r\n join(\r\n self.si.WindowsSdkDir, 'ExtensionSDKs', 'Microsoft.VCLibs',\r\n '%0.1f' % self.vs_ver, 'References', 'CommonConfiguration',\r\n 'neutral'),\r\n ]\r\n return libpath\r\n\r\n @property\r\n def SdkTools(self):\r\n \"\"\"\r\n Microsoft Windows SDK Tools.\r\n\r\n Return\r\n ------\r\n list of str\r\n paths\r\n \"\"\"\r\n return list(self._sdk_tools())\r\n\r\n def _sdk_tools(self):\r\n \"\"\"\r\n Microsoft Windows SDK Tools paths generator.\r\n\r\n Return\r\n ------\r\n generator of str\r\n paths\r\n \"\"\"\r\n if self.vs_ver < 15.0:\r\n bin_dir = 'Bin' if self.vs_ver <= 11.0 else r'Bin\\x86'\r\n yield join(self.si.WindowsSdkDir, bin_dir)\r\n\r\n if not self.pi.current_is_x86():\r\n arch_subdir = self.pi.current_dir(x64=True)\r\n path = 'Bin%s' % arch_subdir\r\n yield join(self.si.WindowsSdkDir, path)\r\n\r\n if self.vs_ver in (10.0, 11.0):\r\n if self.pi.target_is_x86():\r\n 
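            # 32-bit targets use the plain r'Bin\NETFX 4.0 Tools' directory
            # (empty suffix, set just below); other targets append a
            # host-specific suffix from current_dir(), typically giving
            # something like r'Bin\NETFX 4.0 Tools\x64' under WindowsSdkDir.
            # The exact suffix comes from PlatformInfo and is only
            # illustrative here.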
arch_subdir = ''\r\n else:\r\n arch_subdir = self.pi.current_dir(hidex86=True, x64=True)\r\n path = r'Bin\\NETFX 4.0 Tools%s' % arch_subdir\r\n yield join(self.si.WindowsSdkDir, path)\r\n\r\n elif self.vs_ver >= 15.0:\r\n path = join(self.si.WindowsSdkDir, 'Bin')\r\n arch_subdir = self.pi.current_dir(x64=True)\r\n sdkver = self.si.WindowsSdkLastVersion\r\n yield join(path, '%s%s' % (sdkver, arch_subdir))\r\n\r\n if self.si.WindowsSDKExecutablePath:\r\n yield self.si.WindowsSDKExecutablePath\r\n\r\n @property\r\n def _sdk_subdir(self):\r\n \"\"\"\r\n Microsoft Windows SDK version subdir.\r\n\r\n Return\r\n ------\r\n str\r\n subdir\r\n \"\"\"\r\n ucrtver = self.si.WindowsSdkLastVersion\r\n return ('%s\\\\' % ucrtver) if ucrtver else ''\r\n\r\n @property\r\n def SdkSetup(self):\r\n \"\"\"\r\n Microsoft Windows SDK Setup.\r\n\r\n Return\r\n ------\r\n list of str\r\n paths\r\n \"\"\"\r\n if self.vs_ver > 9.0:\r\n return []\r\n\r\n return [join(self.si.WindowsSdkDir, 'Setup')]\r\n\r\n @property\r\n def FxTools(self):\r\n \"\"\"\r\n Microsoft .NET Framework Tools.\r\n\r\n Return\r\n ------\r\n list of str\r\n paths\r\n \"\"\"\r\n pi = self.pi\r\n si = self.si\r\n\r\n if self.vs_ver <= 10.0:\r\n include32 = True\r\n include64 = not pi.target_is_x86() and not pi.current_is_x86()\r\n else:\r\n include32 = pi.target_is_x86() or pi.current_is_x86()\r\n include64 = pi.current_cpu == 'amd64' or pi.target_cpu == 'amd64'\r\n\r\n tools = []\r\n if include32:\r\n tools += [join(si.FrameworkDir32, ver)\r\n for ver in si.FrameworkVersion32]\r\n if include64:\r\n tools += [join(si.FrameworkDir64, ver)\r\n for ver in si.FrameworkVersion64]\r\n return tools\r\n\r\n @property\r\n def NetFxSDKLibraries(self):\r\n \"\"\"\r\n Microsoft .Net Framework SDK Libraries.\r\n\r\n Return\r\n ------\r\n list of str\r\n paths\r\n \"\"\"\r\n if self.vs_ver < 14.0 or not self.si.NetFxSdkDir:\r\n return []\r\n\r\n arch_subdir = self.pi.target_dir(x64=True)\r\n return [join(self.si.NetFxSdkDir, r'lib\\um%s' % arch_subdir)]\r\n\r\n @property\r\n def NetFxSDKIncludes(self):\r\n \"\"\"\r\n Microsoft .Net Framework SDK Includes.\r\n\r\n Return\r\n ------\r\n list of str\r\n paths\r\n \"\"\"\r\n if self.vs_ver < 14.0 or not self.si.NetFxSdkDir:\r\n return []\r\n\r\n return [join(self.si.NetFxSdkDir, r'include\\um')]\r\n\r\n @property\r\n def VsTDb(self):\r\n \"\"\"\r\n Microsoft Visual Studio Team System Database.\r\n\r\n Return\r\n ------\r\n list of str\r\n paths\r\n \"\"\"\r\n return [join(self.si.VSInstallDir, r'VSTSDB\\Deploy')]\r\n\r\n @property\r\n def MSBuild(self):\r\n \"\"\"\r\n Microsoft Build Engine.\r\n\r\n Return\r\n ------\r\n list of str\r\n paths\r\n \"\"\"\r\n if self.vs_ver < 12.0:\r\n return []\r\n elif self.vs_ver < 15.0:\r\n base_path = self.si.ProgramFilesx86\r\n arch_subdir = self.pi.current_dir(hidex86=True)\r\n else:\r\n base_path = self.si.VSInstallDir\r\n arch_subdir = ''\r\n\r\n path = r'MSBuild\\%0.1f\\bin%s' % (self.vs_ver, arch_subdir)\r\n build = [join(base_path, path)]\r\n\r\n if self.vs_ver >= 15.0:\r\n # Add Roslyn C# & Visual Basic Compiler\r\n build += [join(base_path, path, 'Roslyn')]\r\n\r\n return build\r\n\r\n @property\r\n def HTMLHelpWorkshop(self):\r\n \"\"\"\r\n Microsoft HTML Help Workshop.\r\n\r\n Return\r\n ------\r\n list of str\r\n paths\r\n \"\"\"\r\n if self.vs_ver < 11.0:\r\n return []\r\n\r\n return [join(self.si.ProgramFilesx86, 'HTML Help Workshop')]\r\n\r\n @property\r\n def UCRTLibraries(self):\r\n \"\"\"\r\n Microsoft Universal C Runtime SDK Libraries.\r\n\r\n Return\r\n 
------\r\n list of str\r\n paths\r\n \"\"\"\r\n if self.vs_ver < 14.0:\r\n return []\r\n\r\n arch_subdir = self.pi.target_dir(x64=True)\r\n lib = join(self.si.UniversalCRTSdkDir, 'lib')\r\n ucrtver = self._ucrt_subdir\r\n return [join(lib, '%sucrt%s' % (ucrtver, arch_subdir))]\r\n\r\n @property\r\n def UCRTIncludes(self):\r\n \"\"\"\r\n Microsoft Universal C Runtime SDK Include.\r\n\r\n Return\r\n ------\r\n list of str\r\n paths\r\n \"\"\"\r\n if self.vs_ver < 14.0:\r\n return []\r\n\r\n include = join(self.si.UniversalCRTSdkDir, 'include')\r\n return [join(include, '%sucrt' % self._ucrt_subdir)]\r\n\r\n @property\r\n def _ucrt_subdir(self):\r\n \"\"\"\r\n Microsoft Universal C Runtime SDK version subdir.\r\n\r\n Return\r\n ------\r\n str\r\n subdir\r\n \"\"\"\r\n ucrtver = self.si.UniversalCRTSdkLastVersion\r\n return ('%s\\\\' % ucrtver) if ucrtver else ''\r\n\r\n @property\r\n def FSharp(self):\r\n \"\"\"\r\n Microsoft Visual F#.\r\n\r\n Return\r\n ------\r\n list of str\r\n paths\r\n \"\"\"\r\n if 11.0 > self.vs_ver > 12.0:\r\n return []\r\n\r\n return [self.si.FSharpInstallDir]\r\n\r\n @property\r\n def VCRuntimeRedist(self):\r\n \"\"\"\r\n Microsoft Visual C++ runtime redistributable dll.\r\n\r\n Return\r\n ------\r\n str\r\n path\r\n \"\"\"\r\n vcruntime = 'vcruntime%d0.dll' % self.vc_ver\r\n arch_subdir = self.pi.target_dir(x64=True).strip('\\\\')\r\n\r\n # Installation prefixes candidates\r\n prefixes = []\r\n tools_path = self.si.VCInstallDir\r\n redist_path = dirname(tools_path.replace(r'\\Tools', r'\\Redist'))\r\n if isdir(redist_path):\r\n # Redist version may not be exactly the same as tools\r\n redist_path = join(redist_path, listdir(redist_path)[-1])\r\n prefixes += [redist_path, join(redist_path, 'onecore')]\r\n\r\n prefixes += [join(tools_path, 'redist')] # VS14 legacy path\r\n\r\n # CRT directory\r\n crt_dirs = ('Microsoft.VC%d.CRT' % (self.vc_ver * 10),\r\n # Sometime store in directory with VS version instead of VC\r\n 'Microsoft.VC%d.CRT' % (int(self.vs_ver) * 10))\r\n\r\n # vcruntime path\r\n for prefix, crt_dir in itertools.product(prefixes, crt_dirs):\r\n path = join(prefix, arch_subdir, crt_dir, vcruntime)\r\n if isfile(path):\r\n return path\r\n\r\n def return_env(self, exists=True):\r\n \"\"\"\r\n Return environment dict.\r\n\r\n Parameters\r\n ----------\r\n exists: bool\r\n It True, only return existing paths.\r\n\r\n Return\r\n ------\r\n dict\r\n environment\r\n \"\"\"\r\n env = dict(\r\n include=self._build_paths('include',\r\n [self.VCIncludes,\r\n self.OSIncludes,\r\n self.UCRTIncludes,\r\n self.NetFxSDKIncludes],\r\n exists),\r\n lib=self._build_paths('lib',\r\n [self.VCLibraries,\r\n self.OSLibraries,\r\n self.FxTools,\r\n self.UCRTLibraries,\r\n self.NetFxSDKLibraries],\r\n exists),\r\n libpath=self._build_paths('libpath',\r\n [self.VCLibraries,\r\n self.FxTools,\r\n self.VCStoreRefs,\r\n self.OSLibpath],\r\n exists),\r\n path=self._build_paths('path',\r\n [self.VCTools,\r\n self.VSTools,\r\n self.VsTDb,\r\n self.SdkTools,\r\n self.SdkSetup,\r\n self.FxTools,\r\n self.MSBuild,\r\n self.HTMLHelpWorkshop,\r\n self.FSharp],\r\n exists),\r\n )\r\n if self.vs_ver >= 14 and isfile(self.VCRuntimeRedist):\r\n env['py_vcruntime_redist'] = self.VCRuntimeRedist\r\n return env\r\n\r\n def _build_paths(self, name, spec_path_lists, exists):\r\n \"\"\"\r\n Given an environment variable name and specified paths,\r\n return a pathsep-separated string of paths containing\r\n unique, extant, directories from those paths and from\r\n the environment variable. 
Raise an error if no paths\r\n are resolved.\r\n\r\n Parameters\r\n ----------\r\n name: str\r\n Environment variable name\r\n spec_path_lists: list of str\r\n Paths\r\n exists: bool\r\n It True, only return existing paths.\r\n\r\n Return\r\n ------\r\n str\r\n Pathsep-separated paths\r\n \"\"\"\r\n # flatten spec_path_lists\r\n spec_paths = itertools.chain.from_iterable(spec_path_lists)\r\n env_paths = environ.get(name, '').split(pathsep)\r\n paths = itertools.chain(spec_paths, env_paths)\r\n extant_paths = list(filter(isdir, paths)) if exists else paths\r\n if not extant_paths:\r\n msg = \"%s environment variable is empty\" % name.upper()\r\n raise distutils.errors.DistutilsPlatformError(msg)\r\n unique_paths = unique_everseen(extant_paths)\r\n return pathsep.join(unique_paths)\r\n", "\"\"\"\r\n============================\r\n``ctypes`` Utility Functions\r\n============================\r\n\r\nSee Also\r\n---------\r\nload_library : Load a C library.\r\nndpointer : Array restype/argtype with verification.\r\nas_ctypes : Create a ctypes array from an ndarray.\r\nas_array : Create an ndarray from a ctypes array.\r\n\r\nReferences\r\n----------\r\n.. [1] \"SciPy Cookbook: ctypes\", https://scipy-cookbook.readthedocs.io/items/Ctypes.html\r\n\r\nExamples\r\n--------\r\nLoad the C library:\r\n\r\n>>> _lib = np.ctypeslib.load_library('libmystuff', '.') #doctest: +SKIP\r\n\r\nOur result type, an ndarray that must be of type double, be 1-dimensional\r\nand is C-contiguous in memory:\r\n\r\n>>> array_1d_double = np.ctypeslib.ndpointer(\r\n... dtype=np.double,\r\n... ndim=1, flags='CONTIGUOUS') #doctest: +SKIP\r\n\r\nOur C-function typically takes an array and updates its values\r\nin-place. For example::\r\n\r\n void foo_func(double* x, int length)\r\n {\r\n int i;\r\n for (i = 0; i < length; i++) {\r\n x[i] = i*i;\r\n }\r\n }\r\n\r\nWe wrap it using:\r\n\r\n>>> _lib.foo_func.restype = None #doctest: +SKIP\r\n>>> _lib.foo_func.argtypes = [array_1d_double, c_int] #doctest: +SKIP\r\n\r\nThen, we're ready to call ``foo_func``:\r\n\r\n>>> out = np.empty(15, dtype=np.double)\r\n>>> _lib.foo_func(out, len(out)) #doctest: +SKIP\r\n\r\n\"\"\"\r\n__all__ = ['load_library', 'ndpointer', 'c_intp', 'as_ctypes', 'as_array']\r\n\r\nimport os\r\nfrom numpy import (\r\n integer, ndarray, dtype as _dtype, array, frombuffer\r\n)\r\nfrom numpy.core.multiarray import _flagdict, flagsobj\r\n\r\ntry:\r\n import ctypes\r\nexcept ImportError:\r\n ctypes = None\r\n\r\nif ctypes is None:\r\n def _dummy(*args, **kwds):\r\n \"\"\"\r\n Dummy object that raises an ImportError if ctypes is not available.\r\n\r\n Raises\r\n ------\r\n ImportError\r\n If ctypes is not available.\r\n\r\n \"\"\"\r\n raise ImportError(\"ctypes is not available.\")\r\n load_library = _dummy\r\n as_ctypes = _dummy\r\n as_array = _dummy\r\n from numpy import intp as c_intp\r\n _ndptr_base = object\r\nelse:\r\n import numpy.core._internal as nic\r\n c_intp = nic._getintp_ctype()\r\n del nic\r\n _ndptr_base = ctypes.c_void_p\r\n\r\n # Adapted from Albert Strasheim\r\n def load_library(libname, loader_path):\r\n \"\"\"\r\n It is possible to load a library using\r\n >>> lib = ctypes.cdll[<full_path_name>] # doctest: +SKIP\r\n\r\n But there are cross-platform considerations, such as library file extensions,\r\n plus the fact Windows will just load the first library it finds with that name.\r\n NumPy supplies the load_library function as a convenience.\r\n\r\n Parameters\r\n ----------\r\n libname : str\r\n Name of the library, which can have 'lib' as a 
prefix,\r\n but without an extension.\r\n loader_path : str\r\n Where the library can be found.\r\n\r\n Returns\r\n -------\r\n ctypes.cdll[libpath] : library object\r\n A ctypes library object\r\n\r\n Raises\r\n ------\r\n OSError\r\n If there is no library with the expected extension, or the\r\n library is defective and cannot be loaded.\r\n \"\"\"\r\n if ctypes.__version__ < '1.0.1':\r\n import warnings\r\n warnings.warn(\"All features of ctypes interface may not work \"\r\n \"with ctypes < 1.0.1\", stacklevel=2)\r\n\r\n ext = os.path.splitext(libname)[1]\r\n if not ext:\r\n # Try to load library with platform-specific name, otherwise\r\n # default to libname.[so|pyd]. Sometimes, these files are built\r\n # erroneously on non-linux platforms.\r\n from numpy.distutils.misc_util import get_shared_lib_extension\r\n so_ext = get_shared_lib_extension()\r\n libname_ext = [libname + so_ext]\r\n # mac, windows and linux >= py3.2 shared library and loadable\r\n # module have different extensions so try both\r\n so_ext2 = get_shared_lib_extension(is_python_ext=True)\r\n if not so_ext2 == so_ext:\r\n libname_ext.insert(0, libname + so_ext2)\r\n else:\r\n libname_ext = [libname]\r\n\r\n loader_path = os.path.abspath(loader_path)\r\n if not os.path.isdir(loader_path):\r\n libdir = os.path.dirname(loader_path)\r\n else:\r\n libdir = loader_path\r\n\r\n for ln in libname_ext:\r\n libpath = os.path.join(libdir, ln)\r\n if os.path.exists(libpath):\r\n try:\r\n return ctypes.cdll[libpath]\r\n except OSError:\r\n ## defective lib file\r\n raise\r\n ## if no successful return in the libname_ext loop:\r\n raise OSError(\"no file with expected extension\")\r\n\r\n\r\ndef _num_fromflags(flaglist):\r\n num = 0\r\n for val in flaglist:\r\n num += _flagdict[val]\r\n return num\r\n\r\n_flagnames = ['C_CONTIGUOUS', 'F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE',\r\n 'OWNDATA', 'UPDATEIFCOPY', 'WRITEBACKIFCOPY']\r\ndef _flags_fromnum(num):\r\n res = []\r\n for key in _flagnames:\r\n value = _flagdict[key]\r\n if (num & value):\r\n res.append(key)\r\n return res\r\n\r\n\r\nclass _ndptr(_ndptr_base):\r\n @classmethod\r\n def from_param(cls, obj):\r\n if not isinstance(obj, ndarray):\r\n raise TypeError(\"argument must be an ndarray\")\r\n if cls._dtype_ is not None \\\r\n and obj.dtype != cls._dtype_:\r\n raise TypeError(\"array must have data type %s\" % cls._dtype_)\r\n if cls._ndim_ is not None \\\r\n and obj.ndim != cls._ndim_:\r\n raise TypeError(\"array must have %d dimension(s)\" % cls._ndim_)\r\n if cls._shape_ is not None \\\r\n and obj.shape != cls._shape_:\r\n raise TypeError(\"array must have shape %s\" % str(cls._shape_))\r\n if cls._flags_ is not None \\\r\n and ((obj.flags.num & cls._flags_) != cls._flags_):\r\n raise TypeError(\"array must have flags %s\" %\r\n _flags_fromnum(cls._flags_))\r\n return obj.ctypes\r\n\r\n\r\nclass _concrete_ndptr(_ndptr):\r\n \"\"\"\r\n Like _ndptr, but with `_shape_` and `_dtype_` specified.\r\n\r\n Notably, this means the pointer has enough information to reconstruct\r\n the array, which is not generally true.\r\n \"\"\"\r\n def _check_retval_(self):\r\n \"\"\"\r\n This method is called when this class is used as the .restype\r\n attribute for a shared-library function, to automatically wrap the\r\n pointer into an array.\r\n \"\"\"\r\n return self.contents\r\n\r\n @property\r\n def contents(self):\r\n \"\"\"\r\n Get an ndarray viewing the data pointed to by this pointer.\r\n\r\n This mirrors the `contents` attribute of a normal ctypes pointer\r\n \"\"\"\r\n full_dtype = 
_dtype((self._dtype_, self._shape_))\r\n full_ctype = ctypes.c_char * full_dtype.itemsize\r\n buffer = ctypes.cast(self, ctypes.POINTER(full_ctype)).contents\r\n return frombuffer(buffer, dtype=full_dtype).squeeze(axis=0)\r\n\r\n\r\n# Factory for an array-checking class with from_param defined for\r\n# use with ctypes argtypes mechanism\r\n_pointer_type_cache = {}\r\ndef ndpointer(dtype=None, ndim=None, shape=None, flags=None):\r\n \"\"\"\r\n Array-checking restype/argtypes.\r\n\r\n An ndpointer instance is used to describe an ndarray in restypes\r\n and argtypes specifications. This approach is more flexible than\r\n using, for example, ``POINTER(c_double)``, since several restrictions\r\n can be specified, which are verified upon calling the ctypes function.\r\n These include data type, number of dimensions, shape and flags. If a\r\n given array does not satisfy the specified restrictions,\r\n a ``TypeError`` is raised.\r\n\r\n Parameters\r\n ----------\r\n dtype : data-type, optional\r\n Array data-type.\r\n ndim : int, optional\r\n Number of array dimensions.\r\n shape : tuple of ints, optional\r\n Array shape.\r\n flags : str or tuple of str\r\n Array flags; may be one or more of:\r\n\r\n - C_CONTIGUOUS / C / CONTIGUOUS\r\n - F_CONTIGUOUS / F / FORTRAN\r\n - OWNDATA / O\r\n - WRITEABLE / W\r\n - ALIGNED / A\r\n - WRITEBACKIFCOPY / X\r\n - UPDATEIFCOPY / U\r\n\r\n Returns\r\n -------\r\n klass : ndpointer type object\r\n A type object, which is an ``_ndtpr`` instance containing\r\n dtype, ndim, shape and flags information.\r\n\r\n Raises\r\n ------\r\n TypeError\r\n If a given array does not satisfy the specified restrictions.\r\n\r\n Examples\r\n --------\r\n >>> clib.somefunc.argtypes = [np.ctypeslib.ndpointer(dtype=np.float64,\r\n ... ndim=1,\r\n ... flags='C_CONTIGUOUS')]\r\n ... #doctest: +SKIP\r\n >>> clib.somefunc(np.array([1, 2, 3], dtype=np.float64))\r\n ... 
#doctest: +SKIP\r\n\r\n \"\"\"\r\n\r\n # normalize dtype to an Optional[dtype]\r\n if dtype is not None:\r\n dtype = _dtype(dtype)\r\n\r\n # normalize flags to an Optional[int]\r\n num = None\r\n if flags is not None:\r\n if isinstance(flags, str):\r\n flags = flags.split(',')\r\n elif isinstance(flags, (int, integer)):\r\n num = flags\r\n flags = _flags_fromnum(num)\r\n elif isinstance(flags, flagsobj):\r\n num = flags.num\r\n flags = _flags_fromnum(num)\r\n if num is None:\r\n try:\r\n flags = [x.strip().upper() for x in flags]\r\n except Exception as e:\r\n raise TypeError(\"invalid flags specification\") from e\r\n num = _num_fromflags(flags)\r\n\r\n # normalize shape to an Optional[tuple]\r\n if shape is not None:\r\n try:\r\n shape = tuple(shape)\r\n except TypeError:\r\n # single integer -> 1-tuple\r\n shape = (shape,)\r\n\r\n cache_key = (dtype, ndim, shape, num)\r\n\r\n try:\r\n return _pointer_type_cache[cache_key]\r\n except KeyError:\r\n pass\r\n\r\n # produce a name for the new type\r\n if dtype is None:\r\n name = 'any'\r\n elif dtype.names is not None:\r\n name = str(id(dtype))\r\n else:\r\n name = dtype.str\r\n if ndim is not None:\r\n name += \"_%dd\" % ndim\r\n if shape is not None:\r\n name += \"_\"+\"x\".join(str(x) for x in shape)\r\n if flags is not None:\r\n name += \"_\"+\"_\".join(flags)\r\n\r\n if dtype is not None and shape is not None:\r\n base = _concrete_ndptr\r\n else:\r\n base = _ndptr\r\n\r\n klass = type(\"ndpointer_%s\"%name, (base,),\r\n {\"_dtype_\": dtype,\r\n \"_shape_\" : shape,\r\n \"_ndim_\" : ndim,\r\n \"_flags_\" : num})\r\n _pointer_type_cache[cache_key] = klass\r\n return klass\r\n\r\n\r\nif ctypes is not None:\r\n def _ctype_ndarray(element_type, shape):\r\n \"\"\" Create an ndarray of the given element type and shape \"\"\"\r\n for dim in shape[::-1]:\r\n element_type = dim * element_type\r\n # prevent the type name include np.ctypeslib\r\n element_type.__module__ = None\r\n return element_type\r\n\r\n\r\n def _get_scalar_type_map():\r\n \"\"\"\r\n Return a dictionary mapping native endian scalar dtype to ctypes types\r\n \"\"\"\r\n ct = ctypes\r\n simple_types = [\r\n ct.c_byte, ct.c_short, ct.c_int, ct.c_long, ct.c_longlong,\r\n ct.c_ubyte, ct.c_ushort, ct.c_uint, ct.c_ulong, ct.c_ulonglong,\r\n ct.c_float, ct.c_double,\r\n ct.c_bool,\r\n ]\r\n return {_dtype(ctype): ctype for ctype in simple_types}\r\n\r\n\r\n _scalar_type_map = _get_scalar_type_map()\r\n\r\n\r\n def _ctype_from_dtype_scalar(dtype):\r\n # swapping twice ensure that `=` is promoted to <, >, or |\r\n dtype_with_endian = dtype.newbyteorder('S').newbyteorder('S')\r\n dtype_native = dtype.newbyteorder('=')\r\n try:\r\n ctype = _scalar_type_map[dtype_native]\r\n except KeyError as e:\r\n raise NotImplementedError(\r\n \"Converting {!r} to a ctypes type\".format(dtype)\r\n ) from None\r\n\r\n if dtype_with_endian.byteorder == '>':\r\n ctype = ctype.__ctype_be__\r\n elif dtype_with_endian.byteorder == '<':\r\n ctype = ctype.__ctype_le__\r\n\r\n return ctype\r\n\r\n\r\n def _ctype_from_dtype_subarray(dtype):\r\n element_dtype, shape = dtype.subdtype\r\n ctype = _ctype_from_dtype(element_dtype)\r\n return _ctype_ndarray(ctype, shape)\r\n\r\n\r\n def _ctype_from_dtype_structured(dtype):\r\n # extract offsets of each field\r\n field_data = []\r\n for name in dtype.names:\r\n field_dtype, offset = dtype.fields[name][:2]\r\n field_data.append((offset, name, _ctype_from_dtype(field_dtype)))\r\n\r\n # ctypes doesn't care about field order\r\n field_data = sorted(field_data, key=lambda f: 
f[0])\r\n\r\n if len(field_data) > 1 and all(offset == 0 for offset, name, ctype in field_data):\r\n # union, if multiple fields all at address 0\r\n size = 0\r\n _fields_ = []\r\n for offset, name, ctype in field_data:\r\n _fields_.append((name, ctype))\r\n size = max(size, ctypes.sizeof(ctype))\r\n\r\n # pad to the right size\r\n if dtype.itemsize != size:\r\n _fields_.append(('', ctypes.c_char * dtype.itemsize))\r\n\r\n # we inserted manual padding, so always `_pack_`\r\n return type('union', (ctypes.Union,), dict(\r\n _fields_=_fields_,\r\n _pack_=1,\r\n __module__=None,\r\n ))\r\n else:\r\n last_offset = 0\r\n _fields_ = []\r\n for offset, name, ctype in field_data:\r\n padding = offset - last_offset\r\n if padding < 0:\r\n raise NotImplementedError(\"Overlapping fields\")\r\n if padding > 0:\r\n _fields_.append(('', ctypes.c_char * padding))\r\n\r\n _fields_.append((name, ctype))\r\n last_offset = offset + ctypes.sizeof(ctype)\r\n\r\n\r\n padding = dtype.itemsize - last_offset\r\n if padding > 0:\r\n _fields_.append(('', ctypes.c_char * padding))\r\n\r\n # we inserted manual padding, so always `_pack_`\r\n return type('struct', (ctypes.Structure,), dict(\r\n _fields_=_fields_,\r\n _pack_=1,\r\n __module__=None,\r\n ))\r\n\r\n\r\n def _ctype_from_dtype(dtype):\r\n if dtype.fields is not None:\r\n return _ctype_from_dtype_structured(dtype)\r\n elif dtype.subdtype is not None:\r\n return _ctype_from_dtype_subarray(dtype)\r\n else:\r\n return _ctype_from_dtype_scalar(dtype)\r\n\r\n\r\n def as_ctypes_type(dtype):\r\n r\"\"\"\r\n Convert a dtype into a ctypes type.\r\n\r\n Parameters\r\n ----------\r\n dtype : dtype\r\n The dtype to convert\r\n\r\n Returns\r\n -------\r\n ctype\r\n A ctype scalar, union, array, or struct\r\n\r\n Raises\r\n ------\r\n NotImplementedError\r\n If the conversion is not possible\r\n\r\n Notes\r\n -----\r\n This function does not losslessly round-trip in either direction.\r\n\r\n ``np.dtype(as_ctypes_type(dt))`` will:\r\n\r\n - insert padding fields\r\n - reorder fields to be sorted by offset\r\n - discard field titles\r\n\r\n ``as_ctypes_type(np.dtype(ctype))`` will:\r\n\r\n - discard the class names of `ctypes.Structure`\\ s and\r\n `ctypes.Union`\\ s\r\n - convert single-element `ctypes.Union`\\ s into single-element\r\n `ctypes.Structure`\\ s\r\n - insert padding fields\r\n\r\n \"\"\"\r\n return _ctype_from_dtype(_dtype(dtype))\r\n\r\n\r\n def as_array(obj, shape=None):\r\n \"\"\"\r\n Create a numpy array from a ctypes array or POINTER.\r\n\r\n The numpy array shares the memory with the ctypes object.\r\n\r\n The shape parameter must be given if converting from a ctypes POINTER.\r\n The shape parameter is ignored if converting from a ctypes array\r\n \"\"\"\r\n if isinstance(obj, ctypes._Pointer):\r\n # convert pointers to an array of the desired shape\r\n if shape is None:\r\n raise TypeError(\r\n 'as_array() requires a shape argument when called on a '\r\n 'pointer')\r\n p_arr_type = ctypes.POINTER(_ctype_ndarray(obj._type_, shape))\r\n obj = ctypes.cast(obj, p_arr_type).contents\r\n\r\n return array(obj, copy=False)\r\n\r\n\r\n def as_ctypes(obj):\r\n \"\"\"Create and return a ctypes object from a numpy array. 
Actually\r\n anything that exposes the __array_interface__ is accepted.\"\"\"\r\n ai = obj.__array_interface__\r\n if ai[\"strides\"]:\r\n raise TypeError(\"strided arrays not supported\")\r\n if ai[\"version\"] != 3:\r\n raise TypeError(\"only __array_interface__ version 3 supported\")\r\n addr, readonly = ai[\"data\"]\r\n if readonly:\r\n raise TypeError(\"readonly arrays unsupported\")\r\n\r\n # can't use `_dtype((ai[\"typestr\"], ai[\"shape\"]))` here, as it overflows\r\n # dtype.itemsize (gh-14214)\r\n ctype_scalar = as_ctypes_type(ai[\"typestr\"])\r\n result_type = _ctype_ndarray(ctype_scalar, ai[\"shape\"])\r\n result = result_type.from_address(addr)\r\n result.__keep = obj\r\n return result\r\n", "\"\"\"Typing tests for `numpy.core._ufunc_config`.\"\"\"\r\n\r\nimport numpy as np\r\n\r\ndef func1(a: str, b: int) -> None: ...\r\ndef func2(a: str, b: int, c: float = ...) -> None: ...\r\ndef func3(a: str, b: int) -> int: ...\r\n\r\nclass Write1:\r\n def write(self, a: str) -> None: ...\r\n\r\nclass Write2:\r\n def write(self, a: str, b: int = ...) -> None: ...\r\n\r\nclass Write3:\r\n def write(self, a: str) -> int: ...\r\n\r\n\r\n_err_default = np.geterr()\r\n_bufsize_default = np.getbufsize()\r\n_errcall_default = np.geterrcall()\r\n\r\ntry:\r\n np.seterr(all=None)\r\n np.seterr(divide=\"ignore\")\r\n np.seterr(over=\"warn\")\r\n np.seterr(under=\"call\")\r\n np.seterr(invalid=\"raise\")\r\n np.geterr()\r\n\r\n np.setbufsize(4096)\r\n np.getbufsize()\r\n\r\n np.seterrcall(func1)\r\n np.seterrcall(func2)\r\n np.seterrcall(func3)\r\n np.seterrcall(Write1())\r\n np.seterrcall(Write2())\r\n np.seterrcall(Write3())\r\n np.geterrcall()\r\n\r\n with np.errstate(call=func1, all=\"call\"):\r\n pass\r\n with np.errstate(call=Write1(), divide=\"log\", over=\"log\"):\r\n pass\r\n\r\nfinally:\r\n np.seterr(**_err_default)\r\n np.setbufsize(_bufsize_default)\r\n np.seterrcall(_errcall_default)\r\n", "\"\"\"\r\nUtility classes and functions for the polynomial modules.\r\n\r\nThis module provides: error and warning objects; a polynomial base class;\r\nand some routines used in both the `polynomial` and `chebyshev` modules.\r\n\r\nError objects\r\n-------------\r\n\r\n.. autosummary::\r\n :toctree: generated/\r\n\r\n PolyError base class for this sub-package's errors.\r\n PolyDomainError raised when domains are mismatched.\r\n\r\nWarning objects\r\n---------------\r\n\r\n.. autosummary::\r\n :toctree: generated/\r\n\r\n RankWarning raised in least-squares fit for rank-deficient matrix.\r\n\r\nBase class\r\n----------\r\n\r\n.. autosummary::\r\n :toctree: generated/\r\n\r\n PolyBase Obsolete base class for the polynomial classes. Do not use.\r\n\r\nFunctions\r\n---------\r\n\r\n.. 
autosummary::\r\n :toctree: generated/\r\n\r\n as_series convert list of array_likes into 1-D arrays of common type.\r\n trimseq remove trailing zeros.\r\n trimcoef remove small trailing coefficients.\r\n getdomain return the domain appropriate for a given set of abscissae.\r\n mapdomain maps points between domains.\r\n mapparms parameters of the linear map between domains.\r\n\r\n\"\"\"\r\nimport operator\r\nimport functools\r\nimport warnings\r\n\r\nimport numpy as np\r\n\r\n__all__ = [\r\n 'RankWarning', 'PolyError', 'PolyDomainError', 'as_series', 'trimseq',\r\n 'trimcoef', 'getdomain', 'mapdomain', 'mapparms', 'PolyBase']\r\n\r\n#\r\n# Warnings and Exceptions\r\n#\r\n\r\nclass RankWarning(UserWarning):\r\n \"\"\"Issued by chebfit when the design matrix is rank deficient.\"\"\"\r\n pass\r\n\r\nclass PolyError(Exception):\r\n \"\"\"Base class for errors in this module.\"\"\"\r\n pass\r\n\r\nclass PolyDomainError(PolyError):\r\n \"\"\"Issued by the generic Poly class when two domains don't match.\r\n\r\n This is raised when an binary operation is passed Poly objects with\r\n different domains.\r\n\r\n \"\"\"\r\n pass\r\n\r\n#\r\n# Base class for all polynomial types\r\n#\r\n\r\nclass PolyBase:\r\n \"\"\"\r\n Base class for all polynomial types.\r\n\r\n Deprecated in numpy 1.9.0, use the abstract\r\n ABCPolyBase class instead. Note that the latter\r\n requires a number of virtual functions to be\r\n implemented.\r\n\r\n \"\"\"\r\n pass\r\n\r\n#\r\n# Helper functions to convert inputs to 1-D arrays\r\n#\r\ndef trimseq(seq):\r\n \"\"\"Remove small Poly series coefficients.\r\n\r\n Parameters\r\n ----------\r\n seq : sequence\r\n Sequence of Poly series coefficients. This routine fails for\r\n empty sequences.\r\n\r\n Returns\r\n -------\r\n series : sequence\r\n Subsequence with trailing zeros removed. If the resulting sequence\r\n would be empty, return the first element. The returned sequence may\r\n or may not be a view.\r\n\r\n Notes\r\n -----\r\n Do not lose the type info if the sequence contains unknown objects.\r\n\r\n \"\"\"\r\n if len(seq) == 0:\r\n return seq\r\n else:\r\n for i in range(len(seq) - 1, -1, -1):\r\n if seq[i] != 0:\r\n break\r\n return seq[:i+1]\r\n\r\n\r\ndef as_series(alist, trim=True):\r\n \"\"\"\r\n Return argument as a list of 1-d arrays.\r\n\r\n The returned list contains array(s) of dtype double, complex double, or\r\n object. A 1-d argument of shape ``(N,)`` is parsed into ``N`` arrays of\r\n size one; a 2-d argument of shape ``(M,N)`` is parsed into ``M`` arrays\r\n of size ``N`` (i.e., is \"parsed by row\"); and a higher dimensional array\r\n raises a Value Error if it is not first reshaped into either a 1-d or 2-d\r\n array.\r\n\r\n Parameters\r\n ----------\r\n alist : array_like\r\n A 1- or 2-d array_like\r\n trim : boolean, optional\r\n When True, trailing zeros are removed from the inputs.\r\n When False, the inputs are passed through intact.\r\n\r\n Returns\r\n -------\r\n [a1, a2,...] 
: list of 1-D arrays\r\n A copy of the input data as a list of 1-d arrays.\r\n\r\n Raises\r\n ------\r\n ValueError\r\n Raised when `as_series` cannot convert its input to 1-d arrays, or at\r\n least one of the resulting arrays is empty.\r\n\r\n Examples\r\n --------\r\n >>> from numpy.polynomial import polyutils as pu\r\n >>> a = np.arange(4)\r\n >>> pu.as_series(a)\r\n [array([0.]), array([1.]), array([2.]), array([3.])]\r\n >>> b = np.arange(6).reshape((2,3))\r\n >>> pu.as_series(b)\r\n [array([0., 1., 2.]), array([3., 4., 5.])]\r\n\r\n >>> pu.as_series((1, np.arange(3), np.arange(2, dtype=np.float16)))\r\n [array([1.]), array([0., 1., 2.]), array([0., 1.])]\r\n\r\n >>> pu.as_series([2, [1.1, 0.]])\r\n [array([2.]), array([1.1])]\r\n\r\n >>> pu.as_series([2, [1.1, 0.]], trim=False)\r\n [array([2.]), array([1.1, 0. ])]\r\n\r\n \"\"\"\r\n arrays = [np.array(a, ndmin=1, copy=False) for a in alist]\r\n if min([a.size for a in arrays]) == 0:\r\n raise ValueError(\"Coefficient array is empty\")\r\n if any(a.ndim != 1 for a in arrays):\r\n raise ValueError(\"Coefficient array is not 1-d\")\r\n if trim:\r\n arrays = [trimseq(a) for a in arrays]\r\n\r\n if any(a.dtype == np.dtype(object) for a in arrays):\r\n ret = []\r\n for a in arrays:\r\n if a.dtype != np.dtype(object):\r\n tmp = np.empty(len(a), dtype=np.dtype(object))\r\n tmp[:] = a[:]\r\n ret.append(tmp)\r\n else:\r\n ret.append(a.copy())\r\n else:\r\n try:\r\n dtype = np.common_type(*arrays)\r\n except Exception as e:\r\n raise ValueError(\"Coefficient arrays have no common type\") from e\r\n ret = [np.array(a, copy=True, dtype=dtype) for a in arrays]\r\n return ret\r\n\r\n\r\ndef trimcoef(c, tol=0):\r\n \"\"\"\r\n Remove \"small\" \"trailing\" coefficients from a polynomial.\r\n\r\n \"Small\" means \"small in absolute value\" and is controlled by the\r\n parameter `tol`; \"trailing\" means highest order coefficient(s), e.g., in\r\n ``[0, 1, 1, 0, 0]`` (which represents ``0 + x + x**2 + 0*x**3 + 0*x**4``)\r\n both the 3-rd and 4-th order coefficients would be \"trimmed.\"\r\n\r\n Parameters\r\n ----------\r\n c : array_like\r\n 1-d array of coefficients, ordered from lowest order to highest.\r\n tol : number, optional\r\n Trailing (i.e., highest order) elements with absolute value less\r\n than or equal to `tol` (default value is zero) are removed.\r\n\r\n Returns\r\n -------\r\n trimmed : ndarray\r\n 1-d array with trailing zeros removed. 
If the resulting series\r\n would be empty, a series containing a single zero is returned.\r\n\r\n Raises\r\n ------\r\n ValueError\r\n If `tol` < 0\r\n\r\n See Also\r\n --------\r\n trimseq\r\n\r\n Examples\r\n --------\r\n >>> from numpy.polynomial import polyutils as pu\r\n >>> pu.trimcoef((0,0,3,0,5,0,0))\r\n array([0., 0., 3., 0., 5.])\r\n >>> pu.trimcoef((0,0,1e-3,0,1e-5,0,0),1e-3) # item == tol is trimmed\r\n array([0.])\r\n >>> i = complex(0,1) # works for complex\r\n >>> pu.trimcoef((3e-4,1e-3*(1-i),5e-4,2e-5*(1+i)), 1e-3)\r\n array([0.0003+0.j , 0.001 -0.001j])\r\n\r\n \"\"\"\r\n if tol < 0:\r\n raise ValueError(\"tol must be non-negative\")\r\n\r\n [c] = as_series([c])\r\n [ind] = np.nonzero(np.abs(c) > tol)\r\n if len(ind) == 0:\r\n return c[:1]*0\r\n else:\r\n return c[:ind[-1] + 1].copy()\r\n\r\ndef getdomain(x):\r\n \"\"\"\r\n Return a domain suitable for given abscissae.\r\n\r\n Find a domain suitable for a polynomial or Chebyshev series\r\n defined at the values supplied.\r\n\r\n Parameters\r\n ----------\r\n x : array_like\r\n 1-d array of abscissae whose domain will be determined.\r\n\r\n Returns\r\n -------\r\n domain : ndarray\r\n 1-d array containing two values. If the inputs are complex, then\r\n the two returned points are the lower left and upper right corners\r\n of the smallest rectangle (aligned with the axes) in the complex\r\n plane containing the points `x`. If the inputs are real, then the\r\n two points are the ends of the smallest interval containing the\r\n points `x`.\r\n\r\n See Also\r\n --------\r\n mapparms, mapdomain\r\n\r\n Examples\r\n --------\r\n >>> from numpy.polynomial import polyutils as pu\r\n >>> points = np.arange(4)**2 - 5; points\r\n array([-5, -4, -1, 4])\r\n >>> pu.getdomain(points)\r\n array([-5., 4.])\r\n >>> c = np.exp(complex(0,1)*np.pi*np.arange(12)/6) # unit circle\r\n >>> pu.getdomain(c)\r\n array([-1.-1.j, 1.+1.j])\r\n\r\n \"\"\"\r\n [x] = as_series([x], trim=False)\r\n if x.dtype.char in np.typecodes['Complex']:\r\n rmin, rmax = x.real.min(), x.real.max()\r\n imin, imax = x.imag.min(), x.imag.max()\r\n return np.array((complex(rmin, imin), complex(rmax, imax)))\r\n else:\r\n return np.array((x.min(), x.max()))\r\n\r\ndef mapparms(old, new):\r\n \"\"\"\r\n Linear map parameters between domains.\r\n\r\n Return the parameters of the linear map ``offset + scale*x`` that maps\r\n `old` to `new` such that ``old[i] -> new[i]``, ``i = 0, 1``.\r\n\r\n Parameters\r\n ----------\r\n old, new : array_like\r\n Domains. 
Each domain must (successfully) convert to a 1-d array\r\n containing precisely two values.\r\n\r\n Returns\r\n -------\r\n offset, scale : scalars\r\n The map ``L(x) = offset + scale*x`` maps the first domain to the\r\n second.\r\n\r\n See Also\r\n --------\r\n getdomain, mapdomain\r\n\r\n Notes\r\n -----\r\n Also works for complex numbers, and thus can be used to calculate the\r\n parameters required to map any line in the complex plane to any other\r\n line therein.\r\n\r\n Examples\r\n --------\r\n >>> from numpy.polynomial import polyutils as pu\r\n >>> pu.mapparms((-1,1),(-1,1))\r\n (0.0, 1.0)\r\n >>> pu.mapparms((1,-1),(-1,1))\r\n (-0.0, -1.0)\r\n >>> i = complex(0,1)\r\n >>> pu.mapparms((-i,-1),(1,i))\r\n ((1+1j), (1-0j))\r\n\r\n \"\"\"\r\n oldlen = old[1] - old[0]\r\n newlen = new[1] - new[0]\r\n off = (old[1]*new[0] - old[0]*new[1])/oldlen\r\n scl = newlen/oldlen\r\n return off, scl\r\n\r\ndef mapdomain(x, old, new):\r\n \"\"\"\r\n Apply linear map to input points.\r\n\r\n The linear map ``offset + scale*x`` that maps the domain `old` to\r\n the domain `new` is applied to the points `x`.\r\n\r\n Parameters\r\n ----------\r\n x : array_like\r\n Points to be mapped. If `x` is a subtype of ndarray the subtype\r\n will be preserved.\r\n old, new : array_like\r\n The two domains that determine the map. Each must (successfully)\r\n convert to 1-d arrays containing precisely two values.\r\n\r\n Returns\r\n -------\r\n x_out : ndarray\r\n Array of points of the same shape as `x`, after application of the\r\n linear map between the two domains.\r\n\r\n See Also\r\n --------\r\n getdomain, mapparms\r\n\r\n Notes\r\n -----\r\n Effectively, this implements:\r\n\r\n .. math ::\r\n x\\\\_out = new[0] + m(x - old[0])\r\n\r\n where\r\n\r\n .. math ::\r\n m = \\\\frac{new[1]-new[0]}{old[1]-old[0]}\r\n\r\n Examples\r\n --------\r\n >>> from numpy.polynomial import polyutils as pu\r\n >>> old_domain = (-1,1)\r\n >>> new_domain = (0,2*np.pi)\r\n >>> x = np.linspace(-1,1,6); x\r\n array([-1. , -0.6, -0.2, 0.2, 0.6, 1. ])\r\n >>> x_out = pu.mapdomain(x, old_domain, new_domain); x_out\r\n array([ 0. , 1.25663706, 2.51327412, 3.76991118, 5.02654825, # may vary\r\n 6.28318531])\r\n >>> x - pu.mapdomain(x_out, new_domain, old_domain)\r\n array([0., 0., 0., 0., 0., 0.])\r\n\r\n Also works for complex numbers (and thus can be used to map any line in\r\n the complex plane to any other line therein).\r\n\r\n >>> i = complex(0,1)\r\n >>> old = (-1 - i, 1 + i)\r\n >>> new = (-1 + i, 1 - i)\r\n >>> z = np.linspace(old[0], old[1], 6); z\r\n array([-1. -1.j , -0.6-0.6j, -0.2-0.2j, 0.2+0.2j, 0.6+0.6j, 1. +1.j ])\r\n >>> new_z = pu.mapdomain(z, old, new); new_z\r\n array([-1.0+1.j , -0.6+0.6j, -0.2+0.2j, 0.2-0.2j, 0.6-0.6j, 1.0-1.j ]) # may vary\r\n\r\n \"\"\"\r\n x = np.asanyarray(x)\r\n off, scl = mapparms(old, new)\r\n return off + scl*x\r\n\r\n\r\ndef _nth_slice(i, ndim):\r\n sl = [np.newaxis] * ndim\r\n sl[i] = slice(None)\r\n return tuple(sl)\r\n\r\n\r\ndef _vander_nd(vander_fs, points, degrees):\r\n r\"\"\"\r\n A generalization of the Vandermonde matrix for N dimensions\r\n\r\n The result is built by combining the results of 1d Vandermonde matrices,\r\n\r\n .. math::\r\n W[i_0, \\ldots, i_M, j_0, \\ldots, j_N] = \\prod_{k=0}^N{V_k(x_k)[i_0, \\ldots, i_M, j_k]}\r\n\r\n where\r\n\r\n .. 
math::\r\n N &= \\texttt{len(points)} = \\texttt{len(degrees)} = \\texttt{len(vander\\_fs)} \\\\\r\n M &= \\texttt{points[k].ndim} \\\\\r\n V_k &= \\texttt{vander\\_fs[k]} \\\\\r\n x_k &= \\texttt{points[k]} \\\\\r\n 0 \\le j_k &\\le \\texttt{degrees[k]}\r\n\r\n Expanding the one-dimensional :math:`V_k` functions gives:\r\n\r\n .. math::\r\n W[i_0, \\ldots, i_M, j_0, \\ldots, j_N] = \\prod_{k=0}^N{B_{k, j_k}(x_k[i_0, \\ldots, i_M])}\r\n\r\n where :math:`B_{k,m}` is the m'th basis of the polynomial construction used along\r\n dimension :math:`k`. For a regular polynomial, :math:`B_{k, m}(x) = P_m(x) = x^m`.\r\n\r\n Parameters\r\n ----------\r\n vander_fs : Sequence[function(array_like, int) -> ndarray]\r\n The 1d vander function to use for each axis, such as ``polyvander``\r\n points : Sequence[array_like]\r\n Arrays of point coordinates, all of the same shape. The dtypes\r\n will be converted to either float64 or complex128 depending on\r\n whether any of the elements are complex. Scalars are converted to\r\n 1-D arrays.\r\n This must be the same length as `vander_fs`.\r\n degrees : Sequence[int]\r\n The maximum degree (inclusive) to use for each axis.\r\n This must be the same length as `vander_fs`.\r\n\r\n Returns\r\n -------\r\n vander_nd : ndarray\r\n An array of shape ``points[0].shape + tuple(d + 1 for d in degrees)``.\r\n \"\"\"\r\n n_dims = len(vander_fs)\r\n if n_dims != len(points):\r\n raise ValueError(\r\n f\"Expected {n_dims} dimensions of sample points, got {len(points)}\")\r\n if n_dims != len(degrees):\r\n raise ValueError(\r\n f\"Expected {n_dims} dimensions of degrees, got {len(degrees)}\")\r\n if n_dims == 0:\r\n raise ValueError(\"Unable to guess a dtype or shape when no points are given\")\r\n\r\n # convert to the same shape and type\r\n points = tuple(np.array(tuple(points), copy=False) + 0.0)\r\n\r\n # produce the vandermonde matrix for each dimension, placing the last\r\n # axis of each in an independent trailing axis of the output\r\n vander_arrays = (\r\n vander_fs[i](points[i], degrees[i])[(...,) + _nth_slice(i, n_dims)]\r\n for i in range(n_dims)\r\n )\r\n\r\n # we checked this wasn't empty already, so no `initial` needed\r\n return functools.reduce(operator.mul, vander_arrays)\r\n\r\n\r\ndef _vander_nd_flat(vander_fs, points, degrees):\r\n \"\"\"\r\n Like `_vander_nd`, but flattens the last ``len(degrees)`` axes into a single axis\r\n\r\n Used to implement the public ``<type>vander<n>d`` functions.\r\n \"\"\"\r\n v = _vander_nd(vander_fs, points, degrees)\r\n return v.reshape(v.shape[:-len(degrees)] + (-1,))\r\n\r\n\r\ndef _fromroots(line_f, mul_f, roots):\r\n \"\"\"\r\n Helper function used to implement the ``<type>fromroots`` functions.\r\n\r\n Parameters\r\n ----------\r\n line_f : function(float, float) -> ndarray\r\n The ``<type>line`` function, such as ``polyline``\r\n mul_f : function(array_like, array_like) -> ndarray\r\n The ``<type>mul`` function, such as ``polymul``\r\n roots :\r\n See the ``<type>fromroots`` functions for more detail\r\n \"\"\"\r\n if len(roots) == 0:\r\n return np.ones(1)\r\n else:\r\n [roots] = as_series([roots], trim=False)\r\n roots.sort()\r\n p = [line_f(-r, 1) for r in roots]\r\n n = len(p)\r\n while n > 1:\r\n m, r = divmod(n, 2)\r\n tmp = [mul_f(p[i], p[i+m]) for i in range(m)]\r\n if r:\r\n tmp[0] = mul_f(tmp[0], p[-1])\r\n p = tmp\r\n n = m\r\n return p[0]\r\n\r\n\r\ndef _valnd(val_f, c, *args):\r\n \"\"\"\r\n Helper function used to implement the ``<type>val<n>d`` functions.\r\n\r\n Parameters\r\n ----------\r\n val_f : 
function(array_like, array_like, tensor: bool) -> array_like\r\n The ``<type>val`` function, such as ``polyval``\r\n c, args :\r\n See the ``<type>val<n>d`` functions for more detail\r\n \"\"\"\r\n args = [np.asanyarray(a) for a in args]\r\n shape0 = args[0].shape\r\n if not all((a.shape == shape0 for a in args[1:])):\r\n if len(args) == 3:\r\n raise ValueError('x, y, z are incompatible')\r\n elif len(args) == 2:\r\n raise ValueError('x, y are incompatible')\r\n else:\r\n raise ValueError('ordinates are incompatible')\r\n it = iter(args)\r\n x0 = next(it)\r\n\r\n # use tensor on only the first\r\n c = val_f(x0, c)\r\n for xi in it:\r\n c = val_f(xi, c, tensor=False)\r\n return c\r\n\r\n\r\ndef _gridnd(val_f, c, *args):\r\n \"\"\"\r\n Helper function used to implement the ``<type>grid<n>d`` functions.\r\n\r\n Parameters\r\n ----------\r\n val_f : function(array_like, array_like, tensor: bool) -> array_like\r\n The ``<type>val`` function, such as ``polyval``\r\n c, args :\r\n See the ``<type>grid<n>d`` functions for more detail\r\n \"\"\"\r\n for xi in args:\r\n c = val_f(xi, c)\r\n return c\r\n\r\n\r\ndef _div(mul_f, c1, c2):\r\n \"\"\"\r\n Helper function used to implement the ``<type>div`` functions.\r\n\r\n Implementation uses repeated subtraction of c2 multiplied by the nth basis.\r\n For some polynomial types, a more efficient approach may be possible.\r\n\r\n Parameters\r\n ----------\r\n mul_f : function(array_like, array_like) -> array_like\r\n The ``<type>mul`` function, such as ``polymul``\r\n c1, c2 :\r\n See the ``<type>div`` functions for more detail\r\n \"\"\"\r\n # c1, c2 are trimmed copies\r\n [c1, c2] = as_series([c1, c2])\r\n if c2[-1] == 0:\r\n raise ZeroDivisionError()\r\n\r\n lc1 = len(c1)\r\n lc2 = len(c2)\r\n if lc1 < lc2:\r\n return c1[:1]*0, c1\r\n elif lc2 == 1:\r\n return c1/c2[-1], c1[:1]*0\r\n else:\r\n quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)\r\n rem = c1\r\n for i in range(lc1 - lc2, - 1, -1):\r\n p = mul_f([0]*i + [1], c2)\r\n q = rem[-1]/p[-1]\r\n rem = rem[:-1] - q*p[:-1]\r\n quo[i] = q\r\n return quo, trimseq(rem)\r\n\r\n\r\ndef _add(c1, c2):\r\n \"\"\" Helper function used to implement the ``<type>add`` functions. \"\"\"\r\n # c1, c2 are trimmed copies\r\n [c1, c2] = as_series([c1, c2])\r\n if len(c1) > len(c2):\r\n c1[:c2.size] += c2\r\n ret = c1\r\n else:\r\n c2[:c1.size] += c1\r\n ret = c2\r\n return trimseq(ret)\r\n\r\n\r\ndef _sub(c1, c2):\r\n \"\"\" Helper function used to implement the ``<type>sub`` functions. 
\"\"\"\r\n # c1, c2 are trimmed copies\r\n [c1, c2] = as_series([c1, c2])\r\n if len(c1) > len(c2):\r\n c1[:c2.size] -= c2\r\n ret = c1\r\n else:\r\n c2 = -c2\r\n c2[:c1.size] += c1\r\n ret = c2\r\n return trimseq(ret)\r\n\r\n\r\ndef _fit(vander_f, x, y, deg, rcond=None, full=False, w=None):\r\n \"\"\"\r\n Helper function used to implement the ``<type>fit`` functions.\r\n\r\n Parameters\r\n ----------\r\n vander_f : function(array_like, int) -> ndarray\r\n The 1d vander function, such as ``polyvander``\r\n c1, c2 :\r\n See the ``<type>fit`` functions for more detail\r\n \"\"\"\r\n x = np.asarray(x) + 0.0\r\n y = np.asarray(y) + 0.0\r\n deg = np.asarray(deg)\r\n\r\n # check arguments.\r\n if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:\r\n raise TypeError(\"deg must be an int or non-empty 1-D array of int\")\r\n if deg.min() < 0:\r\n raise ValueError(\"expected deg >= 0\")\r\n if x.ndim != 1:\r\n raise TypeError(\"expected 1D vector for x\")\r\n if x.size == 0:\r\n raise TypeError(\"expected non-empty vector for x\")\r\n if y.ndim < 1 or y.ndim > 2:\r\n raise TypeError(\"expected 1D or 2D array for y\")\r\n if len(x) != len(y):\r\n raise TypeError(\"expected x and y to have same length\")\r\n\r\n if deg.ndim == 0:\r\n lmax = deg\r\n order = lmax + 1\r\n van = vander_f(x, lmax)\r\n else:\r\n deg = np.sort(deg)\r\n lmax = deg[-1]\r\n order = len(deg)\r\n van = vander_f(x, lmax)[:, deg]\r\n\r\n # set up the least squares matrices in transposed form\r\n lhs = van.T\r\n rhs = y.T\r\n if w is not None:\r\n w = np.asarray(w) + 0.0\r\n if w.ndim != 1:\r\n raise TypeError(\"expected 1D vector for w\")\r\n if len(x) != len(w):\r\n raise TypeError(\"expected x and w to have same length\")\r\n # apply weights. Don't use inplace operations as they\r\n # can cause problems with NA.\r\n lhs = lhs * w\r\n rhs = rhs * w\r\n\r\n # set rcond\r\n if rcond is None:\r\n rcond = len(x)*np.finfo(x.dtype).eps\r\n\r\n # Determine the norms of the design matrix columns.\r\n if issubclass(lhs.dtype.type, np.complexfloating):\r\n scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))\r\n else:\r\n scl = np.sqrt(np.square(lhs).sum(1))\r\n scl[scl == 0] = 1\r\n\r\n # Solve the least squares problem.\r\n c, resids, rank, s = np.linalg.lstsq(lhs.T/scl, rhs.T, rcond)\r\n c = (c.T/scl).T\r\n\r\n # Expand c to include non-fitted coefficients which are set to zero\r\n if deg.ndim > 0:\r\n if c.ndim == 2:\r\n cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype)\r\n else:\r\n cc = np.zeros(lmax+1, dtype=c.dtype)\r\n cc[deg] = c\r\n c = cc\r\n\r\n # warn on rank reduction\r\n if rank != order and not full:\r\n msg = \"The fit may be poorly conditioned\"\r\n warnings.warn(msg, RankWarning, stacklevel=2)\r\n\r\n if full:\r\n return c, [resids, rank, s, rcond]\r\n else:\r\n return c\r\n\r\n\r\ndef _pow(mul_f, c, pow, maxpower):\r\n \"\"\"\r\n Helper function used to implement the ``<type>pow`` functions.\r\n\r\n Parameters\r\n ----------\r\n vander_f : function(array_like, int) -> ndarray\r\n The 1d vander function, such as ``polyvander``\r\n pow, maxpower :\r\n See the ``<type>pow`` functions for more detail\r\n mul_f : function(array_like, array_like) -> ndarray\r\n The ``<type>mul`` function, such as ``polymul``\r\n \"\"\"\r\n # c is a trimmed copy\r\n [c] = as_series([c])\r\n power = int(pow)\r\n if power != pow or power < 0:\r\n raise ValueError(\"Power must be a non-negative integer.\")\r\n elif maxpower is not None and power > maxpower:\r\n raise ValueError(\"Power is too large\")\r\n elif power == 
0:\r\n return np.array([1], dtype=c.dtype)\r\n elif power == 1:\r\n return c\r\n else:\r\n # This can be made more efficient by using powers of two\r\n # in the usual way.\r\n prd = c\r\n for i in range(2, power + 1):\r\n prd = mul_f(prd, c)\r\n return prd\r\n\r\n\r\ndef _deprecate_as_int(x, desc):\r\n \"\"\"\r\n Like `operator.index`, but emits a deprecation warning when passed a float\r\n\r\n Parameters\r\n ----------\r\n x : int-like, or float with integral value\r\n Value to interpret as an integer\r\n desc : str\r\n description to include in any error message\r\n\r\n Raises\r\n ------\r\n TypeError : if x is a non-integral float or non-numeric\r\n DeprecationWarning : if x is an integral float\r\n \"\"\"\r\n try:\r\n return operator.index(x)\r\n except TypeError as e:\r\n # Numpy 1.17.0, 2019-03-11\r\n try:\r\n ix = int(x)\r\n except TypeError:\r\n pass\r\n else:\r\n if ix == x:\r\n warnings.warn(\r\n f\"In future, this will raise TypeError, as {desc} will \"\r\n \"need to be an integer not just an integral float.\",\r\n DeprecationWarning,\r\n stacklevel=3\r\n )\r\n return ix\r\n\r\n raise TypeError(f\"{desc} must be an integer\") from e\r\n" ]
[ [ "matplotlib.pyplot.isinteractive", "matplotlib.pyplot.axhspan", "matplotlib.pyplot.subplots", "matplotlib.pyplot.ticklabel_format", "matplotlib.pyplot.ion" ], [ "numpy.distutils.ccompiler.gen_lib_options" ], [ "numpy.distutils.misc_util.get_shared_lib_extension", "numpy.core._internal._getintp_ctype", "numpy.dtype", "numpy.frombuffer", "numpy.array" ], [ "numpy.seterrcall", "numpy.setbufsize", "numpy.geterr", "numpy.geterrcall", "numpy.getbufsize", "numpy.seterr", "numpy.errstate" ], [ "numpy.square", "numpy.abs", "numpy.asarray", "numpy.sort", "numpy.ones", "numpy.dtype", "numpy.linalg.lstsq", "numpy.common_type", "numpy.asanyarray", "numpy.finfo", "numpy.array", "numpy.zeros", "numpy.empty" ] ]
jungtaekkim/bayeso-benchmarks
[ "3650aaeeaa123da14f0f839da664b071ee17bf9a" ]
[ "tests/test_inf_dim_ackley.py" ]
[ "#\n# author: Jungtaek Kim ([email protected])\n# last updated: February 8, 2021\n#\n\nimport numpy as np\nimport pytest\n\nfrom bayeso_benchmarks.inf_dim_ackley import *\n\nclass_fun = Ackley\n\nTEST_EPSILON = 1e-5\n\n\ndef test_init():\n obj_fun = class_fun(2)\n\n with pytest.raises(TypeError) as error:\n class_fun()\n with pytest.raises(AssertionError) as error:\n class_fun('abc')\n with pytest.raises(AssertionError) as error:\n class_fun(2.1)\n with pytest.raises(AssertionError) as error:\n class_fun(2, seed='abc')\n with pytest.raises(AssertionError) as error:\n class_fun(2, seed=2.1)\n\ndef test_validate_properties():\n obj_fun = class_fun(5)\n obj_fun.validate_properties()\n\ndef test_output():\n obj_fun = class_fun(3)\n bounds = obj_fun.get_bounds()\n\n grids = obj_fun.sample_grids(3)\n truths_grids = np.array([\n [2.15703112e+01],\n [2.11187470e+01],\n [2.15703112e+01],\n [2.11187470e+01],\n [2.02411230e+01],\n [2.11187470e+01],\n [2.15703112e+01],\n [2.11187470e+01],\n [2.15703112e+01],\n [2.11187470e+01],\n [2.02411230e+01],\n [2.11187470e+01],\n [2.02411230e+01],\n [4.44089210e-16],\n [2.02411230e+01],\n [2.11187470e+01],\n [2.02411230e+01],\n [2.11187470e+01],\n [2.15703112e+01],\n [2.11187470e+01],\n [2.15703112e+01],\n [2.11187470e+01],\n [2.02411230e+01],\n [2.11187470e+01],\n [2.15703112e+01],\n [2.11187470e+01],\n [2.15703112e+01],\n ])\n \n print(grids)\n print(obj_fun.output(grids))\n print(np.abs(obj_fun.output(grids) - truths_grids) < TEST_EPSILON)\n assert np.all(np.abs(obj_fun.output(grids) - truths_grids) < TEST_EPSILON)\n" ]
[ [ "numpy.array" ] ]
tzole1155/moai
[ "d1afb3aaf8ddcd7a1c98b84d6365afb846ae3180", "d1afb3aaf8ddcd7a1c98b84d6365afb846ae3180", "d1afb3aaf8ddcd7a1c98b84d6365afb846ae3180", "d1afb3aaf8ddcd7a1c98b84d6365afb846ae3180" ]
[ "moai/validation/single.py", "moai/parameters/optimization/optimizers/sgdw.py", "moai/supervision/losses/distribution/variance.py", "moai/supervision/losses/image/ssim.py" ]
[ "import moai.utils.engine as mieng\n\nimport torch\nimport omegaconf.omegaconf\nimport typing\nimport logging\nimport inspect\nimport itertools\n\nlog = logging.getLogger(__name__)\n\n__all__ = ['Metric']\n\nclass Metric(mieng.Single):\n def __init__(self,\n metrics: omegaconf.DictConfig,\n **kwargs: typing.Mapping[str, typing.Any],\n ):\n super(Metric, self).__init__(\n items=metrics, \n name=\"metric\",\n )\n loop = ((key, params) for key, params in kwargs.items() if hasattr(indicators, key))\n for k, p in loop:\n last_module = self.metric\n sig = inspect.signature(last_module.forward)\n for keys in zip(*list(p[prop] for prop in itertools.chain(sig.parameters, ['out']))):\n self.execs.append(lambda tensor_dict, metric_dict, k=keys, p=sig.parameters.keys(), f=last_module:\n metric_dict.update({\n k[-1]: f(**dict(zip(p, \n list(tensor_dict[i] for i in k[:-1])\n )))\n })\n )\n\n def forward(self,\n tensors: typing.Dict[str, torch.Tensor]\n ) -> typing.Dict[str, torch.Tensor]:\n metrics = { } \n for exe in self.execs:\n exe(tensors, metrics)\n returned = { }\n for k, m in metrics.items():\n returned[k] = torch.mean(m) if len(m.size()) > 0 else m \n return returned", "import torch\n\n__all__ = ['SGDW']\n\n#NOTE: modified from https://github.com/wbaek/torchskeleton/tree/master/skeleton/optim\n\nclass SGDW(torch.optim.SGD):\n \"\"\"Implements SGD with weight decay\n\n - **Paper**: [Decoupled Weight Decay Regularization](https://arxiv.org/abs/1711.05101)\n - **Implementation**: [GitHub @ wbaek](https://github.com/wbaek/torchskeleton/tree/master/skeleton/optim)\n \n \"\"\"\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n weight_decay = group['weight_decay']\n momentum = group['momentum']\n dampening = group['dampening']\n nesterov = group['nesterov']\n\n for p in group['params']:\n if p.grad is None:\n continue\n d_p = p.grad.data\n if momentum != 0:\n param_state = self.state[p]\n if 'momentum_buffer' not in param_state:\n buf = param_state['momentum_buffer'] = torch.zeros_like(p.data)\n buf.mul_(momentum).add_(d_p)\n else:\n buf = param_state['momentum_buffer']\n buf.mul_(momentum).add_(1 - dampening, d_p)\n if nesterov:\n d_p = d_p.add(momentum, buf)\n else:\n d_p = buf\n\n # decoupled weight decay\n if weight_decay != 0:\n d_p.add_(weight_decay, p.data)\n\n p.data.add_(-group['lr'], d_p)\n\n return loss\n", "from moai.monads.utils import expand_spatial_dims\nfrom moai.monads.distribution import CenterOfMass\n\nimport torch\n\n__all__ = [\"VarianceRegularization\"]\n\n__KEYPOINTS_TO_COORDS__ = {#TODO: create grid conversion OPs and link to there\n 'ndc': lambda coord, img: torch.addcmul(\n torch.scalar_tensor(0.5).to(coord), coord, torch.scalar_tensor(0.5).to(coord)\n ) * torch.Tensor([*img.shape[2:]]).to(coord).expand_as(coord),\n 'coord': lambda coord, img: coord,\n 'norm': lambda coord, img: coord * torch.Tensor([*img.shape[2:]]).to(coord).expand_as(coord),\n}\n\n__GRID_TO_COORDS__ = {#TODO: create grid conversion OPs and link to there\n 'ndc': lambda grid: torch.addcmul(\n torch.scalar_tensor(0.5).to(grid), grid, torch.scalar_tensor(0.5).to(grid)\n ) * expand_spatial_dims(torch.Tensor([*grid.shape[2:]]).to(grid), grid),\n 'coord': lambda grid: grid,\n 'norm': lambda grid: grid * expand_spatial_dims(torch.Tensor([*grid.shape[2:]]).to(grid), 
grid),\n}\n\nclass VarianceRegularization(CenterOfMass):\n def __init__(self,\n sigma: float=1.5, # in pixels\n grid_type: str='ndc', # 'ndc', 'coord', 'norm' \n ):\n super(VarianceRegularization, self).__init__(mode='coords', flip=False)\n self.target_variance = sigma ** 2\n self.grid_type = grid_type\n\n def forward(self, \n heatmaps: torch.Tensor, # [B, K, (D), (H), W]\n grid: torch.Tensor, # [B, (1-3), (D), (H), W]\n keypoints: torch.Tensor, # [B, K, (1-3)]\n ) -> torch.Tensor: \n k = __KEYPOINTS_TO_COORDS__[self.grid_type](keypoints) \n grid = __GRID_TO_COORDS__[self.grid_type](grid)\n diffs = (grid.unsqueeze(1) - k) ** 2 # [B, K, (1-3), (D), (H), W]\n pred_stds = super(VarianceRegularization, self).forward(diffs, heatmaps)\n pred_variances = pred_stds ** 2\n squared_error = (pred_variances - self.target_variance) ** 2\n return torch.sum(squared_error, dim=-1)\n\n\n", "import torch\nimport kornia\n\n__all__ = [\"StructuralDisimilarity\"]\n\n#NOTE: check if kornia fixes its implementation https://github.com/kornia/kornia/issues/473 \n\nclass StructuralDisimilarity(kornia.losses.SSIM):\n def __init__(self,\n window_size: int=7, \n dynamic_range: float=1.0\n ):\n super(StructuralDisimilarity, self).__init__(\n window_size=window_size,\n reduction='none',\n max_val=dynamic_range\n )\n\n def forward(self, \n gt: torch.Tensor,\n pred: torch.Tensor,\n weights: torch.Tensor=None,\n mask: torch.Tensor=None,\n ) -> torch.Tensor: \n if mask is not None:\n gt = torch.where(mask, gt, torch.zeros_like(gt))\n pred = torch.where(mask, pred, torch.zeros_like(gt)) \n ssim = super(StructuralDisimilarity, self).forward(gt, pred)\n if weights is not None:\n ssim = ssim * weights\n if mask is not None:\n ssim = ssim[mask]\n return torch.clamp(1.0 - (ssim + 1.0) * 0.5, min=0.0, max=1.0)" ]
[ [ "torch.mean" ], [ "torch.zeros_like" ], [ "torch.scalar_tensor", "torch.sum", "torch.Tensor" ], [ "torch.clamp", "torch.zeros_like" ] ]
shoshijak/NTPoly
[ "04ee94f743727775bbc97120325c57bf393932e9" ]
[ "UnitTests/test_matrix.py" ]
[ "\"\"\"\nA test suite for local matrices.\n\"\"\"\nimport unittest\nimport NTPolySwig as nt\n\nfrom scipy.io import mmwrite, mmread\n\n\nclass TestParameters:\n '''An internal class for holding test parameters.'''\n\n def __init__(self, rows, columns, sparsity):\n '''Default constructor\n @param[in] rows matrix rows.\n @param[in] columns matrix columns.\n @param[in] sparsity matrix sparsity.\n '''\n # Matrix rows.\n self.rows = rows\n # Matrix columns.\n self.columns = columns\n # Matrix sparsity.\n self.sparsity = sparsity\n\n def create_matrix(self, square=False, complex=False):\n '''\n Function to create a matrix for a given set of parameters.\n '''\n from scipy.sparse import random, csr_matrix\n\n r = self.rows\n c = self.columns\n s = self.sparsity\n if square:\n r = c\n if complex:\n mat = random(r, c, s, format=\"csr\")\n mat += 1j * random(r, c, s, format=\"csr\")\n else:\n mat = random(r, c, s, format=\"csr\")\n\n return csr_matrix(mat)\n\n\nclass TestLocalMatrix(unittest.TestCase):\n '''A test class for local matrices.'''\n from os import environ\n from os.path import join\n # Parameters for the matrices\n parameters = []\n # Location of the scratch directory.\n scratch_dir = environ['SCRATCHDIR']\n file1 = join(scratch_dir, \"matrix1.mtx\")\n file2 = join(scratch_dir, \"matrix2.mtx\")\n file3 = join(scratch_dir, \"matrix3.mtx\")\n SMatrix = nt.Matrix_lsr\n MatrixMemoryPool = nt.MatrixMemoryPool_r\n complex = False\n\n def _compare_mat(self, val1, val2):\n from helpers import THRESHOLD\n from scipy.sparse.linalg import norm\n\n normval = abs(norm(val1 - val2))\n self.assertLessEqual(normval, THRESHOLD)\n\n def _compare(self, val1, val2):\n from helpers import THRESHOLD\n from scipy.linalg import norm\n\n normval = abs(norm(val1 - val2))\n self.assertLessEqual(normval, THRESHOLD)\n\n def setUp(self):\n '''Set up a test.'''\n self.parameters = []\n self.parameters.append(TestParameters(2, 4, 0.0))\n self.parameters.append(TestParameters(8, 8, 0.0))\n self.parameters.append(TestParameters(2, 2, 1.0))\n self.parameters.append(TestParameters(4, 4, 1.0))\n self.parameters.append(TestParameters(19, 19, 1.0))\n self.parameters.append(TestParameters(4, 2, 1.0))\n self.parameters.append(TestParameters(2, 4, 1.0))\n self.parameters.append(TestParameters(4, 4, 0.2))\n self.parameters.append(TestParameters(8, 8, 1.0))\n\n def test_read(self):\n '''Test routines to read and write matrices.'''\n for param in self.parameters:\n matrix1 = param.create_matrix(complex=self.complex)\n mmwrite(self.file1, matrix1)\n matrix2 = self.SMatrix(self.file1)\n matrix2.WriteToMatrixMarket(self.file2)\n ResultMat = mmread(self.file2)\n self._compare_mat(matrix1, ResultMat)\n\n def test_readcircular(self):\n '''Test routines to read a matrix produced by ntpoly.'''\n for param in self.parameters:\n matrix1 = param.create_matrix(complex=self.complex)\n mmwrite(self.file1, matrix1)\n matrix2 = self.SMatrix(self.file1)\n matrix2.WriteToMatrixMarket(self.file2)\n matrix3 = self.SMatrix(self.file2)\n matrix3.WriteToMatrixMarket(self.file3)\n ResultMat = mmread(self.file3)\n\n self._compare_mat(matrix1, ResultMat)\n\n def test_readsymmetric(self):\n '''Test routines to read and write matrices.'''\n for param in self.parameters:\n matrix1 = param.create_matrix(complex=self.complex, square=True)\n matrix1 = matrix1 + matrix1.H\n mmwrite(self.file1, matrix1)\n matrix2 = self.SMatrix(self.file1)\n matrix2.WriteToMatrixMarket(self.file2)\n ResultMat = mmread(self.file2)\n\n self._compare_mat(matrix1, ResultMat)\n\n def 
test_addition(self):\n '''Test routines to add together matrices.'''\n from random import uniform\n for param in self.parameters:\n matrix1 = param.create_matrix(complex=self.complex)\n matrix2 = param.create_matrix(complex=self.complex)\n mmwrite(self.file1, matrix1)\n mmwrite(self.file2, matrix2)\n alpha = uniform(1.0, 2.0)\n CheckMat = alpha * matrix1 + matrix2\n matrix1 = self.SMatrix(self.file1)\n matrix2 = self.SMatrix(self.file2)\n matrix2.Increment(matrix1, alpha, 0.0)\n matrix2.WriteToMatrixMarket(self.file2)\n ResultMat = mmread(self.file2)\n\n self._compare_mat(CheckMat, ResultMat)\n\n def test_addzero(self):\n '''Test routines to add together a matrix and zero.'''\n for param in self.parameters:\n matrix1 = param.create_matrix(complex=self.complex)\n mmwrite(self.file1, matrix1)\n\n CheckMat = matrix1\n matrix1 = self.SMatrix(self.file1)\n matrix2 = self.SMatrix(matrix1.GetColumns(), matrix1.GetRows())\n matrix2.Increment(matrix1, 1.0, 0.0)\n matrix2.WriteToMatrixMarket(self.file2)\n ResultMat = mmread(self.file2)\n self._compare_mat(CheckMat, ResultMat)\n\n def test_addzeroreverse(self):\n '''Test routines to add together a matrix and zero.'''\n for param in self.parameters:\n matrix1 = param.create_matrix(complex=self.complex)\n mmwrite(self.file1, matrix1)\n\n CheckMat = matrix1\n matrix1 = self.SMatrix(self.file1)\n matrix2 = self.SMatrix(matrix1.GetColumns(), matrix1.GetRows())\n matrix1.Increment(matrix2, 1.0, 0.0)\n matrix1.WriteToMatrixMarket(self.file2)\n ResultMat = mmread(self.file2)\n self._compare_mat(CheckMat, ResultMat)\n\n def test_dot(self):\n '''Test routines to dot two matrices.'''\n from numpy import sum, multiply\n for param in self.parameters:\n matrix1 = param.create_matrix(complex=self.complex)\n matrix2 = param.create_matrix(complex=self.complex)\n mmwrite(self.file1, matrix1)\n mmwrite(self.file2, matrix2)\n check = sum(multiply(matrix1.todense(), matrix2.todense()))\n matrix1 = self.SMatrix(self.file1)\n matrix2 = self.SMatrix(self.file2)\n result = matrix2.Dot(matrix1)\n\n self._compare(result, check)\n\n def test_transpose(self):\n '''Test routines to transpose a matrix.'''\n for param in self.parameters:\n matrix1 = param.create_matrix(complex=self.complex)\n mmwrite(self.file1, matrix1)\n\n matrix2 = self.SMatrix(self.file1)\n matrix2T = self.SMatrix(matrix2.GetRows(), matrix2.GetColumns())\n matrix2T.Transpose(matrix2)\n matrix2T.WriteToMatrixMarket(self.file2)\n\n CheckMat = matrix1.T\n ResultMat = mmread(self.file2)\n\n self._compare_mat(CheckMat, ResultMat)\n\n def test_pairwise(self):\n '''Test routines to pairwise multiply two matrices.'''\n from scipy.sparse import csr_matrix\n from numpy import multiply\n for param in self.parameters:\n matrix1 = param.create_matrix(complex=self.complex)\n matrix2 = param.create_matrix(complex=self.complex)\n mmwrite(self.file1, matrix1)\n mmwrite(self.file2, matrix2)\n CheckMat = csr_matrix(\n multiply(matrix1.todense(), matrix2.todense()))\n\n ntmatrix1 = self.SMatrix(self.file1)\n ntmatrix2 = self.SMatrix(self.file2)\n ntmatrix3 = self.SMatrix(\n ntmatrix1.GetColumns(), ntmatrix1.GetRows())\n ntmatrix3.PairwiseMultiply(ntmatrix1, ntmatrix2)\n ntmatrix3.WriteToMatrixMarket(self.file3)\n\n ResultMat = mmread(self.file3)\n self._compare_mat(CheckMat, ResultMat)\n\n def test_multiply(self):\n '''Test routines to multiply two matrices.'''\n from random import uniform\n for param in self.parameters:\n matrix1 = param.create_matrix(complex=self.complex)\n matrix2 = param.create_matrix(complex=self.complex).H\n 
mmwrite(self.file1, matrix1)\n mmwrite(self.file2, matrix2)\n alpha = uniform(1.0, 2.0)\n beta = 0.0\n if abs(beta) > 0.0:\n CheckMat = alpha * matrix1.dot(matrix2) + beta * matrix1\n else:\n CheckMat = alpha * matrix1.dot(matrix2)\n\n ntmatrix1 = self.SMatrix(self.file1)\n ntmatrix2 = self.SMatrix(self.file2)\n ntmatrix3 = self.SMatrix(ntmatrix2.GetColumns(),\n ntmatrix1.GetRows())\n memory_pool = self.MatrixMemoryPool(ntmatrix2.GetColumns(),\n ntmatrix1.GetRows())\n ntmatrix3.Gemm(ntmatrix1, ntmatrix2, False, False, alpha, beta,\n 0.0, memory_pool)\n ntmatrix3.WriteToMatrixMarket(self.file3)\n\n ResultMat = mmread(self.file3)\n self._compare_mat(CheckMat, ResultMat)\n\n def test_multiply_zero(self):\n '''Test routines to multiply two matrices where one is zero.'''\n from random import uniform\n for param in self.parameters:\n matrix1 = param.create_matrix(complex=self.complex)\n matrix2 = 0 * param.create_matrix(complex=self.complex).H\n mmwrite(self.file1, matrix1)\n mmwrite(self.file2, matrix2)\n alpha = uniform(1.0, 2.0)\n beta = 0.0\n if abs(beta) > 0.0:\n CheckMat = alpha * matrix1.dot(matrix2) + beta * matrix1\n else:\n CheckMat = alpha * matrix1.dot(matrix2)\n\n ntmatrix1 = self.SMatrix(self.file1)\n ntmatrix2 = self.SMatrix(self.file2)\n ntmatrix3 = self.SMatrix(ntmatrix2.GetColumns(),\n ntmatrix1.GetRows())\n memory_pool = self.MatrixMemoryPool(ntmatrix2.GetColumns(),\n ntmatrix1.GetRows())\n ntmatrix3.Gemm(ntmatrix1, ntmatrix2, False, False, alpha, beta,\n 0.0, memory_pool)\n ntmatrix3.WriteToMatrixMarket(self.file3)\n\n ResultMat = mmread(self.file3)\n self._compare_mat(CheckMat, ResultMat)\n\n def test_get_row(self):\n '''Test function that extracts a row from the matrix'''\n from random import randint\n for param in self.parameters:\n if param.rows == 0:\n continue\n matrix1 = param.create_matrix(complex=self.complex)\n mmwrite(self.file1, matrix1)\n row_num = randint(0, param.rows - 1)\n CheckMat = matrix1[row_num, :]\n\n ntmatrix1 = self.SMatrix(self.file1)\n ntmatrix2 = self.SMatrix(ntmatrix1.GetColumns(), 1)\n ntmatrix1.ExtractRow(row_num, ntmatrix2)\n ntmatrix2.WriteToMatrixMarket(self.file2)\n\n ResultMat = mmread(self.file2)\n self._compare_mat(CheckMat, ResultMat)\n\n def test_get_column(self):\n '''Test function that extracts a column from the matrix'''\n from random import randint\n for param in self.parameters:\n if param.columns == 0:\n continue\n matrix1 = param.create_matrix(complex=self.complex)\n mmwrite(self.file1, matrix1)\n column_num = randint(0, param.columns - 1)\n CheckMat = matrix1[:, column_num]\n\n ntmatrix1 = self.SMatrix(self.file1)\n ntmatrix2 = self.SMatrix(1, ntmatrix1.GetRows())\n ntmatrix1.ExtractColumn(column_num, ntmatrix2)\n ntmatrix2.WriteToMatrixMarket(self.file2)\n\n ResultMat = mmread(self.file2)\n self._compare_mat(CheckMat, ResultMat)\n\n\nclass TestLocalMatrix_c(TestLocalMatrix):\n '''Specialization for complex matrices'''\n SMatrix = nt.Matrix_lsc\n MatrixMemoryPool = nt.MatrixMemoryPool_c\n complex = True\n\n def test_conjugatetranspose(self):\n '''Test routines to compute the conjugate transpose of a matrix.'''\n for param in self.parameters:\n matrix1 = param.create_matrix(complex=self.complex)\n mmwrite(self.file1, matrix1)\n\n matrix2 = self.SMatrix(self.file1)\n matrix2T = self.SMatrix(matrix2.GetRows(), matrix2.GetColumns())\n matrix2T.Transpose(matrix2)\n matrix2T.Conjugate()\n matrix2T.WriteToMatrixMarket(self.file2)\n\n CheckMat = matrix1.H\n ResultMat = mmread(self.file2)\n self._compare_mat(CheckMat, ResultMat)\n\n def 
test_dot(self):\n '''Test routines to dot two matrices.'''\n from numpy import sum, multiply, conj\n for param in self.parameters:\n matrix1 = param.create_matrix(complex=self.complex)\n matrix2 = param.create_matrix(complex=self.complex)\n mmwrite(self.file1, matrix1)\n mmwrite(self.file2, matrix2)\n check = sum(multiply(conj(matrix1.todense()), matrix2.todense()))\n matrix1 = self.SMatrix(self.file1)\n matrix2 = self.SMatrix(self.file2)\n result = matrix1.Dot(matrix2)\n\n self._compare(result, check)\n\n\n###############################################################################\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "scipy.io.mmread", "scipy.io.mmwrite", "scipy.sparse.csr_matrix", "scipy.sparse.random", "scipy.linalg.norm" ] ]
suryadheeshjith/Frustum-PointNet
[ "10e7b1c0ee8183c4791e67c44e7e2ba6c265486c" ]
[ "mayavi/test_drawline.py" ]
[ "import numpy\nfrom mayavi.mlab import *\n\ndef test_plot3d():\n \"\"\"Generates a pretty set of lines.\"\"\"\n n_mer, n_long = 6, 11\n pi = numpy.pi\n dphi = pi / 1000.0\n phi = numpy.arange(0.0, 2 * pi + 0.5 * dphi, dphi)\n mu = phi * n_mer\n x = numpy.cos(mu) * (1 + numpy.cos(n_long * mu / n_mer) * 0.5)\n y = numpy.sin(mu) * (1 + numpy.cos(n_long * mu / n_mer) * 0.5)\n z = numpy.sin(n_long * mu / n_mer) * 0.5\n\n l = plot3d(x, y, z, numpy.sin(mu), tube_radius=0.025, colormap='Spectral')\n return l\n\ntest_plot3d()\ninput()\n" ]
[ [ "numpy.arange", "numpy.cos", "numpy.sin" ] ]
m-mirz/proloaf
[ "4109665b2e6eb1dbdc37dae4a3c0afd2ca6af87f" ]
[ "source/fc_prep.py" ]
[ "# Copyright 2021 The ProLoaF Authors. All Rights Reserved.\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# ==============================================================================\n\"\"\"\nPreprocesses your input data for use with ProLoaF\n\nTransforms the data to a common format (pandas.DataFrame as csv) for all stations.\n\nNotes\n-----\n- This script can load xlsx or csv files.\n- If your data does not match the criteria, you can use a custom script that saves your\ndata as a pandas.DataFrame with datetimeindex to a csv file with a “;” as separator to\naccomplish the same thing.\n\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport sys\nimport json\nimport os\n\nMAIN_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\nsys.path.append(MAIN_PATH)\n\nfrom utils.config_util import read_config, parse_basic\n#Import customized functions below this point\n\nimport utils.datatuner as dt\n\ndef load_raw_data_xlsx(files):\n \"\"\"\n Load data from an xlsx file\n\n After loading, the date column in the raw data is converted to a UTC datetime\n\n Parameters\n ----------\n files : list\n A list of files to read. 
See the Notes section for more information\n\n Returns\n -------\n list\n A list containing a DataFrame for each file that was read\n\n Notes\n -----\n - Files is an array of maps containing the following data with the keyword (keyword)\n + ('file_name') the name of the xlsx file\n + ('date_column') the name of the date_column in the raw_data\n + ('time_zone') specifier for the timezone the raw data is recorded in\n + ('sheet_name') name or list of names of the sheets that are to be read\n + ('combine') boolean, all datasheets with true are combined into one, all others are read individually\n + ('start_column') Columns between this and ('end_column') are loaded\n + ('end_column')\n\n \"\"\"\n print('Importing XLSX Data...')\n\n combined_files = []\n individual_files = []\n\n for xlsx_file in files:\n print('importing ' + xlsx_file['file_name'])\n # if isinstance(file_name, str):\n # file_name = [file_name,'UTC']\n date_column = xlsx_file['date_column']\n raw_data = pd.read_excel(INPATH + xlsx_file['file_name'], xlsx_file['sheet_name'],\n parse_dates=[date_column])\n\n # convert load data to UTC\n if(xlsx_file['time_zone'] != 'UTC'):\n raw_data[date_column] = pd.to_datetime(raw_data[date_column]).dt.tz_localize(xlsx_file['time_zone'], ambiguous=\"infer\").dt.tz_convert('UTC').dt.strftime('%Y-%m-%d %H:%M:%S')\n else:\n if (xlsx_file['dayfirst']):\n raw_data[date_column] = pd.to_datetime(raw_data[date_column], format='%d-%m-%Y %H:%M:%S').dt.tz_localize(None)\n else:\n raw_data[date_column] = pd.to_datetime(raw_data[date_column], format='%Y-%m-%d %H:%M:%S').dt.tz_localize(None)\n\n if(xlsx_file['data_abs']):\n raw_data.loc[:, xlsx_file['start_column']:xlsx_file['end_column']] = raw_data.loc[:, xlsx_file['start_column']:xlsx_file['end_column']].abs()\n # rename column IDs, specifically Time, this will be used later as the df index\n raw_data.rename(columns={date_column: 'Time'}, inplace=True)\n raw_data.head() # now the data is positive and set to UTC\n raw_data.info()\n # interpolating for missing entries created by asfreq and original missing values if any\n raw_data.interpolate(method='time', inplace=True)\n\n if(xlsx_file['combine']):\n combined_files.append(raw_data)\n else:\n individual_files.append(raw_data)\n if(len(combined_files) > 0):\n individual_files.append(pd.concat(combined_files))\n return individual_files\n\ndef load_raw_data_csv(files):\n \"\"\"\n Load data from a csv file\n\n After loading, the date column in the raw data is converted to a UTC datetime\n\n Parameters\n ----------\n files : list\n A list of files to read. 
See the Notes section for more information\n\n Returns\n -------\n list\n A list containing a DataFrame for each file that was read\n\n Notes\n -----\n - Files is an array of maps containing the following data with the keyword (keyword)\n + ('file_name') the name of the load_file\n + ('date_column') the name of the date_column in the raw_data\n + ('dayfirst') specifier for the formatting of the read time\n + ('sep') separator used in this file\n + ('combine') boolean, all datasheets with true are combined into one, all others are read individually\n + ('use_columns') list of columns that are loaded\n\n \"\"\"\n\n print('Importing CSV Data...')\n\n\n combined_files = []\n individual_files = []\n\n for csv_file in files:\n print('Importing ' + csv_file['file_name'] + ' ...')\n date_column = csv_file['date_column']\n raw_data = pd.read_csv(INPATH + csv_file['file_name'], sep=csv_file['sep'], usecols=csv_file['use_columns'], parse_dates=[date_column] , dayfirst=csv_file['dayfirst'])\n # pd.read_csv(INPATH + name, sep=sep, usecols=cols, parse_dates=[date_column] , dayfirst=dayfirst)\n if (csv_file['time_zone'] != 'UTC'):\n raw_data[date_column] = pd.to_datetime(raw_data[date_column]).dt.tz_localize(csv_file['time_zone'], ambiguous=\"infer\").dt.tz_convert('UTC').dt.strftime('%Y-%m-%d %H:%M:%S')\n else:\n if (csv_file['dayfirst']):\n raw_data[date_column] = pd.to_datetime(raw_data[date_column], format='%d-%m-%Y %H:%M:%S').dt.tz_localize(None)\n else:\n raw_data[date_column] = pd.to_datetime(raw_data[date_column], format='%Y-%m-%d %H:%M:%S').dt.tz_localize(None)\n\n print('...Importing finished. ')\n raw_data.rename(columns={date_column: 'Time'}, inplace=True)\n\n if(csv_file['combine']):\n combined_files.append(raw_data)\n else:\n individual_files.append(raw_data)\n\n if(len(combined_files) > 0):\n individual_files.append(pd.concat(combined_files, sort = False))\n #for frame in individual_files:\n # frame.rename(columns={date_column: 'Time'}, inplace=True)\n return individual_files\n\ndef set_to_hours(df):\n \"\"\"\n Sets the index of the DataFrame to 'Time' and the frequency to hours.\n\n Parameters\n ----------\n df : pandas.DataFrame\n The DataFrame whose index and frequency are to be changed\n\n Returns\n -------\n df\n The modified DataFrame\n\n \"\"\"\n\n df['Time'] = pd.to_datetime(df['Time'])\n df = df.set_index('Time')\n df = df.asfreq(freq='H')\n return df\n\n\nif __name__ == '__main__':\n\n ARGS = parse_basic()\n config_file = os.path.join(MAIN_PATH, 'targets', ARGS.station, 'preprocessing.json')\n PAR = read_config(config_path=config_file)\n\n # DEFINES\n if PAR['local'] == True:\n INPATH = os.path.join(MAIN_PATH, PAR['raw_path'])\n else:\n INPATH = PAR['raw_path']\n if ('xlsx_files' in PAR):\n XLSX_FILES = PAR['xlsx_files']\n if ('csv_files' in PAR):\n CSV_FILES = PAR['csv_files']\n OUTFILE = os.path.join(MAIN_PATH, PAR['data_path'])\n\n # Prepare Load Data\n df_list = []\n if ('xlsx_files' in PAR):\n xlsx_data = load_raw_data_xlsx(XLSX_FILES)\n for data in xlsx_data:\n hourly_data = set_to_hours(df=data)\n dt.fill_if_missing(hourly_data)\n df_list.append(hourly_data)\n\n if ('csv_files' in PAR):\n csv_data = load_raw_data_csv(CSV_FILES)\n for data in csv_data:\n hourly_data = set_to_hours(df=data)\n dt.fill_if_missing(hourly_data)\n print(hourly_data)\n df_list.append(hourly_data)\n\n print(df_list)\n # When concatenating, the arrays are filled with NaNs if the index is not available.\n # Since the DataFrames were already interpolated there are non \"natural\" NaNs left so\n # 
dropping all rows with NaNs finds the maximum overlap in indices\n # # Merge load and weather data to one df\n df = pd.concat(df_list, axis = 1)\n\n df.dropna(inplace = True)\n\n if not df.index.equals(pd.date_range(min(df.index),max(df.index),freq = df.index.freq)):\n raise ValueError(\"DateTime index is not continuous\")\n if not df.isnull().values.any():\n print('No missing data \\n')\n df.head()\n\n ## http://blog.davidkaleko.com/feature-engineering-cyclical-features.html\n df['hour_sin'] = np.sin(df.index.hour * (2. * np.pi / 24))\n df['hour_cos'] = np.cos(df.index.hour * (2. * np.pi / 24))\n df['mnth_sin'] = np.sin((df.index.month - 1) * (2. * np.pi / 12))\n df['mnth_cos'] = np.cos((df.index.month - 1) * (2. * np.pi / 12))\n # fetch back the datetime again\n\n # add one-hot encoding for Hour & Month\n hours = pd.get_dummies(df.index.hour, prefix='hour').set_index(df.index) # one-hot encoding of hours\n month = pd.get_dummies(df.index.month, prefix='month').set_index(df.index) # one-hot encoding of month\n weekday = pd.get_dummies(df.index.dayofweek, prefix='weekday').set_index(df.index) # one-hot encoding of month\n df = pd.concat([df, hours, month, weekday], axis=1)\n\n # store new df as csv\n df.head()\n df.to_csv(OUTFILE, sep=';', index=True)\n" ]
[ [ "pandas.concat", "pandas.read_excel", "pandas.to_datetime", "pandas.read_csv", "numpy.cos", "numpy.sin", "pandas.get_dummies" ] ]
robot-perception-group/AutonomousBlimpDRL
[ "a10a88b2e9c9f9a83435cff2e4bc7e16e83cfeee", "a10a88b2e9c9f9a83435cff2e4bc7e16e83cfeee" ]
[ "RL/rl/rllib_script/test_agent/test_agent.py", "RL/rl/rllib_script/agent/model/tx2_model.py" ]
[ "import os\nimport pickle\n\nimport numpy as np\nimport ray\nimport sys\nimport rl.rllib_script.agent.model.ray_model\nfrom blimp_env.envs import ResidualPlanarNavigateEnv\nfrom ray.rllib.agents import ppo\nfrom ray.tune.logger import pretty_print\n\ncheckpoint_path = os.path.expanduser(\n \"~/catkin_ws/src/AutonomousBlimpDRL/RL/rl/trained_model/PPO_ResidualPlanarNavigateEnv_9d24f_00000_0_2022-02-21_17-09-14/checkpoint_001080/checkpoint-1080\"\n)\n\nauto_start_simulation = True # start simulation\nduration = int(0.5 * 3600 * 10 * 7) + 24193600\n\nnum_workers = 7\n\nreal_experiment = True # no reset\nevaluation_mode = False # fix robotid, don't support multiworker\nonline_training = False # if training during test\n\n\nif float(sys.argv[1]) == 0.0:\n run_pid = True\nelif float(sys.argv[1]) == 1.0:\n run_pid = False\n\nwindspeed = 0.5 * float(sys.argv[2])\nbuoyancy = 0.93 + 0.07 * float(sys.argv[3])\n\nif float(sys.argv[4]) == 0.0:\n traj = \"square\"\nelif float(sys.argv[4]) == 1.0:\n traj = \"coil\"\n\n\ntrigger_dist = 7\ninit_alt = 100\n\n###########################################\n\nENV = ResidualPlanarNavigateEnv\n\nrun_base_dir = os.path.dirname(os.path.dirname(checkpoint_path))\nconfig_path = os.path.join(run_base_dir, \"params.pkl\")\nwith open(config_path, \"rb\") as f:\n config = pickle.load(f)\n\nif run_pid:\n beta = 0.0\n disable_servo = True\nelse:\n beta = 0.5\n disable_servo = False\n\n\nenv_config = config[\"env_config\"]\nenv_config.update(\n {\n \"DBG\": False,\n \"evaluation_mode\": evaluation_mode,\n \"real_experiment\": real_experiment,\n \"seed\": 123,\n \"duration\": duration,\n \"beta\": beta,\n \"success_threshhold\": trigger_dist, # [meters]\n }\n)\nenv_config[\"simulation\"].update(\n {\n \"gui\": False,\n \"auto_start_simulation\": auto_start_simulation,\n \"enable_meshes\": True,\n \"enable_wind\": True,\n \"enable_wind_sampling\": True,\n \"wind_speed\": windspeed,\n \"wind_direction\": (1, 0),\n \"enable_buoyancy_sampling\": True,\n \"buoyancy_range\": [buoyancy, buoyancy],\n \"position\": (0, 0, init_alt),\n }\n)\n\nobs_config = {\n \"noise_stdv\": 0.05,\n}\nif \"observation\" in env_config:\n env_config[\"observation\"].update(obs_config)\nelse:\n env_config[\"observation\"] = obs_config\n\nact_config = {\n \"act_noise_stdv\": 0.5,\n \"disable_servo\": disable_servo,\n}\nif \"action\" in env_config:\n env_config[\"action\"].update(act_config)\nelse:\n env_config[\"action\"] = act_config\n\n\ndef generate_coil(points, radius, speed=5):\n li = []\n nwp_layer = 8\n for i in range(points):\n x = radius * np.sin(i * 2 * np.pi / nwp_layer)\n y = radius * np.cos(i * 2 * np.pi / nwp_layer)\n wp = (x, y, -init_alt - 2 * i, speed)\n li.append(wp)\n return li\n\n\ncoil = generate_coil(8 * 2 - 1, 30)\nsquare = [\n (40, 40, -init_alt, 3),\n (40, -40, -init_alt, 3),\n (-40, -40, -init_alt, 3),\n (-40, 40, -init_alt, 3),\n]\n\nif traj == \"coil\":\n wp_list = coil\nelif traj == \"square\":\n wp_list = square\ntarget_config = {\n \"type\": \"MultiGoal\",\n \"target_name_space\": \"goal_\",\n \"trigger_dist\": trigger_dist,\n \"wp_list\": wp_list,\n \"enable_random_goal\": False,\n}\nif \"target\" in env_config:\n env_config[\"target\"].update(target_config)\nelse:\n env_config[\"target\"] = target_config\n\n\nif online_training:\n config.update(\n {\n \"create_env_on_driver\": False,\n \"num_workers\": num_workers,\n \"num_gpus\": 1,\n \"explore\": False,\n \"env_config\": env_config,\n \"horizon\": 400,\n \"rollout_fragment_length\": 400,\n \"train_batch_size\": 5600,\n 
\"sgd_minibatch_size\": 512,\n \"lr\": 5e-4,\n \"lr_schedule\": None,\n \"num_sgd_iter\": 16,\n }\n )\nelse:\n config.update(\n {\n \"create_env_on_driver\": False,\n \"num_workers\": num_workers,\n \"num_gpus\": 1,\n \"explore\": False,\n \"env_config\": env_config,\n \"horizon\": 400,\n \"rollout_fragment_length\": 400,\n \"train_batch_size\": 5600,\n \"sgd_minibatch_size\": 512,\n \"lr\": 0,\n \"lr_schedule\": None,\n \"num_sgd_iter\": 0,\n }\n )\n\nprint(config)\nray.shutdown()\nray.init()\nagent = ppo.PPOTrainer(config=config, env=ENV)\nagent.restore(checkpoint_path)\nfor _ in range(int(duration)):\n result = agent.train()\n print(pretty_print(result))\n if result[\"timesteps_total\"] >= duration:\n break\nprint(\"done\")\n", "\"\"\"\nBecause RLlib does not support TX2, this code is basically ray model object with some custom model code.\nMost functionalities are not supported. \n\"\"\"\n\nfrom typing import Any, Dict, List, Union\n\nimport gym\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport tree # pip install dm_tree\nfrom gym.spaces import Discrete, MultiDiscrete\nfrom rl.rllib_script.agent.model.misc import (ModelV2, SlimFC, ViewRequirement,\n get_base_struct_from_space)\nfrom rl.rllib_script.agent.model.misc import \\\n normc_initializer as torch_normc_initializer\nfrom rl.rllib_script.agent.model.misc import one_hot\nfrom rl.rllib_script.agent.model.sample_batch import SampleBatch\n\nTensorType = Any\nModelConfigDict = dict\n\n\n\nclass TorchModelV2(ModelV2):\n \"\"\"Torch version of ModelV2.\n\n Note that this class by itself is not a valid model unless you\n inherit from nn.Module and implement forward() in a subclass.\"\"\"\n\n def __init__(self, obs_space: gym.spaces.Space,\n action_space: gym.spaces.Space, num_outputs: int,\n model_config: ModelConfigDict, name: str):\n \"\"\"Initialize a TorchModelV2.\n\n Here is an example implementation for a subclass\n ``MyModelClass(TorchModelV2, nn.Module)``::\n\n def __init__(self, *args, **kwargs):\n TorchModelV2.__init__(self, *args, **kwargs)\n nn.Module.__init__(self)\n self._hidden_layers = nn.Sequential(...)\n self._logits = ...\n self._value_branch = ...\n \"\"\"\n\n if not isinstance(self, nn.Module):\n raise ValueError(\n \"Subclasses of TorchModelV2 must also inherit from \"\n \"nn.Module, e.g., MyModel(TorchModelV2, nn.Module)\")\n\n ModelV2.__init__(\n self,\n obs_space,\n action_space,\n num_outputs,\n model_config,\n name,\n framework=\"torch\")\n\n def variables(self, as_dict: bool = False) -> \\\n Union[List[TensorType], Dict[str, TensorType]]:\n p = list(self.parameters())\n if as_dict:\n return {k: p[i] for i, k in enumerate(self.state_dict().keys())}\n return p\n\n def trainable_variables(self, as_dict: bool = False) -> \\\n Union[List[TensorType], Dict[str, TensorType]]:\n if as_dict:\n return {\n k: v\n for k, v in self.variables(as_dict=True).items()\n if v.requires_grad\n }\n return [v for v in self.variables() if v.requires_grad]\n\ndef add_time_dimension(padded_inputs: TensorType,\n *,\n max_seq_len: int,\n framework: str = \"tf\",\n time_major: bool = False):\n \"\"\"Adds a time dimension to padded inputs.\n\n Args:\n padded_inputs (TensorType): a padded batch of sequences. That is,\n for seq_lens=[1, 2, 2], then inputs=[A, *, B, B, C, C], where\n A, B, C are sequence elements and * denotes padding.\n max_seq_len (int): The max. 
sequence length in padded_inputs.\n framework (str): The framework string (\"tf2\", \"tf\", \"tfe\", \"torch\").\n time_major (bool): Whether data should be returned in time-major (TxB)\n format or not (BxT).\n\n Returns:\n TensorType: Reshaped tensor of shape [B, T, ...] or [T, B, ...].\n \"\"\"\n\n # Sequence lengths have to be specified for LSTM batch inputs. The\n # input batch must be padded to the max seq length given here. That is,\n # batch_size == len(seq_lens) * max(seq_lens)\n if framework in [\"tf2\", \"tf\", \"tfe\"]:\n assert time_major is False, \"time-major not supported yet for tf!\"\n padded_batch_size = tf.shape(padded_inputs)[0]\n # Dynamically reshape the padded batch to introduce a time dimension.\n new_batch_size = padded_batch_size // max_seq_len\n new_shape = (\n [new_batch_size, max_seq_len] + list(padded_inputs.shape[1:]))\n return tf.reshape(padded_inputs, new_shape)\n else:\n assert framework == \"torch\", \"`framework` must be either tf or torch!\"\n padded_batch_size = padded_inputs.shape[0]\n\n # Dynamically reshape the padded batch to introduce a time dimension.\n new_batch_size = padded_batch_size // max_seq_len\n if time_major:\n new_shape = (max_seq_len, new_batch_size) + padded_inputs.shape[1:]\n else:\n new_shape = (new_batch_size, max_seq_len) + padded_inputs.shape[1:]\n return torch.reshape(padded_inputs, new_shape)\n\nclass TorchRNN(TorchModelV2):\n \"\"\"Helper class to simplify implementing RNN models with TorchModelV2.\n\n Instead of implementing forward(), you can implement forward_rnn() which\n takes batches with the time dimension added already.\n\n Here is an example implementation for a subclass\n ``MyRNNClass(RecurrentNetwork, nn.Module)``::\n\n def __init__(self, obs_space, num_outputs):\n nn.Module.__init__(self)\n super().__init__(obs_space, action_space, num_outputs,\n model_config, name)\n self.obs_size = _get_size(obs_space)\n self.rnn_hidden_dim = model_config[\"lstm_cell_size\"]\n self.fc1 = nn.Linear(self.obs_size, self.rnn_hidden_dim)\n self.rnn = nn.GRUCell(self.rnn_hidden_dim, self.rnn_hidden_dim)\n self.fc2 = nn.Linear(self.rnn_hidden_dim, num_outputs)\n\n self.value_branch = nn.Linear(self.rnn_hidden_dim, 1)\n self._cur_value = None\n\n @override(ModelV2)\n def get_initial_state(self):\n # Place hidden states on same device as model.\n h = [self.fc1.weight.new(\n 1, self.rnn_hidden_dim).zero_().squeeze(0)]\n return h\n\n @override(ModelV2)\n def value_function(self):\n assert self._cur_value is not None, \"must call forward() first\"\n return self._cur_value\n\n @override(RecurrentNetwork)\n def forward_rnn(self, input_dict, state, seq_lens):\n x = nn.functional.relu(self.fc1(input_dict[\"obs_flat\"].float()))\n h_in = state[0].reshape(-1, self.rnn_hidden_dim)\n h = self.rnn(x, h_in)\n q = self.fc2(h)\n self._cur_value = self.value_branch(h).squeeze(1)\n return q, [h]\n \"\"\"\n\n def forward(self, input_dict: Dict[str, TensorType],\n state: List[TensorType],\n seq_lens: TensorType) -> (TensorType, List[TensorType]):\n \"\"\"Adds time dimension to batch before sending inputs to forward_rnn().\n\n You should implement forward_rnn() in your subclass.\"\"\"\n flat_inputs = input_dict[\"obs_flat\"].float()\n if isinstance(seq_lens, np.ndarray):\n seq_lens = torch.Tensor(seq_lens).int()\n max_seq_len = flat_inputs.shape[0] // seq_lens.shape[0]\n self.time_major = self.model_config.get(\"_time_major\", False)\n inputs = add_time_dimension(\n flat_inputs,\n max_seq_len=max_seq_len,\n framework=\"torch\",\n 
time_major=self.time_major,\n )\n output, new_state = self.forward_rnn(inputs, state, seq_lens)\n output = torch.reshape(output, [-1, self.num_outputs])\n return output, new_state\n\n def forward_rnn(self, inputs: TensorType, state: List[TensorType],\n seq_lens: TensorType) -> (TensorType, List[TensorType]):\n \"\"\"Call the model with the given input tensors and state.\n\n Args:\n inputs (dict): Observation tensor with shape [B, T, obs_size].\n state (list): List of state tensors, each with shape [B, size].\n seq_lens (Tensor): 1D tensor holding input sequence lengths.\n Note: len(seq_lens) == B.\n\n Returns:\n (outputs, new_state): The model output tensor of shape\n [B, T, num_outputs] and the list of new state tensors each with\n shape [B, size].\n\n Examples:\n def forward_rnn(self, inputs, state, seq_lens):\n model_out, h, c = self.rnn_model([inputs, seq_lens] + state)\n return model_out, [h, c]\n \"\"\"\n raise NotImplementedError(\"You must implement this for an RNN model\")\n\n\n\ndef _create_bn_layers(\n input_layer_size,\n out_size,\n sizes=[64, 64],\n output_init_weights=1e-2,\n activation_fn=nn.Tanh,\n):\n layers = []\n prev_layer_size = input_layer_size\n for size in sizes:\n layers.append(\n SlimFC(\n in_size=prev_layer_size,\n out_size=size,\n initializer=torch_normc_initializer(1.0),\n activation_fn=activation_fn,\n )\n )\n prev_layer_size = size\n layers.append(nn.LayerNorm(prev_layer_size))\n\n _hidden_layers = nn.Sequential(*layers)\n _hidden_out = None\n _branch = SlimFC(\n in_size=prev_layer_size,\n out_size=out_size,\n initializer=torch_normc_initializer(output_init_weights),\n activation_fn=None,\n )\n return _hidden_layers, _hidden_out, _branch\n\nclass TorchBatchNormModel(TorchModelV2, nn.Module):\n \"\"\"Example of a TorchModelV2 using batch normalization.\n\n modified from\n https://github.com/ray-project/ray/blob/90fd38c64ac282df63c2a7fbccf66a46217991a4/rllib/examples/models/batch_norm_model.py#L155\n \"\"\"\n\n capture_index = 0\n\n def __init__(\n self, obs_space, action_space, num_outputs, model_config, name, **kwargs\n ):\n TorchModelV2.__init__(\n self, obs_space, action_space, num_outputs, model_config, name\n )\n nn.Module.__init__(self)\n input_layer_size = int(np.product(obs_space.shape))\n self._logits = None\n\n actor_sizes = model_config[\"custom_model_config\"].get(\"actor_sizes\", [64, 64])\n critic_sizes = model_config[\"custom_model_config\"].get(\n \"critic_sizes\", [128, 128]\n )\n\n (self._hidden_layers, self._hidden_out, self._logits) = _create_bn_layers(\n input_layer_size=input_layer_size,\n out_size=self.num_outputs,\n output_init_weights=1e-12,\n sizes=actor_sizes,\n )\n (\n self._hidden_layers_v,\n self._hidden_out_v,\n self._value_branch,\n ) = _create_bn_layers(\n input_layer_size=input_layer_size,\n out_size=1,\n sizes=critic_sizes,\n )\n\n def forward(self, input_dict, state, seq_lens):\n # Set the correct train-mode for our hidden module (only important\n # b/c we have some batch-norm layers).\n self._hidden_layers.train(mode=bool(input_dict.get(\"is_training\", False)))\n self._hidden_layers_v.train(mode=bool(input_dict.get(\"is_training\", False)))\n\n self._hidden_out = self._hidden_layers(input_dict[\"obs\"])\n self._hidden_out_v = self._hidden_layers_v(input_dict[\"obs\"])\n\n logits = self._logits(self._hidden_out)\n return logits, []\n\n \n def value_function(self):\n assert self._hidden_out_v is not None, \"must call forward first!\"\n return torch.reshape(self._value_branch(self._hidden_out_v), [-1])\n\n\nclass 
TorchBatchNormRNNModel(TorchRNN, nn.Module):\n \"\"\"modified from\n https://github.com/ray-project/ray/blob/master/rllib/examples/models/rnn_model.py\n https://github.com/ray-project/ray/blob/master/rllib/models/torch/recurrent_net.py\n \"\"\"\n\n def __init__(\n self,\n obs_space,\n action_space,\n num_outputs,\n model_config,\n name,\n ):\n nn.Module.__init__(self)\n super().__init__(obs_space, action_space, num_outputs, model_config, name)\n\n input_layer_size = int(np.product(obs_space.shape))\n hidden_sizes = model_config[\"custom_model_config\"].get(\"hidden_sizes\", [64, 64])\n self.cell_size = model_config[\"custom_model_config\"].get(\"lstm_cell_size\", 64)\n\n self.time_major = model_config.get(\"_time_major\", False)\n self.use_prev_action = model_config[\"custom_model_config\"].get(\n \"lstm_use_prev_action\", True\n )\n\n self.use_prev_reward = model_config[\"custom_model_config\"].get(\n \"lstm_use_prev_reward\", True\n )\n\n # self.action_space_struct = get_base_struct_from_space(self.action_space)\n self.action_space_struct = get_base_struct_from_space(action_space)\n self.action_dim = 0\n\n for space in tree.flatten(self.action_space_struct):\n if isinstance(space, Discrete):\n self.action_dim += space.n\n elif isinstance(space, MultiDiscrete):\n self.action_dim += np.sum(space.nvec)\n elif space.shape is not None:\n self.action_dim += int(np.product(space.shape))\n else:\n self.action_dim += int(len(space))\n\n # Add prev-action/reward nodes to input to LSTM.\n lstm_input_size = hidden_sizes[-1]\n if self.use_prev_action:\n lstm_input_size += self.action_dim\n if self.use_prev_reward:\n lstm_input_size += 1\n\n self.hidden, _, _ = _create_bn_layers(\n input_layer_size=input_layer_size,\n out_size=hidden_sizes[-1],\n output_init_weights=1e-12,\n sizes=hidden_sizes,\n )\n self.lstm = nn.LSTM(\n lstm_input_size, self.cell_size, batch_first=not self.time_major\n )\n self._logits_branch = SlimFC(\n in_size=self.cell_size,\n out_size=self.num_outputs,\n activation_fn=None,\n initializer=torch_normc_initializer(1e-12),\n )\n self._value_branch = SlimFC(\n in_size=self.cell_size,\n out_size=1,\n activation_fn=None,\n initializer=torch.nn.init.xavier_uniform_,\n )\n\n # __sphinx_doc_begin__\n # Add prev-a/r to this model's view, if required.\n if model_config[\"custom_model_config\"][\"lstm_use_prev_action\"]:\n self.view_requirements[SampleBatch.PREV_ACTIONS] = ViewRequirement(\n SampleBatch.ACTIONS, space=self.action_space, shift=-1\n )\n if model_config[\"custom_model_config\"][\"lstm_use_prev_reward\"]:\n self.view_requirements[SampleBatch.PREV_REWARDS] = ViewRequirement(\n SampleBatch.REWARDS, shift=-1\n )\n # __sphinx_doc_end__\n\n self._features = None\n\n def forward(\n self,\n input_dict: Dict[str, TensorType],\n state: List[TensorType],\n seq_lens: TensorType,\n ):\n assert seq_lens is not None\n wrapped_out, _ = self.forward_hidden(input_dict, [], None)\n\n prev_a_r = []\n # Prev actions.\n if self.model_config[\"custom_model_config\"][\"lstm_use_prev_action\"]:\n try:\n prev_a = input_dict[SampleBatch.PREV_ACTIONS]\n except KeyError:\n print(\n \"[ Warning ] lstm detect keyerror, prev_actions not in the batch key\"\n )\n prev_a = torch.zeros(self.action_dim)\n\n if isinstance(self.action_space, (Discrete, MultiDiscrete)):\n prev_a = one_hot(prev_a.float(), self.action_space)\n else:\n prev_a = prev_a.float()\n prev_a_r.append(torch.reshape(prev_a, [-1, self.action_dim]))\n\n # Prev rewards.\n if self.model_config[\"custom_model_config\"][\"lstm_use_prev_reward\"]:\n 
try:\n prev_r = input_dict[SampleBatch.PREV_REWARDS].float()\n except KeyError:\n print(\n \"[ Warning ] lstm detect keyerror, prev_rewards not in the batch key\"\n )\n prev_r = torch.zeros(1)\n\n prev_a_r.append(torch.reshape(prev_r, [-1, 1]))\n\n # Concat prev. actions + rewards to the \"main\" input.\n if prev_a_r:\n wrapped_out = torch.cat([wrapped_out] + prev_a_r, dim=1)\n\n # Push everything through our LSTM.\n input_dict[\"obs_flat\"] = wrapped_out\n return super().forward(input_dict, state, seq_lens)\n\n \n def get_initial_state(self):\n # TODO: (sven): Get rid of `get_initial_state` once Trajectory\n # View API is supported across all of RLlib.\n # Place hidden states on same device as model.\n linear = next(self._logits_branch._model.children())\n h = [\n linear.weight.new(1, self.cell_size).zero_().squeeze(0),\n linear.weight.new(1, self.cell_size).zero_().squeeze(0),\n ]\n return h\n\n \n def value_function(self):\n assert self._features is not None, \"must call forward() first\"\n return torch.reshape(self._value_branch(self._features), [-1])\n\n def forward_hidden(self, input_dict, state, seq_lens):\n # Set the correct train-mode for our hidden module (only important\n # b/c we have some batch-norm layers).\n self.hidden.train(mode=bool(input_dict.get(\"is_training\", False)))\n\n hidden_out = self.hidden(input_dict[\"obs\"])\n\n return hidden_out, []\n\n def forward_rnn(self, inputs, state, seq_lens):\n \"\"\"Feeds `inputs` (B x T x ..) through the Gru Unit.\n Returns the resulting outputs as a sequence (B x T x ...).\n Values are stored in self._cur_value in simple (B) shape (where B\n contains both the B and T dims!).\n Returns:\n NN Outputs (B x T x ...) as sequence.\n The state batches as a List of two items (c- and h-states).\n \"\"\"\n self._features, [h, c] = self.lstm(\n inputs, [torch.unsqueeze(state[0], 0), torch.unsqueeze(state[1], 0)]\n )\n logits = self._logits_branch(self._features)\n return logits, [torch.squeeze(h, 0), torch.squeeze(c, 0)]\n\n" ]
[ [ "numpy.cos", "numpy.sin" ], [ "torch.nn.Sequential", "numpy.product", "torch.nn.Module.__init__", "torch.nn.LSTM", "torch.cat", "torch.Tensor", "torch.reshape", "torch.zeros", "torch.unsqueeze", "torch.nn.LayerNorm", "numpy.sum", "torch.squeeze" ] ]
MilanSusa/Skin-Cancer-Detection-Inference-API
[ "f4a62982ee6dfb3e2d56bdfc65fcc885aab69935" ]
[ "app.py" ]
[ "import os\nimport shutil\n\nfrom flask import Flask, request, jsonify\nfrom werkzeug.utils import secure_filename\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.metrics import top_k_categorical_accuracy\nfrom keras_preprocessing.image import ImageDataGenerator\nfrom keras.applications.mobilenet import preprocess_input\n\nFOLDER_REL_DIR = 'static' + os.path.sep + 'images' + os.path.sep\nFOLDER_ABS_DIR = os.path.join(os.getcwd(), FOLDER_REL_DIR)\n\nALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = FOLDER_ABS_DIR\napp.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024\n\n\ndef top_3_accuracy(y_true, y_pred):\n return top_k_categorical_accuracy(y_true, y_pred, k=3)\n\n\ndef top_2_accuracy(y_true, y_pred):\n return top_k_categorical_accuracy(y_true, y_pred, k=2)\n\n\nMODEL = load_model('./pretrained_models/mobilenet.h5',\n custom_objects={\n 'top_2_accuracy': top_2_accuracy,\n 'top_3_accuracy': top_3_accuracy\n })\n\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\ndef remove_image():\n for filename in os.listdir(FOLDER_ABS_DIR):\n file_path = os.path.join(FOLDER_ABS_DIR, filename)\n try:\n if os.path.isfile(file_path) or os.path.islink(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n print(f'Failed to delete {file_path}: {e}')\n\n\[email protected]('/api/v1/inference', methods=['POST'])\ndef perform_inference():\n file = request.files['file']\n\n if file and allowed_file(file.filename):\n remove_image()\n\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n\n test_img_gen = ImageDataGenerator(preprocessing_function=preprocess_input)\n test_data_gen = test_img_gen.flow_from_directory(directory='static',\n target_size=(224, 224),\n color_mode='rgb')\n\n preds = MODEL.predict_generator(generator=test_data_gen,\n steps=1)\n remove_image()\n data = {\n \"akiec\": str(preds[0][0]),\n \"bcc\": str(preds[0][1]),\n \"bkl\": str(preds[0][2]),\n \"df\": str(preds[0][3]),\n \"mel\": str(preds[0][4]),\n \"nv\": str(preds[0][5]),\n \"vasc\": str(preds[0][6])\n }\n\n return jsonify({\"data\": data})\n\n\nif __name__ == '__main__':\n app.run()\n" ]
[ [ "tensorflow.keras.models.load_model", "tensorflow.keras.metrics.top_k_categorical_accuracy" ] ]
alisure-fork/BASNet
[ "0cc349a3190d92a2fe991107f711abdcce3531ec", "0cc349a3190d92a2fe991107f711abdcce3531ec", "0cc349a3190d92a2fe991107f711abdcce3531ec" ]
[ "src/MyThink_MIC5_Decoder8.py", "src/MyTrain_MIC5_Decoder5_MSE1.py", "src/MyTrain_MIC5_Decoder8.py" ]
[ "import os\nimport glob\nimport torch\nimport numpy as np\nfrom PIL import Image\nfrom skimage import io\nfrom alisuretool.Tools import Tools\nfrom torch.utils.data import DataLoader\nfrom src.MyTrain_MIC5_Decoder8 import BASNet, DatasetUSOD\n\n\ndef one_decoder():\n # --------- 1. get path ---------\n has_mask = True\n more_obj = False\n # model_dir = './saved_models/my_train_mic5_decoder8_aug_mask_norm_5bce_d2/120_train_3.043.pth'\n # prediction_dir = Tools.new_dir('./test_data/my_train_mic5_decoder8_aug_mask_norm_5bce_d2_120_image_decoder')\n model_dir = './saved_models/my_train_mic5_decoder8_aug_mask_norm_5bce_d3/115_train_3.046.pth'\n prediction_dir = Tools.new_dir('./test_data/my_train_mic5_decoder8_aug_mask_norm_5bce_d3_115_image_decoder')\n\n # --------- 2. data loader ---------\n image_dir = '/mnt/4T/Data/SOD/DUTS/DUTS-TR/DUTS-TR-Image/'\n img_name_list = glob.glob(image_dir + '*.jpg')\n test_dataset = DatasetUSOD(img_name_list=img_name_list, is_train=False)\n test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=8)\n\n # --------- 3. model define ---------\n Tools.print(\"...load BASNet...\")\n net = BASNet(3, clustering_num_list=[128, 256, 512], pretrained=False, has_mask=has_mask, more_obj=more_obj)\n if torch.cuda.is_available():\n net.cuda()\n net.load_state_dict(torch.load(model_dir), strict=False)\n\n # --------- 4. inference for each image ---------\n net.eval()\n for i_test, (inputs_test, _) in enumerate(test_dataloader):\n Tools.print(\"inference: {} {}\".format(i_test, img_name_list[i_test]))\n inputs_test = inputs_test.type(torch.FloatTensor).cuda()\n\n return_m, return_d = net(inputs_test)\n\n top_k_value, top_k_index = torch.topk(return_m[\"m1\"][\"smc_logits\"], 1, 1)\n smc_result = top_k_index.cpu().detach().numpy()[0][0]\n\n img_name = img_name_list[i_test]\n result_path = os.path.join(prediction_dir, str(smc_result))\n result_path = Tools.new_dir(result_path)\n\n # 1\n result_name = os.path.join(result_path, os.path.split(img_name)[1])\n im_data = io.imread(img_name)\n io.imsave(result_name, im_data)\n\n # 2\n cam1 = return_d[\"label\"][\"cam_norm_1_up\"].squeeze().cpu().data.numpy()\n cam2 = return_d[\"label\"][\"cam_norm_2_up\"].squeeze().cpu().data.numpy()\n cam3 = return_d[\"label\"][\"cam_norm_3_up\"].squeeze().cpu().data.numpy()\n\n im1 = Image.fromarray(cam1 * 255).convert('RGB')\n im2 = Image.fromarray(cam2 * 255).convert('RGB')\n im3 = Image.fromarray(cam3 * 255).convert('RGB')\n\n imo1 = im1.resize((im_data.shape[1], im_data.shape[0]), resample=Image.BILINEAR)\n imo2 = im2.resize((im_data.shape[1], im_data.shape[0]), resample=Image.BILINEAR)\n imo3 = im3.resize((im_data.shape[1], im_data.shape[0]), resample=Image.BILINEAR)\n\n imo1.save(os.path.join(result_path, '{}_{}_{}.png'.format(\n os.path.splitext(os.path.basename(img_name))[0], 1, smc_result)))\n imo2.save(os.path.join(result_path, '{}_{}_{}.png'.format(\n os.path.splitext(os.path.basename(img_name))[0], 2, smc_result)))\n imo3.save(os.path.join(result_path, '{}_{}_{}.png'.format(\n os.path.splitext(os.path.basename(img_name))[0], 3, smc_result)))\n\n # 3\n camf = return_d[\"label\"][\"cam_norm_up\"].squeeze().cpu().data.numpy()\n imf = Image.fromarray(camf * 255).convert('RGB')\n imof = imf.resize((im_data.shape[1], im_data.shape[0]), resample=Image.BILINEAR)\n imof.save(os.path.join(result_path, '{}_{}_{}.png'.format(\n os.path.splitext(os.path.basename(img_name))[0], \"f\", smc_result)))\n\n # 4\n label = 
return_d[\"label\"][\"label\"].squeeze().cpu().data.numpy()\n im_label = Image.fromarray((np.asarray(label, dtype=np.uint8) + 1) * 127).convert('RGB')\n imo_label = im_label.resize((im_data.shape[1], im_data.shape[0]), resample=Image.BILINEAR)\n imo_label.save(os.path.join(result_path, '{}_{}_{}.png'.format(\n os.path.splitext(os.path.basename(img_name))[0], \"l\", smc_result)))\n\n # 5\n for key in [\"d1\", \"d2\", \"d3\"]:\n d_out_up_sigmoid = return_d[key][\"out_up_sigmoid\"].squeeze().cpu().data.numpy()\n im_d_out_up_sigmoid = Image.fromarray(d_out_up_sigmoid * 255).convert('RGB')\n imo_d_out_up_sigmoid = im_d_out_up_sigmoid.resize((im_data.shape[1], im_data.shape[0]),\n resample=Image.BILINEAR)\n imo_d_out_up_sigmoid.save(os.path.join(result_path, '{}_{}_{}.png'.format(\n os.path.splitext(os.path.basename(img_name))[0], key, smc_result)))\n pass\n\n pass\n\n pass\n\n\nif __name__ == '__main__':\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\n one_decoder()\n pass\n\n", "import os\nimport glob\nimport torch\nimport numpy as np\nimport torch.nn as nn\nfrom PIL import Image\nimport torch.optim as optim\nfrom torchvision import models\nimport torch.nn.functional as F\nfrom torchvision import transforms\nfrom alisuretool.Tools import Tools\nfrom torch.utils.data import DataLoader, Dataset\n\n\n#######################################################################################################################\n# 1 Data\n\nclass DatasetUSOD(Dataset):\n\n def __init__(self, img_name_list, is_train=True):\n # self.image_name_list = img_name_list[:20]\n # self.label_name_list = lbl_name_list[:20]\n self.image_name_list = img_name_list\n\n self.is_train = is_train\n self.transform_train = transforms.Compose([\n transforms.RandomResizedCrop(size=224, scale=(0.3, 1.)),\n transforms.ColorJitter(0.4, 0.4, 0.4, 0.4),\n transforms.RandomGrayscale(p=0.2),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n ])\n self.transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n ])\n pass\n\n def __len__(self):\n return len(self.image_name_list)\n\n def __getitem__(self, idx):\n image = Image.open(self.image_name_list[idx]).convert(\"RGB\")\n image = self.transform_train(image) if self.is_train else self.transform_test(image)\n return image, idx\n\n pass\n\n\n#######################################################################################################################\n# 2 Model\n\n\nclass ResBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(ResBlock, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.downsample = downsample\n self.stride = stride\n pass\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n pass\n\n\nclass ConvBlock(nn.Module):\n\n def __init__(self, cin, cout, stride=1, has_relu=True):\n super(ConvBlock, self).__init__()\n self.has_relu = has_relu\n\n self.conv = 
nn.Conv2d(cin, cout, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn = nn.BatchNorm2d(cout)\n self.relu = nn.ReLU(inplace=True)\n pass\n\n def forward(self, x):\n out = self.conv(x)\n out = self.bn(out)\n if self.has_relu:\n out = self.relu(out)\n return out\n\n pass\n\n\nclass MICNormalize(nn.Module):\n\n def __init__(self, power=2):\n super(MICNormalize, self).__init__()\n self.power = power\n pass\n\n def forward(self, x, dim=1):\n norm = x.pow(self.power).sum(dim, keepdim=True).pow(1. / self.power)\n out = x.div(norm)\n return out\n\n pass\n\n\nclass MICProduceClass(object):\n\n def __init__(self, n_sample, out_dim, ratio=1.0):\n super().__init__()\n self.out_dim = out_dim\n self.n_sample = n_sample\n self.class_per_num = self.n_sample // self.out_dim * ratio\n self.count = 0\n self.count_2 = 0\n self.class_num = np.zeros(shape=(self.out_dim, ), dtype=np.int)\n self.classes = np.zeros(shape=(self.n_sample, ), dtype=np.int)\n pass\n\n def reset(self):\n self.count = 0\n self.count_2 = 0\n self.class_num *= 0\n pass\n\n def cal_label(self, out, indexes):\n top_k = out.data.topk(self.out_dim, dim=1)[1].cpu()\n indexes_cpu = indexes.cpu()\n\n batch_size = top_k.size(0)\n class_labels = np.zeros(shape=(batch_size,), dtype=np.int)\n\n for i in range(batch_size):\n for j_index, j in enumerate(top_k[i]):\n if self.class_per_num > self.class_num[j]:\n class_labels[i] = j\n self.class_num[j] += 1\n self.count += 1 if self.classes[indexes_cpu[i]] != j else 0\n self.classes[indexes_cpu[i]] = j\n self.count_2 += 1 if j_index != 0 else 0\n break\n pass\n pass\n pass\n\n def get_label(self, indexes):\n return torch.tensor(self.classes[indexes.cpu().numpy()]).long()\n\n pass\n\n\nclass BASNet(nn.Module):\n\n def __init__(self, n_channels, clustering_num_list=None, pretrained=True, has_mask=True):\n super(BASNet, self).__init__()\n self.has_mask = has_mask # 28\n resnet = models.resnet18(pretrained=pretrained)\n\n # -------------Encoder--------------\n self.encoder0 = ConvBlock(n_channels, 64, has_relu=True)\n self.encoder1 = resnet.layer1 # 224\n self.encoder2 = resnet.layer2 # 112\n self.encoder3 = resnet.layer3 # 56\n self.encoder4 = resnet.layer4 # 28\n\n # -------------MIC-------------\n self.clustering_num_list = list([128, 256, 512]) if clustering_num_list is None else clustering_num_list\n\n # MIC 1\n self.mic_1_b1 = ResBlock(512, 512) # 28\n self.mic_1_b2 = ResBlock(512, 512)\n self.mic_1_b3 = ResBlock(512, 512)\n self.mic_1_c1 = ConvBlock(512, self.clustering_num_list[0], has_relu=True)\n self.mic_1_l2norm = MICNormalize(2)\n\n # MIC 2\n self.mic_2_pool = nn.MaxPool2d(2, 2, ceil_mode=True)\n self.mic_2_b1 = ResBlock(512, 512) # 14\n self.mic_2_b2 = ResBlock(512, 512)\n self.mic_2_b3 = ResBlock(512, 512)\n self.mic_2_c1 = ConvBlock(512, self.clustering_num_list[1], has_relu=True)\n self.mic_2_l2norm = MICNormalize(2)\n\n # MIC 3\n self.mic_3_pool = nn.MaxPool2d(2, 2, ceil_mode=True)\n self.mic_3_b1 = ResBlock(512, 512) # 7\n self.mic_3_b2 = ResBlock(512, 512)\n self.mic_3_b3 = ResBlock(512, 512)\n self.mic_3_c1 = ConvBlock(512, self.clustering_num_list[2], has_relu=True)\n self.mic_3_l2norm = MICNormalize(2)\n\n # Decoder\n self.decoder_1_b1 = ResBlock(512, 512) # 28\n self.decoder_1_b2 = ResBlock(512, 512)\n self.decoder_1_b3 = ResBlock(512, 512)\n self.decoder_1_out_c = nn.Conv2d(512, 1, 3, padding=1)\n\n # UP\n self.mic_up_2 = nn.Upsample(scale_factor=2, mode='bilinear')\n self.mic_up_4 = nn.Upsample(scale_factor=4, mode='bilinear')\n self.mic_up_8 = 
nn.Upsample(scale_factor=8, mode='bilinear')\n self.mic_up_16 = nn.Upsample(scale_factor=16, mode='bilinear')\n self.mic_up_32 = nn.Upsample(scale_factor=32, mode='bilinear')\n pass\n\n def forward(self, x):\n # -------------Encoder-------------\n e0 = self.encoder0(x) # 64 * 224 * 224\n e1 = self.encoder1(e0) # 64 * 224 * 224\n e2 = self.encoder2(e1) # 128 * 112 * 112\n e3 = self.encoder3(e2) # 256 * 56 * 56\n e4 = self.encoder4(e3) # 512 * 28 * 28\n\n # -------------MIC-------------\n # 1\n mic_f_1 = self.mic_1_b1(e4)\n mic_f_1 = self.mic_1_b2(mic_f_1)\n mic_f_1 = self.mic_1_b3(mic_f_1)\n\n mic_1 = self.mic_1_c1(mic_f_1) # 512 * 28 * 28\n smc_logits_1, smc_l2norm_1, smc_sigmoid_1 = self.salient_map_clustering(mic_1, which=1, has_mask=self.has_mask)\n cam_1 = self.cluster_activation_map(smc_logits_1, mic_1) # 簇激活图:Cluster Activation Map\n\n return_1 = {\n \"mic_f\": mic_f_1,\n \"mic\": mic_1,\n \"smc_logits\": smc_logits_1,\n \"smc_l2norm\": smc_l2norm_1,\n \"smc_sigmoid\": smc_sigmoid_1,\n \"cam\": cam_1\n }\n\n # 2\n mic_f_2 = self.mic_2_pool(mic_f_1) # 512 * 14 * 14\n mic_f_2 = self.mic_2_b1(mic_f_2)\n mic_f_2 = self.mic_2_b2(mic_f_2)\n mic_f_2 = self.mic_2_b3(mic_f_2)\n\n mic_2 = self.mic_2_c1(mic_f_2) # 512 * 14 * 14\n smc_logits_2, smc_l2norm_2, smc_sigmoid_2 = self.salient_map_clustering(mic_2, which=2, has_mask=self.has_mask)\n cam_2 = self.cluster_activation_map(smc_logits_2, mic_2) # 簇激活图:Cluster Activation Map\n\n return_2 = {\n \"mic_f\": mic_f_2,\n \"mic\": mic_2,\n \"smc_logits\": smc_logits_2,\n \"smc_l2norm\": smc_l2norm_2,\n \"smc_sigmoid\": smc_sigmoid_2,\n \"cam\": cam_2\n }\n\n # 3\n mic_f_3 = self.mic_3_pool(mic_f_2) # 512 * 7 * 7\n mic_f_3 = self.mic_3_b1(mic_f_3)\n mic_f_3 = self.mic_3_b2(mic_f_3)\n mic_f_3 = self.mic_3_b3(mic_f_3)\n\n mic_3 = self.mic_3_c1(mic_f_3) # 512 * 7 * 7\n smc_logits_3, smc_l2norm_3, smc_sigmoid_3 = self.salient_map_clustering(mic_3, which=3, has_mask=self.has_mask)\n cam_3 = self.cluster_activation_map(smc_logits_3, mic_3) # 簇激活图:Cluster Activation Map\n\n return_3 = {\n \"mic_f\": mic_f_3,\n \"mic\": mic_3,\n \"smc_logits\": smc_logits_3,\n \"smc_l2norm\": smc_l2norm_3,\n \"smc_sigmoid\": smc_sigmoid_3,\n \"cam\": cam_3\n }\n\n # -------------Label-------------\n # cam_norm_1_up = self.mic_up_8(cam_1)\n # cam_norm_2_up = self.mic_up_16(cam_2)\n # cam_norm_3_up = self.mic_up_32(cam_3)\n cam_norm_1_up = cam_1\n cam_norm_2_up = self.mic_up_2(cam_2)\n cam_norm_3_up = self.mic_up_4(cam_3)\n if cam_norm_1_up.size()[2] != cam_norm_3_up.size()[2] or cam_norm_1_up.size()[3] != cam_norm_3_up.size()[3]:\n cam_norm_2_up = torch.nn.functional.interpolate(cam_norm_2_up,\n size=[cam_norm_1_up.size()[2], cam_norm_1_up.size()[3]])\n cam_norm_3_up = torch.nn.functional.interpolate(cam_norm_3_up,\n size=[cam_norm_1_up.size()[2], cam_norm_1_up.size()[3]])\n pass\n\n cam_norm_up = (cam_norm_1_up + cam_norm_2_up) / 2\n label = self.salient_map_label(cam_norm_up) # 显著图划分:Salient Map Divide\n\n # -------------Decoder-------------\n d1_1 = self.decoder_1_b1(e4)\n d1_2 = self.decoder_1_b2(d1_1)\n d1_3 = self.decoder_1_b3(d1_2)\n d1_out = self.decoder_1_out_c(d1_3)\n d1_out_up = self.mic_up_8(d1_out) # 1 * 224 * 224\n d1_out_sigmoid = torch.sigmoid(d1_out) # 1 * 28 * 28 # 小输出\n d1_out_up_sigmoid = torch.sigmoid(d1_out_up) # 1 * 224 * 224 # 大输出\n\n return_d = {\n \"cam_norm_1_up\": cam_norm_1_up,\n \"cam_norm_2_up\": cam_norm_2_up,\n \"cam_norm_3_up\": cam_norm_3_up,\n \"cam_norm_up\": cam_norm_up,\n \"label\": label,\n\n \"d1_out\": d1_out,\n \"d1_out_up\": 
d1_out_up,\n \"d1_out_sigmoid\": d1_out_sigmoid,\n \"d1_out_up_sigmoid\": d1_out_up_sigmoid\n }\n\n return return_1, return_2, return_3, return_d\n\n def salient_map_clustering(self, mic, which=1, has_mask=True):\n # m1\n mic_gaussian = mic\n if has_mask:\n if which == 1:\n g_mask = self._mask_gaussian([mic.size()[2], mic.size()[3]], sigma=mic.size()[2] * mic.size()[3] // 2)\n mic_gaussian = mic * torch.tensor(g_mask).cuda()\n elif which == 2:\n # g_mask = self._mask_gaussian([mic.size()[2], mic.size()[3]], sigma=mic.size()[2] * mic.size()[3])\n # mic_gaussian = mic * torch.tensor(g_mask).cuda()\n mic_gaussian = mic\n else:\n mic_gaussian = mic\n pass\n\n smc_logits = F.adaptive_avg_pool2d(mic_gaussian, 1).view((mic_gaussian.size()[0], -1)) # 512\n\n smc_l2norm = self.mic_1_l2norm(smc_logits)\n smc_sigmoid = torch.sigmoid(smc_logits)\n return smc_logits, smc_l2norm, smc_sigmoid\n\n def cluster_activation_map(self, smc_logits, mic_feature):\n top_k_value, top_k_index = torch.topk(smc_logits, 1, 1)\n cam = torch.cat([mic_feature[i:i+1, top_k_index[i], :, :] for i in range(mic_feature.size()[0])])\n\n cam_norm = self._feature_norm(cam) # 1 * 28 * 28\n return cam_norm\n\n @staticmethod\n def salient_map_label(cam_norm_up):\n label = torch.zeros(tuple(cam_norm_up.size()))\n label = label.cuda() if torch.cuda.is_available() else label\n label.data = cam_norm_up.data\n return label\n\n @staticmethod\n def _feature_norm(feature_map):\n feature_shape = feature_map.size()\n batch_min, _ = torch.min(feature_map.view((feature_shape[0], -1)), dim=-1, keepdim=True)\n batch_max, _ = torch.max(feature_map.view((feature_shape[0], -1)), dim=-1, keepdim=True)\n norm = torch.div(feature_map.view((feature_shape[0], -1)) - batch_min, batch_max - batch_min)\n return norm.view(feature_shape)\n\n @staticmethod\n def _mask_gaussian(image_size, where=None, sigma=20):\n\n x = np.arange(0, image_size[1], 1, float)\n y = np.arange(0, image_size[0], 1, float)\n y = y[:, np.newaxis]\n\n if where:\n x0, y0 = where[1], where[0]\n else:\n x0, y0 = image_size[1] // 2, image_size[0] // 2\n pass\n\n # 生成高斯掩码\n mask = np.exp(-4 * np.log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / sigma).astype(np.float32)\n return mask\n\n pass\n\n\n#######################################################################################################################\n# 3 Runner\n\n\nclass BASRunner(object):\n\n def __init__(self, epoch_num=1000, batch_size_train=8, has_mask=True,\n clustering_num_1=128, clustering_num_2=256, clustering_num_3=512,\n clustering_ratio_1=1, clustering_ratio_2=1.5, clustering_ratio_3=2,\n data_dir='/mnt/4T/Data/SOD/DUTS/DUTS-TR', tra_image_dir='DUTS-TR-Image',\n tra_label_dir='DUTS-TR-Mask', model_dir=\"./saved_models/my_train_mic_only\"):\n self.epoch_num = epoch_num\n self.batch_size_train = batch_size_train\n self.has_mask = has_mask\n\n # Dataset\n self.model_dir = model_dir\n self.data_dir = data_dir\n self.tra_image_dir = tra_image_dir\n self.tra_label_dir = tra_label_dir\n self.tra_img_name_list, tra_lbl_name_list = self.get_tra_img_label_name()\n self.dataset_usod = DatasetUSOD(img_name_list=self.tra_img_name_list, is_train=True)\n self.dataloader_usod = DataLoader(self.dataset_usod, self.batch_size_train, shuffle=True, num_workers=8)\n\n # Model\n self.net = BASNet(3, clustering_num_list=[clustering_num_1, clustering_num_2, clustering_num_3],\n pretrained=True, has_mask=self.has_mask)\n self.net = self.net.cuda() if torch.cuda.is_available() else self.net\n\n # MIC\n self.produce_class_1 = 
MICProduceClass(n_sample=len(self.dataset_usod),\n out_dim=clustering_num_1, ratio=clustering_ratio_1)\n self.produce_class_2 = MICProduceClass(n_sample=len(self.dataset_usod),\n out_dim=clustering_num_2, ratio=clustering_ratio_2)\n self.produce_class_3 = MICProduceClass(n_sample=len(self.dataset_usod),\n out_dim=clustering_num_3, ratio=clustering_ratio_3)\n\n # Loss and Optim\n self.mse_loss = nn.MSELoss()\n self.mic_loss = nn.CrossEntropyLoss()\n self.mse_loss = self.mse_loss.cuda() if torch.cuda.is_available() else self.mse_loss\n self.mic_loss = self.mic_loss.cuda() if torch.cuda.is_available() else self.mic_loss\n\n self.optimizer = optim.Adam(self.net.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)\n pass\n\n def load_model(self, model_file_name):\n self.net.load_state_dict(torch.load(model_file_name), strict=False)\n Tools.print(\"restore from {}\".format(model_file_name))\n pass\n\n def get_tra_img_label_name(self):\n tra_img_name_list = glob.glob(os.path.join(self.data_dir, self.tra_image_dir, '*.jpg'))\n tra_lbl_name_list = [os.path.join(self.data_dir, self.tra_label_dir, '{}.png'.format(\n os.path.splitext(os.path.basename(img_path))[0])) for img_path in tra_img_name_list]\n Tools.print(\"train images: {}\".format(len(tra_img_name_list)))\n Tools.print(\"train labels: {}\".format(len(tra_lbl_name_list)))\n return tra_img_name_list, tra_lbl_name_list\n\n def all_loss_fusion(self, mic_1_out, mic_2_out, mic_3_out,\n mic_labels_1, mic_labels_2, mic_labels_3, sod_sigmoid, sod_label):\n loss_mic_1 = self.mic_loss(mic_1_out, mic_labels_1)\n loss_mic_2 = self.mic_loss(mic_2_out, mic_labels_2)\n loss_mic_3 = self.mic_loss(mic_3_out, mic_labels_3)\n\n loss_mse = self.mse_loss(sod_sigmoid, sod_label)\n\n loss_all = (loss_mic_1 + loss_mic_2 + loss_mic_3) / 3 + loss_mse\n return loss_all, loss_mic_1, loss_mic_2, loss_mic_3, loss_mse\n\n def train(self, save_epoch_freq=5, print_ite_num=100, update_epoch_freq=1):\n\n for epoch in range(0, self.epoch_num):\n\n ###########################################################################\n # 0 更新标签\n if epoch % update_epoch_freq == 0:\n Tools.print()\n Tools.print(\"Update label {} .......\".format(epoch))\n self.net.eval()\n\n self.produce_class_1.reset()\n self.produce_class_2.reset()\n self.produce_class_3.reset()\n for batch_idx, (inputs, indexes) in enumerate(self.dataloader_usod):\n inputs = inputs.type(torch.FloatTensor)\n inputs = inputs.cuda() if torch.cuda.is_available() else inputs\n indexes = indexes.cuda() if torch.cuda.is_available() else indexes\n\n return_1, return_2, return_3, return_d = self.net(inputs)\n\n self.produce_class_1.cal_label(return_1[\"smc_l2norm\"], indexes)\n self.produce_class_2.cal_label(return_2[\"smc_l2norm\"], indexes)\n self.produce_class_3.cal_label(return_3[\"smc_l2norm\"], indexes)\n pass\n\n Tools.print(\"Epoch: [{}] {}/{} {}/{} {}/{}\".format(\n epoch, self.produce_class_1.count, self.produce_class_1.count_2, self.produce_class_2.count,\n self.produce_class_2.count_2, self.produce_class_3.count, self.produce_class_3.count_2))\n Tools.print()\n pass\n\n ###########################################################################\n # 1 训练模型\n all_loss, all_loss_mic_1, all_loss_mic_2, all_loss_mic_3, all_loss_mse = 0.0, 0.0, 0.0, 0.0, 0.0\n self.net.train()\n for i, (inputs, indexes) in enumerate(self.dataloader_usod):\n inputs = inputs.type(torch.FloatTensor)\n inputs = inputs.cuda() if torch.cuda.is_available() else inputs\n indexes = indexes.cuda() if torch.cuda.is_available() else 
indexes\n self.optimizer.zero_grad()\n\n return_1, return_2, return_3, return_d = self.net(inputs)\n\n mic_labels_1 = self.produce_class_1.get_label(indexes)\n mic_labels_1 = mic_labels_1.cuda() if torch.cuda.is_available() else mic_labels_1\n mic_labels_2 = self.produce_class_2.get_label(indexes)\n mic_labels_2 = mic_labels_2.cuda() if torch.cuda.is_available() else mic_labels_2\n mic_labels_3 = self.produce_class_3.get_label(indexes)\n mic_labels_3 = mic_labels_3.cuda() if torch.cuda.is_available() else mic_labels_3\n\n loss, loss_mic_1, loss_mic_2, loss_mic_3, loss_mse = self.all_loss_fusion(\n return_1[\"smc_logits\"], return_2[\"smc_logits\"], return_3[\"smc_logits\"],\n mic_labels_1, mic_labels_2, mic_labels_3, return_d[\"d1_out_sigmoid\"], return_d[\"label\"])\n loss.backward()\n self.optimizer.step()\n\n all_loss += loss.item()\n all_loss_mic_1 += loss_mic_1.item()\n all_loss_mic_2 += loss_mic_2.item()\n all_loss_mic_3 += loss_mic_3.item()\n all_loss_mse += loss_mse.item()\n if i % print_ite_num == 0:\n Tools.print(\"[E:{:4d}/{:4d}, b:{:4d}/{:4d}] \"\n \"a loss:{:.2f} loss:{:.2f} \"\n \"a mic 1:{:.2f} mic 1:{:.2f} \"\n \"a mic 2:{:.2f} mic 2:{:.2f} \"\n \"a mic 3:{:.2f} mic 3:{:.2f} \"\n \"a mse:{:.2f} mse:{:.2f}\".format(\n epoch, self.epoch_num, i, len(self.dataloader_usod),\n all_loss/(i+1), loss.item(),\n all_loss_mic_1/(i+1), loss_mic_1.item(),\n all_loss_mic_2/(i+1), loss_mic_2.item(),\n all_loss_mic_3/(i+1), loss_mic_3.item(),\n all_loss_mse/(i+1), loss_mse.item()))\n pass\n\n pass\n\n ###########################################################################\n # 2 保存模型\n if epoch % save_epoch_freq == 0:\n save_file_name = Tools.new_dir(os.path.join(\n self.model_dir, \"{}_train_{:.3f}.pth\".format(epoch, all_loss / len(self.dataloader_usod))))\n torch.save(self.net.state_dict(), save_file_name)\n\n Tools.print()\n Tools.print(\"Save Model to {}\".format(save_file_name))\n Tools.print()\n pass\n\n pass\n\n pass\n\n pass\n\n\n#######################################################################################################################\n# 4 Main\n\n\nif __name__ == '__main__':\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\n # bas_runner = BASRunner(batch_size_train=2, data_dir='D:\\\\data\\\\SOD\\\\DUTS\\\\DUTS-TR')\n # bas_runner = BASRunner(batch_size_train=12, model_dir=\"./saved_models/my_mic_123_mask\")\n\n bas_runner = BASRunner(batch_size_train=10, has_mask=True,\n model_dir=\"./saved_models/my_train_mic5_decoder5_mse1_small\")\n bas_runner.load_model('./saved_models/my_train5_diff_aug_mask/125_train_6.569.pth')\n\n bas_runner.train()\n pass\n", "import os\nimport glob\nimport torch\nimport numpy as np\nimport torch.nn as nn\nfrom PIL import Image\nimport torch.optim as optim\nfrom torchvision import models\nimport torch.nn.functional as F\nfrom torchvision import transforms\nfrom alisuretool.Tools import Tools\nfrom torch.utils.data import DataLoader, Dataset\n\n\n#######################################################################################################################\n# 1 Data\n\nclass DatasetUSOD(Dataset):\n\n def __init__(self, img_name_list, is_train=True):\n # self.image_name_list = img_name_list[:20]\n # self.label_name_list = lbl_name_list[:20]\n self.image_name_list = img_name_list\n\n self.is_train = is_train\n self.transform_train = transforms.Compose([\n transforms.RandomResizedCrop(size=224, scale=(0.3, 1.)),\n transforms.ColorJitter(0.4, 0.4, 0.4, 0.4),\n transforms.RandomGrayscale(p=0.2),\n transforms.RandomHorizontalFlip(),\n 
transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n ])\n self.transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n ])\n pass\n\n def __len__(self):\n return len(self.image_name_list)\n\n def __getitem__(self, idx):\n image = Image.open(self.image_name_list[idx]).convert(\"RGB\")\n image = self.transform_train(image) if self.is_train else self.transform_test(image)\n return image, idx\n\n pass\n\n\n#######################################################################################################################\n# 2 Model\n\n\nclass ResBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(ResBlock, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.downsample = downsample\n self.stride = stride\n pass\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n pass\n\n\nclass ConvBlock(nn.Module):\n\n def __init__(self, cin, cout, stride=1, has_relu=True):\n super(ConvBlock, self).__init__()\n self.has_relu = has_relu\n\n self.conv = nn.Conv2d(cin, cout, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn = nn.BatchNorm2d(cout)\n self.relu = nn.ReLU(inplace=True)\n pass\n\n def forward(self, x):\n out = self.conv(x)\n out = self.bn(out)\n if self.has_relu:\n out = self.relu(out)\n return out\n\n pass\n\n\nclass MICNormalize(nn.Module):\n\n def __init__(self, power=2):\n super(MICNormalize, self).__init__()\n self.power = power\n pass\n\n def forward(self, x, dim=1):\n norm = x.pow(self.power).sum(dim, keepdim=True).pow(1. 
/ self.power)\n out = x.div(norm)\n return out\n\n pass\n\n\nclass MICProduceClass(object):\n\n def __init__(self, n_sample, out_dim, ratio=1.0):\n super().__init__()\n self.out_dim = out_dim\n self.n_sample = n_sample\n self.class_per_num = self.n_sample // self.out_dim * ratio\n self.count = 0\n self.count_2 = 0\n self.class_num = np.zeros(shape=(self.out_dim, ), dtype=np.int)\n self.classes = np.zeros(shape=(self.n_sample, ), dtype=np.int)\n pass\n\n def reset(self):\n self.count = 0\n self.count_2 = 0\n self.class_num *= 0\n pass\n\n def cal_label(self, out, indexes):\n top_k = out.data.topk(self.out_dim, dim=1)[1].cpu()\n indexes_cpu = indexes.cpu()\n\n batch_size = top_k.size(0)\n class_labels = np.zeros(shape=(batch_size,), dtype=np.int)\n\n for i in range(batch_size):\n for j_index, j in enumerate(top_k[i]):\n if self.class_per_num > self.class_num[j]:\n class_labels[i] = j\n self.class_num[j] += 1\n self.count += 1 if self.classes[indexes_cpu[i]] != j else 0\n self.classes[indexes_cpu[i]] = j\n self.count_2 += 1 if j_index != 0 else 0\n break\n pass\n pass\n pass\n\n def get_label(self, indexes):\n return torch.tensor(self.classes[indexes.cpu().numpy()]).long()\n\n pass\n\n\nclass BASNet(nn.Module):\n\n def __init__(self, n_channels, clustering_num_list=None, pretrained=True, has_mask=True, more_obj=False):\n super(BASNet, self).__init__()\n self.has_mask = has_mask # 28\n self.more_obj = more_obj # 28\n resnet = models.resnet18(pretrained=pretrained)\n\n # -------------Encoder--------------\n self.encoder0 = ConvBlock(n_channels, 64, has_relu=True) # 64 * 224 * 224\n self.encoder1 = resnet.layer1 # 64 * 224 * 224\n self.encoder2 = resnet.layer2 # 128 * 112 * 112\n self.encoder3 = resnet.layer3 # 256 * 56 * 56\n self.encoder4 = resnet.layer4 # 512 * 28 * 28\n\n # -------------MIC-------------\n self.clustering_num_list = list([128, 256, 512]) if clustering_num_list is None else clustering_num_list\n\n # MIC 1\n self.mic_1_b1 = ResBlock(512, 512) # 28\n self.mic_1_b2 = ResBlock(512, 512)\n self.mic_1_b3 = ResBlock(512, 512)\n self.mic_1_c1 = ConvBlock(512, self.clustering_num_list[0], has_relu=True)\n self.mic_1_l2norm = MICNormalize(2)\n\n # MIC 2\n self.mic_2_pool = nn.MaxPool2d(2, 2, ceil_mode=True)\n self.mic_2_b1 = ResBlock(512, 512) # 14\n self.mic_2_b2 = ResBlock(512, 512)\n self.mic_2_b3 = ResBlock(512, 512)\n self.mic_2_c1 = ConvBlock(512, self.clustering_num_list[1], has_relu=True)\n self.mic_2_l2norm = MICNormalize(2)\n\n # MIC 3\n self.mic_3_pool = nn.MaxPool2d(2, 2, ceil_mode=True)\n self.mic_3_b1 = ResBlock(512, 512) # 7\n self.mic_3_b2 = ResBlock(512, 512)\n self.mic_3_b3 = ResBlock(512, 512)\n self.mic_3_c1 = ConvBlock(512, self.clustering_num_list[2], has_relu=True)\n self.mic_3_l2norm = MICNormalize(2)\n\n # Decoder\n self.decoder_1_b = ResBlock(512, 512) # 28\n self.decoder_1_out = nn.Conv2d(512, 1, 3, padding=1)\n self.decoder_1_c = ConvBlock(512, 256, has_relu=True)\n self.decoder_2_b = ResBlock(256, 256) # 56\n self.decoder_2_out = nn.Conv2d(256, 1, 3, padding=1)\n self.decoder_2_c = ConvBlock(256, 128, has_relu=True)\n self.decoder_3_b = ResBlock(128, 128) # 112\n self.decoder_3_out = nn.Conv2d(128, 1, 3, padding=1)\n\n # UP\n self.mic_up_2 = nn.Upsample(scale_factor=2, mode='bilinear')\n self.mic_up_4 = nn.Upsample(scale_factor=4, mode='bilinear')\n self.mic_up_8 = nn.Upsample(scale_factor=8, mode='bilinear')\n self.mic_up_16 = nn.Upsample(scale_factor=16, mode='bilinear')\n self.mic_up_32 = nn.Upsample(scale_factor=32, mode='bilinear')\n pass\n\n def 
forward(self, x):\n # -------------Encoder-------------\n e0 = self.encoder0(x) # 64 * 224 * 224\n e1 = self.encoder1(e0) # 64 * 224 * 224\n e2 = self.encoder2(e1) # 128 * 112 * 112\n e3 = self.encoder3(e2) # 256 * 56 * 56\n e4 = self.encoder4(e3) # 512 * 28 * 28\n\n # -------------MIC-------------\n # 1\n mic_f_1 = self.mic_1_b1(e4)\n mic_f_1 = self.mic_1_b2(mic_f_1)\n mic_f_1 = self.mic_1_b3(mic_f_1)\n\n mic_1 = self.mic_1_c1(mic_f_1) # 512 * 28 * 28\n smc_logits_1, smc_l2norm_1, smc_sigmoid_1 = self.salient_map_clustering(mic_1, which=1, has_mask=self.has_mask)\n cam_1 = self.cluster_activation_map(smc_logits_1, mic_1) # 簇激活图:Cluster Activation Map\n\n return_m1 = {\n \"mic_f\": mic_f_1,\n \"mic\": mic_1,\n \"smc_logits\": smc_logits_1,\n \"smc_l2norm\": smc_l2norm_1,\n \"smc_sigmoid\": smc_sigmoid_1,\n \"cam\": cam_1\n }\n\n # 2\n mic_f_2 = self.mic_2_pool(mic_f_1) # 512 * 14 * 14\n mic_f_2 = self.mic_2_b1(mic_f_2)\n mic_f_2 = self.mic_2_b2(mic_f_2)\n mic_f_2 = self.mic_2_b3(mic_f_2)\n\n mic_2 = self.mic_2_c1(mic_f_2) # 512 * 14 * 14\n smc_logits_2, smc_l2norm_2, smc_sigmoid_2 = self.salient_map_clustering(mic_2, which=2, has_mask=self.has_mask)\n cam_2 = self.cluster_activation_map(smc_logits_2, mic_2) # 簇激活图:Cluster Activation Map\n\n return_m2 = {\n \"mic_f\": mic_f_2,\n \"mic\": mic_2,\n \"smc_logits\": smc_logits_2,\n \"smc_l2norm\": smc_l2norm_2,\n \"smc_sigmoid\": smc_sigmoid_2,\n \"cam\": cam_2\n }\n\n # 3\n mic_f_3 = self.mic_3_pool(mic_f_2) # 512 * 7 * 7\n mic_f_3 = self.mic_3_b1(mic_f_3)\n mic_f_3 = self.mic_3_b2(mic_f_3)\n mic_f_3 = self.mic_3_b3(mic_f_3)\n\n mic_3 = self.mic_3_c1(mic_f_3) # 512 * 7 * 7\n smc_logits_3, smc_l2norm_3, smc_sigmoid_3 = self.salient_map_clustering(mic_3, which=3, has_mask=self.has_mask)\n cam_3 = self.cluster_activation_map(smc_logits_3, mic_3) # 簇激活图:Cluster Activation Map\n\n return_m3 = {\n \"mic_f\": mic_f_3,\n \"mic\": mic_3,\n \"smc_logits\": smc_logits_3,\n \"smc_l2norm\": smc_l2norm_3,\n \"smc_sigmoid\": smc_sigmoid_3,\n \"cam\": cam_3\n }\n\n # -------------Label-------------\n cam_norm_1_up = self.mic_up_8(cam_1)\n cam_norm_2_up = self.mic_up_16(cam_2)\n cam_norm_3_up = self.mic_up_32(cam_3)\n cam_norm_2_up = self.up_to_target(cam_norm_2_up, cam_norm_1_up)\n cam_norm_3_up = self.up_to_target(cam_norm_3_up, cam_norm_1_up)\n\n # 1\n cam_norm_up = (cam_norm_1_up + cam_norm_2_up) / 2\n label = self.salient_map_divide(cam_norm_up, obj_th=0.80, bg_th=0.15, more_obj=self.more_obj) # 显著图划分\n\n return_d0 = {\n \"label\": label,\n \"cam_norm_up\": cam_norm_up,\n \"cam_norm_1_up\": cam_norm_1_up,\n \"cam_norm_2_up\": cam_norm_2_up,\n \"cam_norm_3_up\": cam_norm_3_up\n }\n\n # -------------Decoder-------------\n d1 = self.decoder_1_b(e4) # 512 * 56 * 56\n d1_d2 = self.up_to_target(self.mic_up_2(self.decoder_1_c(d1)), e3) + e3 # 256 * 56 * 56\n d1_out = self.decoder_1_out(d1) # 1 * 28 * 28\n d1_out_sigmoid = torch.sigmoid(d1_out) # 1 * 28 * 28 # 小输出\n d1_out_up = self.mic_up_8(d1_out) # 1 * 224 * 224\n d1_out_up_sigmoid = torch.sigmoid(d1_out_up) # 1 * 224 * 224 # 大输出\n\n return_d1 = {\n \"out\": d1_out,\n \"out_sigmoid\": d1_out_sigmoid,\n \"out_up\": d1_out_up,\n \"out_up_sigmoid\": d1_out_up_sigmoid\n }\n\n d2 = self.decoder_2_b(d1_d2) # 256 * 56 * 56\n d2_d3 = self.up_to_target(self.mic_up_2(self.decoder_2_c(d2)), e2) + e2 # 128 * 112 * 112\n d2_out = self.decoder_2_out(d2) # 1 * 56 * 56\n d2_out_sigmoid = torch.sigmoid(d2_out) # 1 * 56 * 56 # 小输出\n d2_out_up = self.mic_up_4(d2_out) # 1 * 224 * 224\n d2_out_up_sigmoid = torch.sigmoid(d2_out_up) 
# 1 * 224 * 224 # 大输出\n\n return_d2 = {\n \"out\": d2_out,\n \"out_sigmoid\": d2_out_sigmoid,\n \"out_up\": d2_out_up,\n \"out_up_sigmoid\": d2_out_up_sigmoid\n }\n\n d3 = self.decoder_3_b(d2_d3) # 128 * 112 * 112\n d3_out = self.decoder_3_out(d3) # 1 * 112 * 112\n d3_out_sigmoid = torch.sigmoid(d3_out) # 1 * 112 * 112 # 小输出\n d3_out_up = self.mic_up_2(d3_out) # 1 * 224 * 224\n d3_out_up_sigmoid = torch.sigmoid(d3_out_up) # 1 * 224 * 224 # 大输出\n\n return_d3 = {\n \"out\": d3_out,\n \"out_sigmoid\": d3_out_sigmoid,\n \"out_up\": d3_out_up,\n \"out_up_sigmoid\": d3_out_up_sigmoid\n }\n\n return_m = {\"m1\": return_m1, \"m2\": return_m2, \"m3\": return_m3}\n return_d = {\"label\": return_d0, \"d1\": return_d1, \"d2\": return_d2, \"d3\": return_d3}\n return return_m, return_d\n\n @staticmethod\n def up_to_target(source, target):\n if source.size()[2] != target.size()[2] or source.size()[3] != target.size()[3]:\n source = torch.nn.functional.interpolate(source, size=[target.size()[2], target.size()[3]])\n return source\n\n def salient_map_clustering(self, mic, which=1, has_mask=True):\n # m1\n mic_gaussian = mic\n if has_mask:\n if which == 1:\n g_mask = self._mask_gaussian([mic.size()[2], mic.size()[3]], sigma=mic.size()[2] * mic.size()[3] // 2)\n mic_gaussian = mic * torch.tensor(g_mask).cuda()\n elif which == 2:\n # g_mask = self._mask_gaussian([mic.size()[2], mic.size()[3]], sigma=mic.size()[2] * mic.size()[3])\n # mic_gaussian = mic * torch.tensor(g_mask).cuda()\n mic_gaussian = mic\n else:\n mic_gaussian = mic\n pass\n\n smc_logits = F.adaptive_avg_pool2d(mic_gaussian, 1).view((mic_gaussian.size()[0], -1)) # 512\n\n smc_l2norm = self.mic_1_l2norm(smc_logits)\n smc_sigmoid = torch.sigmoid(smc_logits)\n return smc_logits, smc_l2norm, smc_sigmoid\n\n def cluster_activation_map(self, smc_logits, mic_feature):\n top_k_value, top_k_index = torch.topk(smc_logits, 1, 1)\n cam = torch.cat([mic_feature[i:i+1, top_k_index[i], :, :] for i in range(mic_feature.size()[0])])\n\n cam_norm = self._feature_norm(cam)\n return cam_norm\n\n def salient_map_divide(self, cam_norm_up, obj_th=0.7, bg_th=0.2, more_obj=False):\n cam_norm_up = self._feature_norm(cam_norm_up)\n\n label = torch.zeros(tuple(cam_norm_up.size())).fill_(255)\n label = label.cuda() if torch.cuda.is_available() else label\n\n label[cam_norm_up < bg_th] = 0.0\n\n if more_obj:\n for i in range(cam_norm_up.size()[0]):\n mask_pos_i = cam_norm_up[i] > obj_th\n if torch.sum(mask_pos_i) < 28:\n mask_pos_i = cam_norm_up[i] > (obj_th * 0.9)\n pass\n label[i][mask_pos_i] = 1.0\n pass\n pass\n else:\n label[cam_norm_up > obj_th] = 1.0\n pass\n\n return label\n\n @staticmethod\n def _feature_norm(feature_map):\n feature_shape = feature_map.size()\n batch_min, _ = torch.min(feature_map.view((feature_shape[0], -1)), dim=-1, keepdim=True)\n batch_max, _ = torch.max(feature_map.view((feature_shape[0], -1)), dim=-1, keepdim=True)\n norm = torch.div(feature_map.view((feature_shape[0], -1)) - batch_min, batch_max - batch_min)\n return norm.view(feature_shape)\n\n @staticmethod\n def _mask_gaussian(image_size, where=None, sigma=20):\n\n x = np.arange(0, image_size[1], 1, float)\n y = np.arange(0, image_size[0], 1, float)\n y = y[:, np.newaxis]\n\n if where:\n x0, y0 = where[1], where[0]\n else:\n x0, y0 = image_size[1] // 2, image_size[0] // 2\n pass\n\n # 生成高斯掩码\n mask = np.exp(-4 * np.log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / sigma).astype(np.float32)\n return mask\n\n 
pass\n\n\n#######################################################################################################################\n# 3 Runner\n\n\nclass BASRunner(object):\n\n def __init__(self, epoch_num=1000, batch_size_train=8, has_mask=True, more_obj=False,\n clustering_num_1=128, clustering_num_2=256, clustering_num_3=512,\n clustering_ratio_1=1, clustering_ratio_2=1.5, clustering_ratio_3=2,\n data_dir='/mnt/4T/Data/SOD/DUTS/DUTS-TR', tra_image_dir='DUTS-TR-Image',\n tra_label_dir='DUTS-TR-Mask', model_dir=\"./saved_models/my_train_mic_only\"):\n self.epoch_num = epoch_num\n self.batch_size_train = batch_size_train\n self.has_mask = has_mask\n self.more_obj = more_obj\n\n # Dataset\n self.model_dir = model_dir\n self.data_dir = data_dir\n self.tra_image_dir = tra_image_dir\n self.tra_label_dir = tra_label_dir\n self.tra_img_name_list, tra_lbl_name_list = self.get_tra_img_label_name()\n self.dataset_usod = DatasetUSOD(img_name_list=self.tra_img_name_list, is_train=True)\n self.dataloader_usod = DataLoader(self.dataset_usod, self.batch_size_train, shuffle=True, num_workers=8)\n\n # Model\n self.net = BASNet(3, clustering_num_list=[clustering_num_1, clustering_num_2, clustering_num_3],\n pretrained=True, has_mask=self.has_mask, more_obj=self.more_obj)\n self.net = self.net.cuda() if torch.cuda.is_available() else self.net\n\n # MIC\n self.produce_class_1 = MICProduceClass(n_sample=len(self.dataset_usod),\n out_dim=clustering_num_1, ratio=clustering_ratio_1)\n self.produce_class_2 = MICProduceClass(n_sample=len(self.dataset_usod),\n out_dim=clustering_num_2, ratio=clustering_ratio_2)\n self.produce_class_3 = MICProduceClass(n_sample=len(self.dataset_usod),\n out_dim=clustering_num_3, ratio=clustering_ratio_3)\n\n # Loss and Optim\n self.bce_loss = nn.BCELoss()\n self.mic_loss = nn.CrossEntropyLoss()\n self.bce_loss = self.bce_loss.cuda() if torch.cuda.is_available() else self.bce_loss\n self.mic_loss = self.mic_loss.cuda() if torch.cuda.is_available() else self.mic_loss\n\n self.optimizer = optim.Adam(self.net.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)\n pass\n\n def load_model(self, model_file_name):\n self.net.load_state_dict(torch.load(model_file_name), strict=False)\n Tools.print(\"restore from {}\".format(model_file_name))\n pass\n\n def get_tra_img_label_name(self):\n tra_img_name_list = glob.glob(os.path.join(self.data_dir, self.tra_image_dir, '*.jpg'))\n tra_lbl_name_list = [os.path.join(self.data_dir, self.tra_label_dir, '{}.png'.format(\n os.path.splitext(os.path.basename(img_path))[0])) for img_path in tra_img_name_list]\n Tools.print(\"train images: {}\".format(len(tra_img_name_list)))\n Tools.print(\"train labels: {}\".format(len(tra_lbl_name_list)))\n return tra_img_name_list, tra_lbl_name_list\n\n def all_loss_fusion(self, mic_1_out, mic_2_out, mic_3_out,\n mic_labels_1, mic_labels_2, mic_labels_3, sod_sigmoid, sod_label):\n loss_mic_1 = self.mic_loss(mic_1_out, mic_labels_1)\n loss_mic_2 = self.mic_loss(mic_2_out, mic_labels_2)\n loss_mic_3 = self.mic_loss(mic_3_out, mic_labels_3)\n\n positions = sod_label.view(-1, 1) < 255.0\n loss_bce = self.bce_loss(sod_sigmoid.view(-1, 1)[positions], sod_label.view(-1, 1)[positions])\n\n loss_all = (loss_mic_1 + loss_mic_2 + loss_mic_3) / 3 + 5 * loss_bce\n return loss_all, loss_mic_1, loss_mic_2, loss_mic_3, loss_bce\n\n def train(self, save_epoch_freq=5, print_ite_num=100, update_epoch_freq=1):\n\n for epoch in range(0, self.epoch_num):\n\n 
###########################################################################\n # 0 更新标签\n if epoch % update_epoch_freq == 0:\n Tools.print()\n Tools.print(\"Update label {} .......\".format(epoch))\n self.net.eval()\n\n self.produce_class_1.reset()\n self.produce_class_2.reset()\n self.produce_class_3.reset()\n for batch_idx, (inputs, indexes) in enumerate(self.dataloader_usod):\n inputs = inputs.type(torch.FloatTensor)\n inputs = inputs.cuda() if torch.cuda.is_available() else inputs\n indexes = indexes.cuda() if torch.cuda.is_available() else indexes\n\n return_m, return_d = self.net(inputs)\n\n self.produce_class_1.cal_label(return_m[\"m1\"][\"smc_l2norm\"], indexes)\n self.produce_class_2.cal_label(return_m[\"m2\"][\"smc_l2norm\"], indexes)\n self.produce_class_3.cal_label(return_m[\"m3\"][\"smc_l2norm\"], indexes)\n pass\n\n Tools.print(\"Epoch: [{}] {}/{} {}/{} {}/{}\".format(\n epoch, self.produce_class_1.count, self.produce_class_1.count_2, self.produce_class_2.count,\n self.produce_class_2.count_2, self.produce_class_3.count, self.produce_class_3.count_2))\n Tools.print()\n pass\n\n ###########################################################################\n # 1 训练模型\n all_loss, all_loss_mic_1, all_loss_mic_2, all_loss_mic_3, all_loss_bce = 0.0, 0.0, 0.0, 0.0, 0.0\n self.net.train()\n for i, (inputs, indexes) in enumerate(self.dataloader_usod):\n inputs = inputs.type(torch.FloatTensor)\n inputs = inputs.cuda() if torch.cuda.is_available() else inputs\n indexes = indexes.cuda() if torch.cuda.is_available() else indexes\n self.optimizer.zero_grad()\n\n return_m, return_d = self.net(inputs)\n\n mic_labels_1 = self.produce_class_1.get_label(indexes)\n mic_labels_1 = mic_labels_1.cuda() if torch.cuda.is_available() else mic_labels_1\n mic_labels_2 = self.produce_class_2.get_label(indexes)\n mic_labels_2 = mic_labels_2.cuda() if torch.cuda.is_available() else mic_labels_2\n mic_labels_3 = self.produce_class_3.get_label(indexes)\n mic_labels_3 = mic_labels_3.cuda() if torch.cuda.is_available() else mic_labels_3\n\n # 1\n target = return_d[\"d2\"][\"out_up_sigmoid\"]\n # 2\n # target = return_d[\"d3\"][\"out_up_sigmoid\"]\n\n loss, loss_mic_1, loss_mic_2, loss_mic_3, loss_bce = self.all_loss_fusion(\n return_m[\"m1\"][\"smc_logits\"], return_m[\"m2\"][\"smc_logits\"], return_m[\"m3\"][\"smc_logits\"],\n mic_labels_1, mic_labels_2, mic_labels_3, target, return_d[\"label\"][\"label\"])\n loss.backward()\n self.optimizer.step()\n\n all_loss += loss.item()\n all_loss_mic_1 += loss_mic_1.item()\n all_loss_mic_2 += loss_mic_2.item()\n all_loss_mic_3 += loss_mic_3.item()\n all_loss_bce += loss_bce.item()\n if i % print_ite_num == 0:\n Tools.print(\"[E:{:4d}/{:4d}, b:{:4d}/{:4d}] \"\n \"a loss:{:.2f} loss:{:.2f} \"\n \"a mic 1:{:.2f} mic 1:{:.2f} \"\n \"a mic 2:{:.2f} mic 2:{:.2f} \"\n \"a mic 3:{:.2f} mic 3:{:.2f} \"\n \"a bce:{:.2f} bce:{:.2f}\".format(\n epoch, self.epoch_num, i, len(self.dataloader_usod),\n all_loss/(i+1), loss.item(),\n all_loss_mic_1/(i+1), loss_mic_1.item(),\n all_loss_mic_2/(i+1), loss_mic_2.item(),\n all_loss_mic_3/(i+1), loss_mic_3.item(),\n all_loss_bce/(i+1), loss_bce.item()))\n pass\n\n pass\n\n ###########################################################################\n # 2 保存模型\n if epoch % save_epoch_freq == 0:\n save_file_name = Tools.new_dir(os.path.join(\n self.model_dir, \"{}_train_{:.3f}.pth\".format(epoch, all_loss / len(self.dataloader_usod))))\n torch.save(self.net.state_dict(), save_file_name)\n\n Tools.print()\n Tools.print(\"Save Model to 
{}\".format(save_file_name))\n Tools.print()\n pass\n\n pass\n\n pass\n\n pass\n\n\n#######################################################################################################################\n# 4 Main\n\n\nif __name__ == '__main__':\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\n bas_runner = BASRunner(batch_size_train=8, has_mask=True, more_obj=False,\n model_dir=\"./saved_models/my_train_mic5_decoder8_aug_mask_norm_5bce_d2\")\n bas_runner.load_model('./saved_models/my_train5_diff_aug_mask/125_train_6.569.pth')\n\n bas_runner.train()\n pass\n" ]
[ [ "torch.load", "numpy.asarray", "torch.utils.data.DataLoader", "torch.cuda.is_available", "torch.topk" ], [ "torch.sigmoid", "torch.nn.CrossEntropyLoss", "numpy.log", "torch.load", "numpy.arange", "torch.nn.Conv2d", "torch.utils.data.DataLoader", "torch.nn.functional.adaptive_avg_pool2d", "torch.tensor", "torch.nn.MaxPool2d", "torch.nn.Upsample", "torch.cuda.is_available", "torch.nn.BatchNorm2d", "torch.topk", "torch.nn.ReLU", "numpy.zeros", "torch.nn.MSELoss" ], [ "torch.sigmoid", "torch.nn.CrossEntropyLoss", "numpy.log", "torch.load", "numpy.arange", "torch.nn.Conv2d", "torch.utils.data.DataLoader", "torch.sum", "torch.nn.BCELoss", "torch.nn.functional.adaptive_avg_pool2d", "torch.nn.MaxPool2d", "torch.tensor", "torch.nn.Upsample", "torch.cuda.is_available", "torch.nn.BatchNorm2d", "torch.topk", "torch.nn.ReLU", "numpy.zeros" ] ]
walkagain/name_generator
[ "e7b43c917b8a68563518e65b8d63a6c40fc2285d" ]
[ "name_generator_rnn.py" ]
[ "# -*- coding:utf-8 -*-\r\nfrom __future__ import print_function, unicode_literals, division\r\nfrom io import open\r\nimport glob\r\nimport os\r\nimport unicodedata\r\nimport string\r\nimport argparse\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport random\r\n\r\nimport time\r\nimport math\r\n\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.ticker as ticker\r\n\r\n\r\nall_letters = string.ascii_letters + \" .,;'-\"\r\nn_letters = len(all_letters) + 1 # plus EOS marker\r\n\r\ncategory_line = {}\r\nall_category = []\r\nn_categories = 0\r\ntrain_category = None\r\n\r\nsave_dir=\"data/save\"\r\n\r\ndef parse():\r\n parser = argparse.ArgumentParser(description=\"rnn model for name generator\")\r\n parser.add_argument('-it', '--iteration', type=int, default=100000, help=\"iterations of training\")\r\n parser.add_argument('-p', '--print_every', type=int, default=5000, help=\"print the training result every iterations\")\r\n parser.add_argument('-pl', '--plot_every', type=int, default=500, help=\"plotting the loss every iterations\")\r\n parser.add_argument('-s', '--save_every', type=int, default=5000, help=\"save model params every iterations\")\r\n parser.add_argument('-tr', '--train', action='store_true', help=\"Train the model with dataset\")\r\n parser.add_argument('-te', '--test', action='store_true', help=\"test the saved model\")\r\n parser.add_argument('-lm', '--load_model', help=\"load the saved model(e.g.model/name_generator_model_100000.tar)\")\r\n parser.add_argument('-fn', '--filename', help=\"dataset file for training (e.g.data/names/*.txt)\")\r\n parser.add_argument('-sl', '--single_letter', help=\"generate name with a letter, e.g. -sl A\")\r\n parser.add_argument('-ml', '--multi_letters', help=\"generate names with letters, e.g. -ml ACD\")\r\n parser.add_argument('-lr', '--learning_rate', type=float, default=0.0001, help=\"learning rate for training\")\r\n parser.add_argument('-c', '--category', type=str, choices=['Arabic', 'Chinese', 'Czech', 'Dutch', 'English',\r\n 'French', 'German', 'Greek', 'Irish', 'Italian',\r\n 'Japanese', 'Korean', 'Polish', 'Portuguese', 'Russian',\r\n 'Scottish', 'Spanish', 'Vietnamese'],\r\n help=\"language category to train or test\")\r\n\r\n args = parser.parse_args()\r\n return args\r\n\r\n\r\n# search specify file type\r\ndef findFiles(path):\r\n return glob.glob(path)\r\n\r\n# turn unicode string to ascii plain, thanks to https://stackoverflow.com/a/518232/2809427\r\ndef Unicode2Ascii(s):\r\n return \"\".join(\r\n c for c in unicodedata.normalize(\"NFD\", s)\r\n if c in all_letters\r\n and \"MN\" != unicodedata.category(c))\r\n\r\n# read line from file and split by '\\n'\r\ndef readLines(filePath):\r\n lines = open(filePath, encoding=\"utf-8\").read().strip().split('\\n')\r\n return [Unicode2Ascii(line) for line in lines]\r\n\r\n# create dataset from files\r\n\"\"\"\r\nargs: filename with regular expression like data/names/*.txt\r\n\"\"\"\r\ndef loadTrainingDataset(filenames):\r\n global category_line\r\n global all_category\r\n global n_categories\r\n for fileName in findFiles(filenames):\r\n category = os.path.splitext(os.path.basename(fileName))[0]\r\n all_category.append(category)\r\n lines = readLines(fileName)\r\n category_line[category] = lines\r\n\r\n n_categories = len(all_category)\r\n if n_categories == 0:\r\n raise RuntimeError('Data not found. 
Make sure that you downloaded data '\r\n 'from https://download.pytorch.org/tutorial/data.zip and extract it to '\r\n 'the current directory.')\r\n\r\n # print(all_category)\r\n return category_line, all_category, n_categories\r\n\r\nclass RNN(nn.Module):\r\n def __init__(self, input_size, hidden_size, output_size):\r\n super(RNN, self).__init__()\r\n self.hidden_size = hidden_size\r\n self.i2h = nn.Linear(n_categories + input_size + hidden_size, hidden_size)\r\n self.i2o = nn.Linear(n_categories + input_size + hidden_size, output_size)\r\n self.o2o = nn.Linear(output_size + hidden_size, output_size)\r\n self.dropout = nn.Dropout(0.1)\r\n\r\n self.softmax = nn.LogSoftmax(dim=1)\r\n\r\n def forward(self, categories, input, hidden):\r\n in_combined = torch.cat((categories, input, hidden), dim=1)\r\n hidden = self.i2h(in_combined)\r\n output = self.i2o(in_combined)\r\n\r\n out_combined = torch.cat((output, hidden), dim=1)\r\n output = self.o2o(out_combined)\r\n\r\n output = self.softmax(self.dropout(output))\r\n return output, hidden\r\n\r\n def InitHidden(self):\r\n return torch.zeros(1, self.hidden_size)\r\n\r\n\r\n# prepare data for training\r\n# choose a item from list randomly\r\ndef randomChoice(l):\r\n return l[random.randint(0, len(l) -1)]\r\n\r\n# choose training data pairs\r\ndef randomTrainingPairs(category=None):\r\n global train_category\r\n if category is None:\r\n category = randomChoice(all_category)\r\n train_category = category\r\n name = randomChoice(category_line[category])\r\n return category, name\r\n\r\n\r\n# one-hot vector for category\r\ndef CategoryTensor(category):\r\n tensor = torch.zeros(1, n_categories)\r\n idx = all_category.index(category)\r\n tensor[0][idx] = 1\r\n return tensor\r\n\r\n# one-hot matrix for input, ont include EOS\r\ndef InputTensor(line):\r\n tensor = torch.zeros(len(line), 1, n_letters)\r\n for idx in range(len(line)):\r\n letter = line[idx]\r\n tensor[idx][0][all_letters.find(letter)] = 1\r\n\r\n return tensor\r\n\r\n# longTensor for second letter to EOS\r\ndef TargetTensor(line):\r\n letter_indexes = [all_letters.find(line[idx]) for idx in range(1, len(line))]\r\n letter_indexes.append(n_letters - 1) # add index of EOS\r\n return torch.LongTensor(letter_indexes)\r\n\r\n# make category, input and target tensors from random category, line pairs\r\n\r\ndef randomTrainingSample(category=None):\r\n category, line = randomTrainingPairs(category)\r\n category_tensor = CategoryTensor(category)\r\n input_line_tensor = InputTensor(line)\r\n target_line_tensor = TargetTensor(line)\r\n\r\n return category_tensor, input_line_tensor, target_line_tensor\r\n\r\ndef train(category_tensor, input_line_tensor, target_line_tensor):\r\n target_line_tensor.unsqueeze_(-1)\r\n hidden = rnn.InitHidden()\r\n rnn.zero_grad()\r\n loss = 0\r\n\r\n for i in range(input_line_tensor.size(0)):\r\n output, hidden = rnn(category_tensor, input_line_tensor[i], hidden)\r\n per_loss = criterion(output, target_line_tensor[i])\r\n\r\n loss += per_loss\r\n\r\n loss.backward()\r\n\r\n for p in rnn.parameters():\r\n p.data.add_(-lr, p.grad.data)\r\n\r\n return output, loss.item() / input_line_tensor.size(0)\r\n\r\ndef TimeCalulate(since):\r\n now = time.time()\r\n interval = now - since\r\n m = math.floor(interval/60)\r\n s = interval - 60 * m\r\n return \"%dm %ds\" %(m,s)\r\n\r\ndef runTrainingModel(n_iters=100000, print_every=5000, plot_every=500, save_every=5000, category=None, modelFile=None):\r\n all_losses = []\r\n total_loss = 0 # Reset every plot_every iters\r\n start = 
time.time()\r\n\r\n checkpoint = None\r\n start_iteration = 1\r\n if modelFile:\r\n checkpoint = torch.load(modelFile)\r\n rnn.load_state_dict(checkpoint[\"rnn\"])\r\n start_iteration = checkpoint[\"iteration\"]\r\n\r\n for iter in range(start_iteration, n_iters + 1):\r\n output, loss = train(*randomTrainingSample(category))\r\n total_loss += loss\r\n\r\n if iter % print_every == 0:\r\n print('%s (%d %d%%) %.4f' % (TimeCalulate(start), iter, iter / n_iters * 100, loss))\r\n\r\n if iter % plot_every == 0:\r\n all_losses.append((total_loss / plot_every) if (iter - start_iteration >= plot_every) else loss)\r\n total_loss = 0\r\n\r\n if iter % save_every == 0:\r\n directory = os.path.join(save_dir, 'model')\r\n if not os.path.exists(directory):\r\n os.makedirs(directory)\r\n torch.save({\r\n 'iteration': iter,\r\n 'rnn': rnn.state_dict(),\r\n 'category': train_category,\r\n 'loss': loss\r\n }, os.path.join(directory, '{}_{}.tar'.format('name_generator_model', iter)))\r\n\r\n return all_losses\r\n\r\n# sample from a category and starting letter\r\ndef Sample(category, start_letter='A', modelFile=None, max_lenght = 20):\r\n if modelFile:\r\n checkpoint = torch.load(modelFile)\r\n rnn.load_state_dict(checkpoint[\"rnn\"])\r\n if category is None:\r\n category = checkpoint[\"category\"]\r\n\r\n hidden = rnn.InitHidden()\r\n category_tensor = CategoryTensor(category)\r\n input_tensor = InputTensor(start_letter)\r\n output_name = start_letter\r\n for i in range(max_lenght):\r\n output, hidden = rnn(category_tensor, input_tensor[0], hidden)\r\n topv, topi = output.topk(1)\r\n\r\n idx = topi[0][0]\r\n if idx == n_letters - 1: break\r\n else:\r\n letter = all_letters[idx]\r\n output_name += letter\r\n input_tensor = InputTensor(letter)\r\n return output_name\r\n\r\ndef Sampeles(category, start_letters=\"ABC\", modelFile=None):\r\n names = []\r\n for letter in start_letters:\r\n names.append(Sample(category, letter, modelFile))\r\n return names\r\n\r\ndef run(args):\r\n modelFile = None\r\n if args.load_model:\r\n modelFile = args.load_model\r\n\r\n category = None\r\n if args.category:\r\n category = args.category\r\n if args.test:\r\n if modelFile is None:\r\n raise RuntimeError('Please choose a saved model to load')\r\n\r\n if args.single_letter:\r\n start_letter = args.single_letter\r\n print(Sample(category, start_letter, modelFile))\r\n elif args.multi_letters:\r\n print(Sampeles(category, args.multi_letters, modelFile))\r\n\r\n else:\r\n raise RuntimeError(\"please specify evaluate mode\")\r\n\r\n elif args.train:\r\n runTrainingModel(category=category, modelFile=modelFile)\r\n\r\n else:\r\n raise RuntimeError(\"please specify running mode[test/train]\")\r\n\r\nif __name__==\"__main__\":\r\n\r\n args = parse()\r\n filename = \"data/names/*.txt\"\r\n if args.filename:\r\n filename = args.filename\r\n loadTrainingDataset(filename)\r\n\r\n criterion = nn.NLLLoss()\r\n lr = 0.0001\r\n if args.learning_rate:\r\n lr = args.learning_rate\r\n\r\n rnn = RNN(n_letters, 128, n_letters)\r\n run(args)" ]
[ [ "torch.nn.NLLLoss", "torch.LongTensor", "torch.nn.Dropout", "torch.nn.LogSoftmax", "torch.zeros", "torch.cat", "torch.load", "torch.nn.Linear" ] ]
dan1keen/dissertation_counter
[ "1265ee9563d349849c9a68d204e0f427e33f0f48" ]
[ "kalman_tracker/main.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport glob\n#from moviepy.editor import VideoFileClip\nfrom collections import deque\nfrom sklearn.utils.linear_assignment_ import linear_assignment\n\nfrom kalman_tracker import helpers\nfrom kalman_tracker import detector\nfrom kalman_tracker import tracker\nimport cv2\n\n# Global variables to be used by funcitons of VideoFileClop\nframe_count = 0 # frame counter\n\nmax_age = 15 # no.of consecutive unmatched detection before\n # a track is deleted\n\nmin_hits =1 # no. of consecutive matches needed to establish a track\n\ntracker_list =[] # list for trackers\n# list for track ID\ntrack_id_list= deque(['1', '2', '3', '4', '5', '6', '7', '7', '8', '9', '10'])\n\ndef assign_detections_to_trackers(trackers, detections, iou_thrd = 0.3):\n '''\n From current list of trackers and new detections, output matched detections,\n unmatchted trackers, unmatched detections.\n '''\n\n IOU_mat = np.zeros((len(trackers), len(detections)), dtype=np.float32)\n for t, trk in enumerate(trackers):\n # trk = convert_to_cv2bbox(trk)\n for d, det in enumerate(detections):\n # det = convert_to_cv2bbox(det)\n IOU_mat[t, d] = helpers.box_iou2(trk, det)\n\n # Produces matches\n # Solve the maximizing the sum of IOU assignment problem using the\n # Hungarian algorithm (also known as Munkres algorithm)\n\n matched_idx = linear_assignment(-IOU_mat)\n\n unmatched_trackers, unmatched_detections = [], []\n for t, trk in enumerate(trackers):\n if (t not in matched_idx[:, 0]):\n unmatched_trackers.append(t)\n\n for d, det in enumerate(detections):\n if (d not in matched_idx[:, 1]):\n unmatched_detections.append(d)\n\n matches = []\n # For creating trackers we consider any detection with an\n # overlap less than iou_thrd to signifiy the existence of\n # an untracked object\n\n for m in matched_idx:\n if (IOU_mat[m[0], m[1]] < iou_thrd):\n unmatched_trackers.append(m[0])\n unmatched_detections.append(m[1])\n else:\n matches.append(m.reshape(1, 2))\n\n if (len(matches) == 0):\n matches = np.empty((0, 2), dtype=int)\n else:\n matches = np.concatenate(matches, axis=0)\n\n return matches, np.array(unmatched_detections), np.array(unmatched_trackers)\n\ndef pipeline(img, det):\n '''\n Pipeline function for detection and tracking\n '''\n global frame_count\n global tracker_list\n global max_age\n global min_hits\n global track_id_list\n\n frame_count+=1\n\n img_dim = (img.shape[1], img.shape[0])\n z_box = det.get_localization(img) # measurement\n x_box = []\n font = cv2.FONT_HERSHEY_SIMPLEX\n\n if len(tracker_list) > 0:\n for trk in tracker_list:\n x_box.append(trk.box)\n\n matched, unmatched_dets, unmatched_trks \\\n = assign_detections_to_trackers(x_box, z_box, iou_thrd = 0.3)\n\n # Deal with matched detections\n if matched.size > 0:\n for trk_idx, det_idx in matched:\n z = z_box[det_idx]\n z = np.expand_dims(z, axis=0).T\n tmp_trk = tracker_list[trk_idx]\n tmp_trk.kalman_filter(z)\n xx = tmp_trk.x_state.T[0].tolist()\n xx = [xx[0], xx[2], xx[4], xx[6]]\n x_box[trk_idx] = xx\n tmp_trk.box = xx\n tmp_trk.hits += 1\n\n # Deal with unmatched detections\n if len(unmatched_dets) > 0:\n for idx in unmatched_dets:\n z = z_box[idx]\n z = np.expand_dims(z, axis=0).T\n tmp_trk = tracker.Tracker() # Create a new tracker\n x = np.array([[z[0], 0, z[1], 0, z[2], 0, z[3], 0]]).T\n tmp_trk.x_state = x\n tmp_trk.predict_only()\n xx = tmp_trk.x_state\n xx = xx.T[0].tolist()\n xx = [xx[0], xx[2], xx[4], xx[6]]\n tmp_trk.box = xx\n tmp_trk.id = track_id_list.popleft() # assign an ID for the 
tracker\n print(tmp_trk.id, 'wasd')\n tracker_list.append(tmp_trk)\n x_box.append(xx)\n\n # Deal with unmatched tracks\n if len(unmatched_trks) > 0:\n for trk_idx in unmatched_trks:\n tmp_trk = tracker_list[trk_idx]\n tmp_trk.no_losses += 1\n tmp_trk.predict_only()\n xx = tmp_trk.x_state\n xx = xx.T[0].tolist()\n xx = [xx[0], xx[2], xx[4], xx[6]]\n tmp_trk.box = xx\n x_box[trk_idx] = xx\n\n # The list of tracks to be annotated\n good_tracker_list = []\n for trk in tracker_list:\n if ((trk.hits >= min_hits) and (trk.no_losses <= max_age)):\n good_tracker_list.append(trk)\n x_cv2 = trk.box\n img = helpers.draw_box_label(trk.id, img, x_cv2) # Draw the bounding boxes on the images\n tracker_coordinate = (x_cv2[0] + x_cv2[2]) / 2\n # if (tracker_coordinate >= roi):\n # counter.append(trk)\n # Book keeping\n deleted_tracks = filter(lambda x: x.no_losses > max_age, tracker_list)\n\n for trk in deleted_tracks:\n track_id_list.append(trk.id)\n\n tracker_list = [x for x in tracker_list if x.no_losses <= max_age]\n # cv2.line(img, (roi, 0), (roi, height), (0, 0, 0xFF), 5)\n # cv2.line(img, (0, roi), (width, roi), (0, 0, 0xFF), 5)\n\n cv2.putText(img,\n 'Detected Pedestrians: ' + str(len(good_tracker_list)),\n (10, 35),\n font,\n 0.8,\n (0, 0xFF, 0xFF),\n 2,\n cv2.LINE_4)\n\n # cv2.putText(\n # img,\n # 'ROI Line',\n # (545, roi - 10),\n # font,\n # 0.6,\n # (0, 0, 0xFF),\n # 2,\n # cv2.LINE_AA,\n # )\n\n cv2.imshow(\"frame\", img)\n return img\n\nif __name__ == \"__main__\":\n det = detector.PersonDetector()\n cap = cv2.VideoCapture('inputs/example_01.mp4')\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n out = cv2.VideoWriter('output.avi', fourcc, 8.0, (640, 480))\n roi = 200\n counter = []\n\n if cap.isOpened():\n # get cap property\n width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n while (True):\n\n ret, img = cap.read()\n # print(img)\n\n np.asarray(img)\n font = cv2.FONT_HERSHEY_SIMPLEX\n # trackers_count = pipeline(img)[1]\n\n new_img = pipeline(img)\n out.write(new_img)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n cap.release()\n cv2.destroyAllWindows()\n" ]
[ [ "sklearn.utils.linear_assignment_.linear_assignment", "numpy.expand_dims", "numpy.asarray", "numpy.concatenate", "numpy.array", "numpy.empty" ] ]
s10singh97/GSQuantify2018
[ "a18df022414659cafdbc010df31db5a4f957a1d6" ]
[ "1.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndataset = pd.read_csv('train.csv')\nX = dataset.iloc[:, 1:4].values\ny = dataset.iloc[:, 0].values\n\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nlabelencoder_X_1 = LabelEncoder()\nX[:, 2] = labelencoder_X_1.fit_transform(X[:, 2])\nonehotencoder = OneHotEncoder(categorical_features = [2])\nX = onehotencoder.fit_transform(X).toarray()\nX = X[:, 1:]\n\nfrom sklearn.tree import DecisionTreeRegressor\nregressor = DecisionTreeRegressor(random_state = 0)\nregressor.fit(X, y)\n\ndataset_test = pd.read_csv('private_test_x.csv')\nX_test = dataset_test.iloc[:, 1:4].values\n\nX_test[:, 2] = labelencoder_X_1.transform(X_test[:, 2])\nX_test = onehotencoder.transform(X_test).toarray()\nX_test = X_test[:, 1:]\n\ny_expected = dataset_test.iloc[:, 0].values\ny_pred = regressor.predict(X_test)\ny_pred = y_pred.astype(np.int64)\n\nxx = dataset_test.iloc[:, 1:4].values\nxx = xx.tolist()\n#xx[:, 0:2] = xx[:, 0:2].astype(np.int64)\noutput = np.column_stack((y_pred, xx))\nheaders = [\"Usage\", \"Timestep\", \"InventoryCode\", \"Domain\"]\nop = np.row_stack((headers, output))\ndf = pd.DataFrame(op)\ndf.to_csv(\"privatetest.csv\", index = False, header = None)\n\nfinal = pd.read_csv(\"privatetest.csv\")" ]
[ [ "pandas.read_csv", "sklearn.tree.DecisionTreeRegressor", "sklearn.preprocessing.OneHotEncoder", "pandas.DataFrame", "numpy.row_stack", "numpy.column_stack", "sklearn.preprocessing.LabelEncoder" ] ]
nicehiro/multiagent-particle-envs
[ "9028a9f73306b4044d352dd46356ed451ca82c7b" ]
[ "multiagent/environment.py" ]
[ "import gym\nfrom gym import spaces\nfrom gym.envs.registration import EnvSpec\nimport numpy as np\nfrom gym.spaces import MultiDiscrete\n\n\nclass MultiAgentEnv(gym.Env):\n \"\"\"Environment for all agents in the multiagent world.\n currently code assumes that no agents will be created/destroyed at runtime!\n \"\"\"\n metadata = {\n 'render.modes' : ['human', 'rgb_array']\n }\n\n def __init__(self, world, reset_callback=None, reward_callback=None,\n observation_callback=None, info_callback=None,\n done_callback=None, shared_viewer=True):\n\n self.world = world\n self.agents = self.world.policy_agents\n # set required vectorized gym env property\n self.n = len(world.policy_agents)\n # scenario callbacks\n self.reset_callback = reset_callback\n self.reward_callback = reward_callback\n self.observation_callback = observation_callback\n self.info_callback = info_callback\n self.done_callback = done_callback\n # environment parameters\n self.discrete_action_space = True\n # if true, action is a number 0...N, otherwise action is a one-hot N-dimensional vector\n self.discrete_action_input = False\n # if true, even the action is continuous, action will be performed discretely\n self.force_discrete_action = world.discrete_action if hasattr(world, 'discrete_action') else False\n # if true, every agent has the same reward\n self.shared_reward = world.collaborative if hasattr(world, 'collaborative') else False\n self.time = 0\n\n # configure spaces\n self.action_space = []\n self.observation_space = []\n for agent in self.agents:\n total_action_space = []\n # physical action space\n if self.discrete_action_space:\n u_action_space = spaces.Discrete(world.dim_p * 2 + 1)\n else:\n u_action_space = spaces.Box(low=-agent.u_range, high=+agent.u_range, shape=(world.dim_p,), dtype=np.float32)\n if agent.movable:\n total_action_space.append(u_action_space)\n # communication action space\n if self.discrete_action_space:\n c_action_space = spaces.Discrete(world.dim_c)\n else:\n c_action_space = spaces.Box(low=0.0, high=1.0, shape=(world.dim_c,), dtype=np.float32)\n if not agent.silent:\n total_action_space.append(c_action_space)\n # total action space\n if len(total_action_space) > 1:\n # all action spaces are discrete, so simplify to MultiDiscrete action space\n if all([isinstance(act_space, spaces.Discrete) for act_space in total_action_space]):\n act_space = MultiDiscrete([[0, act_space.n - 1] for act_space in total_action_space])\n else:\n act_space = spaces.Tuple(total_action_space)\n self.action_space.append(act_space)\n else:\n self.action_space.append(total_action_space[0])\n # observation space\n obs_dim = len(observation_callback(agent, self.world))\n self.observation_space.append(spaces.Box(low=-np.inf, high=+np.inf, shape=(obs_dim,), dtype=np.float32))\n agent.action.c = np.zeros(self.world.dim_c)\n\n # rendering\n self.shared_viewer = shared_viewer\n if self.shared_viewer:\n self.viewers = [None]\n else:\n self.viewers = [None] * self.n\n self._reset_render()\n\n def step(self, action_n):\n \"\"\"Make a step for every movable agent.\n \"\"\"\n obs_n = []\n reward_n = []\n done_n = []\n info_n = {'n': []}\n self.agents = self.world.policy_agents\n # set action for each agent\n for i, agent in enumerate(self.agents):\n self._set_action(action_n[i], agent, self.action_space[i])\n # advance world state\n self.world.step()\n # record observation for each agent\n for agent in self.agents:\n obs_n.append(self._get_obs(agent))\n reward_n.append(self._get_reward(agent))\n 
done_n.append(self._get_done(agent))\n\n info_n['n'].append(self._get_info(agent))\n\n # all agents get total reward in cooperative case\n reward = np.sum(reward_n)\n if self.shared_reward:\n reward_n = [reward] * self.n\n\n return obs_n, reward_n, done_n, info_n\n\n def reset(self):\n \"\"\"Reset environment and agents.\n \"\"\"\n # reset world\n self.reset_callback(self.world)\n # reset renderer\n self._reset_render()\n # record observations for each agent\n obs_n = []\n self.agents = self.world.policy_agents\n for agent in self.agents:\n obs_n.append(self._get_obs(agent))\n return obs_n\n\n def _get_info(self, agent):\n \"\"\"Get info used for benchmarking.\n \"\"\"\n if self.info_callback is None:\n return {}\n return self.info_callback(agent, self.world)\n\n def _get_obs(self, agent):\n \"\"\"Get observation for a particular agent.\n \"\"\"\n if self.observation_callback is None:\n return np.zeros(0)\n return self.observation_callback(agent, self.world)\n\n def _get_done(self, agent):\n \"\"\"Get dones for a particular agent.\n TODO: Unused right now -- agents are allowed to go beyond the viewing screen.\n \"\"\"\n if self.done_callback is None:\n return False\n return self.done_callback(agent, self.world)\n\n def _get_reward(self, agent):\n \"\"\"Get reward for a particular agent.\n \"\"\"\n if self.reward_callback is None:\n return 0.0\n return self.reward_callback(agent, self.world)\n\n def _set_action(self, action, agent, action_space, time=None):\n \"\"\"Set env action for a particular agent.\n \"\"\"\n agent.action.u = np.zeros(self.world.dim_p)\n agent.action.c = np.zeros(self.world.dim_c)\n # process action\n if isinstance(action_space, MultiDiscrete):\n act = []\n size = action_space.high - action_space.low + 1\n index = 0\n for s in size:\n act.append(action[index:(index+s)])\n index += s\n action = act\n else:\n action = [action]\n\n if agent.movable:\n # physical action\n if self.discrete_action_input:\n agent.action.u = np.zeros(self.world.dim_p)\n # process discrete action\n if action[0] == 1: agent.action.u[0] = -1.0\n if action[0] == 2: agent.action.u[0] = +1.0\n if action[0] == 3: agent.action.u[1] = -1.0\n if action[0] == 4: agent.action.u[1] = +1.0\n else:\n if self.force_discrete_action:\n d = np.argmax(action[0])\n action[0][:] = 0.0\n action[0][d] = 1.0\n if self.discrete_action_space:\n agent.action.u[0] += action[0][1] - action[0][2]\n agent.action.u[1] += action[0][3] - action[0][4]\n else:\n agent.action.u = action[0]\n sensitivity = 5.0\n if agent.accel is not None:\n sensitivity = agent.accel\n agent.action.u *= sensitivity\n action = action[1:]\n if not agent.silent:\n # communication action\n if self.discrete_action_input:\n agent.action.c = np.zeros(self.world.dim_c)\n agent.action.c[action[0]] = 1.0\n else:\n agent.action.c = action[0]\n action = action[1:]\n # make sure we used all elements of action\n assert len(action) == 0\n\n # reset rendering assets\n def _reset_render(self):\n self.render_geoms = None\n self.render_geoms_xform = None\n\n # render environment\n def render(self, mode='human'):\n if mode == 'human':\n alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n message = ''\n for agent in self.world.agents:\n comm = []\n for other in self.world.agents:\n if other is agent: continue\n if np.all(other.state.c == 0):\n word = '_'\n else:\n word = alphabet[np.argmax(other.state.c)]\n message += (other.name + ' to ' + agent.name + ': ' + word + ' ')\n print(message)\n\n for i in range(len(self.viewers)):\n # create viewers (if necessary)\n if 
self.viewers[i] is None:\n # import rendering only if we need it (and don't import for headless machines)\n #from gym.envs.classic_control import rendering\n from multiagent import rendering\n self.viewers[i] = rendering.Viewer(700,700)\n\n # create rendering geometry\n if self.render_geoms is None:\n # import rendering only if we need it (and don't import for headless machines)\n #from gym.envs.classic_control import rendering\n from multiagent import rendering\n self.render_geoms = []\n self.render_geoms_xform = []\n for entity in self.world.entities:\n geom = rendering.make_circle(entity.size)\n xform = rendering.Transform()\n if 'agent' in entity.name:\n geom.set_color(*entity.color, alpha=0.5)\n else:\n geom.set_color(*entity.color)\n geom.add_attr(xform)\n self.render_geoms.append(geom)\n self.render_geoms_xform.append(xform)\n\n # add geoms to viewer\n for viewer in self.viewers:\n viewer.geoms = []\n for geom in self.render_geoms:\n viewer.add_geom(geom)\n\n results = []\n for i in range(len(self.viewers)):\n from multiagent import rendering\n # update bounds to center around agent\n cam_range = 1\n if self.shared_viewer:\n pos = np.zeros(self.world.dim_p)\n else:\n pos = self.agents[i].state.p_pos\n self.viewers[i].set_bounds(pos[0]-cam_range,pos[0]+cam_range,pos[1]-cam_range,pos[1]+cam_range)\n # update geometry positions\n for e, entity in enumerate(self.world.entities):\n self.render_geoms_xform[e].set_translation(*entity.state.p_pos)\n # render to display or array\n results.append(self.viewers[i].render(return_rgb_array = mode=='rgb_array'))\n\n return results\n\n def _make_receptor_locations(self, agent):\n \"\"\"Create receptor field locations in local coordinate frame.\n \"\"\"\n receptor_type = 'polar'\n range_min = 0.05 * 2.0\n range_max = 1.00\n dx = []\n # circular receptive field\n if receptor_type == 'polar':\n for angle in np.linspace(-np.pi, +np.pi, 8, endpoint=False):\n for distance in np.linspace(range_min, range_max, 3):\n dx.append(distance * np.array([np.cos(angle), np.sin(angle)]))\n # add origin\n dx.append(np.array([0.0, 0.0]))\n # grid receptive field\n if receptor_type == 'grid':\n for x in np.linspace(-range_max, +range_max, 5):\n for y in np.linspace(-range_max, +range_max, 5):\n dx.append(np.array([x,y]))\n return dx\n\n\nclass BatchMultiAgentEnv(gym.Env):\n \"\"\"Vectorized wrapper for a batch of multi-agent environments.\n assumes all environments have the same observation and action space\n \"\"\"\n metadata = {\n 'runtime.vectorized': True,\n 'render.modes' : ['human', 'rgb_array']\n }\n\n def __init__(self, env_batch):\n self.env_batch = env_batch\n\n @property\n def n(self):\n return np.sum([env.n for env in self.env_batch])\n\n @property\n def action_space(self):\n return self.env_batch[0].action_space\n\n @property\n def observation_space(self):\n return self.env_batch[0].observation_space\n\n def step(self, action_n, time):\n obs_n = []\n reward_n = []\n done_n = []\n info_n = {'n': []}\n i = 0\n for env in self.env_batch:\n obs, reward, done, _ = env.step(action_n[i:(i+env.n)], time)\n i += env.n\n obs_n += obs\n # reward = [r / len(self.env_batch) for r in reward]\n reward_n += reward\n done_n += done\n return obs_n, reward_n, done_n, info_n\n\n def reset(self):\n obs_n = []\n for env in self.env_batch:\n obs_n += env.reset()\n return obs_n\n\n # render environment\n def render(self, mode='human', close=True):\n results_n = []\n for env in self.env_batch:\n results_n += env.render(mode, close)\n return results_n\n" ]
[ [ "numpy.linspace", "numpy.cos", "numpy.sin", "numpy.all", "numpy.argmax", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
artanzand/neural_style_transfer
[ "134ff775a706e1c08d836b43e11986b6f2d00543" ]
[ "stylize.py" ]
[ "# author: Artan Zandian\r\n# date: 2022-01-22\r\n\r\n\"\"\"\r\nReads two source images, one as the initial content image and second as the target style image,\r\nand applies Neural Style Transfer on the content image to create a stylized rendering of the content\r\nimage based on the texture and style of the style image.\r\nUsage: python stylize.py --content <content image> --style <style image> --save <save directory> --similarity <direction> --epochs <num_iter>\r\nOptions:\r\n--content=<image_path> file path of the content image - initial \r\n--style=<csv_path> file path of the style image - target\r\n--save=<save_path> file path to save the stylized image without image format\r\n--similarity=<direction> Whether the generated image is similar to \"content\", \"style\", \"balanced\"\r\n--epochs=<num_iter> number of epochs - 2,000 for speed, 10,000 for quality\r\n\"\"\"\r\n\r\n\r\nfrom PIL import Image\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\n\r\nfrom docopt import docopt\r\n\r\nopt = docopt(__doc__)\r\n\r\n\r\ndef main(content, style, save, similarity=\"balanced\", epochs=500):\r\n \"\"\"\r\n The main function reads two source images, one as the initial content image\r\n and second as the target style image, and applies Neural Style Transfer on\r\n the content image to create a stylized rendering of the content image based on\r\n the texture and style of the style image.\r\n Parameters\r\n ----------\r\n content: str\r\n The image path to the content image to start from\r\n style: str\r\n The image path to the target style image\r\n save: str\r\n The path to save the image without image type\r\n similarity: str, optional\r\n whether the generate image is similar to 'content', 'style' or 'balanced'\r\n epochs: int, optional\r\n number of iterations to train the generate image.\r\n Returns\r\n -------\r\n image\r\n saved stylized image\r\n \"\"\"\r\n # Exception handelings\r\n try:\r\n type(int(epochs)) == int\r\n except Exception:\r\n raise (\"epochs should be an integer value!\")\r\n\r\n try:\r\n # Limit the image size to increase performance\r\n image_size = 400\r\n\r\n # capture content image size to reshape at end\r\n content_image = Image.open(content)\r\n content_width, content_height = content_image.size\r\n\r\n # Load pretrained VGG19 model\r\n vgg = tf.keras.applications.VGG19(\r\n include_top=False,\r\n input_shape=(image_size, image_size, 3),\r\n weights=\"imagenet\",\r\n )\r\n # Lock in the model weights\r\n vgg.trainable = False\r\n\r\n # Load Content and Style images\r\n content_image = preprocess_image(content, image_size)\r\n style_image = preprocess_image(style, image_size)\r\n\r\n # Randomly initialize Generated image\r\n # Define the generated image as as tensorflow variable to optimize\r\n generated_image = tf.Variable(\r\n tf.image.convert_image_dtype(content_image, tf.float32)\r\n )\r\n # Add random noise to initial generated image\r\n noise = tf.random.uniform(tf.shape(generated_image), -0.25, 0.25)\r\n generated_image = tf.add(generated_image, noise)\r\n generated_image = tf.clip_by_value(\r\n generated_image, clip_value_min=0.0, clip_value_max=1.0\r\n )\r\n\r\n # Define output layers\r\n style_layers = get_style_layers(similarity=similarity)\r\n content_layer = [(\"block5_conv4\", 1)] # The last layer of VGG19\r\n\r\n vgg_model_outputs = get_layer_outputs(vgg, style_layers + content_layer)\r\n\r\n # Content encoder\r\n # Define activation encoding for the content image (a_C)\r\n # Assign content image as the input of 
VGG19\r\n preprocessed_content = tf.Variable(\r\n tf.image.convert_image_dtype(content_image, tf.float32)\r\n )\r\n a_C = vgg_model_outputs(preprocessed_content)\r\n\r\n # Style encoder\r\n # Define activation encoding for the style image (a_S)\r\n # Assign style image as the input of VGG19\r\n preprocessed_style = tf.Variable(\r\n tf.image.convert_image_dtype(style_image, tf.float32)\r\n )\r\n a_S = vgg_model_outputs(preprocessed_style)\r\n\r\n # Initialize the optimizer\r\n optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)\r\n # Need to redefine the clipped image as a tf.variable to be optimized\r\n generated_image = tf.Variable(generated_image)\r\n\r\n # Check if GPU is available\r\n print(\"Num GPUs Available: \", len(tf.config.list_physical_devices(\"GPU\")))\r\n\r\n # Train the model\r\n epochs = int(epochs)\r\n for i in range(epochs):\r\n train_step(\r\n generated_image, vgg_model_outputs, style_layers, optimizer, a_C, a_S\r\n )\r\n if i % 500 == 0:\r\n print(f\"Epoch {i} >>>\")\r\n\r\n # Resize to original size and save\r\n image = tensor_to_image(generated_image)\r\n image = image.resize((content_width, content_height))\r\n image.save(save + \".jpg\")\r\n print(\"Image saved.\")\r\n\r\n except Exception as message:\r\n print(message)\r\n\r\n\r\ndef get_layer_outputs(vgg, layer_names):\r\n \"\"\"\r\n Creates a vgg model that returns a list of intermediate output values.\r\n \"\"\"\r\n outputs = [vgg.get_layer(layer[0]).output for layer in layer_names]\r\n\r\n model = tf.keras.Model([vgg.input], outputs)\r\n return model\r\n\r\n\r\ndef get_style_layers(similarity=\"balanced\"):\r\n \"\"\"\r\n Assigns weights to style layer outputs to define whether the generated image\r\n is similar to \"content\", \"style\", or \"balanced\". The function is picking the\r\n last convolutional layer in each of the five blocks of the VGG network. 
The\r\n activations of each of these layers along with the content layer (last layer)\r\n will be the outputs of the neural style transfer network.\r\n Parameters\r\n ----------\r\n similarity: str, optional\r\n a string identifying the similarity to either content, style or both\r\n Returns\r\n -------\r\n style_layers\r\n a list of tuples identifying the name of style layer along with their weights\r\n \"\"\"\r\n if similarity == \"balanced\":\r\n style_layers = [\r\n (\"block1_conv1\", 0.2),\r\n (\"block2_conv1\", 0.2),\r\n (\"block3_conv1\", 0.2),\r\n (\"block4_conv1\", 0.2),\r\n (\"block5_conv1\", 0.2),\r\n ]\r\n elif similarity == \"content\":\r\n style_layers = [\r\n (\"block1_conv1\", 0.02),\r\n (\"block2_conv1\", 0.08),\r\n (\"block3_conv1\", 0.2),\r\n (\"block4_conv1\", 0.3),\r\n (\"block5_conv1\", 0.4),\r\n ]\r\n elif similarity == \"style\":\r\n style_layers = [\r\n (\"block1_conv1\", 0.4),\r\n (\"block2_conv1\", 0.3),\r\n (\"block3_conv1\", 0.2),\r\n (\"block4_conv1\", 0.08),\r\n (\"block5_conv1\", 0.02),\r\n ]\r\n else:\r\n raise Exception(\r\n \"Please provide either of 'content', 'style' or 'balanced' for --similarity\"\r\n )\r\n\r\n return style_layers\r\n\r\n\r\ndef preprocess_image(image_path, image_size):\r\n \"\"\"\r\n loads the image and makes it compatible with VGG input size\r\n Parameters\r\n ----------\r\n image_path: str\r\n directory path of the image\r\n Returns\r\n -------\r\n image\r\n loaded and standardaized image\r\n \"\"\"\r\n # Load and resize Content and Style images to a square image\r\n image = np.array(Image.open(image_path).resize((image_size, image_size)))\r\n # Add one dim for VGG compatibility\r\n image = tf.constant(np.reshape(image, ((1,) + image.shape)))\r\n\r\n return image\r\n\r\n\r\ndef tensor_to_image(tensor):\r\n \"\"\"\r\n Converts the calculated final vector into a PIL image\r\n Parameters\r\n ----------\r\n tensor: Tensor\r\n Returns\r\n -------\r\n Image\r\n A PIL image\r\n \"\"\"\r\n tensor = tensor * 255\r\n tensor = np.array(tensor, dtype=np.uint8)\r\n if np.ndim(tensor) > 3:\r\n tensor = tensor[0]\r\n return Image.fromarray(tensor)\r\n\r\n\r\[email protected]()\r\ndef train_step(generated_image, vgg_model_outputs, style_layers, optimizer, a_C, a_S):\r\n \"\"\"\r\n Uses precomputed encoded images a_S and a_C as constants, calculates\r\n a_G as the encoding of the newly generated image, and uses the three\r\n to compute the cost function, and respectively, one gradient step.\r\n Parameters\r\n ----------\r\n generated_image: tensor\r\n image in shape of a vector\r\n \"\"\"\r\n with tf.GradientTape() as tape:\r\n\r\n # a_G as the vgg_model_outputs for the current generated image\r\n a_G = vgg_model_outputs(generated_image)\r\n\r\n # Compute content cost\r\n J_content = compute_content_cost(a_C, a_G)\r\n\r\n # Compute style cost\r\n J_style = compute_style_cost(a_S, a_G, style_layers)\r\n\r\n # Compute total cost\r\n J = total_cost(J_content, J_style, alpha=10, beta=40)\r\n\r\n grad = tape.gradient(J, generated_image)\r\n\r\n optimizer.apply_gradients([(grad, generated_image)])\r\n generated_image.assign(\r\n tf.clip_by_value(generated_image, clip_value_min=0.0, clip_value_max=1.0)\r\n )\r\n\r\n\r\ndef compute_content_cost(content_output, generated_output):\r\n \"\"\"\r\n Computes the content cost.\r\n Parameters\r\n ----------\r\n a_C: tensor\r\n hidden layer activations representing content of the image C - dimension (1, n_H, n_W, n_C)\r\n a_G: tensor\r\n hidden layer activations representing content of the image G - dimension (1, 
n_H, n_W, n_C)\r\n Returns\r\n -------\r\n J_content: float64\r\n the content cost between a_C and a_G\r\n \"\"\"\r\n # Exclude the last layer output\r\n a_C = content_output[-1]\r\n a_G = generated_output[-1]\r\n\r\n # Retrieve dimensions from a_G\r\n _, n_H, n_W, n_C = a_G.get_shape().as_list()\r\n\r\n # Reshape a_C and a_G\r\n a_C_unrolled = tf.reshape(a_C, shape=(1, -1, n_C))\r\n a_G_unrolled = tf.reshape(a_G, shape=(1, -1, n_C))\r\n\r\n # compute the cost with tensorflow\r\n J_content = (1 / (4 * n_C * n_H * n_W)) * tf.reduce_sum(\r\n tf.square(tf.subtract(a_C_unrolled, a_G_unrolled))\r\n )\r\n\r\n return J_content\r\n\r\n\r\ndef compute_layer_style_cost(a_S, a_G):\r\n \"\"\"\r\n Computes the style cost of one layer.\r\n Parameters\r\n ----------\r\n a_C: tensor\r\n hidden layer activations representing content of the image C - dimension (1, n_H, n_W, n_C)\r\n a_G: tensor\r\n hidden layer activations representing content of the image G - dimension (1, n_H, n_W, n_C)\r\n Returns\r\n -------\r\n J_style_layer\r\n A scalar value representing style cost for a layer\r\n \"\"\"\r\n\r\n # Retrieve dimensions from a_G\r\n _, n_H, n_W, n_C = a_G.get_shape().as_list()\r\n\r\n # Reshape the images from (1, n_H, n_W, n_C) to have them of shape (n_C, n_H * n_W)\r\n a_S = tf.reshape(tf.transpose(a_S, perm=[3, 0, 1, 2]), shape=(n_C, -1))\r\n a_G = tf.reshape(tf.transpose(a_G, perm=[3, 0, 1, 2]), shape=(n_C, -1))\r\n\r\n # Computing gram_matrices for both images S and G\r\n GS = tf.matmul(a_S, tf.transpose(a_S))\r\n GG = tf.matmul(a_G, tf.transpose(a_G))\r\n\r\n # Computing the loss\r\n J_style_layer = (1 / (2 * n_C * n_H * n_W) ** 2) * tf.reduce_sum(\r\n tf.square(tf.subtract(GS, GG))\r\n )\r\n\r\n return J_style_layer\r\n\r\n\r\ndef compute_style_cost(style_image_output, generated_image_output, style_layers):\r\n \"\"\"\r\n Computes the overall style cost from several chosen layers\r\n Parameters\r\n ----------\r\n style_image_output: tensor\r\n output of VGG model for the style image (activations of style layers & content layer)\r\n generated_image_output: tensor\r\n output of VGG model for the generated image (activations of style layers & content layer)\r\n style_layers : list of tuples\r\n containing the names of the layers we would like to extract style from and a coefficient for each of them\r\n Returns\r\n -------\r\n J_style\r\n A scalar value representing style cost\r\n \"\"\"\r\n\r\n # initialize the cost\r\n J_style = 0\r\n\r\n # Excluding the last element of the array which contains the content layer image\r\n a_S = style_image_output[:-1] # a_S is the hidden layer activations\r\n a_G = generated_image_output[:-1] # a_G is the hidden layer activations\r\n\r\n for i, weight in zip(range(len(a_S)), style_layers):\r\n # Compute style_cost for the current layer\r\n J_style_layer = compute_layer_style_cost(a_S[i], a_G[i])\r\n\r\n # Add weight * J_style_layer of this layer to overall style cost\r\n J_style += weight[1] * J_style_layer\r\n\r\n return J_style\r\n\r\n\r\[email protected]()\r\ndef total_cost(J_content, J_style, alpha=10, beta=40):\r\n \"\"\"\r\n Computes the total cost function. 
Because the main purpose of the algorithm\r\n is on matching the style of a target photo a bigger weight (beta) is given to\r\n the style image.\r\n Parameters\r\n ----------\r\n J_content: float\r\n content cost computed in compute_content_cost\r\n J_style: float\r\n style cost computed in compute_style_cost\r\n alpha: float\r\n hyperparameter weighting the importance of the content cost\r\n beta: float\r\n hyperparameter weighting the importance of the style cost\r\n Returns\r\n -------\r\n J\r\n total cost\r\n \"\"\"\r\n J = alpha * J_content + beta * J_style\r\n\r\n return J\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main(\r\n opt[\"--content\"],\r\n opt[\"--style\"],\r\n opt[\"--save\"],\r\n opt[\"--similarity\"],\r\n opt[\"--epochs\"],\r\n )\r\n" ]
[ [ "tensorflow.clip_by_value", "tensorflow.transpose", "tensorflow.Variable", "tensorflow.shape", "numpy.reshape", "tensorflow.reshape", "tensorflow.keras.applications.VGG19", "tensorflow.keras.Model", "numpy.ndim", "tensorflow.subtract", "tensorflow.function", "tensorflow.add", "tensorflow.keras.optimizers.Adam", "tensorflow.image.convert_image_dtype", "tensorflow.config.list_physical_devices", "numpy.array", "tensorflow.GradientTape" ] ]
oreh/gseapy
[ "d3212afb2e8d61f37957d685da6ef28f723d98e6", "d3212afb2e8d61f37957d685da6ef28f723d98e6" ]
[ "gseapy/gsea_plot.py", "gseapy/gsea.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.transforms as transforms\nfrom matplotlib.colors import Normalize\n\n\n\nclass _MidpointNormalize(Normalize):\n def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):\n self.midpoint = midpoint\n Normalize.__init__(self, vmin, vmax, clip)\n\n def __call__(self, value, clip=None):\n # I'm ignoring masked values and all kinds of edge cases to make a\n # simple example...\n x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]\n return np.ma.masked_array(np.interp(value, x, y))\n\n\n\n\n\n\ndef gsea_plot(rank_metric, enrich_term, hit_ind, nes, pval, fdr, RES,\n phenoPos=None, phenoNeg=None, figsize =(6.5,6), **kwarg):\n \"\"\"This is the main function for reproducing the gsea plot.\n \n :param rank_metric: rankings, rank_metric['rank'].values.\n :param enrich_term: gene_set name\n :param hit_ind: hit indexs of rank_metric['gene_name'] presented in gene set S.\n :param nes: Normalized enrichment scores.\n :param pval: nominal p-value.\n :param fdr: false discoveray rate.\n :param RES: ranking enrichment scores of all genes in rank_metric['gene_name'].\n :param phenoPos: phenotype lable, positive correlated.\n :param phenoNeg: phenotype lable, negative correlated.\n :param figsize: matplotlib figsize.\n :return: fig object of gsea plot.\n \"\"\" \n \n # center color map at midpoint = 0\n norm = _MidpointNormalize(midpoint=0)\n \n #dataFrame of ranked matrix scores \n x = rank_metric.index.values \n #figsize = (6,6)\n phenoP_label = phenoPos + ' (Positively Correlated)'\n phenoN_label = phenoNeg + ' (Negatively Correlated)'\n zero_score_ind = np.abs(rank_metric['rank']).argmin()\n z_score_label = 'Zero score at ' + str(zero_score_ind)\n nes_label = 'NES: '+ \"{:.3f}\".format(float(nes))\n pval_label = 'Pval: '+ \"{:.3f}\".format(float(pval))\n fdr_label = 'FDR: '+ \"{:.3f}\".format(float(fdr)) \n im_matrix = rank_metric.ix[:,1:].T\n\n #in most case, we will have mangy plots, so do not display plots\n #It's also convinient to run this script on command line. 
\n plt.ioff() \n #GSEA Plots\n gs = plt.GridSpec(16,1)\n fig = plt.figure(figsize=figsize)\n #Ranked Metric Scores Plot\n ax1 = fig.add_subplot(gs[11:])\n ax1.fill_between(x, y1= rank_metric['rank'], y2=0, color='#C9D3DB')\n ax1.set_ylabel(\"Ranked list metric\",fontsize=14) \n ax1.text(.05, .9, phenoP_label, color='red', horizontalalignment='left', verticalalignment='top',\n transform=ax1.transAxes)\n ax1.text(.95, .05, phenoN_label, color='Blue', horizontalalignment='right', verticalalignment='bottom',\n transform=ax1.transAxes)\n\n # the x coords of this transformation are data, and the y coord are axes\n trans1 = transforms.blended_transform_factory(ax1.transData, ax1.transAxes)\n ax1.vlines(zero_score_ind, 0, 1, linewidth=.5, transform=trans1, linestyles='--', color='grey')\n ax1.text(zero_score_ind, 0.5, z_score_label, horizontalalignment='center', verticalalignment='center',\n transform=trans1) \n ax1.set_xlabel(\"Rank in Ordered Dataset\", fontsize=14)\n ax1.spines['top'].set_visible(False)\n ax1.tick_params(axis='both', which='both', top='off', right='off', left='off')\n ax1.locator_params(axis='y', nbins=5) \n ax1.yaxis.set_major_formatter(plt.FuncFormatter(lambda tick_loc,tick_num : '{:.1f}'.format(tick_loc) ))\n \n # use round method to control float number\n #ax1.yaxis.set_major_formatter(plt.FuncFormatter(lambda tick_loc,tick_num : round(tick_loc, 1) ))\n \n #gene hits\n ax2 = fig.add_subplot(gs[8:10], sharex=ax1)\n\n # the x coords of this transformation are data, and the y coord are axes\n trans2 = transforms.blended_transform_factory(ax2.transData, ax2.transAxes)\n ax2.vlines(hit_ind, 0, 1,linewidth=.5,transform=trans2)\n ax2.spines['bottom'].set_visible(False)\n ax2.tick_params(axis='both', which='both', bottom='off', top='off', \n labelbottom='off', right='off', left='off',labelleft='off')\n #colormap\n ax3 = fig.add_subplot(gs[10],sharex=ax1)\n ax3.imshow(im_matrix, aspect='auto', norm=norm, cmap=plt.cm.seismic, interpolation='none') # cm.coolwarm\n ax3.spines['bottom'].set_visible(False)\n ax3.tick_params(axis='both', which='both', bottom='off', top='off', \n labelbottom='off', right='off', left='off',labelleft='off')\n\n # Enrichment score plot\n ax4 = fig.add_subplot(gs[:8],sharex=ax1)\n ax4.plot(x,RES,linewidth=4,color ='#88C544')\n ax4.text(.1, .1, fdr_label, transform=ax4.transAxes)\n ax4.text(.1, .2, pval_label, transform=ax4.transAxes)\n ax4.text(.1, .3, nes_label, transform=ax4.transAxes)\n\n # the y coords of this transformation are data, and the x coord are axes\n trans4 = transforms.blended_transform_factory(ax4.transAxes, ax4.transData)\n ax4.hlines(0, 0, 1, linewidth=.5, transform=trans4, color='grey')\n ax4.set_ylabel(\"Enrichment score (ES)\", fontsize=14)\n ax4.set_xlim(min(x), max(x))\n ax4.tick_params(axis='both', which='both', bottom='off', top='off', labelbottom='off', right='off')\n ax4.locator_params(axis='y', nbins=5)\n # FuncFormatter need two argment, I don't know why. this lambda function used to format yaxis tick labels.\n ax4.yaxis.set_major_formatter(plt.FuncFormatter(lambda tick_loc,tick_num : '{:.1f}'.format(tick_loc)) )\n \n #fig adjustment\n fig.suptitle(enrich_term, fontsize=16)\n fig.subplots_adjust(hspace=0)\n #fig.tight_layout()\n plt.close(fig)\n \n return fig\n \n", "#! 
python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, print_function, division\n\nimport os\nimport sys\nimport time\n\n\nfrom .parser import gsea_edb_parser,gsea_rank_metric,gsea_gmt_parser,gsea_cls_parser\nfrom .algorithm import enrichment_score,gsea_compute,preprocess,ranking_metric\nfrom .gsea_plot import gsea_plot\nfrom collections import OrderedDict\n\nimport pandas as pd\n\ndef replot(indir,outdir='gseapy_out', weight=1,figsize=[6.5,6], format='pdf',min_size=3, max_size=5000):\n \"\"\"The main fuction to run inside python.\n \n :param indir: GSEA desktop results directory. In the sub folder, you must contain edb file foder. \n :param outdir: Output directory.\n :param weight: weighted score type. choose from {0,1,1.5,2}. Default: 1.\n :param figsize: matplotlib output figure figsize. Defult: [6.5,6].\n :param format: matplotlib output figure format. Default: 'pdf'.\n :param min_size: min size of input genes presented in Gene Sets. Default: 3.\n :param max_size: max size of input genes presented in Gene Sets. Default: 5000.\n you will not encourage to use min_size, or max_size argment in :func:`replot` function.\n Because gmt file has already been filter.\n \n :return: Generate new figures with seleted figure format. Default: 'pdf'. \n \"\"\"\n import glob\n from bs4 import BeautifulSoup \n #parsing files....... \n results_path = glob.glob(indir+'*/edb/results.edb')[0]\n rank_path = glob.glob(indir+'*/edb/*.rnk')[0]\n gene_set_path = glob.glob(indir+'*/edb/gene_sets.gmt')[0]\n cls_path = glob.glob(indir+'*/edb/*.cls')[0]\n file_list = [results_path, rank_path, gene_set_path, cls_path] \n for file in file_list: \n if not os.path.isfile(file):\n print(\"Incorrect Input %s !\" %file)\n sys.exit(1) \n #extract sample names from .cls file\n phenoPos, phenoNeg, classes = gsea_cls_parser(cls_path) \n #obtain gene sets\n gene_set_dict = gsea_gmt_parser(gene_set_path, min_size=min_size, max_size=max_size)\n #obtain rank_metrics\n rank_metric = gsea_rank_metric(rank_path)\n correl_vector = rank_metric['rank'].values \n gene_list = rank_metric['gene_name']\n #extract each enriment term in the results.edb files and plot.\n database = BeautifulSoup(open(results_path),features='xml')\n length = len(database.findAll('DTG'))\n os.system(\"mkdir \"+ outdir)\n\n for idx in range(length):\n #extract statistical resutls from results.edb file\n enrich_term, hit_ind, nes, pval, fdr= gsea_edb_parser(results_path, index=idx)\n gene_set = gene_set_dict.get(enrich_term)\n #calculate enrichment score \n RES = enrichment_score(gene_list=gene_list, gene_set=gene_set, weighted_score_type=weight, \n correl_vector=correl_vector)[2]\n #plotting\n fig = gsea_plot(rank_metric, enrich_term,hit_ind, nes, pval,\n fdr, RES, phenoPos, phenoNeg, figsize=figsize) \n fig.savefig('{a}/{b}.{c}'.format(a=outdir, b=enrich_term, c=format), dpi=300,)\n \n print(\"Congratulations! Your plots have been reproduced successfully!\")\n\ndef call(data, gene_sets, cls, outdir='gseapy_out', min_size=15, max_size=1000, permutation_n=1000, weighted_score_type=1,\n permutation_type='gene_set', method='log2_ratio_of_classes', ascending=False, figsize=[6.5,6], format='pdf', \n graph_num=20, seed=None):\n \"\"\" Run Gene Set Enrichment Analysis.\n\n :param data: Gene expression data table. \n :param gene_sets: Gene sets file. e.g. gmt files. Same input with GSEA.\n :param permutation_n: Number of permutations for significance computation. 
Default: 1000.\n :param permutation_type: Permutation type, \"phenotype\" (default) for phenotypes, \"gene_set\" for genes.\n :param int min_size: Minimum allowed number of genes from gene set also the data set. Defaut: 15.\n :param int max_size: Maximum allowed number of genes from gene set also the data set. Defaults: 15 and 1000.\n :param weighted_score_type: refer to :func:`algorithm.enrichment_socre`. Default:1.\n :param method: ranking metric method, refer to :func:`algorithm.ranking_metric`.\n :param ascending: sorting order of rankings. Default: False.\n :param outdir: results output directory.\n :param figsize: matplotlib figsize, accept a tuple or list, e.g. [width,height]. Default: [6.5,6].\n :param format: matplotlib figure format. Default: pdf.\n :param graph_num: plot graphs for top sets of each phenotype\n :param seed: random seed. expect an interger. Defalut:None.\n :return: Generate ``GSEA`` plots and store a dictionary into csv file,\n where dictionary key is a gene set and values are::\n\n | {es: enrichment score, \n | nes: normalized enrichment score, \n | p: P-value, \n | fdr: FDR, \n | size: gene set size,\n | matched_size: genes matched to the data, \n | genes: gene names from the data set}\n \n \"\"\"\n assert permutation_type in [\"phenotype\", \"gene_set\"]\n df = pd.read_table(data) \n assert len(df) > 1 \n \n #select correct expression genes and values.\n dat = preprocess(df)\n \n # phenotype labels parsing\n phenoPos, phenoNeg, classes = gsea_cls_parser(cls)\n \n #ranking metrics calculation. \n dat2 = ranking_metric(df=dat, method=method, phenoPos=phenoPos, phenoNeg=phenoNeg, classes=classes, ascending=ascending)\n \n #filtering out gene sets and build gene sets dictionary\n gmt = gsea_gmt_parser(gene_sets, min_size=min_size, max_size=max_size,gene_list=dat2['gene_name'].values)\n \n #compute ES, NES, pval, FDR, RES\n results,hit_ind,rank_ES, subsets = gsea_compute(data=dat, n=permutation_n,gmt=gmt, weighted_score_type=weighted_score_type,\n permutation_type=permutation_type, method=method,\n phenoPos=phenoPos, phenoNeg=phenoNeg, classes=classes, ascending=ascending,\n seed=seed)\n \n \n os.system(\"mkdir \"+ outdir)\n res = OrderedDict()\n for gs, gseale,ind,RES in zip(subsets, list(results), hit_ind, rank_ES): \n rdict = OrderedDict() \n rdict['es'] = gseale[0]\n rdict['nes'] = gseale[1]\n rdict['pval'] = gseale[2]\n rdict['fdr'] = gseale[3]\n rdict['gene_set_size'] = len(gmt[gs])\n rdict['matched_size'] = len(ind)\n rdict['rank_ES'] = RES\n rdict['genes'] = dat.iloc[ind].index.tolist()\n rdict['hit_index'] = ind\n res[gs] = rdict \n \n res_df = pd.DataFrame.from_dict(res,orient='index')\n res_df.index.name = 'Enrich_terms'\n #res_df = res_df[['es','nes','pval','fdr','gene_set_size','matched_size','rank_ES','genes']]\n res_df.sort_values(by='fdr', inplace=True)\n res_final = res_df.head(graph_num)\n res_final.to_csv('{a}/{b}.csv'.format(a=outdir, b='gseapy_reports'), float_format ='%.7f')\n \n print(\"Start to generate gseapy reports, and produce figures.......\", time.ctime())\n #Plotting\n for gs in res_df.index.values:\n fig = gsea_plot(rank_metric=dat2, enrich_term=gs, hit_ind=res.get(gs)['hit_index'],\n nes=res.get(gs)['nes'], pval=res.get(gs)['pval'], fdr=res.get(gs)['fdr'], \n RES=res.get(gs)['rank_ES'], phenoPos=phenoPos, phenoNeg=phenoNeg, figsize=figsize) \n fig.savefig('{a}/{b}.{c}'.format(a=outdir, b=gs, c=format), dpi=300,)\n \n #print(res_df.head(10))\n print(\"...Congratulations. 
GSEAPY run successfully!!!.............\\n...The Job is done...........................Goodbye!\")\n \n return \n\ndef prerank(rnk, gene_sets, outdir='gseapy_out', pheno_pos='Postive', pheno_neg='Negative',\n min_size=15, max_size=1000, permutation_n=1000, weighted_score_type=1,\n ascending=False, figsize=[6.5,6], format='pdf',graph_num=20, seed=None):\n \"\"\" Run Gene Set Enrichment Analysis with pre-ranked correlation defined by user.\n\n :param rnk: pre-ranked correlation table, Same input with ``GSEA`` .rnk file. \n :param gene_sets: Gene sets file. e.g. gmt files. Same input with GSEA.\n :param outdir: results output directory.\n :param permutation_n: Number of permutations for significance computation. Default: 1000.\n :param int min_size: Minimum allowed number of genes from gene set also the data set. Defaut: 15.\n :param int max_size: Maximum allowed number of genes from gene set also the data set. Defaults: 15 and 1000.\n :param weighted_score_type: refer to :func:`algorithm.enrichment_socre`. Default:1.\n :param ascending: sorting order of rankings. Default: False.\n :param figsize: matplotlib figsize, accept a tuple or list, e.g. [width,height]. Default: [6.5,6].\n :param format: matplotlib figure format. Default: pdf.\n :param graph_num: plot graphs for top sets of each phenotype\n :param seed: random seed. expect an interger. Defalut:None. \n :return: Generate ``GSEA`` plots and store a dictionary into csv file,\n where dictionary key is a gene set and values are::\n\n | {es: enrichment score, \n | nes: normalized enrichment score, \n | p: P-value, \n | fdr: FDR, \n | size: gene set size,\n | matched_size: genes matched to the data, \n | genes: gene names from the data set}\n \n \"\"\"\n #drop duplicates in ranking metrics.\n dat2 = gsea_rank_metric(rnk) \n dat2.drop_duplicates(subset='gene_name',inplace=True,keep='first')\n assert len(dat2) > 1 \n \n #filtering out gene sets and build gene sets dictionary\n gmt = gsea_gmt_parser(gene_sets, min_size=min_size, max_size=max_size, gene_list=dat2['gene_name'].values)\n \n #compute ES, NES, pval, FDR, RES\n results,hit_ind,rank_ES, subsets = gsea_compute(data=dat2, n=permutation_n, gmt=gmt, weighted_score_type=weighted_score_type,\n permutation_type='gene_set', method=None, phenoPos=pheno_pos, phenoNeg=pheno_neg,\n classes=None, ascending=ascending, seed=seed, prerank=True)\n \n print(\"Start to generate gseapy reports, and produce figures...\", time.ctime())\n os.system(\"mkdir \"+ outdir)\n res = OrderedDict()\n for gs,gseale,ind,RES in zip(subsets, list(results), hit_ind, rank_ES): \n rdict = OrderedDict() \n rdict['es'] = gseale[0]\n rdict['nes'] = gseale[1]\n rdict['pval'] = gseale[2]\n rdict['fdr'] = gseale[3]\n rdict['gene_set_size'] = len(gmt[gs])\n rdict['matched_size'] = len(ind)\n rdict['rank_ES'] = RES\n rdict['genes'] = dat2.ix[ind,'gene_name'].tolist()\n rdict['hit_index'] = ind\n res[gs] = rdict \n\n\n res_df = pd.DataFrame.from_dict(res, orient='index')\n res_df.index.name = 'Enrich_terms'\n res_df.sort_values(by='fdr', inplace=True)\n #res_df = res_df[['es','nes','pval','fdr','gene_set_size','matched_size','rank_ES','genes']]\n res_final = res_df.head(graph_num)\n res_final.to_csv('{a}/{b}.csv'.format(a=outdir, b='gseapy_reports'), float_format ='%.7f')\n\n for gs in res_df.index.values:\n fig = gsea_plot(rank_metric=dat2, enrich_term=gs, hit_ind=res.get(gs)['hit_index'],\n nes=res.get(gs)['nes'], pval=res.get(gs)['pval'], fdr=res.get(gs)['fdr'], \n RES=res.get(gs)['rank_ES'], phenoPos=pheno_pos, 
phenoNeg=pheno_neg, figsize=figsize) \n fig.savefig('{a}/{b}.{c}'.format(a=outdir, b=gs, c=format), dpi=300,)\n\n\n\n print(\"Congratulations. GSEAPY run successfully................\")\n print(\"The Job is done.................................Goodbye!\", time.ctime())\n \n return \n" ]
[ [ "numpy.abs", "matplotlib.colors.Normalize.__init__", "matplotlib.pyplot.ioff", "numpy.interp", "matplotlib.pyplot.close", "matplotlib.pyplot.GridSpec", "matplotlib.transforms.blended_transform_factory", "matplotlib.pyplot.figure" ], [ "pandas.read_table", "pandas.DataFrame.from_dict" ] ]
TangleSpace/hotstepper
[ "4d8a278d94f19fee2bc4d3ba25628fa69ed3653d" ]
[ "hotstepper/mixins/operations.py" ]
[ "import numpy as np\nfrom hotstepper.core.data_model import DataModel\nfrom hotstepper.utilities.helpers import get_epoch_start\n\n\ndef apply_math_function(caller,other,math_function, sample_points=None):\n \"\"\"\n Apply the supplied function to two objects evaluated at the union of all their unique step keys.\n\n For example, math_function = numpy.multiply will multiple the step values from each steps object together at each of the step keys that forms the union set of all step keys.\n Simply, a list of unique step keys will be generated based on those from each steps object and the provided function will be applied across all steps values at each of those keys to generate a new steps object.\n\n If the second argument is a scalar, this value will be broadcast to match the number of step keys in the longest step or steps object.\n\n Parameters\n ===========\n caller : Step, Steps\n The parent object with step values to perform the math operation on\n\n other : int, float, Step, Steps\n Steps or scalar value to be combined with the caller object values evaluated at the common union of step keys using the provided math_function.\n\n math_function : math_like function, e.g. numpy.add, operator.__mul__\n A binary math function that accepts two arguments and returns an array the same length as the longest input, e.g +,-,*,/, np.add, np.multiply etc.\n\n sample_points : array_like of int,float or datetime_like, Optional\n Specifiy the specific points the math_function function is to be evaluated across all provided step functions.\n\n Returns\n ==========\n Steps\n A new steps object containing the result of the function application across the provided objects.\n\n See Also\n ============\n filter_values\n apply_reduction_function\n\n Examples\n ==========\n\n \"\"\"\n\n return _apply_aggreduce_function(\n steps_to_combine=[caller,other],\n agg_reduce_function=math_function,\n sample_points=sample_points,\n is_agg_function=True\n )\n\n\ndef apply_reduction_function(steps_to_combine,reduction_function,sample_points=None):\n \"\"\"\n Apply the supplied function to all provided objects evaluated at the union of all their unique step keys.\n\n For example, reduction_function = numpy.mean will find the mean across all objects evaluated at each step key that is from the union set of all keys.\n Simply, a list of unique step keys will be generated based on those from each steps object and the provided function will be applied across all steps values at each of those keys to generate a new steps object.\n\n If the second argument is a scalar, this value will be broadcast to match the number of step keys in the longest step or steps object.\n\n Parameters\n ===========\n steps_to_combine : int, float, Step, Steps\n Objects and/or numbers to apply the reduction function at each of the unique keys.\n\n reduction_function : math_like function\n A reduction function that returns a scalar for each input array, e.g mean, variance, np.mean, np.std, np.median etc.\n\n sample_points: array_like of int,float or datetime_like, Optional\n Specifiy the specific points the reduction_function function is to be evaluated across all provided step functions.\n\n Returns\n ==========\n Steps\n A new steps object containing the result of the function application across the provided objects.\n\n See Also\n ============\n filter_values\n apply_math_function\n\n Examples\n ==========\n\n \"\"\"\n\n return _apply_aggreduce_function(\n steps_to_combine=steps_to_combine,\n agg_reduce_function=reduction_function,\n 
sample_points=sample_points,\n is_agg_function=False\n )\n\n\ndef _apply_aggreduce_function(steps_to_combine,agg_reduce_function,sample_points=None, is_agg_function=True):\n \"\"\"\n Apply the supplied function to all provided objects evaluated at the union of their unique step keys.\n\n For example, aggregation_function = numpy.mean will find the mean across all objects evaluated at each step key that is from the union set of all keys from each steps function.\n Simply, a list of unique step keys will be generated based on those from each steps object and the provided function will be applied across all steps values at each of those keys to generate a new steps object.\n\n If the second argument is a scalar, this value will be broadcast to match the number of step keys in the longest step or steps object.\n\n Parameters\n ===========\n steps_to_combine : int, float, Step, Steps\n Any value to compare each step component against.\n\n agg_reduce_function : math_like function, e.g. aggreation functions like numpy.add, operator.__mul__ or reduction functions like mean, std etc.\n A reduction function that returns a scalar for each input array, e.g mean, variance, np.mean, np.std, np.median etc.\n\n sample_points : array_like of int,float or datetime_like, Optional\n Specifiy the specific points the agg_reduce_function function is to be evaluated across all provided step functions.\n\n is_agg_function : bool, Optional\n Flag to indicate if the is_agg_function is either an aggregation type such as the mathematical operations +,-,/,* or a reduction type such as mean, max, median.\n\n Returns\n ==========\n Steps\n A new steps object containing the result of the function application across the provided objects.\n\n See Also\n ============\n filter_values\n apply_reduction_function\n apply_math_function\n\n Examples\n ==========\n\n \"\"\"\n \n #used to check if objects are implementing the AbstractSteps interface\n ty = type(steps_to_combine[0])\n base_parent = ty.__base__\n\n if sample_points is None:\n keys = np.sort(np.unique(np.concatenate([s.step_keys() for s in steps_to_combine if isinstance(s,base_parent)])))\n else:\n keys = sample_points\n\n #to handle int float as well as AbstractSteps in one go\n get_stack_value = lambda x: x.step(keys,False) if isinstance(x,base_parent) else np.full(len(keys),x)\n stack = np.array([get_stack_value(s) for s in steps_to_combine])\n\n if is_agg_function:\n result = np.diff(agg_reduce_function(*stack),prepend=0)\n else:\n result = np.diff(agg_reduce_function(stack,axis=0),prepend=0)\n \n step_data = np.empty((keys.shape[0],3))\n step_data[:,DataModel.START.value] = keys\n step_data[:,DataModel.DIRECTION.value] = 1\n step_data[:,DataModel.WEIGHT.value] = result\n\n #filter out values that create issues\n step_data = step_data[~np.isnan(step_data[:,DataModel.WEIGHT.value])]\n step_data = step_data[step_data[:,DataModel.WEIGHT.value]!=0]\n step_data = step_data[step_data[:,DataModel.WEIGHT.value]!=np.PINF]\n step_data = step_data[step_data[:,DataModel.WEIGHT.value]!=np.NINF]\n \n #promote the Steps key type if any of the steps to combine are using datetime\n any_using_datetime = (np.array([s.using_datetime() for s in steps_to_combine if isinstance(s,base_parent)])==True).any()\n\n ty = type(steps_to_combine[0])\n result_step = ty(use_datetime=any_using_datetime,basis=steps_to_combine[0].basis())\n\n if step_data.shape[0] > 0:\n return result_step.add_steps(step_data)\n else:\n return result_step\n\n\ndef filter_values(caller,other, operation_func, 
normalise_value = 0):\n \"\"\"\n This function is used to create a filtered version of the steps by removing steps not evaluating to true from applying the comparison function at all step change locations.\n\n Parameters\n ===========\n other : int, float\n Any value to compare each steps value against.\n\n operation_func : binary boolean function\n A binary comparison function that returns a bool, e.g >,<,==.\n\n normalise_value: int, float, Optional\n A value to assign at the step keys that are included in the return object. If a value of zero is used, the return object will have the value of the step function between the included step keys.\n\n Returns\n ==========\n Steps\n A new steps object containing the result of the function application across the provided objects.\n\n See Also\n ============\n apply_reduction_function\n apply_math_function\n\n \"\"\"\n\n if type(other) in [float,int]:\n\n caller_step_data = caller.steps()\n mask = np.where(operation_func(caller_step_data[:,DataModel.WEIGHT.value],other), True,False)\n\n if np.alltrue(mask):\n if normalise_value == 0:\n return caller\n else:\n ty = type(caller)\n return ty(use_datetime=caller.using_datetime(),\n basis=caller.basis(),\n start=caller.first(),\n end=caller.last(),\n weight=normalise_value\n )\n\n new_steps = _filter_by_mask(caller_step_data,mask,normalise_value)\n else:\n caller_step_data = caller.steps()\n other_step_values = other(caller.step_keys())\n\n mask = np.where(operation_func(caller_step_data[:,DataModel.WEIGHT.value],other_step_values), True,False)\n\n if np.alltrue(mask):\n if normalise_value == 0:\n return caller\n else:\n ty = type(caller)\n return ty(use_datetime=caller.using_datetime(),\n basis=caller.basis(),\n start=caller.first(),\n end=caller.last(),\n weight=normalise_value\n )\n \n new_steps = _filter_by_mask(caller_step_data,mask,normalise_value)\n\n #we have the data, now return an object matching the caller, something that implements the AbstractSteps interface\n ty = type(caller)\n result_step = ty(use_datetime=caller.using_datetime(),basis=caller.basis())\n\n if len(new_steps) > 0:\n return result_step.add_steps(np.array(new_steps))\n else:\n return result_step\n\n\ndef _filter_by_mask(step_data,mask,normalise_value = 0):\n\n if np.alltrue(mask):\n return step_data\n\n new_steps = []\n\n st = None\n adj = 0\n for i ,s in enumerate(step_data[:,DataModel.START.value]):\n if mask[i]:\n if st is None:\n st = i\n if normalise_value == 0:\n new_steps.append([s,1,step_data[i,DataModel.WEIGHT.value]])\n else:\n new_steps.append([s,1,normalise_value])\n elif st is not None and (i > st) and normalise_value == 0:\n new_steps.append([s,1,step_data[i,DataModel.DIRECTION.value]])\n adj += step_data[i,DataModel.DIRECTION.value]\n else:\n if st is not None and st != get_epoch_start(False):\n if normalise_value == 0:\n new_steps.append([s,1,-1*(step_data[st,DataModel.WEIGHT.value] + adj)])\n adj = 0\n else:\n new_steps.append([s,1,-1*normalise_value])\n st = None\n \n return new_steps\n" ]
[ [ "numpy.isnan", "numpy.array", "numpy.alltrue", "numpy.empty" ] ]
cyberflax2020/21-S1-2-C-Cinema-Code
[ "6c3358168996529cbb0745a7c3f5aa257d790360" ]
[ "Build_Body_Samples.py" ]
[ "import csv\nimport numpy as np\nimport mediapipe as mp\nimport cv2\n\nclass_name = \"Speaking\"\n\nmp_drawing = mp.solutions.drawing_utils # Drawing helpers\nmp_holistic = mp.solutions.holistic # Mediapipe Solutions\n\nstr_source = input(\"dir:\")\ncap = cv2.VideoCapture(str_source)\n# Initiate holistic model\nwith mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=0.5) as holistic:\n while cap.isOpened():\n ret, frame = cap.read()\n\n # Recolor Feed\n image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n image.flags.writeable = False\n\n # Make Detections\n results = holistic.process(image)\n\n # Recolor image back to BGR for rendering\n image.flags.writeable = True\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n\n # 1. Draw face landmarks\n mp_drawing.draw_landmarks(image, results.face_landmarks, mp_holistic.FACE_CONNECTIONS,\n mp_drawing.DrawingSpec(color=(80, 110, 10), thickness=1, circle_radius=1),\n mp_drawing.DrawingSpec(color=(80, 256, 121), thickness=1, circle_radius=1)\n )\n\n # 2. Right hand\n mp_drawing.draw_landmarks(image, results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS,\n mp_drawing.DrawingSpec(color=(80, 22, 10), thickness=2, circle_radius=4),\n mp_drawing.DrawingSpec(color=(80, 44, 121), thickness=2, circle_radius=2)\n )\n\n # 3. Left Hand\n mp_drawing.draw_landmarks(image, results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS,\n mp_drawing.DrawingSpec(color=(121, 22, 76), thickness=2, circle_radius=4),\n mp_drawing.DrawingSpec(color=(121, 44, 250), thickness=2, circle_radius=2)\n )\n\n # 4. Pose Detections\n mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS,\n mp_drawing.DrawingSpec(color=(245, 117, 66), thickness=2, circle_radius=4),\n mp_drawing.DrawingSpec(color=(245, 66, 230), thickness=2, circle_radius=2)\n )\n # Export coordinates\n try:\n # Extract Pose landmarks\n pose = results.pose_landmarks.landmark\n pose_row = list(\n np.array([[landmark.x, landmark.y, landmark.z, landmark.visibility] for landmark in pose]).flatten())\n\n row = pose_row\n\n # Append class name\n row.insert(0, class_name)\n\n # Export to CSV\n with open('body_coords.csv', mode='a', newline='') as f:\n csv_writer = csv.writer(f, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n csv_writer.writerow(row)\n\n except:\n pass\n\n cv2.imshow('Video', image)\n\n if cv2.waitKey(10) & 0xFF == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()\n" ]
[ [ "numpy.array" ] ]
wdxtub/deep-learning-note
[ "47b83a039b80d4757e0436d5cbd2fa3037de3904", "47b83a039b80d4757e0436d5cbd2fa3037de3904" ]
[ "mlds/1-numpy/4_numpy_100.py", "cs20/16_basic_kernels.py" ]
[ "import numpy as np\nimport time\n\nprint('1. 创建大小为 10 的空向量')\na = np.zeros(10)\nprint(a)\n\nprint('2. 查看矩阵占据的内存大小')\nprint('用元素个数乘以每个元素的大小')\nprint(f'占据 {a.size * a.itemsize} 字节')\n\nprint('3. 创建一个向量,值从 10 到 49')\na = np.arange(10, 50)\nprint(a)\n\nprint('4. 翻转一个向量')\na = a[::-1]\nprint(a)\n\nprint('5. 创建一个 3x3 的矩阵,值从 0 到 8')\na = np.arange(9).reshape(3,3)\nprint(a)\n\nprint('6. 从 [1, 2, 0, 0, 4, 0] 中寻找非零元素索引')\nnz = np.nonzero([1, 2, 0, 0, 4, 0])\nprint(nz)\n\nprint('7. 创建 3x3 单位矩阵(对角线元素为 1 的方阵)')\na = np.eye(3)\nprint(a)\n\nprint('8. 创建一个 3x3x3 的随机矩阵')\na = np.random.random((3, 3, 3))\nprint(a)\n\nprint('9. 创建一个 10x10 的矩阵并寻找最大最小值')\na = np.random.random((10, 10))\na_min, a_max = a.min(), a.max()\nprint('min', a_min, ', max', a_max)\n\nprint('10. 创建一个长度为 30 的向量,并求均值')\na = np.random.random(30)\nprint('mean', a.mean())\n\nprint('11. 创建一个边界为 1 其他为 0 的二维矩阵')\na = np.ones((10, 10))\na[1:-1,1:-1] = 0\nprint(a)\n\nprint('12. 为已经存在的矩阵填充 0 的边界')\na = np.ones((5, 5))\nprint(a)\na = np.pad(a, pad_width=1, mode='constant', constant_values=0)\nprint(a)\n\nprint('13. 给出下列计算的结果')\nprint('0 * np.nan =', 0 * np.nan)\nprint('np.nan == np.nan =', np.nan == np.nan)\nprint('np.inf > np.nan =', np.inf > np.nan)\nprint('np.nan - np.nan =', np.nan - np.nan)\nprint('np.nan in set([np.nan]) =', np.nan in set([np.nan]))\nprint('0.3 == 3 * 0.1 =', 0.3 == 3 * 0.1)\n\nprint('14. 创建一个 5x5 的矩阵,对角线下的数值为 1 2 3 4')\na = np.diag(1 + np.arange(4), k=-1)\nprint(a)\n\nprint('15. 创建一个 8x8 矩阵,其中 0 和 1 间隔分布')\na = np.zeros((8, 8), dtype=int)\na[1::2, ::2] = 1\na[::2, 1::2] = 1\nprint(a)\n\nprint('16. 使用 tile 函数创建一个 8x8 矩阵,其中 0 和 1 间隔分布')\na = np.tile(np.array([[0, 1], [1, 0]]), (4, 4))\nprint(a)\n\nprint('17. 假设有一个 (6, 7, 8) 大小的矩阵,那么第 100 个元素的索引是多少')\nprint(np.unravel_index(100, (6, 7, 8)))\n\nprint('18. 归一化一个随机 5x5 矩阵')\na = np.random.random((5, 5))\na = (a - np.mean(a)) / np.std(a)\nprint(a)\n\nprint('19. 点乘一个 5x3 和 3x2 的矩阵')\na = np.dot(np.ones((5, 3)), np.ones((3, 2)))\nprint(a)\n\nprint('20. 给定一个一维数组,不新增空间,把 3~8 之间的数字变成负数')\na = np.arange(10)\na[(3 < a) & (a <= 8)] *= -1\nprint(a)\n\nprint('21. 两个数组求交集')\na1 = np.random.randint(0, 10, 10)\na2 = np.random.randint(0, 10, 10)\nprint(np.intersect1d(a1, a2))\n\nprint('22. 获取 2020 年 6 月的所有日期')\na = np.arange('2020-06', '2020-07', dtype='datetime64[D]')\nprint(a)\n\nprint('23. 用 5 种方法去掉小数部分')\na = np.random.uniform(0, 10, 10)\nprint('a', a)\nprint('1:', a - a%1)\nprint('2:', np.floor(a))\nprint('3:', np.ceil(a) - 1)\nprint('4:', a.astype(int))\nprint('5:', np.trunc(a))\n\nprint('24. 创建一个 5x5 的矩阵,每一行都是从 0 到 4')\na = np.zeros((5, 5))\na += np.arange(5)\nprint(a)\n\nprint('25. 创建一个大小为 10,值从 0 到 1 的向量(不包括 0 和 1)')\na = np.linspace(0, 1, 11, endpoint=False)[1:]\nprint(a)\n\nprint('26. 创建一个大小为 10 的随机向量并排序')\na = np.random.random(10)\na.sort()\nprint(a)\n\nprint('27. 如何用比 np.sum 更快的方法对一个小数组求和')\na = np.arange(10)\nprint('a', a)\nstart = time.time()\nprint('add.reduct', np.add.reduce(a))\nend = time.time()\nprint('add.reduce time:', end-start)\nstart = time.time()\nprint('np.sum', np.sum(a))\nend = time.time()\nprint('np.sum time:', end - start)\n\nprint('28. 比较两个数组是否相等')\na = np.random.randint(0, 10, 10)\nb = np.random.randint(0, 10, 10)\nprint(np.allclose(a, b))\nprint(np.array_equal(a, b))\n\nprint('29. 将一个 10x2 的笛卡尔坐标系的点转成极坐标')\na = np.random.random((10, 2))\nx, y = a[:, 0], a[:, 1]\nr = np.sqrt(x**2 + y**2)\nt = np.arctan2(y, x)\nprint(r)\nprint(t)\n\nprint('30. 
创建一个大小为 10 的随机向量,并将最大的替换成 0')\na = np.random.random(10)\nprint('before', a)\na[a.argmax()] = 0\nprint('after', a)\n\nprint('31. 不用额外空间将 float 矩阵变成 int 矩阵')\na = np.arange(10, dtype=np.float32)\na = a.astype(np.int32, copy=False)\nprint(a)\n\nprint('32. 在一个 2 维矩阵中随机放 p 个元素')\nn, p = 10, 3\na = np.zeros((n, n))\nnp.put(a, np.random.choice(range(n*n), p, replace=False), 1)\nprint(a)\n\nprint('33. 矩阵的每行减去每行的均值')\na = np.random.randint(0, 10, (5, 10))\nprint('before', a)\nb = a - a.mean(axis=1, keepdims=True)\nprint('after', b)\n\nprint('34. 根据第 i 列给矩阵排序')\na = np.random.randint(0, 10, (3, 3))\nprint('before', a)\nprint('after', a[a[:, 1].argsort()])\n\nprint('35. 交换矩阵的两行')\na = np.arange(25).reshape(5, 5)\na[[0,1]] = a[[1, 0]]\nprint(a)\n\nprint('36. 如何计算一个数组的滑动窗口')\ndef moving_averate(a, n=3):\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n-1:] / n\na = np.arange(20)\nprint(moving_averate(a, n=4))\n\nprint('37. 如何找到数组中出现次数最多的元素')\na = np.random.randint(0, 10, 50)\nprint(np.bincount(a).argmax())\n\nprint('38. 如何获取数组中最大的 n 个数')\na = np.arange(1000)\nnp.random.shuffle(a)\nn = 5\nstart = time.time()\nprint('slow', a[np.argsort(a)[-n:]])\nend = time.time()\nprint('slow time', end - start)\nstart = time.time()\nprint('fast', a[np.argpartition(-a, n)[:n]])\nend = time.time()\nprint('fast time', end - start)", "import os\nos.environ['TF_CPP_MIN_LOG_LEVEL']='2'\n\nimport sys\nsys.path.append('..')\n\nfrom matplotlib import gridspec as gridspec\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport kernels\n\n\ndef read_one_image(filename):\n ''' This method is to show how to read image from a file into a tensor.\n The output is a tensor object.\n '''\n image_string = tf.read_file(filename)\n image_decoded = tf.image.decode_image(image_string)\n image = tf.cast(image_decoded, tf.float32) / 256.0\n return image\n\n\ndef convolve(image, kernels, rgb=True, strides=[1, 3, 3, 1], padding='SAME'):\n images = [image[0]]\n for i, kernel in enumerate(kernels):\n filtered_image = tf.nn.conv2d(image,\n kernel,\n strides=strides,\n padding=padding)[0]\n if i == 2:\n filtered_image = tf.minimum(tf.nn.relu(filtered_image), 255)\n images.append(filtered_image)\n return images\n\n\ndef show_images(images, rgb=True):\n gs = gridspec.GridSpec(1, len(images))\n for i, image in enumerate(images):\n plt.subplot(gs[0, i])\n if rgb:\n plt.imshow(image)\n else:\n image = image.reshape(image.shape[0], image.shape[1])\n plt.imshow(image, cmap='gray')\n plt.axis('off')\n plt.show()\n\n\ndef main():\n rgb = False\n if rgb:\n kernels_list = [kernels.BLUR_FILTER_RGB,\n kernels.SHARPEN_FILTER_RGB,\n kernels.EDGE_FILTER_RGB,\n kernels.TOP_SOBEL_RGB,\n kernels.EMBOSS_FILTER_RGB]\n else:\n kernels_list = [kernels.BLUR_FILTER,\n kernels.SHARPEN_FILTER,\n kernels.EDGE_FILTER,\n kernels.TOP_SOBEL,\n kernels.EMBOSS_FILTER]\n\n kernels_list = kernels_list[1:]\n image = read_one_image('data/images/naruto.jpeg')\n if not rgb:\n image = tf.image.rgb_to_grayscale(image)\n image = tf.expand_dims(image, 0) # make it into a batch of 1 element\n images = convolve(image, kernels_list, rgb)\n with tf.Session() as sess:\n images = sess.run(images) # convert images from tensors to float values\n show_images(images, rgb)\n\n\nif __name__ == '__main__':\n main()" ]
[ [ "numpy.sqrt", "numpy.linspace", "numpy.cumsum", "numpy.arctan2", "numpy.mean", "numpy.random.randint", "numpy.trunc", "numpy.pad", "numpy.allclose", "numpy.arange", "numpy.eye", "numpy.add.reduce", "numpy.intersect1d", "numpy.std", "numpy.ceil", "numpy.argpartition", "numpy.unravel_index", "numpy.zeros", "numpy.nonzero", "numpy.floor", "numpy.argsort", "numpy.array", "numpy.sum", "numpy.random.random", "numpy.array_equal", "numpy.random.shuffle", "numpy.ones", "numpy.bincount", "numpy.random.uniform" ], [ "tensorflow.nn.relu", "matplotlib.pyplot.imshow", "tensorflow.read_file", "matplotlib.use", "tensorflow.cast", "tensorflow.expand_dims", "tensorflow.image.rgb_to_grayscale", "matplotlib.pyplot.subplot", "tensorflow.image.decode_image", "tensorflow.Session", "matplotlib.pyplot.axis", "matplotlib.pyplot.show", "tensorflow.nn.conv2d" ] ]
BitGo/statsmodels
[ "31a73250495d63dfc853625ce1d2b3566d3ac95a" ]
[ "statsmodels/tsa/vector_ar/tests/test_var.py" ]
[ "\"\"\"\nTest VAR Model\n\"\"\"\nfrom __future__ import print_function\n# pylint: disable=W0612,W0231\nfrom statsmodels.compat.python import (iteritems, StringIO, lrange, BytesIO,\n range)\nfrom nose.tools import assert_raises\nimport nose\nimport os\nimport sys\n\nimport numpy as np\n\nimport statsmodels.api as sm\nimport statsmodels.tsa.vector_ar.util as util\nimport statsmodels.tools.data as data_util\nfrom statsmodels.tsa.vector_ar.var_model import VAR\n\n\nfrom numpy.testing import (assert_almost_equal, assert_equal, assert_,\n assert_allclose)\n\nDECIMAL_12 = 12\nDECIMAL_6 = 6\nDECIMAL_5 = 5\nDECIMAL_4 = 4\nDECIMAL_3 = 3\nDECIMAL_2 = 2\n\nclass CheckVAR(object):\n # just so pylint won't complain\n res1 = None\n res2 = None\n\n def test_params(self):\n assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_3)\n\n def test_neqs(self):\n assert_equal(self.res1.neqs, self.res2.neqs)\n\n def test_nobs(self):\n assert_equal(self.res1.avobs, self.res2.nobs)\n\n def test_df_eq(self):\n assert_equal(self.res1.df_eq, self.res2.df_eq)\n\n def test_rmse(self):\n results = self.res1.results\n for i in range(len(results)):\n assert_almost_equal(results[i].mse_resid**.5,\n eval('self.res2.rmse_'+str(i+1)), DECIMAL_6)\n\n def test_rsquared(self):\n results = self.res1.results\n for i in range(len(results)):\n assert_almost_equal(results[i].rsquared,\n eval('self.res2.rsquared_'+str(i+1)), DECIMAL_3)\n\n def test_llf(self):\n results = self.res1.results\n assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_2)\n for i in range(len(results)):\n assert_almost_equal(results[i].llf,\n eval('self.res2.llf_'+str(i+1)), DECIMAL_2)\n\n def test_aic(self):\n assert_almost_equal(self.res1.aic, self.res2.aic)\n\n def test_bic(self):\n assert_almost_equal(self.res1.bic, self.res2.bic)\n\n def test_hqic(self):\n assert_almost_equal(self.res1.hqic, self.res2.hqic)\n\n def test_fpe(self):\n assert_almost_equal(self.res1.fpe, self.res2.fpe)\n\n def test_detsig(self):\n assert_almost_equal(self.res1.detomega, self.res2.detsig)\n\n def test_bse(self):\n assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)\n\ndef get_macrodata():\n data = sm.datasets.macrodata.load().data[['realgdp','realcons','realinv']]\n names = data.dtype.names\n nd = data.view((float,3), type=np.ndarray)\n nd = np.diff(np.log(nd), axis=0)\n return nd.ravel().view(data.dtype, type=np.ndarray)\n\ndef generate_var():\n from rpy2.robjects import r\n import pandas.rpy.common as prp\n r.source('tests/var.R')\n return prp.convert_robj(r['result'], use_pandas=False)\n\ndef write_generate_var():\n result = generate_var()\n np.savez('tests/results/vars_results.npz', **result)\n\nclass RResults(object):\n \"\"\"\n Simple interface with results generated by \"vars\" package in R.\n \"\"\"\n\n def __init__(self):\n #data = np.load(resultspath + 'vars_results.npz')\n from .results.results_var_data import var_results\n data = var_results.__dict__\n\n self.names = data['coefs'].dtype.names\n self.params = data['coefs'].view((float, len(self.names)), type=np.ndarray)\n self.stderr = data['stderr'].view((float, len(self.names)), type=np.ndarray)\n\n self.irf = data['irf'].item()\n self.orth_irf = data['orthirf'].item()\n\n self.nirfs = int(data['nirfs'][0])\n self.nobs = int(data['obs'][0])\n self.totobs = int(data['totobs'][0])\n\n crit = data['crit'].item()\n self.aic = crit['aic'][0]\n self.sic = self.bic = crit['sic'][0]\n self.hqic = crit['hqic'][0]\n self.fpe = crit['fpe'][0]\n\n self.detomega = data['detomega'][0]\n self.loglike = 
data['loglike'][0]\n\n self.nahead = int(data['nahead'][0])\n self.ma_rep = data['phis']\n\n self.causality = data['causality']\n\ndef close_plots():\n try:\n import matplotlib.pyplot as plt\n plt.close('all')\n except ImportError:\n pass\n\n_orig_stdout = None\n\ndef setup_module():\n global _orig_stdout\n _orig_stdout = sys.stdout\n sys.stdout = StringIO()\n\ndef teardown_module():\n sys.stdout = _orig_stdout\n close_plots()\n\ndef have_matplotlib():\n try:\n import matplotlib\n return True\n except ImportError:\n return False\n\nclass CheckIRF(object):\n\n ref = None; res = None; irf = None\n k = None\n\n #---------------------------------------------------------------------------\n # IRF tests\n\n def test_irf_coefs(self):\n self._check_irfs(self.irf.irfs, self.ref.irf)\n self._check_irfs(self.irf.orth_irfs, self.ref.orth_irf)\n\n\n def _check_irfs(self, py_irfs, r_irfs):\n for i, name in enumerate(self.res.names):\n ref_irfs = r_irfs[name].view((float, self.k), type=np.ndarray)\n res_irfs = py_irfs[:, :, i]\n assert_almost_equal(ref_irfs, res_irfs)\n\n\n def test_plot_irf(self):\n if not have_matplotlib():\n raise nose.SkipTest\n\n import matplotlib.pyplot as plt\n self.irf.plot()\n plt.close('all')\n self.irf.plot(plot_stderr=False)\n plt.close('all')\n\n self.irf.plot(impulse=0, response=1)\n plt.close('all')\n self.irf.plot(impulse=0)\n plt.close('all')\n self.irf.plot(response=0)\n plt.close('all')\n\n self.irf.plot(orth=True)\n plt.close('all')\n self.irf.plot(impulse=0, response=1, orth=True)\n close_plots()\n\n\n def test_plot_cum_effects(self):\n if not have_matplotlib():\n raise nose.SkipTest\n # I need close after every plot to avoid segfault, see #3158\n import matplotlib.pyplot as plt\n plt.close('all')\n self.irf.plot_cum_effects()\n plt.close('all')\n self.irf.plot_cum_effects(plot_stderr=False)\n plt.close('all')\n self.irf.plot_cum_effects(impulse=0, response=1)\n plt.close('all')\n\n self.irf.plot_cum_effects(orth=True)\n plt.close('all')\n self.irf.plot_cum_effects(impulse=0, response=1, orth=True)\n close_plots()\n\n\nclass CheckFEVD(object):\n\n fevd = None\n\n #---------------------------------------------------------------------------\n # FEVD tests\n\n def test_fevd_plot(self):\n if not have_matplotlib():\n raise nose.SkipTest\n\n self.fevd.plot()\n close_plots()\n\n def test_fevd_repr(self):\n self.fevd\n\n def test_fevd_summary(self):\n self.fevd.summary()\n\n def test_fevd_cov(self):\n # test does not crash\n # not implemented\n # covs = self.fevd.cov()\n\n pass\n\nclass TestVARResults(CheckIRF, CheckFEVD):\n\n @classmethod\n def setupClass(cls):\n cls.p = 2\n\n cls.data = get_macrodata()\n cls.model = VAR(cls.data)\n cls.names = cls.model.endog_names\n\n cls.ref = RResults()\n cls.k = len(cls.ref.names)\n cls.res = cls.model.fit(maxlags=cls.p)\n\n cls.irf = cls.res.irf(cls.ref.nirfs)\n cls.nahead = cls.ref.nahead\n\n cls.fevd = cls.res.fevd()\n\n def test_constructor(self):\n # make sure this works with no names\n ndarr = self.data.view((float, 3), type=np.ndarray)\n model = VAR(ndarr)\n res = model.fit(self.p)\n\n def test_names(self):\n assert_equal(self.model.endog_names, self.ref.names)\n\n model2 = VAR(self.data)\n assert_equal(model2.endog_names, self.ref.names)\n\n def test_get_eq_index(self):\n assert(type(self.res.names) is list)\n\n for i, name in enumerate(self.names):\n idx = self.res.get_eq_index(i)\n idx2 = self.res.get_eq_index(name)\n\n assert_equal(idx, i)\n assert_equal(idx, idx2)\n\n assert_raises(Exception, self.res.get_eq_index, 
'foo')\n\n def test_repr(self):\n # just want this to work\n foo = str(self.res)\n bar = repr(self.res)\n\n def test_params(self):\n assert_almost_equal(self.res.params, self.ref.params, DECIMAL_3)\n\n def test_cov_params(self):\n # do nothing for now\n self.res.cov_params\n\n def test_cov_ybar(self):\n self.res.cov_ybar()\n\n def test_tstat(self):\n self.res.tvalues\n\n def test_pvalues(self):\n self.res.pvalues\n\n def test_summary(self):\n summ = self.res.summary()\n\n\n def test_detsig(self):\n assert_almost_equal(self.res.detomega, self.ref.detomega)\n\n def test_aic(self):\n assert_almost_equal(self.res.aic, self.ref.aic)\n\n def test_bic(self):\n assert_almost_equal(self.res.bic, self.ref.bic)\n\n def test_hqic(self):\n assert_almost_equal(self.res.hqic, self.ref.hqic)\n\n def test_fpe(self):\n assert_almost_equal(self.res.fpe, self.ref.fpe)\n\n def test_lagorder_select(self):\n ics = ['aic', 'fpe', 'hqic', 'bic']\n\n for ic in ics:\n res = self.model.fit(maxlags=10, ic=ic, verbose=True)\n\n assert_raises(Exception, self.model.fit, ic='foo')\n\n def test_nobs(self):\n assert_equal(self.res.nobs, self.ref.nobs)\n\n def test_stderr(self):\n assert_almost_equal(self.res.stderr, self.ref.stderr, DECIMAL_4)\n\n def test_loglike(self):\n assert_almost_equal(self.res.llf, self.ref.loglike)\n\n def test_ma_rep(self):\n ma_rep = self.res.ma_rep(self.nahead)\n assert_almost_equal(ma_rep, self.ref.ma_rep)\n\n #--------------------------------------------------\n # Lots of tests to make sure stuff works...need to check correctness\n\n def test_causality(self):\n causedby = self.ref.causality['causedby']\n\n for i, name in enumerate(self.names):\n variables = self.names[:i] + self.names[i + 1:]\n result = self.res.test_causality(name, variables, kind='f')\n assert_almost_equal(result['pvalue'], causedby[i], DECIMAL_4)\n\n rng = lrange(self.k)\n rng.remove(i)\n result2 = self.res.test_causality(i, rng, kind='f')\n assert_almost_equal(result['pvalue'], result2['pvalue'], DECIMAL_12)\n\n # make sure works\n result = self.res.test_causality(name, variables, kind='wald')\n\n # corner cases\n _ = self.res.test_causality(self.names[0], self.names[1])\n _ = self.res.test_causality(0, 1)\n\n assert_raises(Exception,self.res.test_causality, 0, 1, kind='foo')\n\n def test_select_order(self):\n result = self.model.fit(10, ic='aic', verbose=True)\n result = self.model.fit(10, ic='fpe', verbose=True)\n\n # bug\n model = VAR(self.model.endog)\n model.select_order()\n\n def test_is_stable(self):\n # may not necessarily be true for other datasets\n assert(self.res.is_stable(verbose=True))\n\n def test_acf(self):\n # test that it works...for now\n acfs = self.res.acf(10)\n\n # defaults to nlags=lag_order\n acfs = self.res.acf()\n assert(len(acfs) == self.p + 1)\n\n def test_acorr(self):\n acorrs = self.res.acorr(10)\n\n def test_forecast(self):\n point = self.res.forecast(self.res.y[-5:], 5)\n\n def test_forecast_interval(self):\n y = self.res.y[:-self.p:]\n point, lower, upper = self.res.forecast_interval(y, 5)\n\n def test_plot_sim(self):\n if not have_matplotlib():\n raise nose.SkipTest\n\n self.res.plotsim(steps=100)\n close_plots()\n\n def test_plot(self):\n if not have_matplotlib():\n raise nose.SkipTest\n\n self.res.plot()\n close_plots()\n\n def test_plot_acorr(self):\n if not have_matplotlib():\n raise nose.SkipTest\n\n self.res.plot_acorr()\n close_plots()\n\n def test_plot_forecast(self):\n if not have_matplotlib():\n raise nose.SkipTest\n\n self.res.plot_forecast(5)\n close_plots()\n\n def 
test_reorder(self):\n #manually reorder\n data = self.data.view((float,3), type=np.ndarray)\n names = self.names\n data2 = np.append(np.append(data[:,2,None], data[:,0,None], axis=1), data[:,1,None], axis=1)\n names2 = []\n names2.append(names[2])\n names2.append(names[0])\n names2.append(names[1])\n res2 = VAR(data2).fit(maxlags=self.p)\n\n #use reorder function\n res3 = self.res.reorder(['realinv','realgdp', 'realcons'])\n\n #check if the main results match\n assert_almost_equal(res2.params, res3.params)\n assert_almost_equal(res2.sigma_u, res3.sigma_u)\n assert_almost_equal(res2.bic, res3.bic)\n assert_almost_equal(res2.stderr, res3.stderr)\n\n def test_pickle(self):\n fh = BytesIO()\n #test wrapped results load save pickle\n self.res.save(fh)\n fh.seek(0,0)\n res_unpickled = self.res.__class__.load(fh)\n assert_(type(res_unpickled) is type(self.res))\n\n\nclass E1_Results(object):\n \"\"\"\n Results from Lutkepohl (2005) using E2 dataset\n \"\"\"\n\n def __init__(self):\n # Lutkepohl p. 120 results\n\n # I asked the author about these results and there is probably rounding\n # error in the book, so I adjusted these test results to match what is\n # coming out of the Python (double-checked) calculations\n self.irf_stderr = np.array([[[.125, 0.546, 0.664 ],\n [0.032, 0.139, 0.169],\n [0.026, 0.112, 0.136]],\n\n [[0.129, 0.547, 0.663],\n [0.032, 0.134, 0.163],\n [0.026, 0.108, 0.131]],\n\n [[0.084, .385, .479],\n [.016, .079, .095],\n [.016, .078, .103]]])\n\n self.cum_irf_stderr = np.array([[[.125, 0.546, 0.664 ],\n [0.032, 0.139, 0.169],\n [0.026, 0.112, 0.136]],\n\n [[0.149, 0.631, 0.764],\n [0.044, 0.185, 0.224],\n [0.033, 0.140, 0.169]],\n\n [[0.099, .468, .555],\n [.038, .170, .205],\n [.033, .150, .185]]])\n\n self.lr_stderr = np.array([[.134, .645, .808],\n [.048, .230, .288],\n [.043, .208, .260]])\n\nbasepath = os.path.split(sm.__file__)[0]\nresultspath = basepath + '/tsa/vector_ar/tests/results/'\n\ndef get_lutkepohl_data(name='e2'):\n lut_data = basepath + '/tsa/vector_ar/data/'\n path = lut_data + '%s.dat' % name\n\n return util.parse_lutkepohl_data(path)\n\ndef test_lutkepohl_parse():\n files = ['e%d' % i for i in range(1, 7)]\n\n for f in files:\n get_lutkepohl_data(f)\n\nclass TestVARResultsLutkepohl(object):\n \"\"\"\n Verify calculations using results from Lutkepohl's book\n \"\"\"\n\n def __init__(self):\n self.p = 2\n sdata, dates = get_lutkepohl_data('e1')\n\n data = data_util.struct_to_ndarray(sdata)\n adj_data = np.diff(np.log(data), axis=0)\n # est = VAR(adj_data, p=2, dates=dates[1:], names=names)\n\n self.model = VAR(adj_data[:-16], dates=dates[1:-16], freq='BQ-MAR')\n self.res = self.model.fit(maxlags=self.p)\n self.irf = self.res.irf(10)\n self.lut = E1_Results()\n\n def test_approx_mse(self):\n # 3.5.18, p. 
99\n mse2 = np.array([[25.12, .580, 1.300],\n [.580, 1.581, .586],\n [1.300, .586, 1.009]]) * 1e-4\n\n assert_almost_equal(mse2, self.res.forecast_cov(3)[1],\n DECIMAL_3)\n\n def test_irf_stderr(self):\n irf_stderr = self.irf.stderr(orth=False)\n for i in range(1, 1 + len(self.lut.irf_stderr)):\n assert_almost_equal(np.round(irf_stderr[i], 3),\n self.lut.irf_stderr[i-1])\n\n def test_cum_irf_stderr(self):\n stderr = self.irf.cum_effect_stderr(orth=False)\n for i in range(1, 1 + len(self.lut.cum_irf_stderr)):\n assert_almost_equal(np.round(stderr[i], 3),\n self.lut.cum_irf_stderr[i-1])\n\n def test_lr_effect_stderr(self):\n stderr = self.irf.lr_effect_stderr(orth=False)\n orth_stderr = self.irf.lr_effect_stderr(orth=True)\n assert_almost_equal(np.round(stderr, 3), self.lut.lr_stderr)\n\ndef test_get_trendorder():\n results = {\n 'c' : 1,\n 'nc' : 0,\n 'ct' : 2,\n 'ctt' : 3\n }\n\n for t, trendorder in iteritems(results):\n assert(util.get_trendorder(t) == trendorder)\n\n\ndef test_var_constant():\n # see 2043\n import datetime\n from pandas import DataFrame, DatetimeIndex\n\n series = np.array([[2., 2.], [1, 2.], [1, 2.], [1, 2.], [1., 2.]])\n data = DataFrame(series)\n\n d = datetime.datetime.now()\n delta = datetime.timedelta(days=1)\n index = []\n for i in range(data.shape[0]):\n index.append(d)\n d += delta\n\n data.index = DatetimeIndex(index)\n\n model = VAR(data)\n assert_raises(ValueError, model.fit, 1)\n\ndef test_var_trend():\n # see 2271\n data = get_macrodata().view((float,3), type=np.ndarray)\n\n model = sm.tsa.VAR(data)\n results = model.fit(4) #, trend = 'c')\n irf = results.irf(10)\n\n\n data_nc = data - data.mean(0)\n model_nc = sm.tsa.VAR(data_nc)\n results_nc = model_nc.fit(4, trend = 'nc')\n assert_raises(ValueError, model.fit, 4, trend='t')\n\n\ndef test_irf_trend():\n # test for irf with different trend see #1636\n # this is a rough comparison by adding trend or subtracting mean to data\n # to get similar AR coefficients and IRF\n data = get_macrodata().view((float,3), type=np.ndarray)\n\n model = sm.tsa.VAR(data)\n results = model.fit(4) #, trend = 'c')\n irf = results.irf(10)\n\n\n data_nc = data - data.mean(0)\n model_nc = sm.tsa.VAR(data_nc)\n results_nc = model_nc.fit(4, trend = 'nc')\n irf_nc = results_nc.irf(10)\n\n assert_allclose(irf_nc.stderr()[1:4], irf.stderr()[1:4], rtol=0.01)\n\n trend = 1e-3 * np.arange(len(data)) / (len(data) - 1)\n # for pandas version, currently not used, if data is a pd.DataFrame\n #data_t = pd.DataFrame(data.values + trend[:,None], index=data.index, columns=data.columns)\n data_t = data + trend[:,None]\n\n model_t = sm.tsa.VAR(data_t)\n results_t = model_t.fit(4, trend = 'ct')\n irf_t = results_t.irf(10)\n\n assert_allclose(irf_t.stderr()[1:4], irf.stderr()[1:4], rtol=0.03)\n\n\nif __name__ == '__main__':\n import nose\n nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],\n exit=False)\n" ]
[ [ "numpy.testing.assert_equal", "numpy.log", "numpy.savez", "pandas.DatetimeIndex", "pandas.DataFrame", "numpy.round", "numpy.testing.assert_almost_equal", "numpy.append", "pandas.rpy.common.convert_robj", "matplotlib.pyplot.close", "numpy.array" ] ]
hnwarid/DQLabAcademy
[ "e03d82f97536ae103b6abc65db0ae16520fb68c7", "e03d82f97536ae103b6abc65db0ae16520fb68c7", "e03d82f97536ae103b6abc65db0ae16520fb68c7", "e03d82f97536ae103b6abc65db0ae16520fb68c7", "e03d82f97536ae103b6abc65db0ae16520fb68c7", "e03d82f97536ae103b6abc65db0ae16520fb68c7", "e03d82f97536ae103b6abc65db0ae16520fb68c7" ]
[ "1_PythonDataProcessing/3_14_index_method.py", "2_PythonDataVisualization/3_06_coord_flip.py", "1_PythonDataProcessing/6_00_scatterplot_pandas_matplotlib.py", "1_PythonDataProcessing/7_06_datapreprocess_impute.py", "1_PythonDataProcessing/0_01_readcsv_head.py", "2_PythonDataVisualization/2_17_miniproject_case2.py", "3_PythonDataScienceApplications/1_07_filling_emptydata.py" ]
[ "import pandas as pd\n# Baca file sample_tsv.tsv untuk 10 baris pertama saja\ndf = pd.read_csv(\"https://storage.googleapis.com/dqlab-dataset/sample_tsv.tsv\", sep=\"\\t\", nrows=10)\n# Cetak data frame awal\nprint(\"Dataframe awal:\\n\", df)\n# Set index baru\ndf.index = [\"Pesanan ke-\" + str(i) for i in range(1, 11)]\n# Cetak data frame dengan index baru\nprint(\"Dataframe dengan index baru:\\n\", df)", "import matplotlib.pyplot as plt\nfrom plotnine import *\nimport pandas as pd\ndf_penduduk = pd.read_csv('https://storage.googleapis.com/dqlab-dataset/datakependudukandki-dqlab.csv')\n\n(ggplot(data=df_penduduk)\n+ aes(x='NAMA KABUPATEN/KOTA', y='JUMLAH')\n+ geom_col()\n+ coord_flip()\n).draw()\nplt.tight_layout()\nplt.show()", "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nplt.clf()\n\n## mengambil data contoh\nraw_data = pd.read_csv(\"https://storage.googleapis.com/dqlab-dataset/dataset_statistic.csv\", sep=';')\n\n## melihat isi dari data\nprint(raw_data)\n\nplt.figure()\n# visualisasi diagram pencar untuk variabel 'Pendapatan' dan 'Total' menggunakan 'plot.scatter' dari pandas\nraw_data.plot.scatter(x='Pendapatan', y='Total')\nplt.title('plot.scatter dari pandas', size=14)\nplt.tight_layout()\nplt.show()\n\n# visualisasi diagram pencar untuk variabel 'Pendapatan' dan 'Total' menggunakan 'plt.scatter' dari matplotlib\nplt.scatter(x='Pendapatan', y='Total', data=raw_data)\nplt.title('plt.scatter dari matplotlib', size=14)\nplt.tight_layout()\nplt.show()", "import pandas as pd\ndataset = pd.read_csv('https://storage.googleapis.com/dqlab-dataset/pythonTutorial/online_raw.csv')\n\nprint(\"Before imputation:\")\n# Checking missing value for each feature\nprint(dataset.isnull().sum())\n# Counting total missing value\nprint(dataset.isnull().sum().sum())\n\nprint(\"\\nAfter imputation:\")\n# Fill missing value with mean of feature value\ndataset.fillna(dataset.mean(), inplace=True)\n# Checking missing value for each feature\nprint(dataset.isnull().sum())\n# Counting total missing value\nprint(dataset.isnull().sum().sum())", "import pandas as pd\n\ncsv_data = pd.read_csv(\"https://storage.googleapis.com/dqlab-dataset/shopping_data.csv\")\n\nprint(csv_data.head())", "import datetime\nimport pandas as pd\nimport matplotlib.pyplot as plt\ndataset = pd.read_csv('https://storage.googleapis.com/dqlab-dataset/retail_raw_reduced.csv')\ndataset['order_month'] = dataset['order_date'].apply(lambda x: datetime.datetime.strptime(x, \"%Y-%m-%d\").strftime('%Y-%m'))\ndataset['gmv'] = dataset['item_price']*dataset['quantity']\ntop_brands = (dataset[dataset['order_month']=='2019-12'].groupby('brand')['quantity']\n .sum()\n .reset_index()\n .sort_values(by='quantity',ascending=False)\n .head(5))\ndataset_top5brand_dec = dataset[(dataset['order_month']=='2019-12') & (dataset['brand'].isin(top_brands['brand'].to_list()))]\n\ndataset_top5brand_dec.groupby(['order_date','brand'])['quantity'].sum().unstack().plot(marker='.', cmap='plasma')\nplt.title('Daily Sold Quantity Dec 2019 - Breakdown by Brands',loc='center',pad=30, fontsize=15, color='blue')\nplt.xlabel('Order Date', fontsize = 12)\nplt.ylabel('Quantity',fontsize = 12)\nplt.grid(color='darkgray', linestyle=':', linewidth=0.5)\nplt.ylim(ymin=0)\nplt.legend(loc='upper center', bbox_to_anchor=(1.1, 1), shadow=True, ncol=1)\nplt.annotate('Terjadi lonjakan', xy=(7, 310), xytext=(8, 300),\n weight='bold', color='red',\n arrowprops=dict(arrowstyle='->',\n connectionstyle=\"arc3\",\n color='red'))\nplt.gcf().set_size_inches(10, 
5)\nplt.tight_layout()\nplt.show()", "import pandas as pd\n\ndef concat_df(train_data, test_data):\n\treturn pd.concat([train_data, test_data], sort=True).reset_index(drop=True)\n\ndf_train = pd.read_csv('https://storage.googleapis.com/dqlab-dataset/challenge/feature-engineering/titanic_train.csv')\ndf_test = pd.read_csv('https://storage.googleapis.com/dqlab-dataset/challenge/feature-engineering/titanic_test.csv')\ndf_all = concat_df(df_train, df_test)\ndf_train.name = 'Training Set'\ndf_test.name = 'Test Set'\ndf_all.name = 'All Set'\ndfs = [df_train, df_test]\n\nage_by_pclass_sex = df_all.groupby(['Sex', 'Pclass']).median()['Age']\ndf_all['Age'] = df_all.groupby(['Sex', 'Pclass'])['Age'].apply(lambda x: x.fillna(x.median()))\n\n# Ketikkan kode yang dicontohkan\n# Filling the missing values in Embarked with S\ndf_all['Embarked'] = df_all['Embarked'].fillna('S')\nmed_fare = df_all.groupby(['Pclass', 'Parch', 'SibSp']).Fare.median()[3][0][0]\n# Filling the missing value in Fare with the median Fare of 3rd class alone passenger\ndf_all['Fare'] = df_all['Fare'].fillna(med_fare)" ]
[ [ "pandas.read_csv" ], [ "matplotlib.pyplot.tight_layout", "pandas.read_csv", "matplotlib.pyplot.show" ], [ "matplotlib.pyplot.tight_layout", "pandas.read_csv", "matplotlib.pyplot.title", "matplotlib.pyplot.scatter", "matplotlib.pyplot.clf", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "pandas.read_csv" ], [ "pandas.read_csv" ], [ "matplotlib.pyplot.legend", "pandas.read_csv", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.title", "matplotlib.pyplot.ylim", "matplotlib.pyplot.gcf", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ], [ "pandas.concat", "pandas.read_csv" ] ]
vibhatha/cylon
[ "3f2c5b08935a4332b820818ca113cb44f7ac5da3" ]
[ "python/examples/op_benchmark/null_handling_benchmark.py" ]
[ "##\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##\n\nimport pyarrow as pa\nimport numpy as np\nimport pandas as pd\nimport pycylon as cn\nfrom pycylon import CylonContext\nfrom pycylon import Table\nfrom bench_util import get_dataframe\nimport time\nimport argparse\n\n\"\"\"\nRun benchmark:\n\n>>> python python/examples/op_benchmark/null_handling_benchmark.py --start_size 1_000_000 \\\n --step_size 1_000_000 \\\n --end_size 10_000_000 \\\n --num_cols 2 \\\n --stats_file /tmp/dropna_bench.csv \\\n --repetitions 1 \\\n --duplication_factor 0.9\n\"\"\"\n\n\ndef dropna_op(num_rows: int, num_cols: int, duplication_factor: float):\n ctx: CylonContext = CylonContext(config=None, distributed=False)\n\n df = get_dataframe(num_rows=num_rows, num_cols=num_cols, duplication_factor=duplication_factor, with_null=True)\n\n ct = Table.from_pandas(ctx, df)\n\n pandas_time = time.time()\n df.dropna(axis=1)\n pandas_time = time.time() - pandas_time\n\n cylon_time = time.time()\n ct.dropna(axis=0)\n cylon_time = time.time() - cylon_time\n\n pandas_eval_time = time.time()\n pd.eval('df.dropna(axis=1)')\n pandas_eval_time = time.time() - pandas_eval_time\n\n return pandas_time, cylon_time, pandas_eval_time\n\n\ndef bench_dropna(start: int, end: int, step: int, num_cols: int, repetitions: int, stats_file: str,\n duplication_factor: float):\n all_data = []\n schema = [\"num_records\", \"num_cols\", \"pandas\", \"cylon\", \"pandas[eval]\", \"speed up\", \"speed up [eval]\"]\n assert repetitions >= 1\n assert start > 0\n assert step > 0\n assert num_cols > 0\n\n for records in range(start, end + step, step):\n print(f\"DropNa Op : Records={records}, Columns={num_cols}\")\n times = []\n for idx in range(repetitions):\n pandas_time, cylon_time, pandas_eval_time = dropna_op(num_rows=records, num_cols=num_cols,\n duplication_factor=duplication_factor)\n times.append([pandas_time, cylon_time, pandas_eval_time])\n times = np.array(times).sum(axis=0) / repetitions\n print(f\"DropNa Op : Records={records}, Columns={num_cols}, \"\n f\"Pandas Time : {times[0]}, Cylon Time : {times[1]}, Pandas Eval Time : {times[2]}\")\n all_data.append([records, num_cols, times[0], times[1], times[2], times[0] / times[1], times[2]/ times[1]])\n pdf = pd.DataFrame(all_data, columns=schema)\n print(pdf)\n pdf.to_csv(stats_file)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--start_size\",\n help=\"initial data size\",\n type=int)\n parser.add_argument(\"-e\", \"--end_size\",\n help=\"end data size\",\n type=int)\n parser.add_argument(\"-d\", \"--duplication_factor\",\n help=\"random data duplication factor\",\n type=float)\n parser.add_argument(\"-s\", \"--step_size\",\n help=\"Step size\",\n type=int)\n parser.add_argument(\"-c\", \"--num_cols\",\n help=\"number of columns\",\n type=int)\n parser.add_argument(\"-r\", \"--repetitions\",\n help=\"number of experiments to be repeated\",\n type=int)\n parser.add_argument(\"-f\", \"--stats_file\",\n help=\"stats file to be saved\",\n 
type=str)\n\n args = parser.parse_args()\n print(f\"Start Data Size : {args.start_size}\")\n print(f\"End Data Size : {args.end_size}\")\n print(f\"Step Data Size : {args.step_size}\")\n print(f\"Data Duplication Factor : {args.duplication_factor}\")\n print(f\"Number of Columns : {args.num_cols}\")\n print(f\"Number of Repetitions : {args.repetitions}\")\n print(f\"Stats File : {args.stats_file}\")\n bench_dropna(start=args.start_size,\n end=args.end_size,\n step=args.step_size,\n num_cols=args.num_cols,\n repetitions=args.repetitions,\n stats_file=args.stats_file,\n duplication_factor=args.duplication_factor)\n" ]
[ [ "pandas.eval", "numpy.array", "pandas.DataFrame" ] ]
mieldehabanero/stable-baselines3
[ "b37052cbf059b6f81314f5b98205e4a3403e4112" ]
[ "tests/test_dict_env.py" ]
[ "import gym\nimport numpy as np\nimport pytest\nfrom gym import spaces\n\nfrom stable_baselines3 import A2C, DDPG, DQN, PPO, SAC, TD3\nfrom stable_baselines3.common.env_util import make_vec_env\nfrom stable_baselines3.common.envs import BitFlippingEnv, SimpleMultiObsEnv\nfrom stable_baselines3.common.evaluation import evaluate_policy\nfrom stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv, VecFrameStack, VecNormalize\n\n\nclass DummyDictEnv(gym.Env):\n \"\"\"Custom Environment for testing purposes only\"\"\"\n\n metadata = {\"render.modes\": [\"human\"]}\n\n def __init__(\n self,\n use_discrete_actions=False,\n channel_last=False,\n nested_dict_obs=False,\n vec_only=False,\n ):\n super().__init__()\n if use_discrete_actions:\n self.action_space = spaces.Discrete(3)\n else:\n self.action_space = spaces.Box(low=-1, high=1, shape=(2,), dtype=np.float32)\n N_CHANNELS = 1\n HEIGHT = 64\n WIDTH = 64\n\n if channel_last:\n obs_shape = (HEIGHT, WIDTH, N_CHANNELS)\n else:\n obs_shape = (N_CHANNELS, HEIGHT, WIDTH)\n\n self.observation_space = spaces.Dict(\n {\n # Image obs\n \"img\": spaces.Box(low=0, high=255, shape=obs_shape, dtype=np.uint8),\n # Vector obs\n \"vec\": spaces.Box(low=-1, high=1, shape=(2,), dtype=np.float32),\n # Discrete obs\n \"discrete\": spaces.Discrete(4),\n }\n )\n\n # For checking consistency with normal MlpPolicy\n if vec_only:\n self.observation_space = spaces.Dict(\n {\n # Vector obs\n \"vec\": spaces.Box(low=-1, high=1, shape=(2,), dtype=np.float32),\n }\n )\n\n if nested_dict_obs:\n # Add dictionary observation inside observation space\n self.observation_space.spaces[\"nested-dict\"] = spaces.Dict({\"nested-dict-discrete\": spaces.Discrete(4)})\n\n def seed(self, seed=None):\n if seed is not None:\n self.observation_space.seed(seed)\n\n def step(self, action):\n reward = 0.0\n done = False\n return self.observation_space.sample(), reward, done, {}\n\n def compute_reward(self, achieved_goal, desired_goal, info):\n return np.zeros((len(achieved_goal),))\n\n def reset(self):\n return self.observation_space.sample()\n\n def render(self, mode=\"human\"):\n pass\n\n\[email protected](\"policy\", [\"MlpPolicy\", \"CnnPolicy\"])\ndef test_policy_hint(policy):\n # Common mistake: using the wrong policy\n with pytest.raises(ValueError):\n PPO(policy, BitFlippingEnv(n_bits=4))\n\n\[email protected](\"model_class\", [PPO, A2C])\ndef test_goal_env(model_class):\n env = BitFlippingEnv(n_bits=4)\n # check that goal env works for PPO/A2C that cannot use HER replay buffer\n model = model_class(\"MultiInputPolicy\", env, n_steps=64).learn(250)\n evaluate_policy(model, model.get_env())\n\n\[email protected](\"model_class\", [PPO, A2C, DQN, DDPG, SAC, TD3])\ndef test_consistency(model_class):\n \"\"\"\n Make sure that dict obs with vector only vs using flatten obs is equivalent.\n This ensures notable that the network architectures are the same.\n \"\"\"\n use_discrete_actions = model_class == DQN\n dict_env = DummyDictEnv(use_discrete_actions=use_discrete_actions, vec_only=True)\n dict_env = gym.wrappers.TimeLimit(dict_env, 100)\n env = gym.wrappers.FlattenObservation(dict_env)\n dict_env.seed(10)\n obs = dict_env.reset()\n\n kwargs = {}\n n_steps = 256\n\n if model_class in {A2C, PPO}:\n kwargs = dict(\n n_steps=128,\n )\n else:\n # Avoid memory error when using replay buffer\n # Reduce the size of the features and make learning faster\n kwargs = dict(\n buffer_size=250,\n train_freq=8,\n gradient_steps=1,\n )\n if model_class == DQN:\n kwargs[\"learning_starts\"] = 
0\n\n dict_model = model_class(\"MultiInputPolicy\", dict_env, gamma=0.5, seed=1, **kwargs)\n action_before_learning_1, _ = dict_model.predict(obs, deterministic=True)\n dict_model.learn(total_timesteps=n_steps)\n\n normal_model = model_class(\"MlpPolicy\", env, gamma=0.5, seed=1, **kwargs)\n action_before_learning_2, _ = normal_model.predict(obs[\"vec\"], deterministic=True)\n normal_model.learn(total_timesteps=n_steps)\n\n action_1, _ = dict_model.predict(obs, deterministic=True)\n action_2, _ = normal_model.predict(obs[\"vec\"], deterministic=True)\n\n assert np.allclose(action_before_learning_1, action_before_learning_2)\n assert np.allclose(action_1, action_2)\n\n\[email protected](\"model_class\", [PPO, A2C, DQN, DDPG, SAC, TD3])\[email protected](\"channel_last\", [False, True])\ndef test_dict_spaces(model_class, channel_last):\n \"\"\"\n Additional tests for PPO/A2C/SAC/DDPG/TD3/DQN to check observation space support\n with mixed observation.\n \"\"\"\n use_discrete_actions = model_class not in [SAC, TD3, DDPG]\n env = DummyDictEnv(use_discrete_actions=use_discrete_actions, channel_last=channel_last)\n env = gym.wrappers.TimeLimit(env, 100)\n\n kwargs = {}\n n_steps = 256\n\n if model_class in {A2C, PPO}:\n kwargs = dict(\n n_steps=128,\n policy_kwargs=dict(\n net_arch=[32],\n features_extractor_kwargs=dict(cnn_output_dim=32),\n ),\n )\n else:\n # Avoid memory error when using replay buffer\n # Reduce the size of the features and make learning faster\n kwargs = dict(\n buffer_size=250,\n policy_kwargs=dict(\n net_arch=[32],\n features_extractor_kwargs=dict(cnn_output_dim=32),\n ),\n train_freq=8,\n gradient_steps=1,\n )\n if model_class == DQN:\n kwargs[\"learning_starts\"] = 0\n\n model = model_class(\"MultiInputPolicy\", env, gamma=0.5, seed=1, **kwargs)\n\n model.learn(total_timesteps=n_steps)\n\n evaluate_policy(model, env, n_eval_episodes=5, warn=False)\n\n\[email protected](\"model_class\", [PPO, A2C])\ndef test_multiprocessing(model_class):\n use_discrete_actions = model_class not in [SAC, TD3, DDPG]\n\n def make_env():\n env = DummyDictEnv(use_discrete_actions=use_discrete_actions, channel_last=False)\n env = gym.wrappers.TimeLimit(env, 100)\n return env\n\n env = make_vec_env(make_env, n_envs=2, vec_env_cls=SubprocVecEnv)\n\n kwargs = {}\n n_steps = 256\n\n if model_class in {A2C, PPO}:\n kwargs = dict(\n n_steps=128,\n policy_kwargs=dict(\n net_arch=[32],\n features_extractor_kwargs=dict(cnn_output_dim=32),\n ),\n )\n\n model = model_class(\"MultiInputPolicy\", env, gamma=0.5, seed=1, **kwargs)\n\n model.learn(total_timesteps=n_steps)\n\n\[email protected](\"model_class\", [PPO, A2C, DQN, DDPG, SAC, TD3])\[email protected](\"channel_last\", [False, True])\ndef test_dict_vec_framestack(model_class, channel_last):\n \"\"\"\n Additional tests for PPO/A2C/SAC/DDPG/TD3/DQN to check observation space support\n for Dictionary spaces and VecEnvWrapper using MultiInputPolicy.\n \"\"\"\n use_discrete_actions = model_class not in [SAC, TD3, DDPG]\n channels_order = {\"vec\": None, \"img\": \"last\" if channel_last else \"first\"}\n env = DummyVecEnv(\n [lambda: SimpleMultiObsEnv(random_start=True, discrete_actions=use_discrete_actions, channel_last=channel_last)]\n )\n\n env = VecFrameStack(env, n_stack=3, channels_order=channels_order)\n\n kwargs = {}\n n_steps = 256\n\n if model_class in {A2C, PPO}:\n kwargs = dict(\n n_steps=128,\n policy_kwargs=dict(\n net_arch=[32],\n features_extractor_kwargs=dict(cnn_output_dim=32),\n ),\n )\n else:\n # Avoid memory error when using replay 
buffer\n # Reduce the size of the features and make learning faster\n kwargs = dict(\n buffer_size=250,\n policy_kwargs=dict(\n net_arch=[32],\n features_extractor_kwargs=dict(cnn_output_dim=32),\n ),\n train_freq=8,\n gradient_steps=1,\n )\n if model_class == DQN:\n kwargs[\"learning_starts\"] = 0\n\n model = model_class(\"MultiInputPolicy\", env, gamma=0.5, seed=1, **kwargs)\n\n model.learn(total_timesteps=n_steps)\n\n evaluate_policy(model, env, n_eval_episodes=5, warn=False)\n\n\[email protected](\"model_class\", [PPO, A2C, DQN, DDPG, SAC, TD3])\ndef test_vec_normalize(model_class):\n \"\"\"\n Additional tests for PPO/A2C/SAC/DDPG/TD3/DQN to check observation space support\n for GoalEnv and VecNormalize using MultiInputPolicy.\n \"\"\"\n env = DummyVecEnv([lambda: gym.wrappers.TimeLimit(DummyDictEnv(use_discrete_actions=model_class == DQN), 100)])\n env = VecNormalize(env, norm_obs_keys=[\"vec\"])\n\n kwargs = {}\n n_steps = 256\n\n if model_class in {A2C, PPO}:\n kwargs = dict(\n n_steps=128,\n policy_kwargs=dict(\n net_arch=[32],\n ),\n )\n else:\n # Avoid memory error when using replay buffer\n # Reduce the size of the features and make learning faster\n kwargs = dict(\n buffer_size=250,\n policy_kwargs=dict(\n net_arch=[32],\n ),\n train_freq=8,\n gradient_steps=1,\n )\n if model_class == DQN:\n kwargs[\"learning_starts\"] = 0\n\n model = model_class(\"MultiInputPolicy\", env, gamma=0.5, seed=1, **kwargs)\n\n model.learn(total_timesteps=n_steps)\n\n evaluate_policy(model, env, n_eval_episodes=5, warn=False)\n\n\ndef test_dict_nested():\n \"\"\"\n Make sure we throw an appropiate error with nested Dict observation spaces\n \"\"\"\n # Test without manual wrapping to vec-env\n env = DummyDictEnv(nested_dict_obs=True)\n\n with pytest.raises(NotImplementedError):\n _ = PPO(\"MultiInputPolicy\", env, seed=1)\n\n # Test with manual vec-env wrapping\n\n with pytest.raises(NotImplementedError):\n env = DummyVecEnv([lambda: DummyDictEnv(nested_dict_obs=True)])\n" ]
[ [ "numpy.allclose" ] ]
OliviaWang123456/ncnet
[ "d45920d57ea1c01befb96785a2f1af8bd50e7390", "d45920d57ea1c01befb96785a2f1af8bd50e7390" ]
[ "lib/pf_dataset.py", "lib/flow.py" ]
[ "from __future__ import print_function, division\nimport os\nimport torch\nfrom torch.autograd import Variable\nfrom skimage import io\nimport pandas as pd\nimport numpy as np\nfrom torch.utils.data import Dataset\nfrom lib.transformation import AffineTnf\n \nclass PFPascalDataset(Dataset):\n \n \"\"\"\n \n Proposal Flow PASCAL image pair dataset\n \n\n Args:\n csv_file (string): Path to the csv file with image names and transformations.\n dataset_path (string): Directory with the images.\n output_size (2-tuple): Desired output size\n transform (callable): Transformation for post-processing the training pair (eg. image normalization)\n \n \"\"\"\n\n def __init__(self, csv_file, dataset_path, output_size=(240,240), transform=None, category=None, pck_procedure='pf'):\n\n self.category_names=['aeroplane','bicycle','bird','boat','bottle','bus','car','cat','chair','cow','diningtable','dog','horse','motorbike','person','pottedplant','sheep','sofa','train','tvmonitor']\n self.out_h, self.out_w = output_size\n self.pairs = pd.read_csv(csv_file)\n self.category = self.pairs.iloc[:,2].values.astype('float') #as_matrix\n if category is not None:\n cat_idx = np.nonzero(self.category==category)[0]\n self.category=self.category[cat_idx]\n self.pairs=self.pairs.iloc[cat_idx,:]\n self.img_A_names = self.pairs.iloc[:,0]\n self.img_B_names = self.pairs.iloc[:,1]\n self.point_A_coords = self.pairs.iloc[:, 3:5]\n self.point_B_coords = self.pairs.iloc[:, 5:]\n self.dataset_path = dataset_path \n self.transform = transform\n # no cuda as dataset is called from CPU threads in dataloader and produces confilct\n self.affineTnf = AffineTnf(out_h=self.out_h, out_w=self.out_w, use_cuda = False) \n self.pck_procedure = pck_procedure\n \n def __len__(self):\n return len(self.pairs)\n\n def __getitem__(self, idx):\n # get pre-processed images\n image_A,im_size_A = self.get_image(self.img_A_names,idx)\n image_B,im_size_B = self.get_image(self.img_B_names,idx)\n\n # get pre-processed point coords\n point_A_coords = self.get_points(self.point_A_coords,idx)\n point_B_coords = self.get_points(self.point_B_coords,idx)\n \n # compute PCK reference length L_pck (equal to max bounding box side in image_A)\n #L_pck = torch.FloatTensor([torch.max(point_A_coords.max(1)[0]-point_A_coords.min(1)[0])])\n N_pts = torch.sum(torch.ne(point_A_coords[0,:],-1))\n\n if self.pck_procedure=='pf':\n L_pck = torch.FloatTensor([torch.max(point_A_coords[:,:N_pts].max(1)[0]-point_A_coords[:,:N_pts].min(1)[0])])\n elif self.pck_procedure=='scnet':\n #modification to follow the evaluation procedure of SCNet\n point_A_coords[0,0:N_pts]=point_A_coords[0,0:N_pts]*224/im_size_A[1]\n point_A_coords[1,0:N_pts]=point_A_coords[1,0:N_pts]*224/im_size_A[0]\n\n point_B_coords[0,0:N_pts]=point_B_coords[0,0:N_pts]*224/im_size_B[1]\n point_B_coords[1,0:N_pts]=point_B_coords[1,0:N_pts]*224/im_size_B[0]\n\n im_size_A[0:2]=torch.FloatTensor([224,224])\n im_size_B[0:2]=torch.FloatTensor([224,224])\n\n L_pck = torch.FloatTensor([224.0])\n \n sample = {'source_image': image_A, 'target_image': image_B, 'source_im_size': im_size_A, 'target_im_size': im_size_B, 'source_points': point_A_coords, 'target_points': point_B_coords, 'L_pck': L_pck}\n \n if self.transform:\n sample = self.transform(sample)\n\n return sample\n\n def get_image(self,img_name_list,idx):\n img_name = os.path.join(self.dataset_path, img_name_list.iloc[idx])\n image = io.imread(img_name)\n \n # get image size\n im_size = np.asarray(image.shape)\n \n # convert to torch Variable\n image = 
np.expand_dims(image.transpose((2,0,1)),0)\n image = torch.Tensor(image.astype(np.float32))\n image_var = Variable(image,requires_grad=False)\n \n # Resize image using bilinear sampling with identity affine tnf\n image = self.affineTnf(image_var).data.squeeze(0)\n \n im_size = torch.Tensor(im_size.astype(np.float32))\n \n return (image, im_size)\n \n def get_points(self,point_coords_list,idx):\n X=np.fromstring(point_coords_list.iloc[idx,0],sep=';')\n Y=np.fromstring(point_coords_list.iloc[idx,1],sep=';')\n Xpad = -np.ones(20); Xpad[:len(X)]=X\n Ypad = -np.ones(20); Ypad[:len(X)]=Y\n point_coords = np.concatenate((Xpad.reshape(1,20),Ypad.reshape(1,20)),axis=0)\n \n # make arrays float tensor for subsequent processing\n point_coords = torch.Tensor(point_coords.astype(np.float32))\n return point_coords\n\n ", "import numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom geotnf.point_tnf import normalize_axis, unnormalize_axis\n\ndef read_flo_file(filename,verbose=False):\n \"\"\"\n Read from .flo optical flow file (Middlebury format)\n :param flow_file: name of the flow file\n :return: optical flow data in matrix\n \n adapted from https://github.com/liruoteng/OpticalFlowToolkit/\n \n \"\"\"\n f = open(filename, 'rb')\n magic = np.fromfile(f, np.float32, count=1)\n data2d = None\n\n if 202021.25 != magic:\n raise TypeError('Magic number incorrect. Invalid .flo file')\n else:\n w = np.fromfile(f, np.int32, count=1)\n h = np.fromfile(f, np.int32, count=1)\n if verbose:\n print(\"Reading %d x %d flow file in .flo format\" % (h, w))\n data2d = np.fromfile(f, np.float32, count=int(2 * w * h))\n # reshape data into 3D array (columns, rows, channels)\n data2d = np.resize(data2d, (h[0], w[0], 2))\n f.close()\n return data2d\n\ndef write_flo_file(flow, filename):\n \"\"\"\n Write optical flow in Middlebury .flo format\n \n :param flow: optical flow map\n :param filename: optical flow file path to be saved\n :return: None\n \n from https://github.com/liruoteng/OpticalFlowToolkit/\n \n \"\"\"\n # forcing conversion to float32 precision\n flow = flow.astype(np.float32)\n f = open(filename, 'wb')\n magic = np.array([202021.25], dtype=np.float32)\n (height, width) = flow.shape[0:2]\n w = np.array([width], dtype=np.int32)\n h = np.array([height], dtype=np.int32)\n magic.tofile(f)\n w.tofile(f)\n h.tofile(f)\n flow.tofile(f)\n f.close()\n\n\ndef warp_image(image, flow):\n \"\"\"\n Warp image (np.ndarray, shape=[h_src,w_src,3]) with flow (np.ndarray, shape=[h_tgt,w_tgt,2])\n \n \"\"\"\n h_src,w_src=image.shape[0],image.shape[1]\n sampling_grid_torch = np_flow_to_th_sampling_grid(flow, h_src, w_src)\n image_torch = Variable(torch.FloatTensor(image.astype(np.float32)).transpose(1,2).transpose(0,1).unsqueeze(0))\n warped_image_torch = F.grid_sample(image_torch, sampling_grid_torch)\n warped_image = warped_image_torch.data.squeeze(0).transpose(0,1).transpose(1,2).numpy().astype(np.uint8)\n return warped_image\n\ndef np_flow_to_th_sampling_grid(flow,h_src,w_src,use_cuda=False):\n h_tgt,w_tgt=flow.shape[0],flow.shape[1]\n grid_x,grid_y = np.meshgrid(range(1,w_tgt+1),range(1,h_tgt+1))\n disp_x=flow[:,:,0]\n disp_y=flow[:,:,1]\n source_x=grid_x+disp_x\n source_y=grid_y+disp_y\n source_x_norm=normalize_axis(source_x,w_src) \n source_y_norm=normalize_axis(source_y,h_src) \n sampling_grid=np.concatenate((np.expand_dims(source_x_norm,2),\n np.expand_dims(source_y_norm,2)),2)\n sampling_grid_torch = Variable(torch.FloatTensor(sampling_grid).unsqueeze(0))\n if use_cuda:\n 
sampling_grid_torch = sampling_grid_torch.cuda()\n return sampling_grid_torch\n\n# def th_sampling_grid_to_np_flow(source_grid,h_src,w_src):\n# batch_size = source_grid.size(0)\n# h_tgt,w_tgt=source_grid.size(1),source_grid.size(2)\n# source_x_norm=source_grid[:,:,:,0]\n# source_y_norm=source_grid[:,:,:,1]\n# source_x=unnormalize_axis(source_x_norm,w_src) \n# source_y=unnormalize_axis(source_y_norm,h_src) \n# source_x=source_x.data.cpu().numpy()\n# source_y=source_y.data.cpu().numpy()\n# grid_x,grid_y = np.meshgrid(range(1,w_tgt+1),range(1,h_tgt+1))\n# grid_x = np.repeat(grid_x,axis=0,repeats=batch_size)\n# grid_y = np.repeat(grid_y,axis=0,repeats=batch_size)\n# disp_x=source_x-grid_x\n# disp_y=source_y-grid_y\n# flow = np.concatenate((np.expand_dims(disp_x,3),np.expand_dims(disp_y,3)),3)\n# return flow\n\ndef th_sampling_grid_to_np_flow(source_grid,h_src,w_src):\n # remove batch dimension\n source_grid = source_grid.squeeze(0)\n # get mask\n in_bound_mask=(source_grid.data[:,:,0]>-1) & (source_grid.data[:,:,0]<1) & (source_grid.data[:,:,1]>-1) & (source_grid.data[:,:,1]<1)\n in_bound_mask=in_bound_mask.cpu().numpy()\n # convert coords\n h_tgt,w_tgt=source_grid.size(0),source_grid.size(1)\n source_x_norm=source_grid[:,:,0]\n source_y_norm=source_grid[:,:,1]\n source_x=unnormalize_axis(source_x_norm,w_src) \n source_y=unnormalize_axis(source_y_norm,h_src) \n source_x=source_x.data.cpu().numpy()\n source_y=source_y.data.cpu().numpy()\n grid_x,grid_y = np.meshgrid(range(1,w_tgt+1),range(1,h_tgt+1))\n disp_x=source_x-grid_x\n disp_y=source_y-grid_y\n # apply mask\n disp_x = disp_x*in_bound_mask+1e10*(1-in_bound_mask)\n disp_y = disp_y*in_bound_mask+1e10*(1-in_bound_mask)\n flow = np.concatenate((np.expand_dims(disp_x,2),np.expand_dims(disp_y,2)),2)\n return flow\n\n" ]
[ [ "pandas.read_csv", "numpy.nonzero", "numpy.asarray", "numpy.ones", "torch.autograd.Variable", "numpy.fromstring", "torch.FloatTensor", "torch.ne" ], [ "numpy.resize", "numpy.expand_dims", "numpy.fromfile", "torch.nn.functional.grid_sample", "torch.FloatTensor", "numpy.array" ] ]
jongtack/tensorflow
[ "2d5f0ac61fe4e4160fbb68d8031418528111dae9", "2d5f0ac61fe4e4160fbb68d8031418528111dae9" ]
[ "tensorflow/python/ops/rnn.py", "tensorflow/python/training/momentum_test.py" ]
[ "# Copyright 2015 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"RNN helpers for TensorFlow models.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import rnn_cell\nfrom tensorflow.python.ops import variable_scope as vs\n\n\ndef rnn(cell, inputs, initial_state=None, dtype=None,\n sequence_length=None, scope=None):\n \"\"\"Creates a recurrent neural network specified by RNNCell \"cell\".\n\n The simplest form of RNN network generated is:\n state = cell.zero_state(...)\n outputs = []\n states = []\n for input_ in inputs:\n output, state = cell(input_, state)\n outputs.append(output)\n states.append(state)\n return (outputs, states)\n\n However, a few other options are available:\n\n An initial state can be provided.\n If sequence_length is provided, dynamic calculation is performed.\n\n Dynamic calculation returns, at time t:\n (t >= max(sequence_length)\n ? (zeros(output_shape), zeros(state_shape))\n : cell(input, state)\n\n Thus saving computational time when unrolling past the max sequence length.\n\n Args:\n cell: An instance of RNNCell.\n inputs: A length T list of inputs, each a tensor of shape\n [batch_size, cell.input_size].\n initial_state: (optional) An initial state for the RNN. This must be\n a tensor of appropriate type and shape [batch_size x cell.state_size].\n dtype: (optional) The data type for the initial state. 
Required if\n initial_state is not provided.\n sequence_length: An int64 vector (tensor) size [batch_size].\n scope: VariableScope for the created subgraph; defaults to \"RNN\".\n\n Returns:\n A pair (outputs, states) where:\n outputs is a length T list of outputs (one for each input)\n states is a length T list of states (one state following each input)\n\n Raises:\n TypeError: If \"cell\" is not an instance of RNNCell.\n ValueError: If inputs is None or an empty list.\n \"\"\"\n\n if not isinstance(cell, rnn_cell.RNNCell):\n raise TypeError(\"cell must be an instance of RNNCell\")\n if not isinstance(inputs, list):\n raise TypeError(\"inputs must be a list\")\n if not inputs:\n raise ValueError(\"inputs must not be empty\")\n\n outputs = []\n states = []\n with vs.variable_scope(scope or \"RNN\"):\n batch_size = array_ops.shape(inputs[0])[0]\n if initial_state is not None:\n state = initial_state\n else:\n if not dtype:\n raise ValueError(\"If no initial_state is provided, dtype must be.\")\n state = cell.zero_state(batch_size, dtype)\n\n if sequence_length: # Prepare variables\n zero_output_state = (\n array_ops.zeros(array_ops.pack([batch_size, cell.output_size]),\n inputs[0].dtype),\n array_ops.zeros(array_ops.pack([batch_size, cell.state_size]),\n state.dtype))\n max_sequence_length = math_ops.reduce_max(sequence_length)\n\n for time, input_ in enumerate(inputs):\n if time > 0: vs.get_variable_scope().reuse_variables()\n # pylint: disable=cell-var-from-loop\n def output_state():\n return cell(input_, state)\n # pylint: enable=cell-var-from-loop\n if sequence_length:\n (output, state) = control_flow_ops.cond(\n time >= max_sequence_length,\n lambda: zero_output_state, output_state)\n else:\n (output, state) = output_state()\n\n outputs.append(output)\n states.append(state)\n\n return (outputs, states)\n\n\ndef state_saving_rnn(cell, inputs, state_saver, state_name,\n sequence_length=None, scope=None):\n \"\"\"RNN that accepts a state saver for time-truncated RNN calculation.\n\n Args:\n cell: An instance of RNNCell.\n inputs: A length T list of inputs, each a tensor of shape\n [batch_size, cell.input_size].\n state_saver: A state saver object with methods `state` and `save_state`.\n state_name: The name to use with the state_saver.\n sequence_length: (optional) An int64 vector (tensor) size [batch_size].\n See the documentation for rnn() for more details about sequence_length.\n scope: VariableScope for the created subgraph; defaults to \"RNN\".\n\n Returns:\n A pair (outputs, states) where:\n outputs is a length T list of outputs (one for each input)\n states is a length T list of states (one state following each input)\n\n Raises:\n TypeError: If \"cell\" is not an instance of RNNCell.\n ValueError: If inputs is None or an empty list.\n \"\"\"\n initial_state = state_saver.state(state_name)\n (outputs, states) = rnn(cell, inputs, initial_state=initial_state,\n sequence_length=sequence_length, scope=scope)\n save_state = state_saver.save_state(state_name, states[-1])\n with ops.control_dependencies([save_state]):\n outputs[-1] = array_ops.identity(outputs[-1])\n\n return (outputs, states)\n\n\ndef _reverse_seq(input_seq, lengths):\n \"\"\"Reverse a list of Tensors up to specified lengths.\n\n Args:\n input_seq: Sequence of seq_len tensors of dimension (batch_size, depth)\n lengths: A tensor of dimension batch_size, containing lengths for each\n sequence in the batch. 
If \"None\" is specified, simply reverses\n the list.\n\n Returns:\n time-reversed sequence\n \"\"\"\n if lengths is None:\n return list(reversed(input_seq))\n\n # Join into (time, batch_size, depth)\n s_joined = array_ops.pack(input_seq)\n # Reverse along dimension 0\n s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)\n # Split again into list\n result = array_ops.unpack(s_reversed)\n return result\n\n\ndef bidirectional_rnn(cell_fw, cell_bw, inputs,\n initial_state_fw=None, initial_state_bw=None,\n dtype=None, sequence_length=None, scope=None):\n \"\"\"Creates a bidirectional recurrent neural network.\n\n Similar to the unidirectional case above (rnn) but takes input and builds\n independent forward and backward RNNs with the final forward and backward\n outputs depth-concatenated, such that the output will have the format\n [time][batch][cell_fw.output_size + cell_bw.output_size]. The input_size of\n forward and backward cell must match. The initial state for both directions\n is zero by default (but can be set optionally) and no intermediate states are\n ever returned -- the network is fully unrolled for the given (passed in)\n length(s) of the sequence(s) or completely unrolled if length(s) is not given.\n\n Args:\n cell_fw: An instance of RNNCell, to be used for forward direction.\n cell_bw: An instance of RNNCell, to be used for backward direction.\n inputs: A length T list of inputs, each a tensor of shape\n [batch_size, cell.input_size].\n initial_state_fw: (optional) An initial state for the forward RNN.\n This must be a tensor of appropriate type and shape\n [batch_size x cell.state_size].\n initial_state_bw: (optional) Same as for initial_state_fw.\n dtype: (optional) The data type for the initial state. Required if either\n of the initial states are not provided.\n sequence_length: (optional) An int64 vector (tensor) of size [batch_size],\n containing the actual lengths for each of the sequences.\n scope: VariableScope for the created subgraph; defaults to \"BiRNN\"\n\n Returns:\n A set of output `Tensors` where:\n outputs is a length T list of outputs (one for each input), which\n are depth-concatenated forward and backward outputs\n\n Raises:\n TypeError: If \"cell_fw\" or \"cell_bw\" is not an instance of RNNCell.\n ValueError: If inputs is None or an empty list.\n \"\"\"\n\n if not isinstance(cell_fw, rnn_cell.RNNCell):\n raise TypeError(\"cell_fw must be an instance of RNNCell\")\n if not isinstance(cell_bw, rnn_cell.RNNCell):\n raise TypeError(\"cell_bw must be an instance of RNNCell\")\n if not isinstance(inputs, list):\n raise TypeError(\"inputs must be a list\")\n if not inputs:\n raise ValueError(\"inputs must not be empty\")\n\n name = scope or \"BiRNN\"\n # Forward direction\n with vs.variable_scope(name + \"_FW\"):\n output_fw, _ = rnn(cell_fw, inputs, initial_state_fw, dtype)\n # Backward direction\n with vs.variable_scope(name + \"_BW\"):\n tmp, _ = rnn(\n cell_bw, _reverse_seq(inputs, sequence_length), initial_state_bw, dtype)\n output_bw = _reverse_seq(tmp, sequence_length)\n # Concat each of the forward/backward outputs\n outputs = [array_ops.concat(1, [fw, bw])\n for fw, bw in zip(output_fw, output_bw)]\n\n return outputs\n", "# Copyright 2015 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for Momentum.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.python.platform\n\nimport numpy as np\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\n\n\nclass MomentumOptimizerTest(tf.test.TestCase):\n\n def testBasic(self):\n with self.test_session():\n var0 = tf.Variable([1.0, 2.0])\n var1 = tf.Variable([3.0, 4.0])\n grads0 = tf.constant([0.1, 0.1])\n grads1 = tf.constant([0.01, 0.01])\n mom_opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9)\n mom_update = mom_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))\n tf.initialize_all_variables().run()\n # Check we have slots\n self.assertEqual([\"momentum\"], mom_opt.get_slot_names())\n slot0 = mom_opt.get_slot(var0, \"momentum\")\n self.assertEquals(slot0.get_shape(), var0.get_shape())\n self.assertFalse(slot0 in tf.trainable_variables())\n slot1 = mom_opt.get_slot(var1, \"momentum\")\n self.assertEquals(slot1.get_shape(), var1.get_shape())\n self.assertFalse(slot1 in tf.trainable_variables())\n\n # Fetch params to validate initial values\n self.assertAllClose([1.0, 2.0], var0.eval())\n self.assertAllClose([3.0, 4.0], var1.eval())\n # Step 1: the momentum accumulators where 0. 
So we should see a normal\n # update: v -= grad * learning_rate\n mom_update.run()\n # Check that the momentum accumulators have been updated.\n self.assertAllClose(np.array([0.1, 0.1]), slot0.eval())\n self.assertAllClose(np.array([0.01, 0.01]), slot1.eval())\n # Check that the parameters have been updated.\n self.assertAllClose(np.array([1.0 - (0.1 * 2.0),\n 2.0 - (0.1 * 2.0)]),\n var0.eval())\n self.assertAllClose(np.array([3.0 - (0.01 * 2.0),\n 4.0 - (0.01 * 2.0)]),\n var1.eval())\n # Step 2: the momentum accumulators contain the previous update.\n mom_update.run()\n # Check that the momentum accumulators have been updated.\n self.assertAllClose(np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),\n slot0.eval())\n self.assertAllClose(np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),\n slot1.eval())\n # Check that the parameters have been updated.\n self.assertAllClose(\n np.array([1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),\n 2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]),\n var0.eval())\n self.assertAllClose(np.array([2.98 - ((0.9 * 0.01 + 0.01) * 2.0),\n 3.98 - ((0.9 * 0.01 + 0.01) * 2.0)]),\n var1.eval())\n\n def testTensorLearningRateAndMomentum(self):\n with self.test_session():\n var0 = tf.Variable([1.0, 2.0])\n var1 = tf.Variable([3.0, 4.0])\n grads0 = tf.constant([0.1, 0.1])\n grads1 = tf.constant([0.01, 0.01])\n mom_opt = tf.train.MomentumOptimizer(\n learning_rate=tf.constant(2.0), momentum=tf.constant(0.9))\n mom_update = mom_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))\n tf.initialize_all_variables().run()\n # Check we have slots\n self.assertEqual([\"momentum\"], mom_opt.get_slot_names())\n slot0 = mom_opt.get_slot(var0, \"momentum\")\n self.assertEquals(slot0.get_shape(), var0.get_shape())\n self.assertFalse(slot0 in tf.trainable_variables())\n slot1 = mom_opt.get_slot(var1, \"momentum\")\n self.assertEquals(slot1.get_shape(), var1.get_shape())\n self.assertFalse(slot1 in tf.trainable_variables())\n\n # Fetch params to validate initial values\n self.assertAllClose([1.0, 2.0], var0.eval())\n self.assertAllClose([3.0, 4.0], var1.eval())\n # Step 1: the momentum accumulators where 0. 
So we should see a normal\n # update: v -= grad * learning_rate\n mom_update.run()\n # Check that the momentum accumulators have been updated.\n self.assertAllClose(np.array([0.1, 0.1]), slot0.eval())\n self.assertAllClose(np.array([0.01, 0.01]), slot1.eval())\n # Check that the parameters have been updated.\n self.assertAllClose(np.array([1.0 - (0.1 * 2.0),\n 2.0 - (0.1 * 2.0)]),\n var0.eval())\n self.assertAllClose(np.array([3.0 - (0.01 * 2.0),\n 4.0 - (0.01 * 2.0)]),\n var1.eval())\n # Step 2: the momentum accumulators contain the previous update.\n mom_update.run()\n # Check that the momentum accumulators have been updated.\n self.assertAllClose(np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),\n slot0.eval())\n self.assertAllClose(np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),\n slot1.eval())\n # Check that the parameters have been updated.\n self.assertAllClose(\n np.array([1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),\n 2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]),\n var0.eval())\n self.assertAllClose(np.array([2.98 - ((0.9 * 0.01 + 0.01) * 2.0),\n 3.98 - ((0.9 * 0.01 + 0.01) * 2.0)]),\n var1.eval())\n\n def testFloat64(self):\n with self.test_session():\n opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9)\n\n # compute_gradients.\n values = [1.0, 3.0]\n good_vars = [tf.Variable([v]) for v in values]\n bad_loss = tf.constant(2.0, tf.float64, name=\"bad_loss\")\n self.assertRaisesRegexp(\n ValueError, r\"Invalid type.*float64.*bad_loss.*expected.*float32\",\n opt.compute_gradients, bad_loss, good_vars)\n bad_vars = [\n tf.Variable(np.array([v], np.float64), name=\"bad_var\")\n for v in values]\n self.assertRaisesRegexp(\n ValueError, r\"Invalid type.*float64.*bad_var.*expected.*float32\",\n opt.compute_gradients, tf.cast(bad_vars[0] + bad_vars[1], tf.float32),\n bad_vars)\n opt.compute_gradients(good_vars[0] + good_vars[1], good_vars)\n\n # apply_gradients.\n bad_grads = [\n tf.constant([0.1], dtype=np.float64, name=\"bad_grad\"),\n tf.constant([0.01])]\n self.assertRaisesRegexp(\n ValueError, r\"Invalid type.*float64.*bad_grad.*expected.*float32\",\n opt.apply_gradients, zip(bad_grads, good_vars))\n good_grads = [tf.constant([0.01]), tf.constant([0.02])]\n self.assertRaisesRegexp(\n ValueError, r\"Invalid type.*float64.*bad_var.*expected.*float32\",\n opt.apply_gradients, zip(good_grads, bad_vars))\n opt.apply_gradients(zip(good_grads, good_vars))\n\n def _dbParamsMom01(self):\n \"\"\"Return dist-belief momentum values.\n\n Return values been generated from the dist-belief momentum unittest,\n running with a learning rate of 0.1 and a momemntum of 0.1.\n\n These values record how a parameter vector of size 10, initialized with 0.0,\n gets updated with 10 consecutive momentum steps. 
It uses random gradients.\n\n Returns:\n db_grad: The gradients to apply\n db_out: The parameters after the momentum update.\n \"\"\"\n db_grad = [[]] * 10\n db_out = [[]] * 10\n # pylint: disable=line-too-long\n db_grad[0] = [0.00096264342, 0.17914793, 0.93945462, 0.41396621, 0.53037018, 0.93197989, 0.78648776, 0.50036013, 0.55345792, 0.96722615]\n db_out[0] = [-9.6264346e-05, -0.017914793, -0.093945466, -0.041396622, -0.053037018, -0.093197994, -0.078648776, -0.050036013, -0.055345792, -0.096722618]\n db_grad[1] = [0.17075552, 0.88821375, 0.20873757, 0.25236958, 0.57578111, 0.15312378, 0.5513742, 0.94687688, 0.16012503, 0.22159521]\n db_out[1] = [-0.017181443, -0.10852765, -0.12421377, -0.070773244, -0.11591884, -0.11783017, -0.14165108, -0.14972731, -0.076892875, -0.1285544]\n db_grad[2] = [0.35077485, 0.47304362, 0.44412705, 0.44368884, 0.078527533, 0.81223965, 0.31168157, 0.43203235, 0.16792089, 0.24644311]\n db_out[2] = [-0.053967446, -0.1648933, -0.1716533, -0.1180798, -0.13005978, -0.20151734, -0.17911947, -0.20289968, -0.095839672, -0.15638189]\n db_grad[3] = [0.9694621, 0.75035888, 0.28171822, 0.83813518, 0.53807181, 0.3728098, 0.81454384, 0.03848977, 0.89759839, 0.93665648]\n db_out[3] = [-0.15459226, -0.24556576, -0.20456907, -0.20662397, -0.18528105, -0.24716705, -0.2643207, -0.21206589, -0.18749419, -0.2528303]\n db_grad[4] = [0.38578293, 0.8536852, 0.88722926, 0.66276771, 0.13678469, 0.94036359, 0.69107032, 0.81897682, 0.5433259, 0.67860287]\n db_out[4] = [-0.20323303, -0.33900154, -0.29658359, -0.28175515, -0.20448165, -0.34576839, -0.34194785, -0.29488021, -0.25099224, -0.33033544]\n db_grad[5] = [0.27885768, 0.76100707, 0.24625534, 0.81354135, 0.18959245, 0.48038563, 0.84163809, 0.41172323, 0.83259648, 0.44941229]\n db_out[5] = [-0.23598288, -0.42444581, -0.33041057, -0.3706224, -0.22536094, -0.40366709, -0.43387437, -0.34433398, -0.34060168, -0.38302717]\n db_grad[6] = [0.27233034, 0.056316052, 0.5039115, 0.24105175, 0.35697976, 0.75913221, 0.73577434, 0.16014607, 0.57500273, 0.071136251]\n db_out[6] = [-0.26649091, -0.43862185, -0.38418442, -0.40361428, -0.26314685, -0.48537019, -0.51664448, -0.36529395, -0.40706289, -0.39540997]\n db_grad[7] = [0.58697265, 0.2494842, 0.08106143, 0.39954534, 0.15892942, 0.12683646, 0.74053431, 0.16033, 0.66625422, 0.73515922]\n db_out[7] = [-0.32823896, -0.46498787, -0.39766794, -0.446868, -0.28281838, -0.50622416, -0.59897494, -0.38342294, -0.48033443, -0.47016418]\n db_grad[8] = [0.8215279, 0.41994119, 0.95172721, 0.68000203, 0.79439718, 0.43384039, 0.55561525, 0.22567581, 0.93331909, 0.29438227]\n db_out[8] = [-0.41656655, -0.50961858, -0.49418902, -0.51919359, -0.36422527, -0.55169362, -0.6627695, -0.40780342, -0.58099347, -0.50707781]\n db_grad[9] = [0.68297005, 0.67758518, 0.1748755, 0.13266537, 0.70697063, 0.055731893, 0.68593478, 0.50580865, 0.12602448, 0.093537711]\n db_out[9] = [-0.49369633, -0.58184016, -0.52132869, -0.5396927, -0.44306302, -0.56181377, -0.73774242, -0.46082234, -0.60366184, -0.52012295]\n # pylint: enable=line-too-long\n return db_grad, db_out\n\n def testLikeDistBeliefMom01(self):\n with self.test_session():\n db_grad, db_out = self._dbParamsMom01()\n num_samples = len(db_grad)\n var0 = tf.Variable([0.0] * num_samples)\n grads0 = tf.constant([0.0] * num_samples)\n mom_opt = tf.train.MomentumOptimizer(learning_rate=0.1, momentum=0.1)\n mom_update = mom_opt.apply_gradients(zip([grads0], [var0]))\n tf.initialize_all_variables().run()\n for i in xrange(num_samples):\n mom_update.run(feed_dict={grads0: 
db_grad[i]})\n self.assertAllClose(np.array(db_out[i]), var0.eval())\n\n def testSparse(self):\n with self.test_session():\n var0 = tf.Variable(tf.zeros([4, 2]))\n var1 = tf.Variable(\n tf.constant(1.0, tf.float32, [4, 2]))\n grads0 = tf.IndexedSlices(tf.constant([[.1, .1]]),\n tf.constant([1]),\n tf.constant([4, 2]))\n grads1 = tf.IndexedSlices(tf.constant([[.01, .01], [.01, .01]]),\n tf.constant([2, 3]),\n tf.constant([4, 2]))\n mom_opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9)\n mom_update = mom_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))\n tf.initialize_all_variables().run()\n\n # Check we have slots\n self.assertEqual([\"momentum\"], mom_opt.get_slot_names())\n slot0 = mom_opt.get_slot(var0, \"momentum\")\n self.assertEquals(slot0.get_shape(), var0.get_shape())\n slot1 = mom_opt.get_slot(var1, \"momentum\")\n self.assertEquals(slot1.get_shape(), var1.get_shape())\n\n # Fetch params to validate initial values\n self.assertAllClose([0, 0], var0.eval()[0])\n self.assertAllClose([0, 0], var0.eval()[1])\n self.assertAllClose([1, 1], var1.eval()[2])\n\n # Step 1: the momentum accumulators are 0. So we should see a normal\n # update: v -= grad * learning_rate\n mom_update.run()\n # Check that the momentum accumulators have been updated.\n self.assertAllClose(np.array([0, 0]), slot0.eval()[0])\n self.assertAllClose(np.array([.1, .1]), slot0.eval()[1])\n self.assertAllClose(np.array([.01, .01]), slot1.eval()[2])\n # Check that the parameters have been updated.\n self.assertAllClose(np.array([0, 0]), var0.eval()[0])\n self.assertAllClose(np.array([- (0.1 * 2.0),\n - (0.1 * 2.0)]),\n var0.eval()[1])\n self.assertAllClose(np.array([1.0 - (0.01 * 2.0),\n 1.0 - (0.01 * 2.0)]),\n var1.eval()[2])\n # Step 2: the momentum accumulators contain the previous update.\n mom_update.run()\n # Check that the momentum accumulators have been updated.\n self.assertAllClose(np.array([0, 0]), slot0.eval()[0])\n self.assertAllClose(np.array([(0.9 * 0.1 + 0.1),\n (0.9 * 0.1 + 0.1)]),\n slot0.eval()[1])\n self.assertAllClose(np.array([(0.9 * 0.01 + 0.01),\n (0.9 * 0.01 + 0.01)]),\n slot1.eval()[2])\n # Check that the parameters have been updated.\n self.assertAllClose(np.array([0, 0]), var0.eval()[0])\n self.assertAllClose(\n np.array([- (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),\n - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]),\n var0.eval()[1])\n self.assertAllClose(np.array([0.98 - ((0.9 * 0.01 + 0.01) * 2.0),\n 0.98 - ((0.9 * 0.01 + 0.01) * 2.0)]),\n var1.eval()[2])\n\n def testSharing(self):\n with self.test_session():\n var0 = tf.Variable([1.0, 2.0])\n var1 = tf.Variable([3.0, 4.0])\n grads0 = tf.constant([0.1, 0.1])\n grads1 = tf.constant([0.01, 0.01])\n mom_opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9)\n mom_update1 = mom_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))\n mom_update2 = mom_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))\n tf.initialize_all_variables().run()\n\n self.assertEqual([\"momentum\"], mom_opt.get_slot_names())\n slot0 = mom_opt.get_slot(var0, \"momentum\")\n self.assertEquals(slot0.get_shape(), var0.get_shape())\n slot1 = mom_opt.get_slot(var1, \"momentum\")\n self.assertEquals(slot1.get_shape(), var1.get_shape())\n\n # Fetch params to validate initial values\n self.assertAllClose([1.0, 2.0], var0.eval())\n self.assertAllClose([3.0, 4.0], var1.eval())\n # Step 1: the momentum accumulators where 0. 
So we should see a normal\n # update: v -= grad * learning_rate\n mom_update1.run()\n # Check that the momentum accumulators have been updated.\n self.assertAllClose(np.array([0.1, 0.1]), slot0.eval())\n self.assertAllClose(np.array([0.01, 0.01]), slot1.eval())\n # Check that the parameters have been updated.\n self.assertAllClose(np.array([1.0 - (0.1 * 2.0),\n 2.0 - (0.1 * 2.0)]),\n var0.eval())\n self.assertAllClose(np.array([3.0 - (0.01 * 2.0),\n 4.0 - (0.01 * 2.0)]),\n var1.eval())\n # Step 2: the second momentum accumulators contain the previous update.\n mom_update2.run()\n # Check that the momentum accumulators have been updated.\n self.assertAllClose(np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),\n slot0.eval())\n self.assertAllClose(np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),\n slot1.eval())\n # Check that the parameters have been updated.\n self.assertAllClose(\n np.array([1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),\n 2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]),\n var0.eval())\n self.assertAllClose(np.array([2.98 - ((0.9 * 0.01 + 0.01) * 2.0),\n 3.98 - ((0.9 * 0.01 + 0.01) * 2.0)]),\n var1.eval())\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n" ]
[ [ "tensorflow.python.ops.array_ops.concat", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.math_ops.reduce_max", "tensorflow.python.ops.control_flow_ops.cond", "tensorflow.python.ops.array_ops.reverse_sequence", "tensorflow.python.ops.variable_scope.get_variable_scope", "tensorflow.python.ops.array_ops.unpack", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.ops.array_ops.pack" ], [ "tensorflow.constant", "tensorflow.Variable", "tensorflow.zeros", "tensorflow.cast", "tensorflow.test.main", "tensorflow.initialize_all_variables", "tensorflow.train.MomentumOptimizer", "tensorflow.trainable_variables", "numpy.array" ] ]
bozhenhhu/gvp-pytorch
[ "82af6b22eaf8311c15733117b0071408d24ed877" ]
[ "run_cpd.py" ]
[ "import argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--models-dir', metavar='PATH', default='./models/',\n help='directory to save trained models, default=./models/')\nparser.add_argument('--num-workers', metavar='N', type=int, default=4,\n help='number of threads for loading data, default=4')\nparser.add_argument('--max-nodes', metavar='N', type=int, default=3000,\n help='max number of nodes per batch, default=3000')\nparser.add_argument('--epochs', metavar='N', type=int, default=100,\n help='training epochs, default=100')\nparser.add_argument('--cath-data', metavar='PATH', default='./data/chain_set.jsonl',\n help='location of CATH dataset, default=./data/chain_set.jsonl')\nparser.add_argument('--cath-splits', metavar='PATH', default='./data/chain_set_splits.json',\n help='location of CATH split file, default=./data/chain_set_splits.json')\nparser.add_argument('--ts50', metavar='PATH', default='./data/ts50.json',\n help='location of TS50 dataset, default=./data/ts50.json')\nparser.add_argument('--train', action=\"store_true\", help=\"train a model\")\nparser.add_argument('--test-r', metavar='PATH', default=None,\n help='evaluate a trained model on recovery (without training)')\nparser.add_argument('--test-p', metavar='PATH', default=None,\n help='evaluate a trained model on perplexity (without training)')\nparser.add_argument('--n-samples', metavar='N', default=100,\n help='number of sequences to sample (if testing recovery), default=100')\n\nargs = parser.parse_args()\nassert sum(map(bool, [args.train, args.test_p, args.test_r])) == 1, \\\n \"Specify exactly one of --train, --test_r, --test_p\"\n\nimport torch\nimport torch.nn as nn\nimport gvp.data, gvp.models\nfrom datetime import datetime\nimport tqdm, os, json\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix\nimport torch_geometric\nfrom functools import partial\nprint = partial(print, flush=True)\n\nnode_dim = (100, 16)\nedge_dim = (32, 1)\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\nif not os.path.exists(args.models_dir): os.makedirs(args.models_dir)\nmodel_id = int(datetime.timestamp(datetime.now()))\ndataloader = lambda x: torch_geometric.data.DataLoader(x, \n num_workers=args.num_workers,\n batch_sampler=gvp.data.BatchSampler(\n x.node_counts, max_nodes=args.max_nodes))\n\ndef main():\n \n model = gvp.models.CPDModel((6, 3), node_dim, (32, 1), edge_dim).to(device)\n \n print(\"Loading CATH dataset\")\n cath = gvp.data.CATHDataset(path=\"data/chain_set.jsonl\",\n splits_path=\"data/chain_set_splits.json\") \n \n trainset, valset, testset = map(gvp.data.ProteinGraphDataset,\n (cath.train, cath.val, cath.test))\n \n if args.test_r or args.test_p:\n ts50set = gvp.data.ProteinGraphDataset(json.load(open(args.ts50)))\n model.load_state_dict(torch.load(args.test_r or args.test_p))\n \n if args.test_r:\n print(\"Testing on CATH testset\"); test_recovery(model, testset)\n print(\"Testing on TS50 set\"); test_recovery(model, ts50set)\n \n elif args.test_p:\n print(\"Testing on CATH testset\"); test_perplexity(model, testset)\n print(\"Testing on TS50 set\"); test_perplexity(model, ts50set)\n \n elif args.train:\n train(model, trainset, valset, testset)\n \n \ndef train(model, trainset, valset, testset):\n train_loader, val_loader, test_loader = map(dataloader,\n (trainset, valset, testset))\n optimizer = torch.optim.Adam(model.parameters())\n best_path, best_val = None, np.inf\n lookup = train_loader.dataset.num_to_letter\n for epoch in range(args.epochs):\n model.train()\n 
loss, acc, confusion = loop(model, train_loader, optimizer=optimizer)\n path = f\"{args.models_dir}/{model_id}_{epoch}.pt\"\n torch.save(model.state_dict(), path)\n print(f'EPOCH {epoch} TRAIN loss: {loss:.4f} acc: {acc:.4f}')\n print_confusion(confusion, lookup=lookup)\n \n model.eval()\n with torch.no_grad():\n loss, acc, confusion = loop(model, val_loader) \n print(f'EPOCH {epoch} VAL loss: {loss:.4f} acc: {acc:.4f}')\n print_confusion(confusion, lookup=lookup)\n \n if loss < best_val:\n best_path, best_val = path, loss\n print(f'BEST {best_path} VAL loss: {best_val:.4f}')\n \n print(f\"TESTING: loading from {best_path}\")\n model.load_state_dict(torch.load(best_path))\n \n model.eval()\n with torch.no_grad():\n loss, acc, confusion = loop(model, test_loader)\n print(f'TEST loss: {loss:.4f} acc: {acc:.4f}')\n print_confusion(confusion,lookup=lookup)\n\ndef test_perplexity(model, dataset):\n model.eval()\n with torch.no_grad():\n loss, acc, confusion = loop(model, dataloader(dataset))\n print(f'TEST perplexity: {np.exp(loss):.4f}')\n print_confusion(confusion, lookup=dataset.num_to_letter)\n\ndef test_recovery(model, dataset):\n recovery = []\n \n for protein in tqdm.tqdm(dataset):\n protein = protein.to(device)\n h_V = (protein.node_s, protein.node_v)\n h_E = (protein.edge_s, protein.edge_v) \n sample = model.sample(h_V, protein.edge_index, \n h_E, n_samples=args.n_samples)\n \n recovery_ = sample.eq(protein.seq).float().mean().cpu().numpy()\n recovery.append(recovery_)\n print(protein.name, recovery_, flush=True)\n\n recovery = np.median(recovery)\n print(f'TEST recovery: {recovery:.4f}')\n \ndef loop(model, dataloader, optimizer=None):\n\n confusion = np.zeros((20, 20))\n t = tqdm.tqdm(dataloader)\n loss_fn = nn.CrossEntropyLoss()\n total_loss, total_correct, total_count = 0, 0, 0\n \n for batch in t:\n if optimizer: optimizer.zero_grad()\n \n batch = batch.to(device)\n h_V = (batch.node_s, batch.node_v)\n h_E = (batch.edge_s, batch.edge_v)\n \n logits = model(h_V, batch.edge_index, h_E, seq=batch.seq)\n logits, seq = logits[batch.mask], batch.seq[batch.mask]\n loss_value = loss_fn(logits, seq)\n\n if optimizer:\n loss_value.backward()\n optimizer.step()\n\n num_nodes = int(batch.mask.sum())\n total_loss += float(loss_value) * num_nodes\n total_count += num_nodes\n pred = torch.argmax(logits, dim=-1).detach().cpu().numpy()\n true = seq.detach().cpu().numpy()\n total_correct += (pred == true).sum()\n confusion += confusion_matrix(true, pred, labels=range(20))\n t.set_description(\"%.5f\" % float(total_loss/total_count))\n \n torch.cuda.empty_cache()\n \n return total_loss / total_count, total_correct / total_count, confusion\n \ndef print_confusion(mat, lookup):\n counts = mat.astype(np.int32)\n mat = (counts.T / counts.sum(axis=-1, keepdims=True).T).T\n mat = np.round(mat * 1000).astype(np.int32)\n res = '\\n'\n for i in range(20):\n res += '\\t{}'.format(lookup[i])\n res += '\\tCount\\n'\n for i in range(20):\n res += '{}\\t'.format(lookup[i])\n res += '\\t'.join('{}'.format(n) for n in mat[i])\n res += '\\t{}\\n'.format(sum(counts[i]))\n print(res)\n \nif __name__== \"__main__\":\n main()" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.load", "numpy.median", "torch.cuda.empty_cache", "numpy.round", "torch.no_grad", "torch.cuda.is_available", "numpy.exp", "numpy.zeros", "torch.argmax" ] ]
italogfernandes/machine-learning
[ "7a0cb2bdf7fcc44dee1241fdf0ff59a68d8e45db" ]
[ "Part 2 - Regression/Section 4 - Simple Linear Regression/simple_linear_regression.py" ]
[ "# Simple Linear Regression\n\n# Importing the libraries\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\n\n# Importing the dataset\ndataset = pd.read_csv('../datasets/Salary_Data.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, -1].values\n\n# Splitting the dataset into the Training set and Test set\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 1.0/3.0, random_state = 0)\n\n# Fitting Simple Linear Regression to the Training set\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)\n\n# Predicting the Test set results\ny_pred = regressor.predict(X_test)\n\n# Visualising the Training set results\nplt.scatter(X_train, y_train, color = 'red')\nplt.plot(X_train, regressor.predict(X_train), color = 'blue')\nplt.title('Salary vs Experience (Training set)')\nplt.xlabel('Years of Experience')\nplt.ylabel('Salary')\nplt.show()\n\n# Visualising the Test set results\nplt.scatter(X_test, y_test, color = 'red')\nplt.plot(X_train, regressor.predict(X_train), color = 'blue')\nplt.title('Salary vs Experience (Test set)')\nplt.xlabel('Years of Experience')\nplt.ylabel('Salary')\nplt.show()\n" ]
[ [ "pandas.read_csv", "matplotlib.pyplot.scatter", "matplotlib.pyplot.title", "sklearn.model_selection.train_test_split", "sklearn.linear_model.LinearRegression", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
FrankZhu7/play-with-data-science
[ "f527c7233fc9f33408e239b03ffd7a699a8b6923" ]
[ "SP500 volatility estimation with machine learning models/dm_test.py" ]
[ "# Author : John Tsang\n# Date : December 7th, 2017\n# Purpose : Implement the Diebold-Mariano Test (DM test) to compare\n# forecast accuracy\n# Input : 1) actual_lst: the list of actual values\n# 2) pred1_lst : the first list of predicted values\n# 3) pred2_lst : the second list of predicted values\n# 4) h : the number of stpes ahead\n# 5) crit : a string specifying the criterion\n# i) MSE : the mean squared error\n# ii) MAD : the mean absolute deviation\n# iii) MAPE : the mean absolute percentage error\n# iv) poly : use power function to weigh the errors\n# 6) poly : the power for crit power\n# (it is only meaningful when crit is \"poly\")\n# Condition: 1) length of actual_lst, pred1_lst and pred2_lst is equal\n# 2) h must be an integer and it must be greater than 0 and less than\n# the length of actual_lst.\n# 3) crit must take the 4 values specified in Input\n# 4) Each value of actual_lst, pred1_lst and pred2_lst must\n# be numerical values. Missing values will not be accepted.\n# 5) power must be a numerical value.\n# Return : a named-tuple of 2 elements\n# 1) p_value : the p-value of the DM test\n# 2) DM : the test statistics of the DM test\n##########################################################\n# References:\n#\n# Harvey, D., Leybourne, S., & Newbold, P. (1997). Testing the equality of\n# prediction mean squared errors. International Journal of forecasting,\n# 13(2), 281-291.\n#\n# Diebold, F. X. and Mariano, R. S. (1995), Comparing predictive accuracy,\n# Journal of business & economic statistics 13(3), 253-264.\n#\n##########################################################\ndef dm_test(actual_lst, pred1_lst, pred2_lst, h=1, crit=\"MSE\", power=2):\n # Routine for checking errors\n def error_check():\n rt = 0\n msg = \"\"\n # Check if h is an integer\n if (not isinstance(h, int)):\n rt = -1\n msg = \"The type of the number of steps ahead (h) is not an integer.\"\n return (rt, msg)\n # Check the range of h\n if (h < 1):\n rt = -1\n msg = \"The number of steps ahead (h) is not large enough.\"\n return (rt, msg)\n len_act = len(actual_lst)\n len_p1 = len(pred1_lst)\n len_p2 = len(pred2_lst)\n # Check if lengths of actual values and predicted values are equal\n if (len_act != len_p1 or len_p1 != len_p2 or len_act != len_p2):\n rt = -1\n msg = \"Lengths of actual_lst, pred1_lst and pred2_lst do not match.\"\n return (rt, msg)\n # Check range of h\n if (h >= len_act):\n rt = -1\n msg = \"The number of steps ahead is too large.\"\n return (rt, msg)\n # Check if criterion supported\n if (crit != \"MSE\" and crit != \"MAPE\" and crit != \"MAD\" and crit != \"poly\"):\n rt = -1\n msg = \"The criterion is not supported.\"\n return (rt, msg)\n # Check if every value of the input lists are numerical values\n from re import compile as re_compile\n comp = re_compile(\"^\\d+?\\.\\d+?$\")\n\n def compiled_regex(s):\n \"\"\" Returns True is string is a number. 
\"\"\"\n if comp.match(s) is None:\n return s.isdigit()\n return True\n\n for actual, pred1, pred2 in zip(actual_lst, pred1_lst, pred2_lst):\n is_actual_ok = compiled_regex(str(abs(actual)))\n is_pred1_ok = compiled_regex(str(abs(pred1)))\n is_pred2_ok = compiled_regex(str(abs(pred2)))\n if (not (is_actual_ok and is_pred1_ok and is_pred2_ok)):\n msg = \"An element in the actual_lst, pred1_lst or pred2_lst is not numeric.\"\n rt = -1\n return (rt, msg)\n return (rt, msg)\n\n # Error check\n error_code = error_check()\n # Raise error if cannot pass error check\n if (error_code[0] == -1):\n raise SyntaxError(error_code[1])\n return\n # Import libraries\n from scipy.stats import t\n import collections\n import pandas as pd\n import numpy as np\n\n # Initialise lists\n e1_lst = []\n e2_lst = []\n d_lst = []\n\n # convert every value of the lists into real values\n actual_lst = pd.Series(actual_lst).apply(lambda x: float(x)).tolist()\n pred1_lst = pd.Series(pred1_lst).apply(lambda x: float(x)).tolist()\n pred2_lst = pd.Series(pred2_lst).apply(lambda x: float(x)).tolist()\n\n # Length of lists (as real numbers)\n T = float(len(actual_lst))\n\n # construct d according to crit\n if (crit == \"MSE\"):\n for actual, p1, p2 in zip(actual_lst, pred1_lst, pred2_lst):\n e1_lst.append((actual - p1) ** 2)\n e2_lst.append((actual - p2) ** 2)\n for e1, e2 in zip(e1_lst, e2_lst):\n d_lst.append(e1 - e2)\n elif (crit == \"MAD\"):\n for actual, p1, p2 in zip(actual_lst, pred1_lst, pred2_lst):\n e1_lst.append(abs(actual - p1))\n e2_lst.append(abs(actual - p2))\n for e1, e2 in zip(e1_lst, e2_lst):\n d_lst.append(e1 - e2)\n elif (crit == \"MAPE\"):\n for actual, p1, p2 in zip(actual_lst, pred1_lst, pred2_lst):\n e1_lst.append(abs((actual - p1) / actual))\n e2_lst.append(abs((actual - p2) / actual))\n for e1, e2 in zip(e1_lst, e2_lst):\n d_lst.append(e1 - e2)\n elif (crit == \"poly\"):\n for actual, p1, p2 in zip(actual_lst, pred1_lst, pred2_lst):\n e1_lst.append(((actual - p1)) ** (power))\n e2_lst.append(((actual - p2)) ** (power))\n for e1, e2 in zip(e1_lst, e2_lst):\n d_lst.append(e1 - e2)\n\n # Mean of d\n mean_d = pd.Series(d_lst).mean()\n\n # Find autocovariance and construct DM test statistics\n def autocovariance(Xi, N, k, Xs):\n autoCov = 0\n T = float(N)\n for i in np.arange(0, N - k):\n autoCov += ((Xi[i + k]) - Xs) * (Xi[i] - Xs)\n return (1 / (T)) * autoCov\n\n gamma = []\n for lag in range(0, h):\n gamma.append(autocovariance(d_lst, len(d_lst), lag, mean_d)) # 0, 1, 2\n V_d = (gamma[0] + 2 * sum(gamma[1:])) / T\n DM_stat = V_d ** (-0.5) * mean_d\n harvey_adj = ((T + 1 - 2 * h + h * (h - 1) / T) / T) ** (0.5)\n DM_stat = harvey_adj * DM_stat\n # Find p-value\n p_value = 2 * t.cdf(-abs(DM_stat), df=T - 1)\n\n # Construct named tuple for return\n dm_return = collections.namedtuple('dm_return', 'DM p_value')\n\n rt = dm_return(DM=DM_stat, p_value=p_value)\n\n return rt" ]
[ [ "numpy.arange", "pandas.Series" ] ]
goktug97/PyYOLO
[ "69c6997e3e3762199ee04e7339725b51059e56f4" ]
[ "pyyolo/yolo.py" ]
[ "#!/usr/bin/env python3\n\nimport cv2\nfrom .cyolo import *\nimport numpy as np\n\n\nclass BBox(np.ndarray):\n def __new__(cls, x, y, w, h, prob, name):\n cls.name = \"\"\n cls.prob = 0\n obj = np.asarray([x, y, w, h]).view(cls)\n obj.x, obj.y, obj.w, obj.h = obj.view()\n obj.name = name\n obj.prob = prob\n return obj\n\n def __str__(self):\n x, y, w, h = self.view()\n string = f'x: {x}, y: {y}, w: {w}, h: {h}, '\n string += f'probability: {self.prob}, name: {self.name}'\n return string\n\n def to_xyxy(self):\n x, y, w, h = self.view()\n return np.array([x, y, x + w, y + h])\n\n def __array_finalize__(self, obj):\n if obj is None: return\n\n\n# https://github.com/AlexeyAB/darknet/blob/master/darknet.py\nclass YOLO(object):\n def __init__(self, config_path,\n weights_path,\n data_path,\n detection_threshold = 0.5,\n hier_threshold = 0.5,\n nms_threshold = 0.5):\n\n if not os.path.exists(config_path):\n raise ValueError(f'Invalid config path: {os.path.abspath(config_path)}')\n if not os.path.exists(weights_path):\n raise ValueError(f'Invalid weight path: {os.path.abspath(weights_path)}')\n if not os.path.exists(data_path):\n raise ValueError(f'Invalid data file path: {os.path.abspath(data_path)}')\n\n self.net_main = load_net_custom(config_path.encode(\"ascii\"),\n weights_path.encode(\"ascii\"),\n 0, 1)\n self.meta_main = load_meta(data_path.encode(\"ascii\"))\n\n self.height = lib.network_height(self.net_main)\n self.width = lib.network_width(self.net_main)\n\n with open(data_path) as metaFH:\n meta_contents = metaFH.read()\n import re\n match = re.search(\"names *= *(.*)$\",\n meta_contents,\n re.IGNORECASE | re.MULTILINE)\n if match:\n result = match.group(1)\n else:\n result = None\n if os.path.exists(result):\n with open(result) as namesFH:\n names_list = namesFH.read().strip().split(\"\\n\")\n self.alt_names = [x.strip() for x in names_list]\n\n self.threshold = detection_threshold\n self.hier_threshold = hier_threshold\n self.nms = nms_threshold\n\n\n def detect(self, image, rgb=False):\n original_h, original_w, _ = image.shape\n image = cv2.resize(image,\n (self.width, self.height),\n interpolation=cv2.INTER_CUBIC)[:,:,::-1]\n if not rgb:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n im, arr = array_to_image(image)\n\n num = c_int(0)\n pnum = pointer(num)\n\n predict_image(self.net_main, im)\n dets = get_network_boxes(\n self.net_main, im.w, im.h,\n self.threshold,\n self.hier_threshold,\n None, 0, pnum, 0)\n num = pnum[0]\n\n if self.nms:\n do_nms_sort(dets, num, self.meta_main.classes, self.nms)\n\n res = []\n for j in range(num):\n for i in range(self.meta_main.classes):\n if dets[j].prob[i] > 0:\n b = dets[j].bbox\n\n # coordinates as percentage\n x = (b.x-b.w/2)/self.width\n y = (b.y-b.h/2)/self.height\n w = b.w/self.width\n h = b.h/self.height\n\n # scale detections to input image\n x = int(round(x*original_w))\n y = int(round(y*original_h))\n w = int(round(w*original_w))\n h = int(round(h*original_h))\n\n bbox = BBox(x, y, w, h, dets[j].prob[i], self.alt_names[i])\n\n res.append(bbox)\n\n free_detections(dets, num)\n return res\n" ]
[ [ "numpy.asarray", "numpy.array" ] ]
emanuelevivoli/CompReGAN
[ "33589c3871bed8adcc157bf25a45b8d12ba1af66" ]
[ "data_utils.py" ]
[ "from os import listdir\nfrom os.path import join\n\nfrom PIL import Image\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data.dataset import Dataset\nfrom torchvision.transforms import Compose, RandomCrop, ToTensor, ToPILImage, CenterCrop, Resize, transforms\nfrom utils.jpeg_layer import jpeg_compression_transform, simg_jpeg_compression, jpeg_compression\n# from utils.custom_trasform import NRandomCrop\n\nfrom numpy import asarray, clip\n\ndef is_image_file(filename):\n return any(filename.endswith(extension) for extension in ['.png', '.jpg', '.jpeg', '.PNG', '.JPG', '.JPEG'])\n\n\ndef calculate_valid_crop_size(crop_size, upscale_factor):\n return crop_size - (crop_size % upscale_factor)\n\n\ndef train_hr_transform(crop_size):\n return Compose([\n RandomCrop(crop_size),\n ToTensor(),\n ])\n\ndef val_hr_transform(crop_size):\n return Compose([\n CenterCrop(crop_size),\n ToTensor(),\n ])\n\n# def train_multiple_hr_transform(crop_size, crop_numb, padding=0):\n# return Compose([\n# NRandomCrop(size=crop_size, n=crop_numb, padding=padding),\n# transforms.Lambda(\n# lambda crops: torch.stack([\n# transforms.ToTensor()(crop)\n# for crop in crops\n# ]))\n# ])\n\ndef jr_transform(quality_factor):\n return Compose([\n jpeg_compression_transform(quality_factor)\n ])\n\n\ndef display_transform():\n return Compose([\n ToPILImage(),\n # Resize(400),\n # CenterCrop(400),\n ToTensor()\n ])\n\n\ndef weight_init(m):\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n torch.nn.init.xavier_uniform_(m.weight)\n if m.bias is not None:\n torch.nn.init.xavier_uniform_(m.bias)\n else:\n m.bias.data.zero_()\n\nclass TrainDatasetFromFolder(Dataset):\n def __init__(self, dataset_dir, crop_size, upscale_factor, quality_factor, train=True, crop_numb=1, padding=0):\n super(TrainDatasetFromFolder, self).__init__()\n self.image_filenames = [join(dataset_dir, x) for x in listdir(dataset_dir) if is_image_file(x)] * crop_numb\n crop_size = calculate_valid_crop_size(crop_size, upscale_factor)\n # self.hr_transform = train_multiple_hr_transform(crop_size, crop_numb, padding)\n self.hr_transform = train_hr_transform(crop_size) if train else val_hr_transform(crop_size)\n self.quality_factor = quality_factor\n # self.jr_transform = jr_transform(quality_factor)\n\n def __getitem__(self, index):\n hr_image = self.hr_transform(Image.open(self.image_filenames[index]))\n jr_image = simg_jpeg_compression(hr_image, self.quality_factor)\n return jr_image, hr_image\n\n def __len__(self):\n return len(self.image_filenames)\n\n\nclass ValDatasetFromFolder(Dataset):\n def __init__(self, dataset_dir, crop_size, upscale_factor, quality_factor):\n super(ValDatasetFromFolder, self).__init__()\n self.upscale_factor = upscale_factor\n self.quality_factor = quality_factor\n self.image_filenames = [join(dataset_dir, x) for x in listdir(dataset_dir) if is_image_file(x)]\n self.crop_size = crop_size\n # self.jr_transform = jr_transform(quality_factor)\n\n def __getitem__(self, index):\n hr_image = Image.open(self.image_filenames[index])\n w, h = hr_image.size\n # crop_size = calculate_valid_crop_size(min(w, h), self.upscale_factor)\n\n hr_image = ToTensor()(CenterCrop(self.crop_size)(hr_image))\n jr_image = simg_jpeg_compression(hr_image, self.quality_factor)\n\n return jr_image, hr_image\n\n def __len__(self):\n return len(self.image_filenames)\n\ndef scalePixels(image):\n pixels = asarray(image.cpu())\n # convert from integers to floats\n pixels = pixels.astype('float32')\n # calculate global mean and standard deviation\n mean, 
std = pixels.mean(), pixels.std()\n print('Mean: %.3f, Standard Deviation: %.3f' % (mean, std))\n # global standardization of pixels\n pixels = (pixels - mean) / std\n # clip pixel values to [-1,1]\n pixels = clip(pixels, -1.0, 1.0)\n print('Min: %.3f, Max: %.3f' % (pixels.min(), pixels.max()))\n return torch.Tensor(pixels).cuda()" ]
[ [ "torch.nn.init.xavier_uniform_", "torch.Tensor", "numpy.clip" ] ]
rishabhsamb/fairlearn
[ "c039a3fb292a57d5d2995ded8400122e4c736985" ]
[ "fairlearn/metrics/_metric_frame.py" ]
[ "# Copyright (c) Microsoft Corporation and Fairlearn contributors.\n# Licensed under the MIT License.\n\nimport copy\nimport logging\nimport numpy as np\nimport pandas as pd\nfrom typing import Any, Callable, Dict, List, Optional, Union\nfrom sklearn.utils import check_consistent_length\nimport warnings\nfrom functools import wraps\n\nfrom fairlearn.metrics._input_manipulations import _convert_to_ndarray_and_squeeze\nfrom ._function_container import FunctionContainer, _SAMPLE_PARAMS_NOT_DICT\nfrom ._group_feature import GroupFeature\n\n\nlogger = logging.getLogger(__name__)\n\n_SUBGROUP_COUNT_WARNING_THRESHOLD = 20\n\n_SF_DICT_CONVERSION_FAILURE = \"DataFrame.from_dict() failed on sensitive features. \" \\\n \"Please ensure each array is strictly 1-D.\"\n_BAD_FEATURE_LENGTH = \"Received a feature of length {0} when length {1} was expected\"\n_SUBGROUP_COUNT_WARNING = \"Found {0} subgroups. Evaluation may be slow\"\n_FEATURE_LIST_NONSCALAR = \"Feature lists must be of scalar types\"\n_FEATURE_DF_COLUMN_BAD_NAME = \"DataFrame column names must be strings. Name '{0}' is of type {1}\"\n_DUPLICATE_FEATURE_NAME = \"Detected duplicate feature name: '{0}'\"\n_TOO_MANY_FEATURE_DIMS = \"Feature array has too many dimensions\"\n_SAMPLE_PARAM_KEYS_NOT_IN_FUNC_DICT = \\\n \"Keys in 'sample_params' do not match those in 'metric'\"\n\n\ndef _deprecate_metric_frame_init(new_metric_frame_init):\n \"\"\"Issue deprecation warnings for the `MetricFrame` constructor.\n\n Decorator to issue warnings if called with positional arguments\n or with the keyword argument `metric` instead of `metrics`.\n\n Parameters\n ----------\n new_metric_frame_init : callable\n New MetricFrame constructor.\n \"\"\"\n\n @wraps(new_metric_frame_init)\n def compatible_metric_frame_init(self, *args, metric=None, **kwargs):\n positional_names = [\"metrics\", \"y_true\", \"y_pred\"]\n version = \"0.10.0\"\n\n positional_dict = dict(zip(positional_names, args))\n\n # If more than 3 positional arguments are provided (apart from self), show\n # the error message applicable to the new constructor implementation (with `self`\n # being the only positional argument).\n if len(args) > 3:\n raise TypeError(f\"{new_metric_frame_init.__name__}() takes 1 positional \"\n f\"argument but {1+len(args)} positional arguments \"\n f\"were given\")\n\n # If 1-3 positional arguments are provided (apart fom self), issue warning.\n if len(args) > 0:\n args_msg = \", \".join([f\"'{name}'\" for name in positional_dict.keys()])\n warnings.warn(f\"You have provided {args_msg} as positional arguments. \"\n f\"Please pass them as keyword arguments. From version \"\n f\"{version} passing them as positional arguments \"\n f\"will result in an error.\",\n FutureWarning)\n\n # If a keyword argument `metric` is provided, issue warning.\n metric_arg_dict = {}\n if metric is not None:\n metric_arg_dict = {\"metrics\": metric}\n warnings.warn(f\"The positional argument 'metric' has been replaced \"\n f\"by a keyword argument 'metrics'. 
\"\n f\"From version {version} passing it as a positional argument \"\n f\"or as a keyword argument 'metric' will result in an error\",\n FutureWarning)\n\n # Call the new constructor with positional arguments passed as keyword arguments\n # and with the `metric` keyword argument renamed to `metrics`.\n new_metric_frame_init(self,\n **metric_arg_dict,\n **positional_dict,\n **kwargs)\n\n return compatible_metric_frame_init\n\n\nclass MetricFrame:\n \"\"\"Collection of disaggregated metric values.\n\n This data structure stores and manipulates disaggregated values for any number of underlying\n metrics. At least one sensitive feature must be supplied, which is used\n to split the data into subgroups. The underlying metric(s) is(are) calculated\n across the entire dataset (made available by the :attr:`.overall` property) and\n for each identified subgroup (made available by the :attr:`.by_group` property).\n\n The only limitations placed on the metric functions are that:\n\n * The first two arguments they take must be ``y_true`` and ``y_pred`` arrays\n * Any other arguments must correspond to sample properties (such as sample weights),\n meaning that their first dimension is the same as that of y_true and y_pred. These\n arguments will be split up along with the ``y_true`` and ``y_pred`` arrays\n\n The interpretation of the ``y_true`` and ``y_pred`` arrays is up to the\n underlying metric - it is perfectly possible to pass in lists of class\n probability tuples. We also support non-scalar return types for the\n metric function (such as confusion matrices) at the current time. However,\n the aggregation functions will not be well defined in this case.\n\n Group fairness metrics are obtained by methods that implement\n various aggregators over group-level metrics, such such as the\n maximum, minimum, or the worst-case difference or ratio.\n\n This data structure also supports the concept of 'control features.' Like the sensitive\n features, control features identify subgroups within the data, but\n aggregations are not performed over the control features. Instead, the\n aggregations produce a result for each subgroup identified by the control\n feature(s). The name 'control features' refers to the statistical practice\n of 'controlling' for a variable.\n\n Parameters\n ----------\n metrics : callable or dict\n The underlying metric functions which are to be calculated. This\n can either be a single metric function or a dictionary of functions.\n These functions must be callable as\n ``fn(y_true, y_pred, **sample_params)``.\n If there are any other arguments required (such as ``beta`` for\n :func:`sklearn.metrics.fbeta_score`) then\n :func:`functools.partial` must be used.\n\n **Note** that the values returned by various members of the class change\n based on whether this argument is a callable or a dictionary of\n callables. 
This distinction remains *even if* the dictionary only\n contains a single entry.\n\n y_true : List, pandas.Series, numpy.ndarray, pandas.DataFrame\n The ground-truth labels (for classification) or target values (for regression).\n\n y_pred : List, pandas.Series, numpy.ndarray, pandas.DataFrame\n The predictions.\n\n sensitive_features : List, pandas.Series, dict of 1d arrays, numpy.ndarray, pandas.DataFrame\n The sensitive features which should be used to create the subgroups.\n At least one sensitive feature must be provided.\n All names (whether on pandas objects or dictionary keys) must be strings.\n We also forbid DataFrames with column names of ``None``.\n For cases where no names are provided we generate names ``sensitive_feature_[n]``.\n\n control_features : List, pandas.Series, dict of 1d arrays, numpy.ndarray, pandas.DataFrame\n Control features are similar to sensitive features, in that they\n divide the input data into subgroups.\n Unlike the sensitive features, aggregations are not performed\n across the control features - for example, the ``overall`` property\n will have one value for each subgroup in the control feature(s),\n rather than a single value for the entire data set.\n Control features can be specified similarly to the sensitive features.\n However, their default names (if none can be identified in the\n input values) are of the format ``control_feature_[n]``.\n\n **Note** the types returned by members of the class vary based on whether\n control features are present.\n\n sample_params : dict\n Parameters for the metric function(s). If there is only one metric function,\n then this is a dictionary of strings and array-like objects, which are split\n alongside the ``y_true`` and ``y_pred`` arrays, and passed to the metric function.\n If there are multiple metric functions (passed as a dictionary), then this is\n a nested dictionary, with the first set of string keys identifying the\n metric function name, with the values being the string-to-array-like dictionaries.\n\n metric : callable or dict\n The underlying metric functions which are to be calculated. This\n can either be a single metric function or a dictionary of functions.\n These functions must be callable as\n ``fn(y_true, y_pred, **sample_params)``.\n If there are any other arguments required (such as ``beta`` for\n :func:`sklearn.metrics.fbeta_score`) then\n :func:`functools.partial` must be used.\n\n .. 
deprecated:: 0.7.0\n `metric` will be removed in version 0.10.0, use `metrics` instead.\n \"\"\"\n\n # The deprecation decorator does two things:\n # (1) turns first three positional arguments into keyword arguments\n # (2) renames the 'metric' keyword argument into 'metrics'\n @_deprecate_metric_frame_init\n def __init__(self,\n *,\n metrics: Union[Callable, Dict[str, Callable]],\n y_true,\n y_pred,\n sensitive_features,\n control_features: Optional = None,\n sample_params: Optional[Union[Dict[str, Any], Dict[str, Dict[str, Any]]]] = None):\n \"\"\"Read a placeholder comment.\"\"\"\n check_consistent_length(y_true, y_pred)\n y_t = _convert_to_ndarray_and_squeeze(y_true)\n y_p = _convert_to_ndarray_and_squeeze(y_pred)\n\n func_dict = self._process_functions(metrics, sample_params)\n\n # Now, prepare the sensitive features\n sf_list = self._process_features(\"sensitive_feature_\", sensitive_features, y_t)\n self._sf_names = [x.name for x in sf_list]\n\n # Prepare the control features\n # Adjust _sf_indices if needed\n cf_list = None\n self._cf_names = None\n if control_features is not None:\n cf_list = self._process_features(\"control_feature_\", control_features, y_t)\n self._cf_names = [x.name for x in cf_list]\n\n # Check for duplicate feature names\n nameset = set()\n namelist = self._sf_names\n if self._cf_names:\n namelist = namelist + self._cf_names\n for name in namelist:\n if name in nameset:\n raise ValueError(_DUPLICATE_FEATURE_NAME.format(name))\n nameset.add(name)\n\n self._overall = self._compute_overall(func_dict, y_t, y_p, cf_list)\n self._by_group = self._compute_by_group(func_dict, y_t, y_p, sf_list, cf_list)\n\n def _compute_overall(self, func_dict, y_true, y_pred, cf_list):\n if cf_list is None:\n result = pd.Series(index=func_dict.keys(), dtype='object')\n for func_name in func_dict:\n metric_value = func_dict[func_name].evaluate_all(y_true, y_pred)\n result[func_name] = metric_value\n else:\n result = self._compute_dataframe_from_rows(func_dict, y_true, y_pred, cf_list)\n return result\n\n def _compute_by_group(self, func_dict, y_true, y_pred, sf_list, cf_list):\n rows = copy.deepcopy(sf_list)\n if cf_list is not None:\n # Prepend the conditional features, so they are 'higher'\n rows = copy.deepcopy(cf_list) + rows\n\n return self._compute_dataframe_from_rows(func_dict, y_true, y_pred, rows)\n\n def _compute_dataframe_from_rows(self, func_dict, y_true, y_pred, rows):\n if len(rows) == 1:\n row_index = pd.Index(data=rows[0].classes, name=rows[0].name)\n else:\n row_index = pd.MultiIndex.from_product([x.classes for x in rows],\n names=[x.name for x in rows])\n\n if len(row_index) > _SUBGROUP_COUNT_WARNING_THRESHOLD:\n msg = _SUBGROUP_COUNT_WARNING.format(len(row_index))\n logger.warning(msg)\n\n result = pd.DataFrame(index=row_index, columns=func_dict.keys())\n for func_name in func_dict:\n for row_curr in row_index:\n mask = None\n if len(rows) > 1:\n mask = self._mask_from_tuple(row_curr, rows)\n else:\n # Have to force row_curr to be an unary tuple\n mask = self._mask_from_tuple((row_curr,), rows)\n\n # Only call the metric function if the mask is non-empty\n if sum(mask) > 0:\n curr_metric = func_dict[func_name].evaluate(y_true, y_pred, mask)\n result[func_name][row_curr] = curr_metric\n return result\n\n @property\n def overall(self) -> Union[Any, pd.Series, pd.DataFrame]:\n \"\"\"Return the underlying metrics evaluated on the whole dataset.\n\n Returns\n -------\n typing.Any or pandas.Series or pandas.DataFrame\n The exact type varies based on whether control 
featuers were\n provided and how the metric functions were specified.\n\n ======== ================ =================================\n Metrics Control Features Result Type\n ======== ================ =================================\n Callable None Return type of callable\n -------- ---------------- ---------------------------------\n Callable Provided Series, indexed by the subgroups\n of the conditional feature(s)\n -------- ---------------- ---------------------------------\n Dict None Series, indexed by the metric\n names\n -------- ---------------- ---------------------------------\n Dict Provided DataFrame. Columns are\n metric names, rows are subgroups\n of conditional feature(s)\n ======== ================ =================================\n\n The distinction applies even if the dictionary contains a\n single metric function. This is to allow for a consistent\n interface when calling programatically, while also reducing\n typing for those using Fairlearn interactively.\n \"\"\"\n if self._user_supplied_callable:\n if self.control_levels:\n return self._overall.iloc[:, 0]\n else:\n return self._overall.iloc[0]\n else:\n return self._overall\n\n @property\n def by_group(self) -> Union[pd.Series, pd.DataFrame]:\n \"\"\"Return the collection of metrics evaluated for each subgroup.\n\n The collection is defined by the combination of classes in the\n sensitive and control features. The exact type depends on\n the specification of the metric function.\n\n Returns\n -------\n pandas.Series or pandas.DataFrame\n When a callable is supplied to the constructor, the result is\n a :class:`pandas.Series`, indexed by the combinations of subgroups\n in the sensitive and control features.\n\n When the metric functions were specified with a dictionary (even\n if the dictionary only has a single entry), then the result is\n a :class:`pandas.DataFrame` with columns named after the metric\n functions, and rows indexed by the combinations of subgroups\n in the sensitive and control features.\n\n If a particular combination of subgroups was not present in the dataset\n (likely to occur as more sensitive and control features\n are specified), then the corresponding entry will be NaN.\n \"\"\"\n if self._user_supplied_callable:\n return self._by_group.iloc[:, 0]\n else:\n return self._by_group\n\n @property\n def control_levels(self) -> List[str]:\n \"\"\"Return a list of feature names which are produced by control features.\n\n If control features are present, then the rows of the :attr:`.by_group`\n property have a :class:`pandas.MultiIndex` index. 
This property\n identifies which elements of that index are control features.\n\n Returns\n -------\n List[str] or None\n List of names, which can be used in calls to\n :meth:`pandas.DataFrame.groupby` etc.\n \"\"\"\n return self._cf_names\n\n @property\n def sensitive_levels(self) -> List[str]:\n \"\"\"Return a list of the feature names which are produced by sensitive features.\n\n In cases where the :attr:`.by_group` property has a :class:`pandas.MultiIndex`\n index, this identifies which elements of the index are sensitive features.\n\n Returns\n -------\n List[str]\n List of names, which can be used in calls to\n :meth:`pandas.DataFrame.groupby` etc.\n \"\"\"\n return self._sf_names\n\n def group_max(self) -> Union[Any, pd.Series, pd.DataFrame]:\n \"\"\"Return the maximum value of the metric over the sensitive features.\n\n This method computes the maximum value over all combinations of\n sensitive features for each underlying metric function in the :attr:`.by_group`\n property (it will only succeed if all the underlying metric\n functions return scalar values). The exact return type depends on\n whether control features are present, and whether the metric functions\n were specified as a single callable or a dictionary.\n\n Returns\n -------\n typing.Any or pandas.Series or pandas.DataFrame\n The maximum value over sensitive features. The exact type\n follows the table in :attr:`.MetricFrame.overall`.\n \"\"\"\n if not self.control_levels:\n result = pd.Series(index=self._by_group.columns, dtype='object')\n for m in result.index:\n max_val = self._by_group[m].max()\n result[m] = max_val\n else:\n result = self._by_group.groupby(level=self.control_levels).max()\n\n if self._user_supplied_callable:\n if self.control_levels:\n return result.iloc[:, 0]\n else:\n return result.iloc[0]\n else:\n return result\n\n def group_min(self) -> Union[Any, pd.Series, pd.DataFrame]:\n \"\"\"Return the minimum value of the metric over the sensitive features.\n\n This method computes the minimum value over all combinations of\n sensitive features for each underlying metric function in the :attr:`.by_group`\n property (it will only succeed if all the underlying metric\n functions return scalar values). The exact return type depends on\n whether control features are present, and whether the metric functions\n were specified as a single callable or a dictionary.\n\n Returns\n -------\n typing.Any pandas.Series or pandas.DataFrame\n The minimum value over sensitive features. 
The exact type\n follows the table in :attr:`.MetricFrame.overall`.\n \"\"\"\n if not self.control_levels:\n result = pd.Series(index=self._by_group.columns, dtype='object')\n for m in result.index:\n min_val = self._by_group[m].min()\n result[m] = min_val\n else:\n result = self._by_group.groupby(level=self.control_levels).min()\n\n if self._user_supplied_callable:\n if self.control_levels:\n return result.iloc[:, 0]\n else:\n return result.iloc[0]\n else:\n return result\n\n def difference(self,\n method: str = 'between_groups') -> Union[Any, pd.Series, pd.DataFrame]:\n \"\"\"Return the maximum absolute difference between groups for each metric.\n\n This method calculates a scalar value for each underlying metric by\n finding the maximum absolute difference between the entries in each\n combination of sensitive features in the :attr:`.by_group` property.\n\n Similar to other methods, the result type varies with the\n specification of the metric functions, and whether control features\n are present or not.\n\n There are two allowed values for the ``method=`` parameter. The\n value ``between_groups`` computes the maximum difference between\n any two pairs of groups in the :attr:`.by_group` property (i.e.\n ``group_max() - group_min()``). Alternatively, ``to_overall``\n computes the difference between each subgroup and the\n corresponding value from :attr:`.overall` (if there are control\n features, then :attr:`.overall` is multivalued for each metric).\n The result is the absolute maximum of these values.\n\n Parameters\n ----------\n method : str\n How to compute the aggregate. Default is :code:`between_groups`\n\n Returns\n -------\n typing.Any or pandas.Series or pandas.DataFrame\n The exact type follows the table in :attr:`.MetricFrame.overall`.\n \"\"\"\n subtrahend = np.nan\n if method == 'between_groups':\n subtrahend = self.group_min()\n elif method == 'to_overall':\n subtrahend = self.overall\n else:\n raise ValueError(\"Unrecognised method '{0}' in difference() call\".format(method))\n\n return (self.by_group - subtrahend).abs().max(level=self.control_levels)\n\n def ratio(self,\n method: str = 'between_groups') -> Union[Any, pd.Series, pd.DataFrame]:\n \"\"\"Return the minimum ratio between groups for each metric.\n\n This method calculates a scalar value for each underlying metric by\n finding the minimum ratio (that is, the ratio is forced to be\n less than unity) between the entries in each\n column of the :attr:`.by_group` property.\n\n Similar to other methods, the result type varies with the\n specification of the metric functions, and whether control features\n are present or not.\n\n There are two allowed values for the ``method=`` parameter. The\n value ``between_groups`` computes the minimum ratio between\n any two pairs of groups in the :attr:`.by_group` property (i.e.\n ``group_min() / group_max()``). Alternatively, ``to_overall``\n computes the ratio between each subgroup and the\n corresponding value from :attr:`.overall` (if there are control\n features, then :attr:`.overall` is multivalued for each metric),\n expressing the ratio as a number less than 1.\n The result is the minimum of these values.\n\n Parameters\n ----------\n method : str\n How to compute the aggregate. 
Default is :code:`between_groups`\n\n Returns\n -------\n typing.Any or pandas.Series or pandas.DataFrame\n The exact type follows the table in :attr:`.MetricFrame.overall`.\n \"\"\"\n result = None\n if method == 'between_groups':\n result = self.group_min() / self.group_max()\n elif method == 'to_overall':\n if self._user_supplied_callable:\n tmp = self.by_group / self.overall\n result = tmp.transform(lambda x: min(x, 1/x)).min(level=self.control_levels)\n else:\n ratios = None\n\n if self.control_levels:\n # It's easiest to give in to the DataFrame columns preference\n ratios = self.by_group.unstack(level=self.control_levels) / \\\n self.overall.unstack(level=self.control_levels)\n else:\n ratios = self.by_group / self.overall\n\n def ratio_sub_one(x):\n if x > 1:\n return 1/x\n else:\n return x\n\n ratios = ratios.apply(lambda x: x.transform(ratio_sub_one))\n if not self.control_levels:\n result = ratios.min()\n else:\n result = ratios.min().unstack(0)\n else:\n raise ValueError(\"Unrecognised method '{0}' in ratio() call\".format(method))\n\n return result\n\n def _process_functions(self, metric, sample_params) -> Dict[str, FunctionContainer]:\n \"\"\"Get the underlying metrics into :class:`fairlearn.metrics.FunctionContainer` objects.\"\"\"\n self._user_supplied_callable = True\n func_dict = dict()\n if isinstance(metric, dict):\n self._user_supplied_callable = False\n s_p = dict()\n if sample_params is not None:\n if not isinstance(sample_params, dict):\n raise ValueError(_SAMPLE_PARAMS_NOT_DICT)\n\n sp_keys = set(sample_params.keys())\n mf_keys = set(metric.keys())\n if not sp_keys.issubset(mf_keys):\n raise ValueError(_SAMPLE_PARAM_KEYS_NOT_IN_FUNC_DICT)\n s_p = sample_params\n\n for name, func in metric.items():\n curr_s_p = None\n if name in s_p:\n curr_s_p = s_p[name]\n fc = FunctionContainer(func, name, curr_s_p)\n func_dict[fc.name_] = fc\n else:\n fc = FunctionContainer(metric, None, sample_params)\n func_dict[fc.name_] = fc\n return func_dict\n\n def _process_features(self, base_name, features, sample_array) -> List[GroupFeature]:\n \"\"\"Extract the features into :class:`fairlearn.metrics.GroupFeature` objects.\"\"\"\n result = []\n\n if isinstance(features, pd.Series):\n check_consistent_length(features, sample_array)\n result.append(GroupFeature(base_name, features, 0, None))\n elif isinstance(features, pd.DataFrame):\n for i in range(len(features.columns)):\n col_name = features.columns[i]\n if not isinstance(col_name, str):\n msg = _FEATURE_DF_COLUMN_BAD_NAME.format(col_name, type(col_name))\n raise ValueError(msg)\n column = features.iloc[:, i]\n check_consistent_length(column, sample_array)\n result.append(GroupFeature(base_name, column, i, None))\n elif isinstance(features, list):\n if np.isscalar(features[0]):\n f_arr = np.atleast_1d(np.squeeze(np.asarray(features)))\n assert len(f_arr.shape) == 1 # Sanity check\n check_consistent_length(f_arr, sample_array)\n result.append(GroupFeature(base_name, f_arr, 0, None))\n else:\n raise ValueError(_FEATURE_LIST_NONSCALAR)\n elif isinstance(features, dict):\n try:\n df = pd.DataFrame.from_dict(features)\n except ValueError as ve:\n raise ValueError(_SF_DICT_CONVERSION_FAILURE) from ve\n for i in range(len(df.columns)):\n col_name = df.columns[i]\n if not isinstance(col_name, str):\n msg = _FEATURE_DF_COLUMN_BAD_NAME.format(col_name, type(col_name))\n raise ValueError(msg)\n column = df.iloc[:, i]\n check_consistent_length(column, sample_array)\n result.append(GroupFeature(base_name, column, i, None))\n else:\n # Need to 
specify dtype to avoid inadvertent type conversions\n f_arr = np.squeeze(np.asarray(features, dtype=object))\n if len(f_arr.shape) == 1:\n check_consistent_length(f_arr, sample_array)\n result.append(GroupFeature(base_name, f_arr, 0, None))\n elif len(f_arr.shape) == 2:\n # Work similarly to pd.DataFrame(data=ndarray)\n for i in range(f_arr.shape[1]):\n col = f_arr[:, i]\n check_consistent_length(col, sample_array)\n result.append(GroupFeature(base_name, col, i, None))\n else:\n raise ValueError(_TOO_MANY_FEATURE_DIMS)\n\n return result\n\n def _mask_from_tuple(self, index_tuple, feature_list) -> np.ndarray:\n \"\"\"Generate a mask for the ``y_true``, ``y_pred`` and ``sample_params`` arrays.\n\n Given a tuple of feature values (which indexes the ``by_groups``\n DataFrame), generate a mask to select the corresponding samples\n from the input\n \"\"\"\n # Following are internal sanity checks\n assert isinstance(index_tuple, tuple)\n assert len(index_tuple) == len(feature_list)\n\n result = feature_list[0].get_mask_for_class(index_tuple[0])\n for i in range(1, len(index_tuple)):\n result = np.logical_and(\n result,\n feature_list[i].get_mask_for_class(index_tuple[i]))\n return result\n" ]
[ [ "pandas.Series", "sklearn.utils.check_consistent_length", "numpy.asarray", "pandas.Index", "pandas.MultiIndex.from_product", "numpy.isscalar", "pandas.DataFrame.from_dict" ] ]
aditya-vikram-parakala/MachineLearning_CSE574
[ "7816ebd6cc342d0c4405d45e771dd50e800c2463", "7816ebd6cc342d0c4405d45e771dd50e800c2463" ]
[ "logreg_hd_concat.py", "logistic_regression_softmax.py" ]
[ "\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport csv \nimport random\nimport math\nimport pandas as pd\n\n\n# In[2]:\n\n\nTrainingPercent = 80 # 80% of raw data \nValidationPercent = 10 # 10% of raw data\nTestPercent = 10 #10% of raw data \nIsSynthetic =False\ndef GenerateRawData(filePath, IsSynthetic): \n dataMatrix = [] \n with open(filePath, 'rU') as fi:\n reader = csv.reader(fi)\n for row in reader:\n dataRow = []\n for column in row:\n dataRow.append(float(column))\n dataMatrix.append(dataRow) \n \n #changedif IsSynthetic == False : #this is for deleting the columns in our data that contains 0's which would not contribute to calculation of the varience and is not invertable.\n changeddataMatrix = np.delete(dataMatrix, [0,10], axis=1)# we deletd 5 cols so left with 41 features out of 46 features.\n dataMatrix = np.transpose(changeddataMatrix) #we transpose the data matrix to simplify the further steps of matrix multiplication \n #print (\"Data Matrix Generated..\")\n return dataMatrix # each data row we have 1x41\n#print(Data_values.shape)\ndef GenerateTrainingDataMatrix(rawData, TrainingPercent = 80):\n T_len = int(math.ceil(len(rawData[0])*0.01*TrainingPercent))\n d2 = rawData[:,0:T_len] # generating the training data matrix\n #print(str(TrainingPercent) + \"% Training Data Generated..\")\n return d2\n\ndef GenerateValData(rawData, ValPercent, TrainingCount): #\n valSize = int(math.ceil(len(rawData[0])*ValPercent*0.01))\n V_End = TrainingCount + valSize\n dataMatrix = rawData[:,TrainingCount+1:V_End]\n #print (str(ValPercent) + \"% Val Data Generated..\") \n return dataMatrix\n\ndef GenerateValTargetVector(rawData, ValPercent, TrainingCount): \n valSize = int(math.ceil(len(rawData)*ValPercent*0.01))\n V_End = TrainingCount + valSize\n t =rawData[TrainingCount+1:V_End]\n #print (str(ValPercent) + \"% Val Target Data Generated..\")\n return t\n#Data_1= GenerateRawData(r'C:\\Users\\aditya vikram\\humandata_X_hd_concat.csv',IsSynthetic=False)\n#X = GenerateTrainingDataMatrix(Data_1, TrainingPercent )\n\ndef GetTargetVector(filePath):\n t = []\n with open(filePath, 'rU') as f:\n reader = csv.reader(f)\n for row in reader: \n t.append(int(row[0]))\n #print(\"Raw Training Generated..\")\n return t # we will get the values \n#target_values =GetTargetVector(r'C:\\Users\\aditya vikram\\humandata_t_hd_concat.csv')\n#y = GenerateValTargetVector(target_values, ValPercent, TrainingCount)\n\ndef GenerateTrainingTarget(rawTraining,TrainingPercent = 80): #given to use 80% of the dataset as training\n TrainingLen = int(math.ceil(len(rawTraining)*(TrainingPercent*0.01))) #calculate the length of target training set\n t = rawTraining[:TrainingLen] # loading the elements till the training length it has only one column\n #print(str(TrainingPercent) + \"% Training Target Generated..\")\n return t \n\n\ndef GenerateValData(rawData, ValPercent, TrainingCount): #\n valSize = int(math.ceil(len(rawData[0])*ValPercent*0.01))\n V_End = TrainingCount + valSize\n dataMatrix = rawData[:,TrainingCount+1:V_End]\n #print (str(ValPercent) + \"% Val Data Generated..\") \n return dataMatrix\n\ndef GenerateValTargetVector(rawData, ValPercent, TrainingCount): \n valSize = int(math.ceil(len(rawData)*ValPercent*0.01))\n V_End = TrainingCount + valSize\n t =rawData[TrainingCount+1:V_End]\n #print (str(ValPercent) + \"% Val Target Data Generated..\")\n return t\n\n\n# In[3]:\n\n\n#TrainingTarget = np.array(GenerateTrainingTarget(RawTarget,TrainingPercent))\n#TrainingData = 
GenerateTrainingDataMatrix(RawData,TrainingPercent)\n\n\n# In[4]:\n\n\nRawTarget = GetTargetVector(r'C:\\Users\\aditya vikram\\humandata_t_hd_concat.csv')\nRawData = GenerateRawData(r'C:\\Users\\aditya vikram\\humandata_X_hd_concat.csv',IsSynthetic)\n#RawData = RawData.loc[:, (~RawData.isin([0])).any(axis=0)]\n#RawData[~np.all(r == 0, axis=1)]\n# preparing the data of taining i.e. training data , training target accordingly\nTrainingTarget = np.array(GenerateTrainingTarget(RawTarget,TrainingPercent))\nTrainingData = GenerateTrainingDataMatrix(RawData,TrainingPercent)\nprint(TrainingTarget.shape)\nprint(TrainingData.shape)\n# preparing the validation data \nValDataAct = np.array(GenerateValTargetVector(RawTarget,ValidationPercent, (len(TrainingTarget))))\nValData = GenerateValData(RawData,ValidationPercent, (len(TrainingTarget)))\nprint(ValDataAct.shape)\nprint(ValData.shape)\n#Preparing the test data \nTestDataAct = np.array(GenerateValTargetVector(RawTarget,TestPercent, (len(TrainingTarget)+len(ValDataAct))))\nTestData = GenerateValData(RawData,TestPercent, (len(TrainingTarget)+len(ValDataAct)))\nprint(ValDataAct.shape)\nprint(ValData.shape)\nX=np.transpose(TrainingData)\nX_val=np.transpose(ValData)\nX_test=np.transpose(TestData)\ny=TrainingTarget\ny_val=ValDataAct\ny_test =TestDataAct\nprint(y.shape)\nprint(y_val.shape)\nprint(y_test.shape)\n\n\n# In[ ]:\n\n\n#source intro to data science website, referenced a part of the code \n\n\n# In[5]:\n\n\nclass LogisticRegression:\n def __init__(self, lr=0.01, num_iter=100000, fit_intercept=True, verbose=False):\n self.lr = lr\n self.num_iter = num_iter\n self.fit_intercept = fit_intercept\n \n def __add_intercept(self, X):\n intercept = np.ones((X.shape[0], 1))\n return np.concatenate((intercept, X), axis=1)\n \n def __sigmoid(self, z):\n return 1 / (1 + np.exp(-z))\n def __loss(self, h, y):\n return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()\n \n def fit(self, X, y):\n if self.fit_intercept:\n X = self.__add_intercept(X)\n \n # weights initialization\n self.theta = np.zeros(X.shape[1])\n \n for i in range(self.num_iter):\n z = np.dot(X, self.theta)\n h = self.__sigmoid(z)\n gradient = np.dot(X.T, (h - y)) / y.size\n self.theta -= self.lr * gradient\n \n #if(self.verbose == True and i % 10000 == 0):\n #z = np.dot(X, self.theta)\n #h = self.__sigmoid(z)\n #print(f'loss: {self.__loss(h, y)} \\t')\n \n def predict_prob(self, X):\n if self.fit_intercept:\n X = self.__add_intercept(X)\n \n return self.__sigmoid(np.dot(X, self.theta))\n \n def predict(self, X, threshold):\n return self.predict_prob(X) >= threshold\n\n\n# In[35]:\n\n\nmodel = LogisticRegression(lr=0.1, num_iter=3000)\nget_ipython().run_line_magic('time', 'model.fit(X, y)')\npreds = model.predict(X, 0.5)\n# accuracy\nprint(\"Accuracy HD TRAIN:\",(preds == y).mean())\n\n\n# In[33]:\n\n\nmodel = LogisticRegression(lr=0.1, num_iter=3000)\nget_ipython().run_line_magic('time', 'model.fit(X_val, y_val)')\npreds = model.predict(X_val, 0.5)\n# accuracy\nprint(\"Accuracy HD VAL:\",(preds == y_val).mean())\n\n\n\n# In[34]:\n\n\nmodel = LogisticRegression(lr=0.1, num_iter=3000)\nget_ipython().run_line_magic('time', 'model.fit(X_test, y_test)')\npreds = model.predict(X_test, 0.5)\n# accuracy\nprint(\"Accuracy HD TEST:\",(preds == y_test).mean())\n\n", "import pickle\nimport gzip\nimport numpy as np\nimport random\nfrom matplotlib import pyplot as plt\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport pandas as pd\nfilename = r'C:\\Users\\aditya 
vikram\\Desktop\\ML_project3\\mnist.pkl.gz'\nf = gzip.open(filename, 'rb')\ntraining_data, validation_data, test_data = pickle.load(f, encoding='latin1')\nf.close()\n#MNIST DATA PREPROCESSING\ntr_values = np.asarray(training_data[0])\ntr_target = np.asarray(training_data[1])\nval_values = np.asarray(validation_data[0])\nval_target = np.asarray(validation_data[1])\ntest_values =np.asarray(test_data[0])\ntest_target = np.asarray(test_data[1])\nX=tr_values\ny=tr_target\n# adding the bias term 1 to both the training data and the testing data\nX = np.insert(X, 0, 1, axis=1)\ntest_values = np.insert(test_values, 0, 1, axis=1)\n#USPS DATA PREPROCESSING\nfrom PIL import Image\nimport os\nimport numpy as np\nUSPSMat = []\nUSPSTar = []\ncurPath = r'C:\\Users\\aditya vikram\\Desktop\\ML_project3\\USPSdata\\USPSdata\\Numerals'\nsavedImg = []\nfor j in range(0,10):\n curFolderPath = curPath + '/' + str(j)\n imgs = os.listdir(curFolderPath)\n for img in imgs:\n curImg = curFolderPath + '/' + img\n if curImg[-3:] == 'png':\n img = Image.open(curImg,'r')\n img = img.resize((28, 28))\n savedImg = img\n imgdata = (255-np.array(img.getdata()))/255\n USPSMat.append(imgdata)\n USPSTar.append(j) \n# conversting list to numpy array for easy calculation \narr_mat = np.asarray(USPSMat)\nUSPS_X = np.reshape(arr_mat,(19999,784))\nUSPS_y = np.array(USPSTar)\nUSPS_X = np.insert(USPS_X, 0, 1, axis=1) # adding the bias term for testing data\n#LOGISTIC REGRESSION \n#softmax function is the activation function we use for multi class classification problem\ndef smax(act):\n exp = np.exp(act)\n prob_val = np.zeros((act.shape))\n for i in range(act.shape[0]):\n #for j in range(act.shape[1]):\n prob_val[i,:]=exp[i,:]/np.sum(exp[i,:])\n return prob_val\n# hot vector representation \ndef one_of_k(y):\n result = np.zeros((y.shape[0],10))\n for i in range(result.shape[0]):\n for j in range(result.shape[1]):\n if(j==(y[i])):\n result[i][j] = 1\n return result\n# calculation of error after each iteration\ndef cal_error(pred_value,t_mat,X):\n t_mat = t_mat.reshape((50000,10))\n temp = np.matmul(X.T,pred_value-t_mat)\n return temp \ntemp1 = []\n# loss clac to know the convergence\ndef loss_calc(pred_value,t_mat):\n log_y = np.log(pred_value)\n loss_val = -(np.sum((t_mat*log_y)))/pred_value.shape[0]\n temp1.append(loss_val)\n return temp1 \ndef logistic_regression():\n num_iter = 500\n k=0\n lparam = 0.9\n LAMBDA= 0.001\n N = X.shape[0] # total number of samples\n wt = np.random.rand(785,10) # initialize random weights\n t_mat = one_of_k(y)\n lr = lparam/N # learning rate\n while(k<num_iter):\n act = np.matmul(X,wt)\n pred_value = smax(act)\n loss_val = loss_calc(pred_value,t_mat) \n gradient = cal_error(pred_value,t_mat,X)\n reg_wt = LAMBDA * wt\n reg_wt[0,:] = 0\n wt =wt - lr *(gradient + reg_wt)\n k+=1\n# plt.plot(loss_val)\n# plt.xlabel('No of Iterations')\n# plt.ylabel('Loss')\n# plt.show()\n return wt # return the optimal weights after calculation\ndef accuracy_cal(X,y):\n wt_new = logistic_regression()\n final_val = smax(np.matmul(X,wt_new))\n pred_out = np.argmax(final_val,axis=1)\n #predicted_value = pd.DataFrame(pred_out)\n #predicted_value.to_csv(r\"C:\\Users\\aditya vikram\\Desktop\\logreg_predvalues_usps.csv\")\n from sklearn.metrics import confusion_matrix\n a = confusion_matrix(y,pred_out) # construct the confusion matrix\n print(\"confusion matrix\",a)\n cnt=0\n for i in range(pred_out.shape[0]):\n if(pred_out[i]==y[i]):\n cnt+=1\n return cnt/(X.shape[0]) # calculating the accuracy\n#MNIST dataset\nacc_mnist= 
accuracy_cal(test_values,test_target)\nprint(\"ACCURACY MNIST: \",acc_mnist)\n#USPS dataset\nacc_usps= accuracy_cal(USPS_X,USPS_y)\nprint(\"ACCURACY USPS: \",acc_usps)\n\n" ]
[ [ "numpy.dot", "numpy.log", "numpy.ones", "numpy.concatenate", "numpy.delete", "numpy.transpose", "numpy.exp", "numpy.zeros" ], [ "numpy.log", "numpy.asarray", "numpy.reshape", "numpy.matmul", "sklearn.metrics.confusion_matrix", "numpy.argmax", "numpy.insert", "numpy.random.rand", "numpy.exp", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
ana-simionescu/ddsp
[ "9f37ff66e79cf912c3377ba1beddb220196aa1a3", "9f37ff66e79cf912c3377ba1beddb220196aa1a3" ]
[ "ddsp/synths_test.py", "ddsp/training/data.py" ]
[ "# Copyright 2020 The DDSP Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Tests for ddsp.synths.\"\"\"\n\nfrom ddsp import core\nfrom ddsp import synths\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\n\nclass AdditiveTest(tf.test.TestCase):\n\n def test_output_shape_is_correct(self):\n synthesizer = synths.Additive(\n n_samples=64000,\n sample_rate=16000,\n scale_fn=None,\n normalize_below_nyquist=True)\n batch_size = 3\n num_frames = 1000\n amp = tf.zeros((batch_size, num_frames, 1), dtype=tf.float32) + 1.0\n harmonic_distribution = tf.zeros(\n (batch_size, num_frames, 16), dtype=tf.float32) + 1.0 / 16\n f0_hz = tf.zeros((batch_size, num_frames, 1), dtype=tf.float32) + 16000\n\n output = synthesizer(amp, harmonic_distribution, f0_hz)\n\n self.assertAllEqual([batch_size, 64000], output.shape.as_list())\n\n\nclass FilteredNoiseTest(tf.test.TestCase):\n\n def test_output_shape_is_correct(self):\n synthesizer = synths.FilteredNoise(n_samples=16000)\n filter_bank_magnitudes = tf.zeros((3, 16000, 100), dtype=tf.float32) + 3.0\n output = synthesizer(filter_bank_magnitudes)\n\n self.assertAllEqual([3, 16000], output.shape.as_list())\n\n\nclass WavetableTest(tf.test.TestCase):\n\n def test_output_shape_is_correct(self):\n synthesizer = synths.Wavetable(\n n_samples=64000,\n sample_rate=16000,\n scale_fn=None)\n batch_size = 3\n num_frames = 1000\n n_wavetable = 1024\n amp = tf.zeros((batch_size, num_frames, 1), dtype=tf.float32) + 1.0\n wavetables = tf.zeros(\n (batch_size, num_frames, n_wavetable), dtype=tf.float32)\n f0_hz = tf.zeros((batch_size, num_frames, 1), dtype=tf.float32) + 440\n\n output = synthesizer(amp, wavetables, f0_hz)\n\n self.assertAllEqual([batch_size, 64000], output.shape.as_list())\n\n\nclass SinusoidalTest(tf.test.TestCase):\n\n def test_output_shape_is_correct(self):\n synthesizer = synths.Sinusoidal(n_samples=32000, sample_rate=16000)\n batch_size = 3\n num_frames = 1000\n n_partials = 10\n amps = tf.zeros((batch_size, num_frames, n_partials),\n dtype=tf.float32)\n freqs = tf.zeros((batch_size, num_frames, n_partials),\n dtype=tf.float32)\n\n output = synthesizer(amps, freqs)\n\n self.assertAllEqual([batch_size, 32000], output.shape.as_list())\n\n def test_frequencies_controls_are_bounded(self):\n depth = 10\n def freq_scale_fn(x):\n return core.frequencies_sigmoid(x, depth=depth, hz_min=0.0, hz_max=8000.0)\n\n synthesizer = synths.Sinusoidal(\n n_samples=32000, sample_rate=16000, freq_scale_fn=freq_scale_fn)\n batch_size = 3\n num_frames = 10\n n_partials = 100\n amps = tf.zeros((batch_size, num_frames, n_partials), dtype=tf.float32)\n freqs = tf.linspace(-100.0, 100.0, n_partials)\n freqs = tf.tile(freqs[tf.newaxis, tf.newaxis, :, tf.newaxis],\n [batch_size, num_frames, 1, depth])\n\n controls = synthesizer.get_controls(amps, freqs)\n freqs = controls['frequencies']\n lt_nyquist = (freqs <= 8000.0)\n gt_zero = (freqs >= 0.0)\n both_conditions = np.logical_and(lt_nyquist, gt_zero)\n\n 
self.assertTrue(np.all(both_conditions))\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2020 The DDSP Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Library of functions to help loading data.\"\"\"\n\nfrom absl import logging\nimport gin\nimport tensorflow.compat.v2 as tf\nimport tensorflow_datasets as tfds\n\n_AUTOTUNE = tf.data.experimental.AUTOTUNE\n\n\n# ---------- Base Class --------------------------------------------------------\nclass DataProvider(object):\n \"\"\"Base class for returning a dataset.\"\"\"\n\n def __init__(self, sample_rate, frame_rate):\n \"\"\"DataProvider constructor.\n\n Args:\n sample_rate: Sample rate of audio in the dataset.\n frame_rate: Frame rate of features in the dataset.\n \"\"\"\n self._sample_rate = sample_rate\n self._frame_rate = frame_rate\n\n @property\n def sample_rate(self):\n \"\"\"Return dataset sample rate, must be defined in the constructor.\"\"\"\n return self._sample_rate\n\n @property\n def frame_rate(self):\n \"\"\"Return dataset feature frame rate, must be defined in the constructor.\"\"\"\n return self._frame_rate\n\n def get_dataset(self, shuffle):\n \"\"\"A method that returns a tf.data.Dataset.\"\"\"\n raise NotImplementedError\n\n def get_batch(self, batch_size, shuffle=True, repeats=-1):\n \"\"\"Read dataset.\n\n Args:\n batch_size: Size of batch.\n shuffle: Whether to shuffle the examples.\n repeats: Number of times to repeat dataset. -1 for endless repeats.\n\n Returns:\n A batched tf.data.Dataset.\n \"\"\"\n dataset = self.get_dataset(shuffle)\n dataset = dataset.repeat(repeats)\n dataset = dataset.batch(batch_size, drop_remainder=True)\n dataset = dataset.prefetch(buffer_size=_AUTOTUNE)\n return dataset\n\n\nclass TfdsProvider(DataProvider):\n \"\"\"Base class for reading datasets from TensorFlow Datasets (TFDS).\"\"\"\n\n def __init__(self, name, split, data_dir, sample_rate, frame_rate):\n \"\"\"TfdsProvider constructor.\n\n Args:\n name: TFDS dataset name (with optional config and version).\n split: Dataset split to use of the TFDS dataset.\n data_dir: The directory to read TFDS datasets from. 
Defaults to\n \"~/tensorflow_datasets\".\n sample_rate: Sample rate of audio in the dataset.\n frame_rate: Frame rate of features in the dataset.\n \"\"\"\n self._name = name\n self._split = split\n self._data_dir = data_dir\n super().__init__(sample_rate, frame_rate)\n\n def get_dataset(self, shuffle=True):\n \"\"\"Read dataset.\n\n Args:\n shuffle: Whether to shuffle the input files.\n\n Returns:\n dataset: A tf.data.Dataset that reads from TFDS.\n \"\"\"\n return tfds.load(\n self._name,\n data_dir=self._data_dir,\n split=self._split,\n shuffle_files=shuffle,\n download=False)\n\n\[email protected]\nclass NSynthTfds(TfdsProvider):\n \"\"\"Parses features in the TFDS NSynth dataset.\n\n If running on Cloud, it is recommended you set `data_dir` to\n 'gs://tfds-data/datasets' to avoid unnecessary downloads.\n \"\"\"\n\n def __init__(self,\n name='nsynth/gansynth_subset.f0_and_loudness:2.3.0',\n split='train',\n data_dir='gs://tfds-data/datasets',\n sample_rate=16000,\n frame_rate=250):\n \"\"\"TfdsProvider constructor.\n\n Args:\n name: TFDS dataset name (with optional config and version).\n split: Dataset split to use of the TFDS dataset.\n data_dir: The directory to read the prepared NSynth dataset from. Defaults\n to the public TFDS GCS bucket.\n sample_rate: Sample rate of audio in the dataset.\n frame_rate: Frame rate of features in the dataset.\n \"\"\"\n if data_dir == 'gs://tfds-data/datasets':\n logging.warning(\n 'Using public TFDS GCS bucket to load NSynth. If not running on '\n 'GCP, this will be very slow, and it is recommended you prepare '\n 'the dataset locally with TFDS and set the data_dir appropriately.')\n super().__init__(name, split, data_dir, sample_rate, frame_rate)\n\n def get_dataset(self, shuffle=True):\n \"\"\"Returns dataset with slight restructuring of feature dictionary.\"\"\"\n def preprocess_ex(ex):\n return {\n 'pitch':\n ex['pitch'],\n 'audio':\n ex['audio'],\n 'instrument_source':\n ex['instrument']['source'],\n 'instrument_family':\n ex['instrument']['family'],\n 'instrument':\n ex['instrument']['label'],\n 'f0_hz':\n ex['f0']['hz'],\n 'f0_confidence':\n ex['f0']['confidence'],\n 'loudness_db':\n ex['loudness']['db'],\n }\n dataset = super().get_dataset(shuffle)\n dataset = dataset.map(preprocess_ex, num_parallel_calls=_AUTOTUNE)\n return dataset\n\n\nclass RecordProvider(DataProvider):\n \"\"\"Class for reading records and returning a dataset.\"\"\"\n\n def __init__(self,\n file_pattern,\n example_secs,\n sample_rate,\n frame_rate,\n data_format_map_fn):\n \"\"\"RecordProvider constructor.\"\"\"\n self._file_pattern = file_pattern or self.default_file_pattern\n self._audio_length = example_secs * sample_rate\n self._feature_length = example_secs * frame_rate\n super().__init__(sample_rate, frame_rate)\n self._data_format_map_fn = data_format_map_fn\n\n @property\n def default_file_pattern(self):\n \"\"\"Used if file_pattern is not provided to constructor.\"\"\"\n raise NotImplementedError(\n 'You must pass a \"file_pattern\" argument to the constructor or '\n 'choose a FileDataProvider with a default_file_pattern.')\n\n def get_dataset(self, shuffle=True):\n \"\"\"Read dataset.\n\n Args:\n shuffle: Whether to shuffle the files.\n\n Returns:\n dataset: A tf.dataset that reads from the TFRecord.\n \"\"\"\n def parse_tfexample(record):\n return tf.io.parse_single_example(record, self.features_dict)\n\n filenames = tf.data.Dataset.list_files(self._file_pattern, shuffle=shuffle)\n dataset = filenames.interleave(\n map_func=self._data_format_map_fn,\n 
cycle_length=40,\n num_parallel_calls=_AUTOTUNE)\n dataset = dataset.map(parse_tfexample, num_parallel_calls=_AUTOTUNE)\n return dataset\n\n @property\n def features_dict(self):\n \"\"\"Dictionary of features to read from dataset.\"\"\"\n return {\n 'audio':\n tf.io.FixedLenFeature([self._audio_length], dtype=tf.float32),\n 'f0_hz':\n tf.io.FixedLenFeature([self._feature_length], dtype=tf.float32),\n 'f0_confidence':\n tf.io.FixedLenFeature([self._feature_length], dtype=tf.float32),\n 'loudness_db':\n tf.io.FixedLenFeature([self._feature_length], dtype=tf.float32),\n }\n\n\[email protected]\nclass TFRecordProvider(RecordProvider):\n \"\"\"Class for reading TFRecords and returning a dataset.\"\"\"\n\n def __init__(self,\n file_pattern=None,\n example_secs=4,\n sample_rate=16000,\n frame_rate=250):\n \"\"\"TFRecordProvider constructor.\"\"\"\n super().__init__(file_pattern, example_secs, sample_rate,\n frame_rate, tf.data.TFRecordDataset)\n\n\n# ------------------------------------------------------------------------------\n# Zipped DataProvider\n# ------------------------------------------------------------------------------\[email protected]\nclass ZippedProvider(DataProvider):\n \"\"\"Combines datasets from two providers with zip.\"\"\"\n\n def __init__(self, data_providers, batch_size_ratios=()):\n \"\"\"Constructor.\n\n Args:\n data_providers: A list of data_providers.\n batch_size_ratios: A list of ratios of batch sizes for each provider.\n These do not need to sum to 1. For example, [2, 1] will produce batches\n with a size ratio of 2 to 1.\n \"\"\"\n # Normalize the ratios.\n if batch_size_ratios:\n # Check lengths match.\n if len(batch_size_ratios) != len(data_providers):\n raise ValueError('List of batch size ratios ({}) must be of the same '\n 'length as the list of data providers ({}) for varying'\n 'batch sizes.'.format(\n len(batch_size_ratios), len(data_providers)))\n total = sum(batch_size_ratios)\n batch_size_ratios = [float(bsr) / total for bsr in batch_size_ratios]\n\n # Make sure all sample rates are the same.\n sample_rates = [dp.sample_rate for dp in data_providers]\n assert len(set(sample_rates)) <= 1\n sample_rate = sample_rates[0]\n\n # Make sure all frame rates are the same.\n frame_rates = [dp.frame_rate for dp in data_providers]\n assert len(set(frame_rates)) <= 1\n frame_rate = frame_rates[0]\n\n super().__init__(sample_rate, frame_rate)\n self._data_providers = data_providers\n self._batch_size_ratios = batch_size_ratios\n\n def get_dataset(self, shuffle=True):\n \"\"\"Read dataset.\n\n Args:\n shuffle: Whether to shuffle the input files.\n\n Returns:\n dataset: A zipped tf.data.Dataset from multiple providers.\n \"\"\"\n datasets = tuple(dp.get_dataset(shuffle) for dp in self._data_providers)\n return tf.data.Dataset.zip(datasets)\n\n def get_batch(self, batch_size, shuffle=True, repeats=-1):\n \"\"\"Read dataset.\n\n Args:\n batch_size: Size of batches, can be a list to have varying batch_sizes.\n shuffle: Whether to shuffle the examples.\n repeats: Number of times to repeat dataset. 
-1 for endless repeats.\n\n Returns:\n A batched tf.data.Dataset.\n \"\"\"\n if not self._batch_size_ratios:\n # One batch size for all datasets ('None' is batch shape).\n return super().get_batch(batch_size)\n\n else:\n # Varying batch sizes (Integer batch shape for each).\n batch_sizes = [int(batch_size * bsr) for bsr in self._batch_size_ratios]\n datasets = tuple(\n dp.get_dataset(shuffle).batch(bs, drop_remainder=True)\n for bs, dp in zip(batch_sizes, self._data_providers))\n dataset = tf.data.Dataset.zip(datasets)\n dataset = dataset.repeat(repeats)\n dataset = dataset.prefetch(buffer_size=_AUTOTUNE)\n return dataset\n\n\n# ------------------------------------------------------------------------------\n# Synthetic Data for TranscribingAutoencoder\n# ------------------------------------------------------------------------------\[email protected]\nclass SyntheticNotes(TFRecordProvider):\n \"\"\"Create self-supervised control signal.\n\n EXPERIMENTAL\n\n Pass file_pattern to tfrecords created by `ddsp_generate_synthetic_data.py`.\n \"\"\"\n\n def __init__(self,\n n_timesteps,\n n_harmonics,\n n_mags,\n file_pattern=None,\n sample_rate=16000):\n self.n_timesteps = n_timesteps\n self.n_harmonics = n_harmonics\n self.n_mags = n_mags\n super().__init__(file_pattern=file_pattern, sample_rate=sample_rate)\n\n @property\n def features_dict(self):\n \"\"\"Dictionary of features to read from dataset.\"\"\"\n return {\n 'f0_hz':\n tf.io.FixedLenFeature([self.n_timesteps, 1], dtype=tf.float32),\n 'harm_amp':\n tf.io.FixedLenFeature([self.n_timesteps, 1], dtype=tf.float32),\n 'harm_dist':\n tf.io.FixedLenFeature(\n [self.n_timesteps, self.n_harmonics], dtype=tf.float32),\n 'sin_amps':\n tf.io.FixedLenFeature(\n [self.n_timesteps, self.n_harmonics], dtype=tf.float32),\n 'sin_freqs':\n tf.io.FixedLenFeature(\n [self.n_timesteps, self.n_harmonics], dtype=tf.float32),\n 'noise_magnitudes':\n tf.io.FixedLenFeature(\n [self.n_timesteps, self.n_mags], dtype=tf.float32),\n }\n\n\n" ]
[ [ "tensorflow.compat.v2.test.main", "numpy.all", "tensorflow.compat.v2.linspace", "tensorflow.compat.v2.zeros", "numpy.logical_and", "tensorflow.compat.v2.tile" ], [ "tensorflow.compat.v2.io.FixedLenFeature", "tensorflow.compat.v2.io.parse_single_example", "tensorflow.compat.v2.data.Dataset.list_files", "tensorflow.compat.v2.data.Dataset.zip" ] ]
loramf/mlforhealthlabpub
[ "aa5a42a4814cf69c8223f27c21324ee39d43c404", "aa5a42a4814cf69c8223f27c21324ee39d43c404", "aa5a42a4814cf69c8223f27c21324ee39d43c404", "aa5a42a4814cf69c8223f27c21324ee39d43c404", "aa5a42a4814cf69c8223f27c21324ee39d43c404", "aa5a42a4814cf69c8223f27c21324ee39d43c404" ]
[ "alg/discriminative-jackknife/utils/parameters.py", "app/hide-and-seek/common/hider/timegan/timegan.py", "alg/compartmental_gp/ModelSelection-Ablation-US.py", "alg/smsdkl/Evaluate.py", "alg/timegan/metrics/discriminative_score_metrics.py", "alg/time_series_deconfounder/rmsn/script_propensity_generation.py" ]
[ "\n# Copyright (c) 2020, Ahmed M. Alaa\n# Licensed under the BSD 3-clause license (see LICENSE.txt)\n\n# ---------------------------------------------------------\n# Helper functions and utilities for deep learning models\n# ---------------------------------------------------------\n\n\nfrom __future__ import absolute_import, division, print_function\n\nimport sys\n\nif not sys.warnoptions:\n import warnings\n warnings.simplefilter(\"ignore\")\n\nimport torch\nfrom torch import nn\n\nfrom influence.influence_utils import *\n\ntorch.manual_seed(1) \n\n\nACTIVATION_DICT = {\"ReLU\": torch.nn.ReLU(), \"Hardtanh\": torch.nn.Hardtanh(),\n \"ReLU6\": torch.nn.ReLU6(), \"Sigmoid\": torch.nn.Sigmoid(),\n \"Tanh\": torch.nn.Tanh(), \"ELU\": torch.nn.ELU(),\n \"CELU\": torch.nn.CELU(), \"SELU\": torch.nn.SELU(), \n \"GLU\": torch.nn.GLU(), \"LeakyReLU\": torch.nn.LeakyReLU(),\n \"LogSigmoid\": torch.nn.LogSigmoid(), \"Softplus\": torch.nn.Softplus()}\n\n\ndef build_architecture(base_model):\n\n modules = []\n\n if base_model.dropout_active:\n\n modules.append(torch.nn.Dropout(p=base_model.dropout_prob))\n\n modules.append(torch.nn.Linear(base_model.n_dim, base_model.num_hidden))\n modules.append(ACTIVATION_DICT[base_model.activation])\n\n for u in range(base_model.num_layers - 1):\n\n if base_model.dropout_active:\n\n modules.append(torch.nn.Dropout(p=base_model.dropout_prob))\n\n modules.append(torch.nn.Linear(base_model.num_hidden, base_model.num_hidden))\n modules.append(ACTIVATION_DICT[base_model.activation])\n\n modules.append(torch.nn.Linear(base_model.num_hidden, base_model.output_size))\n\n _architecture = nn.Sequential(*modules)\n\n return _architecture\n\n\ndef get_number_parameters(model):\n\n params_ = []\n\n for param in model.parameters():\n \n params_.append(param)\n \n return stack_torch_tensors(params_).shape[0] ", "\"\"\"Time-series Generative Adversarial Networks (TimeGAN) Codebase.\n\nReference: Jinsung Yoon, Daniel Jarrett, Mihaela van der Schaar, \n\"Time-series Generative Adversarial Networks,\" \nNeural Information Processing Systems (NeurIPS), 2019.\n\nPaper link: https://papers.nips.cc/paper/8789-time-series-generative-adversarial-networks\n\nLast updated Date: April 24th 2020\nCode author: Jinsung Yoon ([email protected])\n\n-----------------------------\n\ntimegan.py\n\nNote: Use original data as training set to generator synthetic data (time-series)\n\"\"\"\n\n# Necessary Packages\nimport tensorflow as tf\nimport numpy as np\nfrom .utils import ( # pylint: disable=relative-beyond-top-level\n extract_time,\n rnn_cell,\n random_generator,\n batch_generator,\n)\n\n\ndef timegan(ori_data):\n \"\"\"TimeGAN function.\n\n Use original data as training set to generator synthetic data (time-series)\n\n Args:\n - ori_data: original time-series data\n - parameters: TimeGAN network parameters\n\n Returns:\n - generated_data: generated time-series data\n \"\"\"\n parameters = dict()\n parameters[\"module\"] = \"gru\"\n parameters[\"hidden_dim\"] = 10\n parameters[\"num_layer\"] = 3\n parameters[\"iterations\"] = 20000\n parameters[\"batch_size\"] = 128\n\n # Initialization on the Graph\n tf.reset_default_graph()\n\n # Basic Parameters\n no, seq_len, dim = np.asarray(ori_data).shape\n\n # Maximum sequence length and each sequence length\n ori_time, max_seq_len = extract_time(ori_data)\n\n def MinMaxScaler(data):\n \"\"\"Min-Max Normalizer.\n\n Args:\n - data: raw data\n\n Returns:\n - norm_data: normalized data\n - min_val: minimum values (for renormalization)\n - max_val: maximum 
values (for renormalization)\n \"\"\"\n min_val = np.min(np.min(data, axis=0), axis=0)\n data = data - min_val\n\n max_val = np.max(np.max(data, axis=0), axis=0)\n norm_data = data / (max_val + 1e-7)\n\n return norm_data, min_val, max_val\n\n # Normalization\n ori_data, min_val, max_val = MinMaxScaler(ori_data)\n\n ## Build a RNN networks\n\n # Network Parameters\n hidden_dim = parameters[\"hidden_dim\"]\n num_layers = parameters[\"num_layer\"]\n iterations = parameters[\"iterations\"]\n batch_size = parameters[\"batch_size\"]\n module_name = parameters[\"module\"]\n z_dim = dim\n gamma = 1\n\n batch_size = ori_data.shape[0] if ori_data.shape[0] < batch_size else batch_size\n\n # Input place holders\n X = tf.placeholder(tf.float32, [None, max_seq_len, dim], name=\"myinput_x\")\n Z = tf.placeholder(tf.float32, [None, max_seq_len, z_dim], name=\"myinput_z\")\n T = tf.placeholder(tf.int32, [None], name=\"myinput_t\")\n\n def embedder(X, T):\n \"\"\"Embedding network between original feature space to latent space.\n\n Args:\n - X: input time-series features\n - T: input time information\n\n Returns:\n - H: embeddings\n \"\"\"\n with tf.variable_scope(\"embedder\", reuse=tf.AUTO_REUSE):\n e_cell = tf.nn.rnn_cell.MultiRNNCell([rnn_cell(module_name, hidden_dim) for _ in range(num_layers)])\n e_outputs, e_last_states = tf.nn.dynamic_rnn(e_cell, X, dtype=tf.float32, sequence_length=T)\n H = tf.contrib.layers.fully_connected(e_outputs, hidden_dim, activation_fn=tf.nn.sigmoid)\n return H\n\n def recovery(H, T):\n \"\"\"Recovery network from latent space to original space.\n\n Args:\n - H: latent representation\n - T: input time information\n\n Returns:\n - X_tilde: recovered data\n \"\"\"\n with tf.variable_scope(\"recovery\", reuse=tf.AUTO_REUSE):\n r_cell = tf.nn.rnn_cell.MultiRNNCell([rnn_cell(module_name, hidden_dim) for _ in range(num_layers)])\n r_outputs, r_last_states = tf.nn.dynamic_rnn(r_cell, H, dtype=tf.float32, sequence_length=T)\n X_tilde = tf.contrib.layers.fully_connected(r_outputs, dim, activation_fn=None)\n return X_tilde\n\n def generator(Z, T):\n \"\"\"Generator function: Generate time-series data in latent space.\n\n Args:\n - Z: random variables\n - T: input time information\n\n Returns:\n - E: generated embedding\n \"\"\"\n with tf.variable_scope(\"generator\", reuse=tf.AUTO_REUSE):\n e_cell = tf.nn.rnn_cell.MultiRNNCell([rnn_cell(module_name, hidden_dim) for _ in range(num_layers)])\n e_outputs, e_last_states = tf.nn.dynamic_rnn(e_cell, Z, dtype=tf.float32, sequence_length=T)\n E = tf.contrib.layers.fully_connected(e_outputs, hidden_dim, activation_fn=tf.nn.sigmoid)\n return E\n\n def supervisor(H, T):\n \"\"\"Generate next sequence using the previous sequence.\n\n Args:\n - H: latent representation\n - T: input time information\n\n Returns:\n - S: generated sequence based on the latent representations generated by the generator\n \"\"\"\n with tf.variable_scope(\"supervisor\", reuse=tf.AUTO_REUSE):\n e_cell = tf.nn.rnn_cell.MultiRNNCell([rnn_cell(module_name, hidden_dim) for _ in range(num_layers - 1)])\n e_outputs, e_last_states = tf.nn.dynamic_rnn(e_cell, H, dtype=tf.float32, sequence_length=T)\n S = tf.contrib.layers.fully_connected(e_outputs, hidden_dim, activation_fn=tf.nn.sigmoid)\n return S\n\n def discriminator(H, T):\n \"\"\"Discriminate the original and synthetic time-series data.\n\n Args:\n - H: latent representation\n - T: input time information\n\n Returns:\n - Y_hat: classification results between original and synthetic time-series\n \"\"\"\n with 
tf.variable_scope(\"discriminator\", reuse=tf.AUTO_REUSE):\n d_cell = tf.nn.rnn_cell.MultiRNNCell([rnn_cell(module_name, hidden_dim) for _ in range(num_layers)])\n d_outputs, d_last_states = tf.nn.dynamic_rnn(d_cell, H, dtype=tf.float32, sequence_length=T)\n Y_hat = tf.contrib.layers.fully_connected(d_outputs, 1, activation_fn=None)\n return Y_hat\n\n # Embedder & Recovery\n H = embedder(X, T)\n X_tilde = recovery(H, T)\n\n # Generator\n E_hat = generator(Z, T)\n H_hat = supervisor(E_hat, T)\n H_hat_supervise = supervisor(H, T)\n\n # Synthetic data\n X_hat = recovery(H_hat, T)\n\n # Discriminator\n Y_fake = discriminator(H_hat, T)\n Y_real = discriminator(H, T)\n Y_fake_e = discriminator(E_hat, T)\n\n # Variables\n e_vars = [v for v in tf.trainable_variables() if v.name.startswith(\"embedder\")]\n r_vars = [v for v in tf.trainable_variables() if v.name.startswith(\"recovery\")]\n g_vars = [v for v in tf.trainable_variables() if v.name.startswith(\"generator\")]\n s_vars = [v for v in tf.trainable_variables() if v.name.startswith(\"supervisor\")]\n d_vars = [v for v in tf.trainable_variables() if v.name.startswith(\"discriminator\")]\n\n # Discriminator loss\n D_loss_real = tf.losses.sigmoid_cross_entropy(tf.ones_like(Y_real), Y_real)\n D_loss_fake = tf.losses.sigmoid_cross_entropy(tf.zeros_like(Y_fake), Y_fake)\n D_loss_fake_e = tf.losses.sigmoid_cross_entropy(tf.zeros_like(Y_fake_e), Y_fake_e)\n D_loss = D_loss_real + D_loss_fake + gamma * D_loss_fake_e\n\n # Generator loss\n # 1. Adversarial loss\n G_loss_U = tf.losses.sigmoid_cross_entropy(tf.ones_like(Y_fake), Y_fake)\n G_loss_U_e = tf.losses.sigmoid_cross_entropy(tf.ones_like(Y_fake_e), Y_fake_e)\n\n # 2. Supervised loss\n G_loss_S = tf.losses.mean_squared_error(H[:, 1:, :], H_hat_supervise[:, :-1, :])\n\n # 3. Two Moments\n G_loss_V1 = tf.reduce_mean(\n tf.abs(tf.sqrt(tf.nn.moments(X_hat, [0])[1] + 1e-6) - tf.sqrt(tf.nn.moments(X, [0])[1] + 1e-6))\n )\n G_loss_V2 = tf.reduce_mean(tf.abs((tf.nn.moments(X_hat, [0])[0]) - (tf.nn.moments(X, [0])[0])))\n\n G_loss_V = G_loss_V1 + G_loss_V2\n\n # 4. Summation\n G_loss = G_loss_U + gamma * G_loss_U_e + 100 * tf.sqrt(G_loss_S) + 100 * G_loss_V\n\n # Embedder network loss\n E_loss_T0 = tf.losses.mean_squared_error(X, X_tilde)\n E_loss0 = 10 * tf.sqrt(E_loss_T0)\n E_loss = E_loss0 + 0.1 * G_loss_S\n\n # optimizer\n E0_solver = tf.train.AdamOptimizer().minimize(E_loss0, var_list=e_vars + r_vars)\n E_solver = tf.train.AdamOptimizer().minimize(E_loss, var_list=e_vars + r_vars)\n D_solver = tf.train.AdamOptimizer().minimize(D_loss, var_list=d_vars)\n G_solver = tf.train.AdamOptimizer().minimize(G_loss, var_list=g_vars + s_vars)\n GS_solver = tf.train.AdamOptimizer().minimize(G_loss_S, var_list=g_vars + s_vars)\n\n ## TimeGAN training\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n\n # 1. Embedding network training\n print(\"Start Embedding Network Training\")\n\n for itt in range(iterations):\n # Set mini-batch\n X_mb, T_mb = batch_generator(ori_data, ori_time, batch_size)\n # Train embedder\n _, step_e_loss = sess.run([E0_solver, E_loss_T0], feed_dict={X: X_mb, T: T_mb})\n # Checkpoint\n if itt % 1000 == 0:\n print(\"step: \" + str(itt) + \"/\" + str(iterations) + \", e_loss: \" + str(np.round(np.sqrt(step_e_loss), 4)))\n\n print(\"Finish Embedding Network Training\")\n\n # 2. 
Training only with supervised loss\n print(\"Start Training with Supervised Loss Only\")\n\n for itt in range(iterations):\n # Set mini-batch\n X_mb, T_mb = batch_generator(ori_data, ori_time, batch_size)\n # Random vector generation\n Z_mb = random_generator(batch_size, z_dim, T_mb, max_seq_len)\n # Train generator\n _, step_g_loss_s = sess.run([GS_solver, G_loss_S], feed_dict={Z: Z_mb, X: X_mb, T: T_mb})\n # Checkpoint\n if itt % 1000 == 0:\n print(\"step: \" + str(itt) + \"/\" + str(iterations) + \", s_loss: \" + str(np.round(np.sqrt(step_g_loss_s), 4)))\n\n print(\"Finish Training with Supervised Loss Only\")\n\n # 3. Joint Training\n print(\"Start Joint Training\")\n\n for itt in range(iterations):\n # Generator training (twice more than discriminator training)\n for kk in range(2):\n # Set mini-batch\n X_mb, T_mb = batch_generator(ori_data, ori_time, batch_size)\n # Random vector generation\n Z_mb = random_generator(batch_size, z_dim, T_mb, max_seq_len)\n # Train generator\n _, step_g_loss_u, step_g_loss_s, step_g_loss_v = sess.run(\n [G_solver, G_loss_U, G_loss_S, G_loss_V], feed_dict={Z: Z_mb, X: X_mb, T: T_mb}\n )\n # Train embedder\n _, step_e_loss_t0 = sess.run([E_solver, E_loss_T0], feed_dict={Z: Z_mb, X: X_mb, T: T_mb})\n\n # Discriminator training\n # Set mini-batch\n X_mb, T_mb = batch_generator(ori_data, ori_time, batch_size)\n # Random vector generation\n Z_mb = random_generator(batch_size, z_dim, T_mb, max_seq_len)\n # Check discriminator loss before updating\n check_d_loss = sess.run(D_loss, feed_dict={X: X_mb, T: T_mb, Z: Z_mb})\n # Train discriminator (only when the discriminator does not work well)\n if check_d_loss > 0.15:\n _, step_d_loss = sess.run([D_solver, D_loss], feed_dict={X: X_mb, T: T_mb, Z: Z_mb})\n\n # Print multiple checkpoints\n if itt % 1000 == 0:\n print(\n \"step: \"\n + str(itt)\n + \"/\"\n + str(iterations)\n + \", d_loss: \"\n + str(np.round(step_d_loss, 4))\n + \", g_loss_u: \"\n + str(np.round(step_g_loss_u, 4))\n + \", g_loss_s: \"\n + str(np.round(np.sqrt(step_g_loss_s), 4))\n + \", g_loss_v: \"\n + str(np.round(step_g_loss_v, 4))\n + \", e_loss_t0: \"\n + str(np.round(np.sqrt(step_e_loss_t0), 4))\n )\n print(\"Finish Joint Training\")\n\n ## Synthetic data generation\n Z_mb = random_generator(no, z_dim, ori_time, max_seq_len)\n generated_data_curr = sess.run(X_hat, feed_dict={Z: Z_mb, X: ori_data, T: ori_time})\n\n generated_data = list()\n\n for i in range(no):\n temp = generated_data_curr[i, : ori_time[i], :]\n generated_data.append(temp)\n\n # Renormalization\n generated_data = generated_data * max_val\n generated_data = generated_data + min_val\n\n return generated_data\n", "#!/usr/bin/env python\n# coding: utf-8\n\n\nimport pickle\n\nimport numpy as np\nimport pandas as pds\nimport torch\nfrom pyro.ops.stats import quantile\n\nimport data_loader\nimport pyro_model.helper\n\n# ## loading data\n\nprefix = 'trained_models/'\n\ncountries = [\n 'US',\n]\n\npad = 24\n\ndata_dict = data_loader.get_data_pyro(countries, smart_start=False, pad=pad)\ndata_dict = pyro_model.helper.smooth_daily(data_dict)\n\ndf_list = []\n\n\nfor days in [14, 28, 42]:\n train_len = data_dict['cum_death'].shape[0] - days\n\n\n test_dates = data_dict['date_list'][train_len:]\n\n len(data_dict['date_list'][train_len:])\n\n # ## loading results\n\n\n predictive_list = []\n samples_list = []\n\n for seed in range(10):\n model_id = 'ablation-day-{}-rng-{}'.format(days, seed)\n try:\n with open(prefix + 'AblationLoop{}/{}-predictive.pkl'.format(days, model_id), 'rb') as 
f:\n predictive = pickle.load(f)\n except Exception:\n continue\n predictive_list.append(predictive)\n\n with open(prefix + 'AblationLoop{}/{}-samples.pkl'.format(days, model_id), 'rb') as f:\n samples = pickle.load(f)\n samples_list.append(samples)\n\n print(len(predictive_list))\n\n # validation accuracy\n val_window = 14\n\n seir_error_list = []\n\n for i in range(len(predictive_list)):\n seir_train = quantile(predictive_list[i]['prediction'].squeeze(), 0.5, dim=0)[-val_window + 1:].numpy()\n seir_train = np.diff(seir_train, axis=0)\n seir_label = data_dict['daily_death'][train_len - val_window:train_len, :].numpy()\n\n seir_error = np.abs(np.sum(seir_train, axis=0) - np.sum(seir_label, axis=0))\n seir_error_list.append(seir_error)\n\n seir_error = np.stack(seir_error_list, axis=0)\n best_model = np.argmin(seir_error, axis=0)\n\n for test_len in [7, 14]:\n\n best_error_list = []\n\n test_len = test_len - 1\n for j, i in zip(range(len(countries)), best_model):\n c = countries[j]\n samples = samples_list[i]\n p50 = quantile(torch.tensor(samples), 0.5, dim=0)[1:, :]\n pred = p50[test_len, :]\n truth = data_dict['actual_cum_death'][test_len - days]\n err = (pred[j] - truth[j]).item()\n\n best_error_list.append(err)\n\n test_date = data_dict['date_list'][test_len - days].date()\n\n train_end_date = data_dict['date_list'][train_len].date()\n\n df_save = pds.DataFrame({'countries': countries, 'best_err': best_error_list, 'best_model': best_model})\n df_save['window'] = test_len + 1\n if days == 14:\n df_save['fcst_date'] = 'apr25'\n elif days == 28:\n df_save['fcst_date'] = 'apr11'\n else:\n df_save['fcst_date'] = 'mar28'\n\n df_list.append(df_save)\n\ndf_export = pds.concat(df_list)\ndf_export.to_csv('tables/Table-1-cgp-ablation-us.csv')\n", "import random\nimport time\nfrom copy import deepcopy\n\nfrom keras import backend as K\n\nK.tensorflow_backend._get_available_gpus()\nimport numpy as np\nfrom keras.layers import Dense\nfrom keras.regularizers import l2\nimport keras\nfrom keras.models import Model\nfrom keras.layers import Input, LSTM\n\n\ndef error_function(BO_output):\n\n rmse1 = np.min(np.mean(BO_output, axis=1), axis=0)\n rmse2 = np.mean(np.min(BO_output, axis=0))\n\n return rmse1,rmse2\n\n\n\ndef get_opt_domain():\n\n domain = get_hyperparameter_space()\n\n dim = len(domain)\n\n bounds = []\n bounds_type = []\n for i_domain in domain:\n bounds.append([i_domain['domain'][0], i_domain['domain'][-1]])\n bounds_type.append(i_domain['type'])\n\n bb = [bounds, bounds_type]\n return domain, dim, bb\n\n\n\n\ndef init_random_uniform(domain, n_points=25000,initial=False):\n\n\n list = []\n\n for k in range(int(n_points)):\n\n if initial:\n random.seed(k)\n else:\n random.seed(time.time())\n\n\n list_i = []\n for i_domain in domain:\n\n if i_domain['type'] == 'continuous':\n kk = float(random.uniform(i_domain['domain'][0], i_domain['domain'][1]))\n list_i.append(kk)\n else:\n list_i.append(int(random.sample(i_domain['domain'], 1)[0]))\n\n list.append(list_i)\n\n\n return list\n\n\ndef min_list(obs):\n\n obs = -obs[:,-1]\n leng = len(obs)\n list = []\n a = obs[0]\n list.append(a)\n for i in range(1, leng):\n if obs[i] <= a:\n list.append(obs[i])\n a = deepcopy(obs[i])\n else:\n list.append(a)\n return list\n\n\n\ndef get_hyperparameter_space():\n hyp_ = [{'name': 'RNN.hidden_size', 'type': 'discrete', 'domain':list(range(10, 251, 1)), 'dimensionality': 1},\n {'name': 'RNN.dropout_rate', 'type': 'discrete', 'domain': list(range(10, 91, 1)), 'dimensionality': 1},\n {'name': 'RNN.l2', 'type': 
'continuous', 'domain': [-20, 1], 'dimensionality': 1},\n {'name': 'RNN.num_epoch', 'type': 'discrete', 'domain': list(range(10, 101, 1)), 'dimensionality': 1},\n {'name': 'RNN.batch_size', 'type': 'discrete', 'domain': list(range(10, 101, 1)), 'dimensionality': 1},\n {'name': 'RNN.recurrent_dropout_rate', 'type': 'discrete', 'domain': list(range(10, 91, 5)), 'dimensionality': 1}]\n\n return hyp_\n\n\ndef evaluate(train_X, test_X, train_y, test_y, list_domain):\n\n\n\n performance_list = []\n for i in range(np.shape(list_domain)[0]):\n performance_list.append(evaluate_reg_keras(train_X, test_X, train_y, test_y, list_domain[i]))\n\n obs = np.array(performance_list)\n\n return obs\n\n\n\n\n\n\n\ndef evaluate_reg_keras(train_X,test_X, train_y, test_y, param):\n\n\n num_units = int(param[0])\n dropout_rate = param[1]/100\n l2_rate = np.exp(param[2])\n epochs = int(param[3])\n batch = int(param[4])\n recurrent_dropout = param[5] / 100\n\n max_length = np.shape(train_y)[1]\n\n\n train_y = np.expand_dims(train_y,axis=2)\n\n test_y = np.expand_dims(test_y, axis=2)\n\n inputs = Input(shape=(train_X.shape[1], train_X.shape[2]))\n lstm1 = LSTM(num_units, kernel_regularizer=l2(l2_rate), return_sequences=True,\n dropout=dropout_rate, recurrent_dropout= recurrent_dropout )(inputs)\n\n pred = Dense(1)(lstm1)\n\n\n optimizer = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)\n\n\n model = Model(inputs=inputs, outputs=pred)\n\n model.compile(loss='mean_squared_error', optimizer=optimizer)\n\n model.fit(train_X, train_y, epochs=epochs, batch_size=batch, verbose=0, shuffle=False)\n\n\n rmse_list = np.zeros((10, max_length))\n\n for row in range(10):\n\n model.fit(train_X, train_y, epochs=1, batch_size=batch, verbose=0, shuffle=False)\n\n for i in range(max_length):\n\n yhat = model.predict(test_X)\n\n rmse = np.sqrt(np.mean(np.square(test_y[:, i, 0] - yhat[:, i, 0]),axis=0))\n\n rmse_list[row, i] = rmse\n\n return np.mean(rmse_list, axis=0)\n\n\n\n", "'''\n2019 NeurIPS Submission\nTitle: Time-series Generative Adversarial Networks\nAuthors: Jinsung Yoon, Daniel Jarrett, Mihaela van der Schaar\n\nLast Updated Date: May 29th 2019\nCode Author: Jinsung Yoon ([email protected])\n\n-----------------------------\n\nDiscriminative_Score_Metrics\n- Use Post-hoc RNN to classify Original data and Synthetic data\n\nInputs\n- dataX: Original data\n- dataX_hat: Synthetic ata\n\nOutputs\n- Discriminative Score (np.abs(Classification Accuracy - 0.5))\n\n'''\n\n#%% Necessary Packages\nimport tensorflow as tf\nimport numpy as np\nfrom sklearn.metrics import accuracy_score\n\n#%% Post-hoc RNN Classifier \n\ndef discriminative_score_metrics (dataX, dataX_hat):\n \n # Initialization on the Graph\n tf.reset_default_graph()\n\n # Basic Parameters\n No = len(dataX)\n data_dim = len(dataX[0][0,:])\n \n # Compute Maximum seq length and each seq length\n dataT = list()\n Max_Seq_Len = 0\n for i in range(No):\n Max_Seq_Len = max(Max_Seq_Len, len(dataX[i][:,0]))\n dataT.append(len(dataX[i][:,0]))\n \n # Network Parameters\n hidden_dim = max(int(data_dim/2),1)\n iterations = 2000\n batch_size = 128\n \n #%% input place holders\n # Features\n X = tf.placeholder(tf.float32, [None, Max_Seq_Len, data_dim], name = \"myinput_x\")\n X_hat = tf.placeholder(tf.float32, [None, Max_Seq_Len, data_dim], name = \"myinput_x_hat\")\n \n # Times\n T = tf.placeholder(tf.int32, [None], name = \"myinput_t\")\n T_hat = tf.placeholder(tf.int32, [None], name = \"myinput_t_hat\")\n \n #%% builde a RNN 
classification network \n \n def discriminator (X, T):\n \n with tf.variable_scope(\"discriminator\", reuse = tf.AUTO_REUSE) as vs:\n \n d_cell = tf.nn.rnn_cell.GRUCell(num_units=hidden_dim, activation=tf.nn.tanh, name = 'cd_cell')\n \n d_outputs, d_last_states = tf.nn.dynamic_rnn(d_cell, X, dtype=tf.float32, sequence_length = T)\n \n # Logits\n Y_hat = tf.contrib.layers.fully_connected(d_last_states, 1, activation_fn=None) \n \n # Sigmoid output\n Y_hat_Final = tf.nn.sigmoid(Y_hat)\n \n # Variables\n d_vars = [v for v in tf.all_variables() if v.name.startswith(vs.name)]\n \n return Y_hat, Y_hat_Final, d_vars\n \n #%% Train / Test Division\n def train_test_divide (dataX, dataX_hat, dataT):\n \n # Divide train/test index\n No = len(dataX)\n idx = np.random.permutation(No)\n train_idx = idx[:int(No*0.8)]\n test_idx = idx[int(No*0.8):]\n \n # Train and Test X\n trainX = [dataX[i] for i in train_idx]\n trainX_hat = [dataX_hat[i] for i in train_idx]\n \n testX = [dataX[i] for i in test_idx]\n testX_hat = [dataX_hat[i] for i in test_idx]\n \n # Train and Test T\n trainT = [dataT[i] for i in train_idx]\n testT = [dataT[i] for i in test_idx]\n \n return trainX, trainX_hat, testX, testX_hat, trainT, testT\n \n #%% Functions\n # Variables\n Y_real, Y_pred_real, d_vars = discriminator(X, T)\n Y_fake, Y_pred_fake, _ = discriminator(X_hat, T_hat)\n \n # Loss for the discriminator\n D_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = Y_real, labels = tf.ones_like(Y_real)))\n D_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = Y_fake, labels = tf.zeros_like(Y_fake)))\n D_loss = D_loss_real + D_loss_fake\n \n # optimizer\n D_solver = tf.train.AdamOptimizer().minimize(D_loss, var_list = d_vars)\n \n #%% Sessions \n\n # Start session and initialize\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n \n # Train / Test Division\n trainX, trainX_hat, testX, testX_hat, trainT, testT = train_test_divide (dataX, dataX_hat, dataT)\n \n # Training step\n for itt in range(iterations):\n \n # Batch setting\n idx = np.random.permutation(len(trainX))\n train_idx = idx[:batch_size] \n \n X_mb = list(trainX[i] for i in train_idx)\n T_mb = list(trainT[i] for i in train_idx)\n \n # Batch setting\n idx = np.random.permutation(len(trainX_hat))\n train_idx = idx[:batch_size] \n \n X_hat_mb = list(trainX_hat[i] for i in train_idx)\n T_hat_mb = list(trainT[i] for i in train_idx)\n \n # Train discriminator\n _, step_d_loss = sess.run([D_solver, D_loss], feed_dict={X: X_mb, T: T_mb, X_hat: X_hat_mb, T_hat: T_hat_mb}) \n \n #%% Checkpoints\n# if itt % 500 == 0:\n# print(\"[step: {}] loss - d loss: {}\".format(itt, np.round(step_d_loss,4)))\n \n #%% Final Outputs (ontTesting set)\n \n Y_pred_real_curr, Y_pred_fake_curr = sess.run([Y_pred_real, Y_pred_fake], feed_dict={X: testX, T: testT, X_hat: testX_hat, T_hat: testT})\n \n Y_pred_final = np.squeeze(np.concatenate((Y_pred_real_curr, Y_pred_fake_curr), axis = 0))\n Y_label_final = np.concatenate((np.ones([len(Y_pred_real_curr),]), np.zeros([len(Y_pred_real_curr),])), axis = 0)\n \n #%% Accuracy\n Acc = accuracy_score(Y_label_final, Y_pred_final>0.5)\n \n Disc_Score = np.abs(0.5-Acc)\n \n return Disc_Score\n ", "\"\"\"\nCODE ADAPTED FROM: https://github.com/sjblim/rmsn_nips_2018\n\nImplementation of Recurrent Marginal Structural Networks (R-MSNs):\nBrian Lim, Ahmed M Alaa, Mihaela van der Schaar, \"Forecasting Treatment Responses Over Time Using Recurrent\nMarginal Structural Networks\", Advances in Neural 
Information Processing Systems, 2018.\n\"\"\"\n\nimport rmsn.configs\nfrom rmsn.configs import load_optimal_parameters\n\nimport rmsn.core_routines as core\nfrom rmsn.core_routines import test\n\nimport numpy as np\nimport logging\nimport os\n\nimport tensorflow as tf\nlogging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)\nlogging.getLogger().setLevel(logging.INFO)\n\nROOT_FOLDER = rmsn.configs.ROOT_FOLDER\nexpt_name = \"treatment_effects\"\n\n\ndef propensity_generation(dataset_map, MODEL_ROOT, b_use_predicted_confounders,\n b_use_oracle_confounders=False, b_remove_x1=False):\n\n logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)\n\n action_inputs_only = load_optimal_parameters('treatment_rnn_action_inputs_only',\n expt_name, MODEL_ROOT,\n add_net_name=True)\n action_w_trajectory_inputs = load_optimal_parameters('treatment_rnn',\n expt_name, MODEL_ROOT,\n add_net_name=True)\n\n # Generate propensity weights for validation data as well - used for MSM which is calibrated on train + valid data\n b_with_validation = False\n # Generate non-stabilised IPTWs (default false)\n b_denominator_only = False\n\n # Setup tensorflow - setup session to use cpu/gpu\n tf_device = 'cpu'\n if tf_device == \"cpu\":\n tf_config = tf.ConfigProto(log_device_placement=False, device_count={'GPU': 0})\n else:\n tf_config= tf.ConfigProto(log_device_placement=False, device_count={'GPU': 1})\n tf_config.gpu_options.allow_growth = True\n\n # Config + activation functions\n activation_map = {'rnn_propensity_weighted': (\"elu\", 'linear'),\n 'rnn_model': (\"elu\", 'linear'),\n 'rnn_model_bptt': (\"elu\", 'linear'),\n 'treatment_rnn': (\"tanh\", 'sigmoid'),\n 'treatment_rnn_action_inputs_only': (\"tanh\", 'sigmoid'),\n 'treatment_rnn_softmax': (\"tanh\", 'sigmoid'),\n 'treatment_rnn_action_inputs_only_softmax': (\"tanh\", 'sigmoid'),\n }\n\n configs = {'action_num': action_inputs_only,\n 'action_den': action_w_trajectory_inputs}\n\n # Setup the simulated datasets\n training_data = dataset_map['training_data']\n validation_data = dataset_map['validation_data']\n test_data = dataset_map['test_data']\n\n # Generate propensity weights for validation data if required\n if b_with_validation:\n for k in training_data:\n training_data[k] = np.concatenate([training_data[k], validation_data[k]])\n\n ##############################################################################################################\n # Functions\n def get_predictions(config):\n\n net_name = config[0]\n\n hidden_activation, output_activation = activation_map[net_name]\n\n # Pull datasets\n b_predict_actions = \"treatment_rnn\" in net_name\n b_use_actions_only = \"rnn_action_inputs_only\" in net_name\n\n # Extract only relevant trajs and shift data\n training_processed = core.get_processed_data(training_data, b_predict_actions, b_use_actions_only,\n b_use_predicted_confounders, b_use_oracle_confounders, b_remove_x1)\n validation_processed = core.get_processed_data(validation_data, b_predict_actions,\n b_use_actions_only,\n b_use_predicted_confounders, b_use_oracle_confounders, b_remove_x1)\n\n num_features = training_processed['scaled_inputs'].shape[-1] # 4 if not b_use_actions_only else 3\n num_outputs = training_processed['scaled_outputs'].shape[-1] # 1 if not b_predict_actions else 3 # 5\n\n # Unpack remaining variables\n dropout_rate = config[1]\n memory_multiplier = config[2] / num_features\n num_epochs = config[3]\n minibatch_size = config[4]\n learning_rate = config[5]\n max_norm = config[6]\n\n\n\n 
model_folder = os.path.join(MODEL_ROOT, net_name)\n means, outputs, _, _ = test(training_processed, validation_processed, training_processed, tf_config,\n net_name, expt_name, dropout_rate, num_features, num_outputs,\n memory_multiplier, num_epochs, minibatch_size, learning_rate, max_norm,\n hidden_activation, output_activation, model_folder)\n\n return means, outputs\n\n def get_weights(probs, targets):\n w = probs*targets + (1-probs) * (1-targets)\n return w.prod(axis=2)\n\n\n def get_weights_from_config(config):\n net_name = config[0]\n\n probs, targets = get_predictions(config)\n\n return get_weights(probs, targets)\n\n def get_probabilities_from_config(config):\n net_name = config[0]\n\n probs, targets = get_predictions(config)\n\n return probs\n\n\n ##############################################################################################################\n\n # Action with trajs\n weights = {k: get_weights_from_config(configs[k]) for k in configs}\n\n den = weights['action_den']\n num = weights['action_num']\n\n propensity_weights = 1.0/den if b_denominator_only else num/den\n\n # truncation @ 95th and 5th percentiles\n UB = np.percentile(propensity_weights, 99)\n LB = np.percentile(propensity_weights, 1)\n\n propensity_weights[propensity_weights > UB] = UB\n propensity_weights[propensity_weights < LB] = LB\n\n # Adjust so for 3 trajectories here\n horizon = 1\n (num_patients, num_time_steps) = propensity_weights.shape\n output = np.ones((num_patients, num_time_steps, horizon))\n\n tmp = np.ones((num_patients, num_time_steps))\n tmp[:, 1:] = propensity_weights[:, :-1]\n propensity_weights = tmp\n\n for i in range(horizon):\n output[:, :num_time_steps-i, i] = propensity_weights[:, i:]\n\n propensity_weights = output.cumprod(axis=2)\n\n suffix = \"\" if not b_denominator_only else \"_den_only\"\n\n if b_with_validation:\n save_file = os.path.join(MODEL_ROOT, \"propensity_scores_w_validation{}\".format(suffix))\n else:\n save_file = os.path.join(MODEL_ROOT, \"propensity_scores{}\".format(suffix))\n\n np.save(save_file, propensity_weights)\n\n\n" ]
[ [ "torch.nn.Sequential", "torch.nn.GLU", "torch.nn.ReLU6", "torch.nn.Softplus", "torch.nn.Dropout", "torch.manual_seed", "torch.nn.ELU", "torch.nn.Sigmoid", "torch.nn.Tanh", "torch.nn.Linear", "torch.nn.CELU", "torch.nn.LogSigmoid", "torch.nn.SELU", "torch.nn.LeakyReLU", "torch.nn.ReLU", "torch.nn.Hardtanh" ], [ "tensorflow.nn.dynamic_rnn", "numpy.sqrt", "numpy.asarray", "numpy.round", "numpy.max", "tensorflow.train.AdamOptimizer", "tensorflow.nn.moments", "tensorflow.reset_default_graph", "tensorflow.Session", "tensorflow.trainable_variables", "numpy.min", "tensorflow.placeholder", "tensorflow.zeros_like", "tensorflow.global_variables_initializer", "tensorflow.losses.mean_squared_error", "tensorflow.ones_like", "tensorflow.contrib.layers.fully_connected", "tensorflow.variable_scope", "tensorflow.sqrt" ], [ "pandas.concat", "pandas.DataFrame", "numpy.stack", "torch.tensor", "numpy.argmin", "numpy.diff", "numpy.sum" ], [ "numpy.square", "numpy.expand_dims", "numpy.min", "numpy.mean", "numpy.shape", "numpy.array", "numpy.exp", "numpy.zeros" ], [ "tensorflow.nn.dynamic_rnn", "tensorflow.all_variables", "tensorflow.nn.sigmoid", "numpy.abs", "tensorflow.ones_like", "tensorflow.placeholder", "tensorflow.contrib.layers.fully_connected", "numpy.concatenate", "tensorflow.global_variables_initializer", "tensorflow.zeros_like", "tensorflow.reset_default_graph", "numpy.random.permutation", "tensorflow.Session", "tensorflow.train.AdamOptimizer", "tensorflow.variable_scope", "tensorflow.nn.rnn_cell.GRUCell", "sklearn.metrics.accuracy_score" ], [ "numpy.save", "numpy.percentile", "tensorflow.ConfigProto", "numpy.concatenate", "numpy.ones" ] ]
albamr09/PythonML
[ "9848cf913a7cdb73d2b98a8ab7334c04f421ad87" ]
[ "pyml/supervised/SVM/SVM2.py" ]
[ "import numpy as np\n\n\"\"\"\n\n------------------------------------------------------------------------------------------------------------------------------------------------------\n\n SVM2\n\n------------------------------------------------------------------------------------------------------------------------------------------------------\n\n\n\n\"\"\"\n\nclass SVMV2():\n\n def __init__(self) -> None:\n \"\"\"Empty method\"\"\"\n pass\n\n def svmTrain(X, Y, C, kernelFunction, tol=1e-3, max_passes=5, args=()):\n \"\"\"\n Trains an SVM classifier using a simplified version of the SMO algorithm.\n\n Parameters\n ---------\n X : numpy ndarray\n (m x n) Matrix of training examples. Each row is a training example, and the\n jth column holds the jth feature.\n\n Y : numpy ndarray\n (m, ) A vector (1-D numpy array) containing 1 for positive examples and 0 for negative examples.\n\n C : float\n The standard SVM regularization parameter.\n\n kernelFunction : func\n A function handle which computes the kernel. The function should accept two vectors as\n inputs, and returns a scalar as output.\n\n tol : float, optional\n Tolerance value used for determining equality of floating point numbers.\n\n max_passes : int, optional\n Controls the number of iterations over the dataset (without changes to alpha)\n before the algorithm quits.\n\n args : tuple\n Extra arguments required for the kernel function, such as the sigma parameter for a\n Gaussian kernel.\n\n Returns\n -------\n model :\n The trained SVM model.\n\n Notes\n -----\n This is a simplified version of the SMO algorithm for training SVMs. In practice, if\n you want to train an SVM classifier, we recommend using an optimized package such as:\n\n - LIBSVM (http://www.csie.ntu.edu.tw/~cjlin/libsvm/)\n - SVMLight (http://svmlight.joachims.org/)\n - scikit-learn (http://scikit-learn.org/stable/modules/svm.html) which contains python wrappers\n for the LIBSVM library.\n \"\"\"\n # make sure data is signed int\n Y = Y.astype(int)\n # Dataset size parameters\n m, n = X.shape\n\n passes = 0\n E = np.zeros(m)\n alphas = np.zeros(m)\n b = 0\n\n # Map 0 to -1\n Y[Y == 0] = -1\n\n # Pre-compute the Kernel Matrix since our dataset is small\n # (in practice, optimized SVM packages that handle large datasets\n # gracefully will **not** do this)\n\n # We have implemented the optimized vectorized version of the Kernels here so\n # that the SVM training will run faster\n if kernelFunction.__name__ == 'linearKernel':\n # Vectorized computation for the linear kernel\n # This is equivalent to computing the kernel on every pair of examples\n K = np.dot(X, X.T)\n elif kernelFunction.__name__ == 'gaussianKernel':\n # vectorized RBF Kernel\n # This is equivalent to computing the kernel on every pair of examples\n X2 = np.sum(X**2, axis=1)\n K = X2 + X2[:, None] - 2 * np.dot(X, X.T)\n\n if len(args) > 0:\n K /= 2*args[0]**2\n\n K = np.exp(-K)\n else:\n K = np.zeros((m, m))\n for i in range(m):\n for j in range(i, m):\n K[i, j] = kernelFunction(X[i, :], X[j, :])\n K[j, i] = K[i, j]\n\n while passes < max_passes:\n num_changed_alphas = 0\n for i in range(m):\n E[i] = b + np.sum(alphas * Y * K[:, i]) - Y[i]\n\n if (Y[i]*E[i] < -tol and alphas[i] < C) or (Y[i]*E[i] > tol and alphas[i] > 0):\n # select the alpha_j randomly\n j = np.random.choice(list(range(i)) + list(range(i+1, m)), size=1)[0]\n\n E[j] = b + np.sum(alphas * Y * K[:, j]) - Y[j]\n\n alpha_i_old = alphas[i]\n alpha_j_old = alphas[j]\n\n if Y[i] == Y[j]:\n L = max(0, alphas[j] + alphas[i] - C)\n H = min(C, 
alphas[j] + alphas[i])\n else:\n L = max(0, alphas[j] - alphas[i])\n H = min(C, C + alphas[j] - alphas[i])\n\n if L == H:\n continue\n\n eta = 2 * K[i, j] - K[i, i] - K[j, j]\n\n # objective function positive definite, there will be a minimum along the direction\n # of linear equality constrain, and eta will be greater than zero\n # we are actually computing -eta here (so we skip of eta >= 0)\n if eta >= 0:\n continue\n\n alphas[j] -= Y[j] * (E[i] - E[j])/eta\n alphas[j] = max(L, min(H, alphas[j]))\n\n if abs(alphas[j] - alpha_j_old) < tol:\n alphas[j] = alpha_j_old\n continue\n alphas[i] += Y[i]*Y[j]*(alpha_j_old - alphas[j])\n\n b1 = b - E[i] - Y[i]*(alphas[i] - alpha_i_old) * K[i, j] \\\n - Y[j] * (alphas[j] - alpha_j_old) * K[i, j]\n\n b2 = b - E[j] - Y[i]*(alphas[i] - alpha_i_old) * K[i, j] \\\n - Y[j] * (alphas[j] - alpha_j_old) * K[j, j]\n\n if 0 < alphas[i] < C:\n b = b1\n elif 0 < alphas[j] < C:\n b = b2\n else:\n b = (b1 + b2)/2\n\n num_changed_alphas += 1\n if num_changed_alphas == 0:\n passes += 1\n else:\n passes = 0\n\n idx = alphas > 0\n model = {'X': X[idx, :],\n 'y': Y[idx],\n 'kernelFunction': kernelFunction,\n 'b': b,\n 'args': args,\n 'alphas': alphas[idx],\n 'w': np.dot(alphas * Y, X)}\n return model\n\n\n def svmPredict(model, X):\n \"\"\"\n Returns a vector of predictions using a trained SVM model.\n\n Parameters\n ----------\n model : dict\n The parameters of the trained svm model, as returned by the function svmTrain\n\n X : array_like\n A (m x n) matrix where each example is a row.\n\n Returns\n -------\n pred : array_like\n A (m,) sized vector of predictions {0, 1} values.\n \"\"\"\n # check if we are getting a vector. If so, then assume we only need to do predictions\n # for a single example\n if X.ndim == 1:\n X = X[np.newaxis, :]\n\n m = X.shape[0]\n p = np.zeros(m)\n pred = np.zeros(m)\n\n if model['kernelFunction'].__name__ == 'linearKernel':\n # we can use the weights and bias directly if working with the linear kernel\n p = np.dot(X, model['w']) + model['b']\n elif model['kernelFunction'].__name__ == 'gaussianKernel':\n # vectorized RBF Kernel\n # This is equivalent to computing the kernel on every pair of examples\n X1 = np.sum(X**2, 1)\n X2 = np.sum(model['X']**2, 1)\n K = X2 + X1[:, None] - 2 * np.dot(X, model['X'].T)\n\n if len(model['args']) > 0:\n K /= 2*model['args'][0]**2\n\n K = np.exp(-K)\n p = np.dot(K, model['alphas']*model['y']) + model['b']\n else:\n # other non-linear kernel\n for i in range(m):\n predictions = 0\n for j in range(model['X'].shape[0]):\n predictions += model['alphas'][j] * model['y'][j] \\\n * model['kernelFunction'](X[i, :], model['X'][j, :])\n p[i] = predictions\n\n pred[p >= 0] = 1\n return pred\n\n\n def linearKernel(x1, x2):\n \"\"\"\n Returns a linear kernel between x1 and x2.\n\n Parameters\n ----------\n x1 : numpy ndarray\n A 1-D vector.\n\n x2 : numpy ndarray\n A 1-D vector of same size as x1.\n\n Returns\n -------\n : float\n The scalar amplitude.\n \"\"\"\n return np.dot(x1, x2)\n\n\n def visualizeBoundaryLinear(X, y, model):\n \"\"\"\n Plots a linear decision boundary learned by the SVM.\n\n Parameters\n ----------\n X : array_like\n (m x 2) The training data with two features (to plot in a 2-D plane).\n\n y : array_like\n (m, ) The data labels.\n\n model : dict\n Dictionary of model variables learned by SVM.\n \"\"\"\n w, b = model['w'], model['b']\n xp = np.linspace(min(X[:, 0]), max(X[:, 0]), 100)\n yp = -(w[0] * xp + b)/w[1]\n\n plotData(X, y)\n pyplot.plot(xp, yp, '-b')\n\n\n def visualizeBoundary(X, y, 
model):\n \"\"\"\n Plots a non-linear decision boundary learned by the SVM and overlays the data on it.\n\n Parameters\n ----------\n X : array_like\n (m x 2) The training data with two features (to plot in a 2-D plane).\n\n y : array_like\n (m, ) The data labels.\n\n model : dict\n Dictionary of model variables learned by SVM.\n \"\"\"\n plotData(X, y)\n\n # make classification predictions over a grid of values\n x1plot = np.linspace(min(X[:, 0]), max(X[:, 0]), 100)\n x2plot = np.linspace(min(X[:, 1]), max(X[:, 1]), 100)\n X1, X2 = np.meshgrid(x1plot, x2plot)\n\n vals = np.zeros(X1.shape)\n for i in range(X1.shape[1]):\n this_X = np.stack((X1[:, i], X2[:, i]), axis=1)\n vals[:, i] = svmPredict(model, this_X)\n\n pyplot.contour(X1, X2, vals, colors='y', linewidths=2)\n pyplot.pcolormesh(X1, X2, vals, cmap='YlGnBu', alpha=0.25, edgecolors='None', lw=0)\n pyplot.grid(False)\n" ]
[ [ "numpy.dot", "numpy.stack", "numpy.exp", "numpy.meshgrid", "numpy.zeros", "numpy.sum" ] ]
w-sugar/maskrcnn-benchmark
[ "37d985c2c0b190bf76945b9f7a9530b855e370e5" ]
[ "maskrcnn_benchmark/engine/trainer.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nimport datetime\nimport logging\nimport os\nimport time\n\nimport torch\nimport torch.distributed as dist\nfrom tqdm import tqdm\n\nfrom maskrcnn_benchmark.data import make_data_loader\nfrom maskrcnn_benchmark.utils.comm import get_world_size, synchronize\nfrom maskrcnn_benchmark.utils.metric_logger import MetricLogger\nfrom maskrcnn_benchmark.engine.inference import inference\nfrom maskrcnn_benchmark.utils.visualize import print_dict\n\nfrom apex import amp\n\ndef reduce_loss_dict(loss_dict):\n \"\"\"\n Reduce the loss dictionary from all processes so that process with rank\n 0 has the averaged results. Returns a dict with the same fields as\n loss_dict, after reduction.\n \"\"\"\n world_size = get_world_size()\n if world_size < 2:\n return loss_dict\n with torch.no_grad():\n loss_names = []\n all_losses = []\n for k in sorted(loss_dict.keys()):\n loss_names.append(k)\n all_losses.append(loss_dict[k])\n all_losses = torch.stack(all_losses, dim=0)\n dist.reduce(all_losses, dst=0)\n if dist.get_rank() == 0:\n # only main process gets accumulated, so only divide by\n # world_size in this case\n all_losses /= world_size\n reduced_losses = {k: v for k, v in zip(loss_names, all_losses)}\n return reduced_losses\n\n\ndef do_train(\n cfg,\n model,\n data_loader,\n data_loader_val,\n optimizer,\n scheduler,\n checkpointer,\n device,\n checkpoint_period,\n test_period,\n arguments,\n distributed,\n vis_port\n):\n from visdom import Visdom\n vis = None\n if distributed:\n if dist.get_rank() == 0:\n vis = Visdom(server='http://127.0.0.1', port=vis_port)\n else:\n vis = Visdom(server='http://127.0.0.1', port=vis_port)\n logger = logging.getLogger(\"maskrcnn_benchmark.trainer\")\n logger.info(\"Start training\")\n meters = MetricLogger(delimiter=\" \")\n max_iter = len(data_loader)\n start_iter = arguments[\"iteration\"]\n model.train()\n start_training_time = time.time()\n end = time.time()\n\n iou_types = (\"bbox\",)\n if cfg.MODEL.MASK_ON:\n iou_types = iou_types + (\"segm\",)\n if cfg.MODEL.KEYPOINT_ON:\n iou_types = iou_types + (\"keypoints\",)\n dataset_names = cfg.DATASETS.TEST\n\n for iteration, (images, targets, _) in enumerate(data_loader, start_iter):\n \n if any(len(target) < 1 for target in targets):\n logger.error(f\"Iteration={iteration + 1} || Image Ids used for training {_} || targets Length={[len(target) for target in targets]}\" )\n continue\n data_time = time.time() - end\n iteration = iteration + 1\n arguments[\"iteration\"] = iteration\n\n images = images.to(device)\n targets = [target.to(device) for target in targets]\n\n loss_dict = model(images, targets)\n\n losses = sum(loss for loss in loss_dict.values())\n\n # reduce losses over all GPUs for logging purposes\n loss_dict_reduced = reduce_loss_dict(loss_dict)\n losses_reduced = sum(loss for loss in loss_dict_reduced.values())\n meters.update(loss=losses_reduced, **loss_dict_reduced)\n\n optimizer.zero_grad()\n # Note: If mixed precision is not used, this ends up doing nothing\n # Otherwise apply loss scaling for mixed-precision recipe\n with amp.scale_loss(losses, optimizer) as scaled_losses:\n scaled_losses.backward()\n optimizer.step()\n scheduler.step()\n\n batch_time = time.time() - end\n end = time.time()\n meters.update(time=batch_time, data=data_time)\n\n eta_seconds = meters.time.global_avg * (max_iter - iteration)\n eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))\n\n if iteration % 20 == 0 or iteration == max_iter:\n 
logger.info(\n meters.delimiter.join(\n [\n \"eta: {eta}\",\n \"iter: {iter}\",\n \"{meters}\",\n \"lr: {lr:.6f}\",\n \"max mem: {memory:.0f}\",\n ]\n ).format(\n eta=eta_string,\n iter=iteration,\n meters=str(meters),\n lr=optimizer.param_groups[0][\"lr\"],\n memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0,\n )\n )\n\n # 更新 loss 曲线\n loss_dict_print = loss_dict_reduced\n loss_dict_print['loss'] = losses_reduced\n print_dict(vis, loss_dict_print, iteration, need_plot=True)\n\n if iteration % checkpoint_period == 0:\n checkpointer.save(\"model_{:07d}\".format(iteration), **arguments)\n if data_loader_val is not None and test_period > 0 and iteration % test_period == 0:\n meters_val = MetricLogger(delimiter=\" \")\n synchronize()\n _ = inference( # The result can be used for additional logging, e. g. for TensorBoard\n model,\n # The method changes the segmentation mask format in a data loader,\n # so every time a new data loader is created:\n make_data_loader(cfg, is_train=False, is_distributed=(get_world_size() > 1), is_for_period=True),\n dataset_name=\"[Validation]\",\n iou_types=iou_types,\n box_only=False if cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY,\n device=cfg.MODEL.DEVICE,\n expected_results=cfg.TEST.EXPECTED_RESULTS,\n expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,\n output_folder=None,\n )\n synchronize()\n model.train()\n # with torch.no_grad():\n # # Should be one image for each GPU:\n # for iteration_val, (images_val, targets_val, _) in enumerate(tqdm(data_loader_val)):\n # images_val = images_val.to(device)\n # targets_val = [target.to(device) for target in targets_val]\n # loss_dict = model(images_val, targets_val)\n # losses = sum(loss for loss in loss_dict.values())\n # loss_dict_reduced = reduce_loss_dict(loss_dict)\n # losses_reduced = sum(loss for loss in loss_dict_reduced.values())\n # meters_val.update(loss=losses_reduced, **loss_dict_reduced)\n # synchronize()\n # logger.info(\n # meters_val.delimiter.join(\n # [\n # \"[Validation]: \",\n # \"eta: {eta}\",\n # \"iter: {iter}\",\n # \"{meters}\",\n # \"lr: {lr:.6f}\",\n # \"max mem: {memory:.0f}\",\n # ]\n # ).format(\n # eta=eta_string,\n # iter=iteration,\n # meters=str(meters_val),\n # lr=optimizer.param_groups[0][\"lr\"],\n # memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0,\n # )\n # )\n if iteration == max_iter:\n checkpointer.save(\"model_final\", **arguments)\n\n total_training_time = time.time() - start_training_time\n total_time_str = str(datetime.timedelta(seconds=total_training_time))\n logger.info(\n \"Total training time: {} ({:.4f} s / it)\".format(\n total_time_str, total_training_time / (max_iter)\n )\n )\n" ]
[ [ "torch.cuda.max_memory_allocated", "torch.distributed.reduce", "torch.no_grad", "torch.stack", "torch.distributed.get_rank" ] ]
scuervo91/reservoirpy
[ "a4db620baf3ff66a85c7f61b1919713a8642e6fc", "a4db620baf3ff66a85c7f61b1919713a8642e6fc" ]
[ "reservoirpy/wellproductivitypy/pi/outflow.py", "reservoirpy/simulationpy/model.py" ]
[ "import numpy as np\nimport pandas as pd \nimport matplotlib.pyplot as plt\nfrom ...pvtpy.black_oil import Pvt,Oil,Water,Gas\nfrom scipy.optimize import root_scalar\nfrom .inflow import OilInflow, GasInflow\nfrom ...utils import intercept_curves\nfrom typing import Union\n\n## Incompressible pressure drop\ndef potential_energy_change(\n z1:Union[int,float]=None, \n z2=None, \n delta_z=None,\n length=None, \n ge=1, \n angle=None, \n inc=None,\n p1=0):\n \"\"\"potential_energy_change [ Δp PE accounts for the pressure change due to the weight of the column of fluid (the hydrostatic head); it\n will be zero for flow in a horizontal pipe.\n\n In this equation, Δz is the difference in elevation between positions 1 and 2, with z increasing upward. θ\n is defined as the angle between horizontal and the direction of flow. Thus, θ is +90° for upward, vertical\n flow, 0° for horizontal flow, and –90° for downward flow in a vertical well (Figure 7-4). For flow in a\n straight pipe of length L with flow direction θ,]\n\n Parameters\n ----------\n z1 : [type], optional\n [description], by default None\n z2 : [type], optional\n [description], by default None\n delta_z : [type], optional\n [description], by default None\n length : [type], optional\n [description], by default None\n ge : int, optional\n [description], by default 1\n angle : [type], optional\n [description], by default None\n inc : [type], optional\n [description], by default None\n p1 : int, optional\n [description], by default 0\n\n Returns\n -------\n [type]\n [description]\n \"\"\"\n\n # Assert height difference types\n if delta_z is None:\n if length is None:\n assert isinstance(z1,(float,int,np.ndarray,np.int64,np.float64)) and isinstance(z2,(float,int,np.ndarray,np.int64,np.float64)), f\"{type(z1)} {type(z2)}\"\n z1 = np.atleast_1d(z1)\n z2 = np.atleast_1d(z2)\n #assert z1.shape == (1,) and z2.shape == (1,)\n delta_z = z1-z2\n\n else:\n assert isinstance(length,(float,int,np.ndarray,np.int64,np.float64)) \n length = np.atleast_1d(length)\n #assert length.shape == (1,)\n\n if angle is None:\n assert isinstance(inc,(float,int,np.ndarray,np.int64,np.float64))\n inc = np.atleast_1d(inc)\n assert inc <= 90 and inc >= -90\n sign = np.sign(inc)\n\n angle = (90 - np.abs(inc)) * sign\n else:\n # Assert angle between -90 and 90\n assert isinstance(angle,(float,int,np.ndarray,np.int64,np.float64))\n angle = np.atleast_1d(angle)\n assert angle <= 90 and angle >= -90 \n\n delta_z = length * np.sin(np.radians(angle))\n\n else:\n assert isinstance(delta_z,(float,int,np.ndarray,np.int64,np.float64))\n delta_z = np.atleast_1d(delta_z)\n #assert delta_z.shape == (1,)\n\n\n #Assert ge be positive\n assert isinstance(ge,(float,int,np.ndarray,np.int64,np.float64)) and ge>0, f\"{ge} {type(ge)} not allowed\"\n\n #Calculate Delta P\n delta_p = 0.433 * ge * delta_z\n\n #Calculate P2\n p2 = p1 + delta_p\n\n return delta_p, p2\n\ndef kinetic_energy_change(d1=None,d2=None, ge=1,rate=None,p1=0):\n \"\"\"\n Δp KE is the pressure drop resulting from a change in the velocity of the fluid between positions 1 and 2.\n It will be zero for an incompressible fluid unless the cross-sectional area of the pipe is different at the\n two positions of interest.\n\n Petroleum Production Systems, Economides. Chapter 7 7.2.3.2. Δp KE, the Pressure Drop Due to Kinetic Energy Change. 
Page 172\n\n \"\"\"\n\n assert isinstance(d1,(float,int,np.ndarray,np.int64,np.float64)) and isinstance(d2,(float,int,np.ndarray,np.int64,np.float64))\n d1 = np.atleast_1d(d1)\n d2 = np.atleast_1d(d2)\n\n\n #Assert Specifi Gravity be positive\n assert isinstance(ge,(float,int,np.ndarray,np.int64,np.float64)) and ge>0\n ge = np.atleast_1d(ge)\n\n\n # Rate in bbl/d\n assert isinstance(rate,(float,int,np.ndarray,np.int64,np.float64)) and rate>=0\n rate = np.atleast_1d(rate) \n\n #Estimate Density in lb/ft3\n rho = 62.4 * ge\n\n #Estimate delta Pressure in psi\n delta_p = 1.53e-8 * np.power(rate,2) * rho * ((1/np.power(d1,4))-(1/np.power(d2,4)))\n\n p2 = p1 + delta_p\n\n return delta_p, p2\n\ndef reynolds_number(rate,rho,d,mu):\n \"\"\"\n Reynolds Number where q is in bbl/d, ρ in lb m /ft 3 , D in in., and μ in cp.\n \"\"\" \n nre = (1.48 * rate * rho) / (d * mu)\n\n return nre\n\ndef frictional_pressure_drop(\n rate=None, \n epsilon=0.001,\n ge=1,\n d=None, \n mu=1, \n length=None):\n\n # Rate in bbl/d\n assert isinstance(rate,(float,int,np.ndarray,np.int64,np.float64)) and rate>=0\n rate = np.atleast_1d(rate) \n\n # pipe relative roughness\n assert isinstance(epsilon,(float,int,np.ndarray,np.int64,np.float64))\n epsilon = np.atleast_1d(epsilon) \n\n #Assert Specifi Gravity be positive\n assert isinstance(ge,(float,int,np.ndarray,np.int64,np.float64)) and ge>0\n ge = np.atleast_1d(ge)\n\n assert isinstance(d,(float,int,np.ndarray,np.int64,np.float64))\n d = np.atleast_1d(d)\n\n assert isinstance(mu,(float,int,np.ndarray,np.int64,np.float64))\n mu = np.atleast_1d(mu)\n\n assert isinstance(length,(float,int,np.ndarray,np.int64,np.float64))\n length = np.atleast_1d(length)\n\n #Estimate Density in lb/ft3\n rho = 62.4 * ge\n\n #Reynolds Number\n nre = reynolds_number(rate,rho,d,mu)\n\n #Friction Factor\n if nre == 0:\n ff = 0\n else:\n ff = np.power((1/(-4*np.log10((epsilon/3.7065)-(5.0452/nre)*np.log10((np.power(epsilon,1.1098)/2.8257)+np.power(7.149/nre,0.8981))))),2)\n\n #Velocity ft/s\n u = (4*rate*5.615)/(np.pi*np.power(d/12,2)*86400)\n\n delta_p = (2 * ff * rho * np.power(u,2) * length)/(32.17 * (d/12) * 144)\n delta_p *= -1\n return delta_p\n\n\n\ndef one_phase_pressure_profile(\n p1=0,\n ge=1,\n epsilon=0.001,\n md=None,\n tvd=None,\n d = None,\n rate = None,\n mu=None,\n backwards=1\n ):\n\n assert isinstance(md,(int,float,list,np.ndarray))\n md = np.atleast_1d(md)\n \n if tvd is None:\n tvd = md\n else:\n assert isinstance(tvd,(int,float,list,np.ndarray))\n tvd = np.atleast_1d(tvd)\n \n assert isinstance(d,(int,float,list,np.ndarray))\n if isinstance(d,(int,float)):\n d = np.full(md.shape,d)\n else:\n d = np.atleast_1d(d)\n \n assert isinstance(rate,(int,float, np.ndarray))\n rate = np.atleast_1d(rate)\n assert isinstance(mu,(int,float, np.ndarray))\n mu = np.atleast_1d(mu)\n assert isinstance(p1,(int,float, np.ndarray))\n p1 = np.atleast_1d(p1)\n assert isinstance(ge,(int,float, np.ndarray))\n ge = np.atleast_1d(ge)\n assert isinstance(epsilon,(int,float, np.ndarray))\n epsilon = np.atleast_1d(epsilon)\n\n assert md.shape[0] == tvd.shape[0] == d.shape[0]\n\n n = md.shape[0]\n\n #Create arrays\n pressure = np.zeros(n)\n ppe = np.zeros(n)\n pke = np.zeros(n)\n pf = np.zeros(n)\n delta_p = np.zeros(n)\n gradient = np.zeros(n)\n\n pressure[0] = p1\n\n for i in range(1,n):\n\n #Potential Energy Change\n ppe[i], _ = potential_energy_change(\n z1=tvd[i-1],\n z2=tvd[i],\n ge= ge,\n )\n\n #Kinetic Energy Change\n pke[i], _ = kinetic_energy_change(\n d1=d[i-1],\n d2=d[i],\n rate=rate,\n ge=ge,\n 
)\n\n #Frictional Pressure drop\n pf[i] = frictional_pressure_drop(\n rate=rate, \n epsilon=epsilon,\n ge=ge,\n d=d[i], \n mu=mu, \n length=np.abs(md[i-1]-md[i])\n ) * backwards\n\n delta_p[i] = ppe[i] + pke[i] + pf[i]\n pressure[i] = pressure[i-1] + delta_p[i]\n gradient[i] = (pressure[i] - pressure[i-1])/np.abs(tvd[i] - tvd[i-1])\n \n # Create dataframe\n pressure_profile = pd.DataFrame({\n 'md':md,\n 'tvd':tvd,\n 'diameter':d,\n 'pressure':pressure,\n 'ppe': ppe,\n 'pke': pke,\n 'pf' : pf,\n 'delta_p': delta_p,\n 'gradient': gradient\n }).set_index('md')\n \n p2 = pressure[-1]\n\n return pressure_profile, p2\n\n\n## Gas Outflow functions\n\ndef gas_pressure_profile_correlation(thp,sg,depth):\n assert isinstance(thp,(float,int,np.ndarray,np.int64,np.float64))\n thp = np.atleast_1d(thp)\n assert thp.ndim == 1\n\n assert isinstance(sg,(float,int,np.ndarray,np.int64,np.float64))\n sg = np.atleast_1d(sg)\n assert sg.shape == (1,)\n\n assert isinstance(depth,(list,float,int,np.ndarray))\n depth = np.atleast_1d(depth)\n assert sg.ndim == 1\n\n pwf = thp*np.exp(3.47e-5*depth)\n\n return pwf\n\n\n\ndef gas_pressure_profile(\n md = None, \n inc = None, \n thp = None, \n rate = None, \n gas_obj = None,\n di=2.99,\n surf_temp=80,\n temp_grad=1,\n epsilon = 0.0006, \n tol = 0.05, \n max_iter=20):\n \"\"\"\n To calculate the pressure drop in a gas well, the compressibility of the fluid must be considered. When\n the fluid is compressible, the fluid density and fluid velocity vary along the pipe, and these variations\n must be included when integrating the mechanical energy balance equation.\n\n Petroleum Production Systems, Economides. Chapter 7 7.3. Single-Phase Flow of a Compressible, Newtonian Fluid. Page 175\n \"\"\"\n # Assert the right types and shapes for input\n assert isinstance(md, (np.ndarray,pd.Series))\n md = np.atleast_1d(md)\n assert md.ndim ==1\n\n assert isinstance(inc, (int,float,np.ndarray,pd.Series))\n if isinstance(inc,np.ndarray):\n assert inc.shape == md.shape\n else:\n inc = np.full(md.shape,inc)\n\n angle = np.radians(90 - inc) \n\n assert isinstance(thp, (int,np.int64,np.float64,float,np.ndarray)), f'{type(thp)} not accepted'\n thp = np.atleast_1d(thp)\n assert thp.shape == (1,)\n\n assert isinstance(gas_obj,Gas) and gas_obj.pvt is not None\n\n assert isinstance(di, (int,float,np.ndarray))\n if isinstance(di,np.ndarray):\n assert di.shape == md.shape\n else:\n di = np.full(md.shape,di)\n\n assert isinstance(rate, (int,float,np.ndarray))\n rate = np.atleast_1d(rate)\n assert rate.shape == (1,)\n\n assert gas_obj.sg is not None\n\n #Create the variables\n\n pressure_profile = np.zeros(md.shape)\n temperature_profile = np.zeros(md.shape)\n pressure_gradient = np.zeros(md.shape)\n pressure_profile[0] = thp\n temperature_profile[0] = surf_temp\n\n interations = np.zeros(md.shape)\n\n if gas_obj.chromatography is not None:\n df_rho = gas_obj.chromatography.get_rhog(p=thp,t=surf_temp, rhog_method='real_gas')\n else:\n df_rho = gas_obj.pvt.interpolate(thp,property='rhog')\n\n grad_guess = df_rho['rhog'].values*(0.433/62.4)\n\n #Loop over depth\n for i in range(1,md.shape[0]):\n err = tol + 0.01\n dz = np.sin(angle[i])*(md[i]-md[i-1])\n gas_sg = gas_obj.sg\n it = 0\n while err>= tol and it <= max_iter:\n p_guess = grad_guess*(md[i]-md[i-1])*np.sin(angle[i]) + pressure_profile[i-1]\n\n #Interpolate pvt\n df_pvt = gas_obj.pvt.interpolate(p_guess)\n\n #Reynolds Number\n #nre = (4*28.97*gas_obj.sg*rate*14.7)/(np.pi*di[i]*df_pvt['mug'].values*10.73*520)\n nre = 
20.09*(gas_sg*rate)/(di[i]*df_pvt['mug'].values)\n\n #Friction Factor\n friction = np.power((1/(-4*np.log10((epsilon/3.7065)-(5.0452/nre)*np.log10((np.power(epsilon,1.1098)/2.8257)+np.power(7.149/nre,0.8981))))),2)\n\n #Temperature\n temperature_profile[i] = dz * (temp_grad/100) + temperature_profile[i-1]\n\n #S\n s = (-0.0375*gas_obj.sg*dz)/(df_pvt['z'].values*(temperature_profile[i]+460))\n\n #Calculate next pressure by parts for easily read\n a = np.exp(-s) * np.power(pressure_profile[i-1],2)\n b = (friction*np.power(df_pvt['z'].values*(temperature_profile[i]+460)*rate,2))/(np.sin(angle[i])*np.power(di[i],5))\n c = 1 - np.exp(-s)\n\n p_new = np.sqrt(a - (2.685e-3*b*c))\n grad_new = (p_new - pressure_profile[i-1])/dz\n\n err = np.abs(grad_guess-grad_new)/grad_new\n grad_guess = grad_new\n it +=1\n \n pressure_gradient[i] = grad_new\n pressure_profile[i] = p_new\n interations[i] = it\n\n df_dict = {\n 'pressure':pressure_profile,\n 'pressure_gradient': pressure_gradient,\n 'temperature': temperature_profile,\n 'iterations': interations\n }\n\n df = pd.DataFrame(df_dict, index = md)\n pwf = pressure_profile[-1]\n\n return df, pwf\n\ndef gas_upward_pressure(\n md = None, \n inc = None, \n pwf = None, \n rate = None, \n gas_obj = None,\n di=2.99,\n surf_temp=80,\n temp_grad=1,\n epsilon = 0.0006, \n tol = 0.05, \n max_iter=20,\n guess=None,\n grad_guess = [0.02,0.05]\n):\n\n if guess is None:\n grad = np.atleast_1d(grad_guess)\n delta_h = np.abs(md[-1] - md[0])\n guess = pwf - grad * delta_h\n else:\n assert isinstance(guess,(list,np.ndarray))\n guess = np.atleast_1d(guess)\n\n def solve(x):\n _,_pwf = gas_pressure_profile(\n md = md, \n inc = inc, \n thp = x, \n rate = rate, \n gas_obj = gas_obj,\n di=di,\n surf_temp=surf_temp,\n temp_grad=temp_grad,\n epsilon = epsilon, \n tol = tol, \n max_iter=max_iter,\n )\n\n return pwf - _pwf\n\n sol = root_scalar(solve, x0=guess[0],x1=guess[1])\n\n return sol.root\n\ndef gas_outflow_curve(\n md = None, \n inc = None, \n thp = None, \n gas_obj = None,\n rate=None,\n min_rate=100,\n max_rate=8000,\n n_rate=20,\n di=2.99,\n surf_temp=80,\n temp_grad=1,\n epsilon = 0.0006, \n tol = 0.05, \n max_iter=20,\n operating_point = None,\n op_n = 30\n ):\n\n # Assert the right types and shapes for input\n assert isinstance(md, (np.ndarray,pd.Series)) and md.ndim ==1\n md = np.atleast_1d(md)\n\n assert isinstance(inc, (int,float,np.ndarray,pd.Series))\n if isinstance(inc,np.ndarray):\n assert inc.shape == md.shape\n else:\n inc = np.full(md.shape,inc)\n\n angle = np.radians(90 - inc) \n\n assert isinstance(thp, (int,float,list,np.ndarray))\n thp = np.atleast_1d(thp)\n assert thp.ndim == 1\n\n assert isinstance(gas_obj,Gas) and gas_obj.pvt is not None\n\n assert isinstance(di, list)\n\n assert isinstance(rate, (int,float,list,np.ndarray,type(None)))\n if rate is None:\n rate = np.linspace(min_rate,max_rate,n_rate)\n else:\n rate = np.atleast_1d(rate)\n assert rate.ndim == 1\n\n assert gas_obj.sg is not None\n\n pwf = np.zeros(rate.shape[0]*thp.shape[0]*len(di))\n thp_arr = np.zeros(pwf.shape)\n di_arr = np.zeros(pwf.shape)\n gas_arr = np.zeros(pwf.shape)\n name_list = []\n i = 0\n for p in thp:\n for d in di:\n for q in rate:\n _,pwf[i] = gas_pressure_profile(\n md = md,\n inc = inc,\n thp = p,\n rate = q,\n gas_obj = gas_obj,\n surf_temp=surf_temp,\n temp_grad=temp_grad,\n di=d\n )\n gas_arr[i] = q\n thp_arr[i] = p\n di_arr[i] = d\n case_name = f'thp-{p}_di-{d}'\n name_list.append(case_name)\n i += 1\n\n #df = pd.DataFrame(pwf,columns=name_list,index=rate)\n 
arr=np.column_stack((pwf,thp_arr,di_arr))\n df = pd.DataFrame(arr,columns=['pwf','thp','di'],index=gas_arr)\n df['case'] = name_list\n df.index.name = 'gas'\n\n op = pd.DataFrame()\n if operating_point is not None:\n inflow = operating_point.df\n\n for case in df['case'].unique():\n df_case = df[df['case']==case]\n\n points, idx = intercept_curves(inflow['q'],inflow['p'],df_case.index,df_case['pwf'], n=op_n)\n\n points_df = pd.DataFrame(points[[-1],:], columns=['q','p'])\n points_df['case'] = case\n points_df['idx'] = idx\n\n op = op.append(points_df)\n \n op = op.merge(df.groupby('case').mean(), left_on='case', right_on='case')\n\n return df, op\n\n### Multiphase Pressure Gradients\n\ndef flow_regime_plot(\n ql=None, \n qg=None,\n d=2.99,\n sg_liquid = 1,\n surface_tension=30,\n ax=None,\n method = 'duns_ros',\n **kwargs\n ):\n \"\"\"\n Plot Flow Regime from Duns and Ros Flow Regime Map\n \n Coordinates extracted from Figure7-10 Duns and Ros Flow Regime Map\n https://apps.automeris.io/wpd/\n\n Petroleum Production Systems, Economides. Chapter 7 7.2.3.2. Δp KE, the Pressure Drop Due to Kinetic Energy Change. Page 84\n\n \"\"\"\n if d is not None:\n assert isinstance(d,(int,float,list,np.ndarray,pd.Series))\n d = np.atleast_1d(d)\n # Estimate Cross section Area [ft2] from diameter [in]\n a = np.power((d*0.5)/12,2)*np.pi\n\n if ql is not None:\n assert isinstance(ql,(int,float,list,np.ndarray,pd.Series))\n ql = np.atleast_1d(ql)\n\n #Liquid velocity. Convert bbl/d to ft3/s then divide area. Result velocity in ft/s\n usl = (ql * 5.616 * (1/86400))/a\n #Calculate the dimensionless numbers for each phase\n nvl = 1.938 * usl * np.power((sg_liquid*62.4)/surface_tension,0.25)\n\n if qg is not None:\n assert isinstance(ql,(int,float,list,np.ndarray,pd.Series))\n qg = np.atleast_1d(qg)\n\n #Gas velocity. Convert ft3/d to ft3/s then divide area. 
Result velocity in ft/s\n usg = (qg * (1/86400))/a\n nvg = 1.938 * usg * np.power((sg_liquid*62.4)/surface_tension,0.25)\n\n if method == 'duns_ros':\n fax= ax or plt.gca()\n region_1_2 = np.array([\n [1.1753722651306362, 0.1082636733874053],\n [1.1913061720030635, 0.16102620275609392],\n [1.3268047497147244, 0.23950266199874834],\n [1.4777148689707504, 0.35154183187529914],\n [1.7604108438655526, 0.5228664844415476],\n [2.1544346900318843, 0.7880462815669913],\n [2.8585141796844757, 1.2358165955824107],\n [3.545745842465605, 1.790084628235539],\n [5.529553425383406, 3.2470894518548166],\n [8.507942799627454, 5.512889788770675],\n [16.68100537200059, 11.566937549363251],\n [29.76351441631322, 20.43359717856943],\n [61.58482110660267, 39.079952122756026],\n [41.11829402435837, 27.703123342457815],\n [79.53985507023424, 48.93900918477497],\n ])\n\n region_2_t = np.array([\n [53.10631887314356, 0.10543589908346815],\n [59.146605445917515, 0.18139306939110614],\n [66.7669293918757, 0.36097012876068046],\n [80.61813527211957, 0.7674630429274295],\n [104.12232560483065, 1.5475873545578884],\n [141.92103954525945, 2.7338936055226313],\n [270.8622850933671, 5.9684569951223105],\n [204.14630347954724, 4.230939172613499],\n [340.53655850163904, 7.674630429274299],\n [503.2159359259993, 12.195704601594414],\n [714.1692874235849, 18.380944176677932],\n [922.3851039358485, 23.324701361610806],\n ])\n\n region_t_3 = np.array([\n [92.23851039358486, 0.10684043121253317],\n [97.34285811778867, 0.15475873545578891],\n [105.53385749880759, 0.24269312356542563],\n [115.96514767613999, 0.41204298882016666],\n [136.30221830031346, 0.7278953843983147],\n [183.29807108324394, 1.2358165955824107],\n [263.6650898730361, 2.271547585601246],\n [364.25331154496416, 4.120429888201667],\n [531.0631887314356, 6.995642156712631],\n [714.1692874235849, 11.264816923358868],\n [947.5632026539927, 18.139306939110632],\n ])\n\n fax.plot(region_1_2[:,0],region_1_2[:,1], color='black',linestyle='--')\n fax.plot(region_2_t[:,0],region_2_t[:,1], color='black',linestyle='--')\n fax.plot(region_t_3[:,0],region_t_3[:,1], color='black',linestyle='--')\n fax.set_ylabel('Nvl')\n fax.set_ylabel('Nvg')\n fax.set_title('Duns and Ros Flow Regime Map')\n fax.set_xlim([0.1,1000])\n fax.set_ylim([0.1,100])\n annot = kwargs.pop('ann',True)\n font = kwargs.pop('fontsize',8)\n\n if annot:\n fax.annotate(\n f\"Region I \\n Bubble Flow or \\n low-velocity slug flow\",\n xy = (0.2,0.15),\n xycoords='data',\n xytext=(0, 0), \n textcoords='offset points',\n bbox={'boxstyle':'round', 'fc':'0.8'},\n fontsize = font\n )\n\n fax.annotate(\n f\"Region II \\n High-velocity Flow or \\n churn flow\",\n xy = (2,0.15),\n xycoords='data',\n xytext=(0, 0), \n textcoords='offset points',\n bbox={'boxstyle':'round', 'fc':'0.8'},\n fontsize = font\n )\n\n fax.annotate(\n f\"Region III \\n Annular Flow Pattern\",\n xy = (300,0.15),\n xycoords='data',\n xytext=(0, 0), \n textcoords='offset points',\n bbox={'boxstyle':'round', 'fc':'0.8'},\n fontsize = font\n )\n \n if ql is not None and qg is not None:\n fax.scatter(nvg,nvl,color='blue',marker = \"^\")\n\n\n if method == 'taitel_dukler':\n fax= ax or plt.gca()\n\n region_E = np.array([\n [14.977474763452001, 0.0022033318988979545],\n [14.977474763452001, 0.006595844345274293],\n [14.977474763452001, 0.04746934676639568],\n [14.777148689707504, 0.9165263295637442],\n [14.977474763452001, 6.87270243904312],\n [14.977474763452001, 15.857064005032758]\n ])\n\n region_A = np.array([\n [0.08858667904100832, 
0.0022372323125884317],\n [0.08858667904100832, 0.005091596044287256],\n [0.0986624843178949, 0.018460289732281962],\n [0.11137395078578621, 0.04142593768347061],\n [0.1326804749714725, 0.08679099331751502],\n [0.1668100537200059, 0.18431459769950134],\n [0.21256187881919958, 0.3275265038954424],\n [0.30575961084169306, 0.695276382058884],\n [0.46415888336127775, 1.2691784682206282],\n [0.7336637748600019, 2.019816384578137],\n [0.9223851039358476, 2.412109197346714]\n ])\n region_B = np.array([\n [0.028585141796844758, 3.4805610999729812],\n [0.0531063188731435, 3.5220947122633963],\n [0.08623280529014943, 3.517016970779084],\n [0.24649769667586238, 3.2292570594299215],\n [0.8978760230238888, 2.4455928433916867],\n [2.0971883035581533, 1.7556200043179786],\n [5.239601353002639, 4.20919831000811],\n [10.412232560483055, 7.572933314656229],\n [14.579502008614657, 10.657087726496014],\n ])\n region_D = np.array([\n [0.26366508987303583, 0.44861391200434203],\n [0.30575961084169306, 0.4018483957905594],\n [0.4398198780581129, 0.2288467215238852],\n [0.5032159359259996, 0.16920697751727592],\n [0.5835551032264551, 0.11058672774921392],\n [0.6676692939187563, 0.05647578739286295],\n [0.6951927961775606, 0.03743162248826758],\n [0.7536903980898542, 0.02284801683862376],\n [0.7639077845044221, 0.015565548854263186],\n [0.7436096708208817, 0.011357807043115235],\n [0.7847599703514607, 0.006933286608265855],\n [0.7536903980898542, 0.0027304200384003397],\n [0.7436096708208817, 0.002162999360197944],\n ])\n\n fax.plot(region_A[:,0],region_A[:,1], color='black',linestyle='--')\n fax.plot(region_B[:,0],region_B[:,1], color='black',linestyle='--')\n fax.plot(region_D[:,0],region_D[:,1], color='black',linestyle='--')\n fax.plot(region_E[:,0],region_E[:,1], color='black',linestyle='--')\n fax.set_ylabel('Usg [m/s]')\n fax.set_ylabel('Usl [m/s]')\n fax.set_title('Taitel-Dukler flow regime map')\n fax.set_xlim([0.01,100])\n fax.set_ylim([0.001,10])\n if ql is not None and qg is not None:\n fax.scatter(usg*0.3048,usl*0.3048,color='blue',marker = \"^\")\n\n\n fax.set_yscale('log')\n fax.set_xscale('log')\n\ndef hb_correlation(\n pressure=None, #Pressure [psi]\n temperature=None, #Temperature [F]\n liquid_rate=None, # Liquid Flow [bbl/d]\n gas_rate=None, # gas flow [kscfd]\n ten_liquid=None, #Surface tension dyne/cm2\n rho_liquid=None, # density lb/ft3\n rho_gas=None, # density lb/ft3\n mu_liquid=None, # Viscosity [cp]\n mu_gas=None, # Viscosity [cp]\n z=1, # Gas compressibility Factor\n di=None, # Diameter,\n epsilon = 0.0006,\n):\n\n \"\"\"\n The modified Hagedorn and Brown method (mH-B) is an empirical two-phase flow correlation based\n on the original work of Hagedorn and Brown (1965). The heart of the Hagedorn-Brown method is a\n correlation for liquid holdup; the modifications of the original method include using the no-slip holdup\n when the original empirical correlation predicts a liquid holdup value less than the no-slip holdup and\n the use of the Griffith correlation (Griffith and Wallis, 1961) for the bubble flow regime.\n\n Petroleum Production Systems, Economides. Chapter 7 7.4.3.1. 
The Modified Hagedorn and Brown Method Page 187\n\n \"\"\"\n #Check types and converto to np.ndarray\n assert isinstance(pressure,(int,float,np.ndarray,np.float64,np.int64))\n pressure = np.atleast_1d(pressure)\n\n assert isinstance(temperature,(int,float,np.ndarray,np.float64,np.int64))\n temperature = np.atleast_1d(temperature)\n\n assert isinstance(liquid_rate,(int,float,np.ndarray,np.float64,np.int64))\n liquid_rate = np.atleast_1d(liquid_rate)\n\n assert isinstance(gas_rate,(int,float,np.ndarray,np.float64,np.int64))\n gas_rate = np.atleast_1d(gas_rate)\n\n assert isinstance(ten_liquid,(int,float,np.ndarray,np.float64,np.int64))\n ten_liquid = np.atleast_1d(ten_liquid)\n\n assert isinstance(rho_liquid,(int,float,np.ndarray,np.float64,np.int64))\n rho_liquid = np.atleast_1d(rho_liquid)\n\n assert isinstance(rho_gas,(int,float,np.ndarray,np.float64,np.int64))\n rho_gas = np.atleast_1d(rho_gas)\n\n assert isinstance(mu_liquid,(int,float,np.ndarray,np.float64,np.int64))\n mu_liquid = np.atleast_1d(mu_liquid)\n\n assert isinstance(mu_gas,(int,float,np.ndarray,np.float64,np.int64))\n mu_gas = np.atleast_1d(mu_gas)\n\n assert isinstance(z,(int,float,np.ndarray,np.float64,np.int64))\n z = np.atleast_1d(z)\n\n assert isinstance(di,(int,float,np.ndarray,np.float64,np.int64))\n di = np.atleast_1d(di)\n\n assert isinstance(epsilon,(int,float,np.ndarray,np.float64,np.int64))\n epsilon = np.atleast_1d(epsilon)\n\n griffith = False\n\n area = np.power((di*0.5)/12,2)*np.pi\n usl = (liquid_rate * 5.615)/(area * 86400)\n usg = (4*gas_rate*1000*z*(460+temperature)*14.7)/(86400*pressure*520*np.pi*np.power(di/12,2)) \n \n \n #Mixure Velocity\n um = usl + usg \n lambda_g = usg / um \n lambda_l = 1 - lambda_g\n #Check if Buble flow exist\n lb = 1.071 - 0.2218 * (np.power(um,2)/(di/12))\n \n if lb < 0.13:\n lb = 0.13\n\n if lb > lambda_g:\n yl=1-0.5*(1+(um/0.8)-np.sqrt(np.power(1+(um/0.8),2)-4*(usg/0.8)))\n griffith=True\n else:\n #Calculate Dimensionless numbers\n nvl= 1.938*usl*np.power(rho_liquid/ten_liquid,0.25) #Liquid Velocity Number\n nvg=1.938*usg*np.power(rho_liquid/ten_liquid,0.25) #Gas Velocity Number\n nd=120.872*(di/12)*np.power(rho_liquid/ten_liquid,0.5) #Pipe Diameter Number\n nl=0.15726*mu_liquid*np.power(1/(rho_liquid * np.power(ten_liquid,3)),0.25) \n\n #cnl=(0.0019+0.0322*nl-0.6642*np.power(nl,2)+4.9951*np.power(nl,3))/(1+10.0147*nl-33.8696*np.power(nl,2)+277.2817*np.power(nl,3)) # original\n cnl=(0.0019+0.0505*nl-0.0929*np.power(nl,2)+0.061*np.power(nl,3)) #pengtools\n\n # H\n h = (nvl/np.power(nvg,0.575)) * np.power(pressure/14.7,0.1) * (cnl/nd)\n\n #yi/phi ratio\n yl_ratio = np.power(((0.0047+1123.32*h-729489.64*np.power(h,2))/(1+1097.1566*h-722153.97*np.power(h,2))),0.5)\n\n #B\n b = nvg * np.power(nl,0.38)/np.power(nd,2.14)\n\n #Psi calculated by equation from pengtools\n # https://wiki.pengtools.com/index.php?title=Hagedorn_and_Brown_correlation\n if b > 0.055:\n psi = 2.5714*b + 1.5962\n elif b > 0.025:\n psi = -533.33*np.power(b,2) + 58.524*b + 0.1171\n else:\n psi = 27170*np.power(b,3) - 317.52 * np.power(b,2) + 0.5472*b + 0.9999\n\n # Psi calculated from Economides\n #psi=(1.0886+69.9473*b-2334.3497*np.power(b,2)+12896.683*np.power(b,3))/(1+53.4401*b-1517.9369*np.power(b,2)+8419.8115*np.power(b,3))\n\n #yl\n yl = yl_ratio * psi\n\n if yl < lambda_l:\n yl = lambda_l\n\n # Mass flow in lb/d\n mass_flow = area * (usl * rho_liquid + usg * rho_gas) * 86400 \n\n #Reynolds Number\n nre = (2.2e-2 * mass_flow) / ((di/2) * np.power(mu_liquid,yl) * np.power(mu_gas,1-yl))\n\n #Friction 
Factor\n ff = np.power((1/(-4*np.log10((epsilon/3.7065)-(5.0452/nre)*np.log10((np.power(epsilon,1.1098)/2.8257)+np.power(7.149/nre,0.8981))))),2)\n\n #Average density\n rho_avg = yl*rho_liquid + (1-yl)*rho_gas\n\n if griffith:\n pressure_gradient = (1/144)*(rho_avg+((ff*np.power(mass_flow,2))/(7.413e10*np.power(di/12,5)*rho_avg*np.power(yl,2))))\n else:\n pressure_gradient = (1/144)*(rho_avg+((ff*np.power(mass_flow,2))/(7.413e10*np.power(di/12,5)*rho_avg)))\n\n return pressure_gradient\n\ndef gray_correlation(\n pressure=None, #Pressure [psi]\n temperature=None, #Temperature [F]\n liquid_rate=None, # Liquid Flow [bbl/d]\n gas_rate=None, # gas flow [kscfd]\n ten_liquid=None, #Surface tension dyne/cm2\n rho_liquid=None, # density lb/ft3\n rho_gas=None, # density lb/ft3\n mu_liquid=None, # Viscosity [cp]\n mu_gas=None, # Viscosity [cp]\n z=1, # Gas compressibility Factor\n di=None, # Diameter,\n epsilon = 0.0006,\n):\n #Check types and converto to np.ndarray\n assert isinstance(pressure,(int,float,np.ndarray,np.float64,np.int64))\n pressure = np.atleast_1d(pressure)\n\n assert isinstance(temperature,(int,float,np.ndarray,np.float64,np.int64))\n temperature = np.atleast_1d(temperature)\n\n assert isinstance(liquid_rate,(int,float,np.ndarray,np.float64,np.int64))\n liquid_rate = np.atleast_1d(liquid_rate)\n\n assert isinstance(gas_rate,(int,float,np.ndarray,np.float64,np.int64))\n gas_rate = np.atleast_1d(gas_rate)\n\n assert isinstance(ten_liquid,(int,float,np.ndarray,np.float64,np.int64))\n ten_liquid = np.atleast_1d(ten_liquid)\n\n assert isinstance(rho_liquid,(int,float,np.ndarray,np.float64,np.int64))\n rho_liquid = np.atleast_1d(rho_liquid)\n\n assert isinstance(rho_gas,(int,float,np.ndarray,np.float64,np.int64))\n rho_gas = np.atleast_1d(rho_gas)\n\n assert isinstance(mu_liquid,(int,float,np.ndarray,np.float64,np.int64))\n mu_liquid = np.atleast_1d(mu_liquid)\n\n assert isinstance(mu_gas,(int,float,np.ndarray,np.float64,np.int64))\n mu_gas = np.atleast_1d(mu_gas)\n\n assert isinstance(z,(int,float,np.ndarray,np.float64,np.int64))\n z = np.atleast_1d(z)\n\n assert isinstance(di,(int,float,np.ndarray,np.float64,np.int64))\n di = np.atleast_1d(di)\n\n assert isinstance(epsilon,(int,float,np.ndarray,np.float64,np.int64))\n epsilon = np.atleast_1d(epsilon)\n\n area = np.power((di*0.5)/12,2)*np.pi\n usl = (liquid_rate * 5.615)/(area * 86400)\n usg = (4*gas_rate*1000*z*(460+temperature)*14.7)/(86400*pressure*520*np.pi*np.power(di/12,2)) \n\n #Total velocity\n um = usl + usg\n\n #Lambda liquid\n lambda_l = usl / um\n \n # Rho m\n rho_m = lambda_l*rho_liquid + (1 - lambda_l) * rho_gas \n\n #Calculate N\n n1 = (np.power(rho_m,2)*np.power(um,4))/(32.172*6.85e-5*ten_liquid*(rho_liquid-rho_gas))\n n2 = (32.172 * np.power(di/12,2)*(rho_liquid-rho_gas))/(ten_liquid*6.85e-5)\n \n #N3\n rv = usl / usg\n n3 = 0.0814 * (1 - 0.0554 * np.log(1+((730*rv)/(rv + 1))))\n\n #Liquid Holdup\n fl = -2.314 * np.power(n1*(1+(205/n2)),n3)\n yl = 1 - (1-lambda_l)*(1 - np.exp(fl))\n\n #Rho avg\n rho_avg = yl*rho_liquid + (1-yl)*rho_gas\n\n #potential energy\n ppe = rho_avg / 144 \n\n # Absolute Roughness\n k = epsilon * (di/12)\n\n ko = (0.285*ten_liquid)/(rho_m * np.power(um,2))\n\n if rv >= 0.007:\n ke = ko\n else:\n ke = k + rv*((ko - k)/0.007) \n\n epsilon_relative = ke / (di/12)\n\n #Friction Factor\n nre = np.power(10,7)\n ff = np.power((1/(-4*np.log10((epsilon_relative/3.7065)-(5.0452/nre)*np.log10((np.power(epsilon_relative,1.1098)/2.8257)+np.power(7.149/nre,0.8981))))),2)\n\n\n #ppf\n ppf = 
(2*ff*rho_m*np.power(um,2))/(32.172 * (di/12) * 144)\n\n pressure_gradient = ppe + ppf\n\n return pressure_gradient\n\n\n\ndef two_phase_pressure_profile(\n depth = None,\n thp = None,\n liquid_rate = None,\n oil_rate = None,\n gas_rate = None,\n glr = None,\n gor = None,\n bsw = None,\n oil_obj = None,\n gas_obj = None,\n water_obj = None, \n epsilon=0.0006, \n surface_temperature=80, \n temperature_gradient=1, \n di=2.99, \n tol=0.02,\n max_iter = 20,\n method = 'hagedorn_brown',\n min_glr = 10\n):\n\n # Assert the right types and shapes for input\n assert isinstance(depth, (np.ndarray,pd.Series,list))\n depth = np.atleast_1d(depth)\n assert depth.ndim == 1\n\n assert isinstance(thp, (int,np.int64,np.float64,float,np.ndarray)), f'{type(thp)} not accepted'\n thp = np.atleast_1d(thp)\n assert thp.shape == (1,)\n\n if oil_rate is not None:\n assert isinstance(oil_rate, (int,np.int64,np.float64,float,np.ndarray)), f'{type(thp)} not accepted'\n oil_rate = np.atleast_1d(oil_rate)\n assert oil_rate.shape == (1,)\n\n if liquid_rate is not None:\n assert isinstance(liquid_rate, (int,np.int64,np.float64,float,np.ndarray)), f'{type(thp)} not accepted'\n liquid_rate = np.atleast_1d(liquid_rate)\n assert liquid_rate.shape == (1,)\n\n assert any([oil_rate is not None,liquid_rate is not None])\n\n if gas_rate is not None:\n assert isinstance(gas_rate, (int,np.int64,np.float64,float,np.ndarray)), f'{type(thp)} not accepted'\n gas_rate = np.atleast_1d(gas_rate)\n assert gas_rate.shape == (1,)\n\n if gor is not None:\n assert isinstance(gor, (int,np.int64,np.float64,float,np.ndarray)), f'{type(thp)} not accepted'\n gor = np.atleast_1d(gor)\n assert gor.shape == (1,)\n\n if glr is not None:\n assert isinstance(glr, (int,np.int64,np.float64,float,np.ndarray)), f'{type(thp)} not accepted'\n glr = np.atleast_1d(glr)\n assert glr.shape == (1,)\n\n assert any([gas_rate is not None,gor is not None,glr is not None])\n\n assert isinstance(bsw, (int,np.int64,np.float64,float,np.ndarray)), f'{type(thp)} not accepted'\n bsw = np.atleast_1d(bsw)\n assert bsw.shape == (1,)\n\n assert isinstance(gas_obj,Gas) and gas_obj.pvt is not None\n assert isinstance(oil_obj,Oil) and oil_obj.pvt is not None\n assert isinstance(water_obj,Water) and water_obj.pvt is not None\n\n if isinstance(di,(np.ndarray,pd.Series,list)):\n di = np.atleast_1d(di)\n assert di.shape == depth.shape\n elif isinstance(di,(int,float)):\n di = np.full(depth.shape,di)\n\n assert isinstance(epsilon, (int,np.int64,np.float64,float,np.ndarray)), f'{type(thp)} not accepted'\n epsilon = np.atleast_1d(epsilon)\n assert epsilon.shape == (1,)\n\n assert isinstance(surface_temperature,(int,float,np.ndarray))\n surface_temperature = np.atleast_1d(surface_temperature)\n\n assert isinstance(temperature_gradient,(int,float,np.ndarray))\n temperature_gradient = np.atleast_1d(temperature_gradient)\n\n #Start\n if liquid_rate is None:\n liquid_rate = oil_rate / (1-bsw)\n else:\n oil_rate = liquid_rate*(1-bsw)\n\n water_rate = liquid_rate * bsw \n\n if gas_rate is None:\n if gor is None:\n gas_rate = glr * liquid_rate * 1e-3\n else:\n gas_rate = gor * oil_rate * 1e-3\n\n\n pressure_profile = np.zeros(depth.shape)\n pressure_profile[0] = thp\n pressure_gradient = np.zeros(depth.shape)\n iterations = np.zeros(depth.shape)\n free_gas_rate = np.zeros(depth.shape)\n glr = np.zeros(depth.shape)\n temperature_profile = np.abs(depth[0] - depth) * (temperature_gradient/100) + surface_temperature\n\n #Initials Densities\n rho_oil_i = oil_obj.pvt.interpolate(thp,property = 
'rhoo').iloc[0,0]\n rho_water_i = water_obj.pvt.interpolate(thp,property = 'rhow').iloc[0,0]\n rho_l = rho_oil_i * (1-bsw) + rho_water_i * bsw \n\n pressure_gradient[0] = rho_l * (0.433/62.4)\n for i in range(1,depth.shape[0]):\n err = tol + 0.01\n it = 0\n grad_guess = pressure_gradient[i-1]\n\n while err>= tol and it <= max_iter:\n p_guess = grad_guess * np.abs(depth[i] - depth[i-1]) + pressure_profile[i-1]\n \n #Interpolate pvt\n gas_pvt_guess = gas_obj.pvt.interpolate(p_guess)\n oil_pvt_guess = oil_obj.pvt.interpolate(p_guess)\n water_pvt_guess = water_obj.pvt.interpolate(p_guess)\n\n ten_liquid = oil_pvt_guess['tension'].iloc[0] * (1-bsw) + water_pvt_guess['tension'].iloc[0] * bsw\n rho_liquid = oil_pvt_guess['rhoo'].iloc[0] * (1-bsw) + water_pvt_guess['rhow'].iloc[0] * bsw\n mu_liquid = oil_pvt_guess['muo'].iloc[0] * (1-bsw) + water_pvt_guess['muw'].iloc[0] * bsw\n rho_gas = (28.97 * gas_obj.sg * p_guess)/(gas_pvt_guess['z'].iloc[0]*10.73*(temperature_profile[i]+460))\n mu_gas = gas_pvt_guess['mug'].iloc[0]\n z = gas_pvt_guess['z'].iloc[0]\n free_gas = gas_rate - (oil_pvt_guess['rs'].iloc[0]*oil_rate*1e-3)\n free_gas = 0 if free_gas < 0 else free_gas\n \n glr_ratio = free_gas*1e3 / liquid_rate\n if glr_ratio > 10:\n if method == 'hagedorn_brown':\n grad_new = hb_correlation(\n pressure=p_guess,\n temperature=temperature_profile[i],\n liquid_rate = liquid_rate,\n gas_rate = free_gas,\n ten_liquid = ten_liquid,\n rho_liquid = rho_liquid,\n rho_gas = rho_gas,\n mu_liquid = mu_liquid,\n mu_gas = mu_gas,\n z = z,\n di = di[i],\n epsilon = epsilon,\n )\n #elif method == 'beggs_brill':\n # grad_new = bb_correlation()\n elif method == 'gray':\n grad_new = gray_correlation(\n pressure=p_guess, #Pressure [psi]\n temperature=temperature_profile[i], #Temperature [F]\n liquid_rate=liquid_rate, # Liquid Flow [bbl/d]\n gas_rate=free_gas, # gas flow [kscfd]\n ten_liquid=ten_liquid, #Surface tension dyne/cm2\n rho_liquid=rho_liquid, # density lb/ft3\n rho_gas=rho_gas, # density lb/ft3\n mu_liquid=mu_liquid, # Viscosity [cp]\n mu_gas=mu_gas, # Viscosity [cp]\n z=z, # Gas compressibility Factor\n di=di[i], # Diameter,\n epsilon = epsilon\n )\n else:\n df, _ = one_phase_pressure_profile(\n p1=p_guess,\n ge=rho_liquid /62.4,\n epsilon=epsilon,\n md=[depth[i], depth[i-1]],\n tvd=[depth[i], depth[i-1]],\n d = [di[i], di[i]],\n rate = liquid_rate,\n mu=mu_liquid\n )\n\n grad_new = df['gradient'].iloc[-1]\n \n err = abs(grad_guess-grad_new)/grad_new\n grad_guess = grad_new\n it += 1\n\n pressure_gradient[i] = grad_new \n pressure_profile[i] = p_guess\n free_gas_rate[i] = free_gas\n glr[i] = glr_ratio\n iterations[i] = it\n\n df_dict = {\n 'pressure':pressure_profile,\n 'pressure_gradient': pressure_gradient,\n 'free_gas_rate': free_gas_rate,\n 'temperature': temperature_profile,\n 'iterations': iterations,\n 'grl': glr\n }\n\n df = pd.DataFrame(df_dict, index = depth)\n pwf = pressure_profile[-1]\n\n return df, pwf\n\ndef two_phase_upward_pressure(\n depth = None,\n pwf = None,\n liquid_rate = None,\n oil_rate = None,\n gas_rate = None,\n glr = None,\n gor = None,\n bsw = None,\n oil_obj = None,\n gas_obj = None,\n water_obj = None, \n epsilon=0.0006, \n surface_temperature=80, \n temperature_gradient=1, \n di=2.99, \n tol=0.02,\n max_iter = 20,\n method = 'hagedorn_brown',\n guess=None,\n grad_guess = [0.41,0.38]\n):\n\n if guess is None:\n grad = np.atleast_1d(grad_guess)\n delta_h = np.abs(depth[-1] - depth[0])\n guess = pwf - grad * delta_h\n else:\n assert isinstance(guess,(list,np.ndarray))\n guess = 
np.atleast_1d(guess)\n\n def solve(x):\n _,_pwf = two_phase_pressure_profile(\n depth = depth,\n thp = x,\n liquid_rate = liquid_rate,\n oil_rate = oil_rate,\n gas_rate = gas_rate,\n glr = glr,\n gor = gor,\n bsw = bsw,\n oil_obj = oil_obj,\n gas_obj = gas_obj,\n water_obj = water_obj, \n epsilon=epsilon, \n surface_temperature=surface_temperature,\n temperature_gradient=temperature_gradient, \n di=di, \n tol=tol,\n max_iter = max_iter,\n method = method\n )\n\n return pwf - _pwf\n\n sol = root_scalar(solve, x0=guess[0],x1=guess[1])\n\n return sol.root\n\n\ndef two_phase_outflow_curve(\n depth = None,\n thp = None,\n liquid_rate = None,\n oil_rate = None,\n gas_rate = None,\n glr = None,\n gor = None,\n bsw = None,\n oil_obj = None,\n gas_obj = None,\n water_obj = None, \n epsilon=0.0006, \n surface_temperature=80, \n temperature_gradient=1, \n di=2.99, \n tol=0.02,\n max_iter = 20,\n method = 'hagedorn_brown',\n use_gas = False,\n operating_point = None,\n op_n = 30\n):\n\n # Assert the right types and shapes for input\n assert isinstance(depth, (np.ndarray,pd.Series,list))\n depth = np.atleast_1d(depth)\n assert depth.ndim == 1\n\n assert isinstance(thp, (int,np.int64,np.float64,float,np.ndarray,list)), f'{type(thp)} not accepted'\n thp = np.atleast_1d(thp)\n assert thp.ndim == 1\n\n if oil_rate is not None:\n assert isinstance(oil_rate, (int,np.int64,np.float64,float,np.ndarray,list)), f'{type(thp)} not accepted'\n oil_rate = np.atleast_1d(oil_rate)\n assert oil_rate.ndim == 1\n\n if liquid_rate is not None:\n assert isinstance(liquid_rate, (int,np.int64,np.float64,float,np.ndarray,list)), f'{type(thp)} not accepted'\n liquid_rate = np.atleast_1d(liquid_rate)\n assert liquid_rate.ndim == 1\n\n assert any([oil_rate is not None,liquid_rate is not None])\n\n if gas_rate is not None:\n assert isinstance(gas_rate, (int,np.int64,np.float64,float,np.ndarray,list)), f'{type(thp)} not accepted'\n gas_rate = np.atleast_1d(gas_rate)\n assert gas_rate.ndim == 1\n\n if gor is not None:\n assert isinstance(gor, (int,np.int64,np.float64,float,np.ndarray,list)), f'{type(thp)} not accepted'\n gor = np.atleast_1d(gor)\n assert gor.ndim == 1\n\n if glr is not None:\n assert isinstance(glr, (int,np.int64,np.float64,float,np.ndarray,list)), f'{type(thp)} not accepted'\n glr = np.atleast_1d(glr)\n assert glr.ndim == 1\n\n assert any([gas_rate is not None,gor is not None,glr is not None])\n\n assert isinstance(bsw, (int,np.int64,np.float64,float,np.ndarray,list)), f'{type(thp)} not accepted'\n bsw = np.atleast_1d(bsw)\n assert bsw.ndim == 1\n\n assert isinstance(gas_obj,Gas) and gas_obj.pvt is not None\n assert isinstance(oil_obj,Oil) and oil_obj.pvt is not None\n assert isinstance(water_obj,Water) and water_obj.pvt is not None\n\n if isinstance(di,(np.ndarray,list)):\n di = np.atleast_2d(di)\n assert di.shape[0] == depth.shape[0]\n elif isinstance(di,(int,float)):\n di = np.full((depth.shape[0],1),di)\n\n assert isinstance(epsilon, (int,np.int64,np.float64,float,np.ndarray)), f'{type(thp)} not accepted'\n epsilon = np.atleast_1d(epsilon)\n assert epsilon.shape == (1,)\n\n assert isinstance(surface_temperature,(int,float,np.ndarray))\n surface_temperature = np.atleast_1d(surface_temperature)\n\n assert isinstance(temperature_gradient,(int,float,np.ndarray))\n temperature_gradient = np.atleast_1d(temperature_gradient)\n\n if operating_point is not None:\n if use_gas:\n assert isinstance(operating_point,GasInflow)\n else:\n assert isinstance(operating_point,OilInflow)\n\n #Start\n if liquid_rate is None:\n 
liquid_rate = np.zeros(len(oil_rate)*len(bsw))\n c = 0\n for o in oil_rate:\n for b in bsw:\n liquid_rate[c] = o / (1-b)\n c += 1\n else:\n oil_rate = np.zeros(len(liquid_rate)*len(bsw))\n c = 0\n for l in liquid_rate:\n for b in bsw:\n oil_rate[c] = l * (1 - b)\n c += 1\n\n if gas_rate is None:\n assert use_gas == 'False'\n if gor is None:\n gas_arr = glr\n gas_name = 'glr'\n else:\n gas_arr = gor\n gas_name = 'gor'\n else:\n gas_arr = gas_rate \n gas_name = 'gas_rate'\n\n #Estimate number of columns for 2d matrix\n number_columns = len(bsw)*len(liquid_rate)*len(thp)*di.shape[1] if use_gas else len(bsw)*len(gas_arr)*len(thp)*di.shape[1]\n\n #Create matrix for results\n #pwf = np.zeros((len(gas_arr),number_columns)) if use_gas else np.zeros((len(liquid_rate),number_columns))\n pwf = np.zeros(len(gas_arr)*number_columns) if use_gas else np.zeros(len(liquid_rate)*number_columns)\n bsw_arr = np.zeros(pwf.shape)\n liquid_arr = np.zeros(pwf.shape)\n gas_ = np.zeros(pwf.shape)\n thp_arr = np.zeros(pwf.shape)\n di_arr = np.zeros(pwf.shape)\n\n name_list = []\n i= 0\n\n c = 0\n for b in bsw:\n for l in liquid_rate:\n for pi in thp:\n for d in range(di.shape[1]):\n for g in gas_arr:\n _,pwf[i] = two_phase_pressure_profile(\n depth = depth,\n thp = pi,\n liquid_rate = l,\n oil_rate = None,\n gas_rate = g if gas_rate is not None else None,\n glr = g if glr is not None else None,\n gor = g if gor is not None else None,\n bsw = b,\n oil_obj = oil_obj,\n gas_obj = gas_obj,\n water_obj = water_obj, \n epsilon=epsilon, \n surface_temperature=surface_temperature,\n temperature_gradient=temperature_gradient, \n di=di[:,d], \n tol=tol,\n max_iter = max_iter,\n method = method\n )\n bsw_arr[i] = b\n gas_[i] = g\n liquid_arr[i] = l\n thp_arr[i] = pi\n di_arr[i] = di[:,d].mean()\n i += 1\n c += 1 \n case_name = f\"bsw_{b} liquid_{l} thp_{pi} di_{np.round(di[:,d].mean(),decimals=2)}\"\n name_list.append(case_name)\n\n if use_gas:\n arr=np.column_stack((pwf,bsw_arr,liquid_arr,thp_arr,di_arr))\n df = pd.DataFrame(arr,columns=['pwf','bsw','liquid','thp','di'],index=gas_)\n df.index.name = 'gas'\n else:\n arr=np.column_stack((pwf,bsw_arr,gas_,thp_arr,di_arr))\n df = pd.DataFrame(arr,columns=['pwf','bsw','liquid','thp','di'],index=liquid_arr)\n df.index.name = 'liquid'\n df['case'] = name_list\n\n op = pd.DataFrame()\n if operating_point is not None:\n inflow = operating_point.df\n\n for case in df['case'].unique():\n df_case = df[df['case']==case]\n\n points, idx = intercept_curves(inflow['q'],inflow['p'],df_case.index,df_case['pwf'], n=op_n)\n\n points_df = pd.DataFrame(points[[-1],:], columns=['q','p'])\n points_df['case'] = case\n points_df['idx'] = idx\n\n op = op.append(points_df)\n \n op = op.merge(df.groupby('case').mean(), left_on='case', right_on='case')\n\n return df, op\n \n\n\n", "import numpy as np\nimport pandas as pd \nfrom .grid import Grid\nfrom ..pvtpy.black_oil import Oil, Gas, Water\nfrom ..krpy import KrWaterOil, KrGasOil\nfrom ..wellpy.path import WellsGroup\nfrom .numerical import Numerical\nfrom .results import Results\nfrom .initial_conditions import InitialConditions\n \nclass SimModel:\n\n def __init__(self,**kwargs):\n\n #Grid. 
Includes petrophysical properties\n        self.grid = kwargs.pop('grid',None)\n\n        #Number of phases to simulate\n        self.phase = kwargs.pop('phase',None)\n\n        #pvt\n        self.pvt = kwargs.pop('pvt',None)\n\n        # kr and pc\n        self.rock_fluid = kwargs.pop('rock_fluid', None)\n\n        #Wells\n        self.wells = kwargs.pop('wells',None)\n\n        #Numerical\n        self.numerical = kwargs.pop('numerical',None)\n\n        #Initial conditions\n        self.initial_conditions = kwargs.pop('initial_conditions', None)\n\n        #Results\n        self.results = kwargs.pop('results', None)\n\n    ## Properties\n\n    @property\n    def grid(self):\n        return self._grid\n\n    @grid.setter\n    def grid(self,value):\n        assert isinstance(value,Grid), f\"{type(value)} not allowed\"\n        assert all( i in list(value.petrophysics.keys()) for i in ['PORO','PERMX','PERMY','PERMZ','RT'])\n        self._grid = value\n\n    @property\n    def phase(self):\n        return self._phase\n\n    @phase.setter\n    def phase(self,value):\n        phases = ['oil','water','gas']\n        assert isinstance(value,list) and len(value) <= 3\n        assert all(i in phases for i in value)\n        self._phase = value\n\n    @property\n    def pvt(self):\n        return self._pvt\n\n    @pvt.setter\n    def pvt(self,value):\n        assert isinstance(value,dict)\n\n        for i in value:\n            assert i in self.phase\n            assert isinstance(value[i], (Oil,Gas,Water))\n        self._pvt = value\n\n    @property\n    def rock_fluid(self):\n        return self._rock_fluid\n\n    @rock_fluid.setter\n    def rock_fluid(self,value):\n\n        if len(self.phase) == 1:\n            self._rock_fluid = None\n        else:\n            assert isinstance(value,dict)\n            rt_list = np.unique(self.grid.petrophysics['RT']).tolist()\n\n            #Assert all rock types are present in rock fluid\n            assert all(str(i) in list(value.keys()) for i in rt_list)\n\n            for i in value:\n                #Assert the keys for each rocktype are dictionary\n                assert isinstance(value[i], dict)\n\n                if len(self.phase) == 2:\n                    assert len(list(value[i].keys())) == 1\n                else:\n                    assert len(list(value[i].keys())) == 2\n\n                for j in value[i]:\n                    assert j in ['krwo','krgo']\n                    assert isinstance(value[i][j],(KrWaterOil,KrGasOil))\n\n            self._rock_fluid = value\n\n    @property\n    def wells(self):\n        return self._wells\n\n    @wells.setter\n    def wells(self,value):\n        if value is not None:\n            assert isinstance(value, WellsGroup)\n            for w in value.wells:\n                assert value.wells[w].perforations is not None\n                assert value.wells[w].constrains is not None\n        self._wells = value\n\n    @property\n    def numerical(self):\n        return self._numerical\n\n    @numerical.setter\n    def numerical(self,value):\n        assert isinstance(value, Numerical)\n        self._numerical = value\n\n    @property\n    def initial_conditions(self):\n        return self._initial_conditions\n\n    @initial_conditions.setter\n    def initial_conditions(self,value):\n        assert isinstance(value,InitialConditions)\n        self._initial_conditions = value\n\n    @property\n    def results(self):\n        return self._results\n\n    @results.setter\n    def results(self,value):\n        if value is not None:\n            assert isinstance(value,Results)\n        self._results = value\n\n" ]
[ [ "numpy.array", "matplotlib.pyplot.gca", "numpy.log", "numpy.radians", "numpy.abs", "numpy.linspace", "numpy.power", "numpy.sqrt", "pandas.DataFrame", "numpy.full", "numpy.atleast_1d", "numpy.atleast_2d", "numpy.sin", "numpy.sign", "numpy.column_stack", "scipy.optimize.root_scalar", "numpy.exp", "numpy.zeros" ], [ "numpy.unique" ] ]
pyrateml/agent
[ "84235db931d6e4ef956962961c619994898ebdd5" ]
[ "utilities/curriculum/InitialStateDistribution.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = 'cnheider'\nfrom collections import namedtuple\n\nimport numpy as np\n\n\nclass InitStateDistribution(object):\n StateDist = namedtuple('StateDist', ('state', 'prob'))\n\n def __init__(self):\n self.state_tuples = []\n\n def add(self, state, prob):\n self.state_tuples.append(self.StateDist(state, prob))\n\n def sample(self):\n sds = self.StateDist(*zip(*self.state_tuples))\n return np.random.choice(sds.state, p=sds.prob)\n" ]
[ [ "numpy.random.choice" ] ]
conorfalvey/Python-MultilayerExtraction
[ "68cfe9a82c45d52f36c5588e2bce83a5fc8400bb" ]
[ "python/test_init.py" ]
[ "# Testing Setup of Multilayer Extraction\nimport networkx as nx\nimport pandas as pd\nimport numpy as np\nimport math\nimport itertools as it\nfrom . import adjacency_to_edgelist\nfrom . import expectation_CM\nfrom . import initialization\nfrom . import score\nimport matplotlib.pyplot as plt\n\n# Gen default testing graph\ng1 = nx.planted_partition_graph(5, 25, 0.5, 0.05)\ngraph = nx.generators.complete_graph(9)\n# Gen adjacency matrix for complete graph\nadjacency = [[[0, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 0, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 0, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 0, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 0, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 0, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 0, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 0]]]\nprint(adjacency)\n\n# Gen edgelist from adjacency matrix\nedgelist = adjacency_to_edgelist.adjacency_to_edgelist(adjacency)\n\nprint(edgelist)\n\n# Gen Expectation.CM from edgelist\nexpectation_CM = expectation_CM.expectation_CM(edgelist)\n\nnx.draw(expectation_CM[0])\nplt.show()\n\n# Gen initialization outputs\ninitial = initialization.initialization(graph, 0.05, 1, 9)\n\nprint(initial)\n\n# Gen score\n# test_score = score.score(adjacency, initial['vertex_set'], initial['layer_set'], 9)\n\nn = 9\nvertex_set = initial['vertex_set']\nlayer_set = initial['layer_set']\nadjacency_score = expectation_CM\nsuper_mod = None\nif len(layer_set) < 1 or len(vertex_set) < 1:\n print(0)\nif len(layer_set) == 1:\n super_mod = adjacency_score[layer_set[0][0]]\nif len(layer_set) > 1:\n super_mod = nx.empty_graph(n)\n for i in range(0, layer_set):\n super_mod = nx.union(super_mod, adjacency_score[i])\n\nsuper_mod_subgraph = super_mod.subgraph(map(int, vertex_set[0]))\n\nedge_weight_tuples = nx.get_edge_attributes(super_mod_subgraph, 'weight')\nedge_weights = pd.DataFrame({'edge': list(edge_weight_tuples.keys()), 'weight': list(edge_weight_tuples.values())})\nfor _, weights in edge_weights.iterrows():\n if math.isnan(weights['weight']):\n weights['weight'] = 0\n \nmodularity_score = np.sum(edge_weights['weight'])\nmodularity_score = [0 for i in modularity_score if i < 0]\n\ntot_mod = np.sum(modularity_score)\nobs_score = (tot_mod ** 2) / ((n ** 2 * it.combinations(range(0, len(vertex_set)), 2)) * (len(layer_set)))\n\nprint(obs_score)\n\nprint(score.score(edgelist, vertex_set, layer_set, n))\n" ]
[ [ "matplotlib.pyplot.show", "numpy.sum" ] ]
allanwright/media-classifier-core
[ "7d86c0bc4a9361d36da0f6eaf62f2faa257c2339" ]
[ "src/mccore/prediction.py" ]
[ "'''Helper methods for making classification predictions.\n\n'''\n\nimport numpy as np\n\ndef get_class(proba, labels):\n '''Gets the class label from the specified class probability estimates.\n\n Args:\n proba (array like): The estimated class probability estimates.\n labels (dictionary): The label dictionary.\n\n Returns:\n class (object): The class label and associated probability estimate.\n '''\n label_id = np.argmax(proba)\n return {\n \"label\": {\n \"id\": int(label_id),\n \"name\": labels[str(label_id)]\n },\n \"probability\": float(np.max(proba))\n }\n" ]
[ [ "numpy.max", "numpy.argmax" ] ]
rkalahasty/medicaltorch
[ "34ea15075a57271940d26684c34767a8a9e8fb58" ]
[ "medicaltorch/metrics.py" ]
[ "from collections import defaultdict\n\nfrom scipy import spatial\nimport numpy as np\n\n\nclass MetricManager(object):\n def __init__(self, metric_fns):\n self.metric_fns = metric_fns\n self.result_dict = defaultdict(float)\n self.num_samples = 0 \n \n def __call__(self, prediction, ground_truth):\n self.num_samples += len(prediction)\n for metric_fn in self.metric_fns:\n for p, gt in zip(prediction, ground_truth):\n res = metric_fn(p, gt)\n dict_key = metric_fn.__name__\n self.result_dict[dict_key] += res\n \n def get_results(self):\n res_dict = {}\n for key, val in self.result_dict.items():\n res_dict[key] = val / self.num_samples\n return res_dict\n \n def reset(self):\n self.num_samples = 0\n self.result_dict = defaultdict(float)\n \n\ndef numeric_score(prediction, groundtruth):\n \"\"\"Computation of statistical numerical scores:\n\n * FP = False Positives\n * FN = False Negatives\n * TP = True Positives\n * TN = True Negatives\n\n return: tuple (FP, FN, TP, TN)\n \"\"\"\n FP = np.float32(np.sum((prediction == 1) & (groundtruth == 0)))\n FN = np.float32(np.sum((prediction == 0) & (groundtruth == 1)))\n TP = np.float32(np.sum((prediction == 1) & (groundtruth == 1)))\n TN = np.float32(np.sum((prediction == 0) & (groundtruth == 0)))\n return FP, FN, TP, TN\n\n\ndef dice_score(prediction, groundtruth):\n pflat = prediction.flatten()\n gflat = groundtruth.flatten()\n d = (1 - spatial.distance.dice(pflat, gflat)) * 100.0\n if np.isnan(d):\n return 0.0\n return d\n\n\ndef jaccard_score(prediction, groundtruth):\n pflat = prediction.flatten()\n gflat = groundtruth.flatten()\n return (1 - spatial.distance.jaccard(pflat, gflat)) * 100.0\n\n\ndef hausdorff_score(prediction, groundtruth):\n return spatial.distance.directed_hausdorff(prediction, groundtruth)[0]\n\n\ndef precision_score(prediction, groundtruth):\n # PPV\n FP, FN, TP, TN = numeric_score(prediction, groundtruth)\n if (TP + FP) <= 0.0:\n return 0.0\n\n precision = np.divide(TP, TP + FP)\n return precision * 100.0\n\n\ndef recall_score(prediction, groundtruth):\n # TPR, sensitivity\n FP, FN, TP, TN = numeric_score(prediction, groundtruth)\n if (TP + FN) <= 0.0:\n return 0.0\n TPR = np.divide(TP, TP + FN)\n return TPR * 100.0\n\n\ndef specificity_score(prediction, groundtruth):\n FP, FN, TP, TN = numeric_score(prediction, groundtruth)\n if (TN + FP) <= 0.0:\n return 0.0\n TNR = np.divide(TN, TN + FP)\n return TNR * 100.0\n\n\ndef intersection_over_union(prediction, groundtruth):\n FP, FN, TP, TN = numeric_score(prediction, groundtruth)\n if (TP + FP + FN) <= 0.0:\n return 0.0\n return TP / (TP + FP + FN) * 100.0\n\n\ndef accuracy_score(prediction, groundtruth):\n FP, FN, TP, TN = numeric_score(prediction, groundtruth)\n N = FP + FN + TP + TN\n accuracy = np.divide(TP + TN, N)\n return accuracy * 100.0\n" ]
[ [ "scipy.spatial.distance.jaccard", "scipy.spatial.distance.dice", "numpy.isnan", "scipy.spatial.distance.directed_hausdorff", "numpy.sum", "numpy.divide" ] ]
dertilo/espnet
[ "4d2414b3d56154ab8c6ded0eb0a3f076e073344b" ]
[ "tools/check_install.py" ]
[ "#!/usr/bin/env python3\n\n\"\"\"Script to check whether the installation is done correctly.\"\"\"\n\n# Copyright 2018 Nagoya University (Tomoki Hayashi)\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\nimport argparse\nimport importlib\nimport logging\nimport sys\nimport traceback\n\nfrom distutils.version import LooseVersion\n\n\n# NOTE: add the libraries which are not included in setup.py\nMANUALLY_INSTALLED_LIBRARIES = [\n (\"espnet\", None),\n (\"kaldiio\", None),\n (\"matplotlib\", None),\n (\"chainer\", (\"6.0.0\")),\n (\"chainer_ctc\", None),\n (\"warprnnt_pytorch\", (\"0.1\")),\n]\n\n# NOTE: list all torch versions which are compatible with espnet\nCOMPATIBLE_TORCH_VERSIONS = (\n \"0.4.1\",\n \"1.0.0\",\n \"1.0.1\",\n \"1.0.1.post2\",\n \"1.1.0\",\n \"1.2.0\",\n \"1.3.0\",\n \"1.3.1\",\n \"1.4.0\",\n \"1.5.0\",\n \"1.5.1\",\n \"1.6.0\",\n)\n\n\ndef main(args):\n \"\"\"Check the installation.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--no-cuda\",\n action=\"store_true\",\n default=False,\n help=\"Disable cuda-related tests\",\n )\n parser.add_argument(\n \"--no-cupy\",\n action=\"store_true\",\n default=False,\n help=\"Disable cupy test\",\n )\n args = parser.parse_args(args)\n\n logging.basicConfig(level=logging.INFO, format=\"%(levelname)s: %(message)s\")\n logging.info(f\"python version = {sys.version}\")\n\n library_list = []\n if args.no_cuda:\n args.no_cupy = True\n\n if not args.no_cupy:\n library_list.append((\"cupy\", (\"6.0.0\")))\n\n # check torch installation at first\n try:\n import torch\n\n logging.info(f\"pytorch version = {torch.__version__}\")\n if torch.__version__ not in COMPATIBLE_TORCH_VERSIONS:\n logging.warning(f\"{torch.__version__} is not tested. please be careful.\")\n except ImportError:\n logging.warning(\"torch is not installed.\")\n logging.warning(\"please try to setup again and then re-run this script.\")\n sys.exit(1)\n\n # warpctc can be installed only for pytorch < 1.4\n if LooseVersion(torch.__version__) < LooseVersion(\"1.4.0\"):\n library_list.append((\"warpctc_pytorch\", (\"0.1.1\", \"0.1.2\", \"0.1.3\")))\n\n library_list.extend(MANUALLY_INSTALLED_LIBRARIES)\n\n # check library availableness\n logging.info(\"library availableness check start.\")\n logging.info(\"# libraries to be checked = %d\" % len(library_list))\n is_correct_installed_list = []\n for idx, (name, version) in enumerate(library_list):\n try:\n importlib.import_module(name)\n logging.info(\"--> %s is installed.\" % name)\n is_correct_installed_list.append(True)\n except ImportError:\n logging.warning(\"--> %s is not installed.\\n###### Raw Error ######\\n%s#######################\" % (name, traceback.format_exc()))\n is_correct_installed_list.append(False)\n logging.info(\"library availableness check done.\")\n logging.info(\n \"%d / %d libraries are correctly installed.\"\n % (sum(is_correct_installed_list), len(library_list))\n )\n\n if len(library_list) != sum(is_correct_installed_list):\n logging.warning(\"please try to setup again and then re-run this script.\")\n sys.exit(1)\n\n # check library version\n num_version_specified = sum(\n [True if v is not None else False for n, v in library_list]\n )\n logging.info(\"library version check start.\")\n logging.info(\"# libraries to be checked = %d\" % num_version_specified)\n is_correct_version_list = []\n for idx, (name, version) in enumerate(library_list):\n if version is not None:\n # Note: temp. 
fix for warprnnt_pytorch\n # not found version with importlib\n if name == \"warprnnt_pytorch\":\n import pkg_resources\n\n vers = pkg_resources.get_distribution(name).version\n else:\n vers = importlib.import_module(name).__version__\n if vers is not None:\n is_correct = vers in version\n if is_correct:\n logging.info(\"--> %s version is matched (%s).\" % (name, vers))\n is_correct_version_list.append(True)\n else:\n logging.warning(\n \"--> %s version is incorrect (%s is not in %s).\"\n % (name, vers, str(version))\n )\n is_correct_version_list.append(False)\n else:\n logging.info(\n \"--> %s has no version info, but version is specified.\" % name\n )\n logging.info(\"--> maybe it is better to reinstall the latest version.\")\n is_correct_version_list.append(False)\n logging.info(\"library version check done.\")\n logging.info(\n \"%d / %d libraries are correct version.\"\n % (sum(is_correct_version_list), num_version_specified)\n )\n\n if sum(is_correct_version_list) != num_version_specified:\n logging.info(\"please try to setup again and then re-run this script.\")\n sys.exit(1)\n\n # check cuda availableness\n if args.no_cuda:\n logging.info(\"cuda availableness check skipped.\")\n else:\n logging.info(\"cuda availableness check start.\")\n import chainer\n import torch\n\n try:\n assert torch.cuda.is_available()\n logging.info(\"--> cuda is available in torch.\")\n except AssertionError:\n logging.warning(\"--> it seems that cuda is not available in torch.\")\n try:\n assert torch.backends.cudnn.is_available()\n logging.info(\"--> cudnn is available in torch.\")\n except AssertionError:\n logging.warning(\"--> it seems that cudnn is not available in torch.\")\n try:\n assert chainer.backends.cuda.available\n logging.info(\"--> cuda is available in chainer.\")\n except AssertionError:\n logging.warning(\"--> it seems that cuda is not available in chainer.\")\n try:\n assert chainer.backends.cuda.cudnn_enabled\n logging.info(\"--> cudnn is available in chainer.\")\n except AssertionError:\n logging.warning(\"--> it seems that cudnn is not available in chainer.\")\n try:\n from cupy.cuda import nccl # NOQA\n\n logging.info(\"--> nccl is installed.\")\n except ImportError:\n logging.warning(\n \"--> it seems that nccl is not installed. multi-gpu is not enabled.\"\n )\n logging.warning(\n \"--> if you want to use multi-gpu, please install it and then re-setup.\"\n )\n try:\n assert torch.cuda.device_count() > 1\n logging.info(\n f\"--> multi-gpu is available (#gpus={torch.cuda.device_count()}).\"\n )\n except AssertionError:\n logging.warning(\"--> it seems that only single gpu is available.\")\n logging.warning(\"--> maybe your machine has only one gpu.\")\n logging.info(\"cuda availableness check done.\")\n\n logging.info(\"installation check is done.\")\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n" ]
[ [ "torch.cuda.device_count", "torch.backends.cudnn.is_available", "torch.cuda.is_available" ] ]
HubBucket-Team/lingvo
[ "fb929def2f27cf73a6ee1b1eaa8bee982bd92987", "fb929def2f27cf73a6ee1b1eaa8bee982bd92987" ]
[ "lingvo/core/base_model_test.py", "lingvo/core/ops/tokenizer_ops_test.py" ]
[ "# Lint as: python2, python3\n# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for base_model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport lingvo.compat as tf\nfrom lingvo.core import base_decoder\nfrom lingvo.core import base_input_generator\nfrom lingvo.core import base_layer\nfrom lingvo.core import base_model\nfrom lingvo.core import base_model_params\nfrom lingvo.core import hyperparams\nfrom lingvo.core import layers\nfrom lingvo.core import learner\nfrom lingvo.core import py_utils\nfrom lingvo.core import task_scheduler\nfrom lingvo.core import test_utils\nimport numpy as np\nimport six\nfrom six.moves import range\n\n\nFLAGS = tf.flags.FLAGS\n\n_NUMPY_RANDOM_SEED = 9885784\n\n\nclass BaseTaskTest(test_utils.TestCase):\n\n def testStatsCounter(self):\n with self.session() as sess:\n foo = base_model.StatsCounter('foo')\n val = foo.Value()\n params = base_layer.BaseLayer.Params()\n inc = foo.IncBy(params, 100)\n\n tf.global_variables_initializer().run()\n self.assertAllEqual(0, val.eval())\n self.assertAllEqual(100, sess.run(inc))\n self.assertAllEqual(100, val.eval())\n self.assertAllEqual([100, 200], sess.run([val, inc]))\n self.assertAllEqual([200, 300], sess.run([val, inc]))\n\n @classmethod\n def TestParams(cls):\n p = base_model.BaseTask.Params()\n p.name = 'base_mdl'\n p.encoder = base_layer.BaseLayer.Params()\n p.encoder.name = 'encoder'\n p.decoder = base_decoder.BaseDecoder.Params()\n p.decoder.name = 'decoder'\n return p\n\n def testInit(self):\n p = self.TestParams()\n p.input = base_input_generator.BaseSequenceInputGenerator.Params()\n _ = p.Instantiate()\n\n def testScaleGradients(self):\n p = self.TestParams()\n p.input = base_input_generator.BaseSequenceInputGenerator.Params()\n task = p.Instantiate()\n task.CreateVariable(\n 'a',\n py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0)))\n var_a = task.theta.a\n var_grads = py_utils.NestedMap(a=(var_a, tf.ones_like(var_a)))\n scaled_grads_map = task.learners[0].ScaleGradients(var_grads)\n\n FLAGS.enable_check_numerics = False\n with self.session():\n tf.global_variables_initializer().run()\n self.assertFalse(scaled_grads_map.has_nan_or_inf.eval())\n self.assertEqual(1.0, scaled_grads_map.grad_scale.eval())\n # The final gradient must be finite.\n self.assertFalse(tf.is_nan(scaled_grads_map.final_var_grads.a[1]).eval())\n self.assertTrue(\n tf.is_finite(scaled_grads_map.final_var_grads.a[1]).eval())\n\n def testScaleGradientsInf(self):\n FLAGS.enable_check_numerics = False\n p = self.TestParams()\n p.input = base_input_generator.BaseSequenceInputGenerator.Params()\n task = p.Instantiate()\n task.CreateVariable(\n 'a',\n py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0)))\n var_a = task.theta.a\n # Infinite gradient.\n var_grads = 
py_utils.NestedMap(a=(var_a, tf.log(0.)))\n scaled_grads_map = task.learners[0].ScaleGradients(var_grads)\n\n with self.session():\n tf.global_variables_initializer().run()\n self.assertTrue(scaled_grads_map.has_nan_or_inf.eval())\n self.assertEqual(0., scaled_grads_map.grad_scale.eval())\n # The final gradient must be finite.\n self.assertFalse(tf.is_nan(scaled_grads_map.final_var_grads.a[1]).eval())\n self.assertTrue(\n tf.is_finite(scaled_grads_map.final_var_grads.a[1]).eval())\n\n def testScaleGradientsNaN(self):\n FLAGS.enable_check_numerics = False\n p = self.TestParams()\n p.input = base_input_generator.BaseSequenceInputGenerator.Params()\n task = p.Instantiate()\n task.CreateVariable(\n 'a',\n py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0)))\n var_a = task.theta.a\n # Make a NaN gradient.\n var_grads = py_utils.NestedMap(a=(var_a, 0. * tf.log(0.)))\n scaled_grads_map = task.learners[0].ScaleGradients(var_grads)\n\n with self.session():\n tf.global_variables_initializer().run()\n self.assertTrue(scaled_grads_map.has_nan_or_inf.eval())\n self.assertEqual(0., scaled_grads_map.grad_scale.eval())\n # The final gradient must be finite.\n self.assertFalse(tf.is_nan(scaled_grads_map.final_var_grads.a[1]).eval())\n self.assertTrue(\n tf.is_finite(scaled_grads_map.final_var_grads.a[1]).eval())\n\n def testScaleGradientsCheckNumerics(self):\n \"\"\"ScaleGradients when enable_check_numerics=True.\"\"\"\n FLAGS.enable_check_numerics = True\n p = self.TestParams()\n p.input = base_input_generator.BaseSequenceInputGenerator.Params()\n task = p.Instantiate()\n task.CreateVariable(\n 'a',\n py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0)))\n var_a = task.theta.a\n # Make a NaN gradient.\n var_grads = py_utils.NestedMap(a=(var_a, 0. 
* tf.log(0.)))\n scaled_grads_map = task.learners[0].ScaleGradients(var_grads)\n\n with self.session():\n tf.global_variables_initializer().run()\n with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,\n 'is not finite'):\n self.assertTrue(scaled_grads_map.has_nan_or_inf.eval())\n self.assertEqual(0., scaled_grads_map.grad_scale.eval())\n # The final gradient must be finite.\n self.assertFalse(\n tf.is_nan(scaled_grads_map.final_var_grads.a[1]).eval())\n self.assertTrue(\n tf.is_finite(scaled_grads_map.final_var_grads.a[1]).eval())\n\n def testScaleGradientsError(self):\n p = self.TestParams()\n p.input = base_input_generator.BaseSequenceInputGenerator.Params()\n p.train.clip_gradient_single_norm_to_value = 1.0\n p.train.clip_gradient_norm_to_value = 1.0\n task = p.Instantiate()\n task.CreateVariable(\n 'a',\n py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0)))\n var_a = task.theta.a\n var_grads = py_utils.NestedMap(a=(var_a, tf.ones_like(var_a)))\n self.assertRaises(ValueError, task.learners[0].ScaleGradients, var_grads)\n\n def testScaleGradientsSingleTensorNorm(self):\n p = self.TestParams()\n p.input = base_input_generator.BaseSequenceInputGenerator.Params()\n p.train.clip_gradient_single_norm_to_value = 1.0\n p.train.clip_gradient_norm_to_value = None\n task = p.Instantiate()\n task.CreateVariable(\n 'a',\n py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0)))\n task.CreateVariable(\n 'b',\n py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0)))\n\n var_a = task.theta.a\n var_b = task.theta.b\n var_grads = py_utils.NestedMap(\n a=(var_a, tf.ones_like(var_a) * 10.0),\n b=(var_b, tf.ones_like(var_b) * 0.5))\n scaled_grads_map = task.learners[0].ScaleGradients(var_grads)\n\n FLAGS.enable_check_numerics = False\n with self.session():\n tf.global_variables_initializer().run()\n\n # Each variable is clipped indipendently to grad scale of 1.\n self.assertAllClose(scaled_grads_map.final_var_grads.a[1].eval(), 1.0)\n self.assertAllClose(scaled_grads_map.final_var_grads.b[1].eval(), 0.5)\n\n\nclass TeacherTask(base_model.BaseTask):\n\n @base_layer.initializer\n def __init__(self, params):\n super(TeacherTask, self).__init__(params)\n p = self.params\n with tf.variable_scope(p.name):\n self.CreateVariable('x',\n py_utils.WeightParams(\n shape=[], init=py_utils.WeightInit.Constant(0)))\n\n def ComputePredictions(self, theta, input_batch):\n return theta.x\n\n\nclass StudentTask(base_model.BaseTask):\n\n @base_layer.initializer\n def __init__(self, params):\n super(StudentTask, self).__init__(params)\n p = self.params\n with tf.variable_scope(p.name):\n self.CreateVariable('x',\n py_utils.WeightParams(\n shape=[], init=py_utils.WeightInit.Uniform()))\n\n def ComputePredictions(self, theta, input_batch):\n return theta.x\n\n\nclass TestInputGenerator(base_input_generator.BaseSequenceInputGenerator):\n\n def __init__(self, params):\n super(TestInputGenerator, self).__init__(params)\n self._input_batch_size = tf.constant(1)\n\n def InputBatch(self):\n return 0\n\n\nclass DistillationTestTask(base_model.DistillationTask):\n\n @classmethod\n def Params(cls):\n p = super(DistillationTestTask, cls).Params()\n p.name = 'distillation_test'\n p.teacher = TeacherTask.Params()\n p.student = StudentTask.Params()\n p.input = TestInputGenerator.Params()\n p.train.learning_rate = 1e3\n p.teacher.train = None\n p.teacher.eval = None\n p.student.train = None\n p.student.eval = None\n return p\n\n @base_layer.initializer\n def __init__(self, params):\n 
super(DistillationTestTask, self).__init__(params)\n\n def ComputeLoss(self, theta, predictions, input_batch):\n return {'loss': (predictions.teacher - predictions.student, 1)}, {}\n\n\nclass DistillationTaskTest(test_utils.TestCase):\n\n def testFProp(self):\n p = DistillationTestTask.Params()\n task = p.Instantiate()\n self.assertFalse(task.params.is_eval)\n self.assertFalse(task.teacher.params.is_eval)\n self.assertIsNotNone(task.teacher.params.input)\n self.assertFalse(task.student.params.is_eval)\n self.assertIsNotNone(task.student.params.input)\n metrics = task.FPropDefaultTheta()[0]\n self.assertItemsEqual(['loss', 'num_samples_in_batch'],\n list(metrics.keys()))\n task.BProp()\n # Expected side effects of BProp().\n self.assertIsNotNone(task.train_op)\n self.assertIsNotNone(task.total_examples)\n\n with self.session() as sess:\n tf.global_variables_initializer().run()\n\n variables = {}\n values_before_training = {}\n values_after_training = {}\n for child in ('teacher', 'student'):\n variables[child] = {\n k: v\n for k, v in getattr(task, child).vars.FlattenItems()\n }\n values_before_training[child] = sess.run(variables[child])\n\n # Train for a few steps.\n for _ in range(10):\n sess.run(task.train_op)\n\n for child in ('teacher', 'student'):\n values_after_training[child] = sess.run(variables[child])\n for k, v in six.iteritems(values_after_training[child]):\n print('Comparing variable %s' % k)\n if child == 'teacher':\n # Teacher vars should not change after training.\n self.assertAllEqual(values_before_training[child][k], v)\n else:\n # Student vars should change after training.\n self.assertNotAlmostEqual(values_before_training[child][k], v)\n\n\nclass SingleTaskModelTest(test_utils.TestCase):\n\n def testInit(self):\n p = base_model.SingleTaskModel.Params()\n p.task = BaseTaskTest.TestParams()\n p.task.train.learner = (learner.Learner.Params().Set(name='loss'))\n p.task.input = base_input_generator.BaseSequenceInputGenerator.Params()\n model = p.Instantiate()\n self.assertEqual(model.params.name, model.GetTask().params.name)\n self.assertEqual(model.params.task, model.GetTask().params)\n self.assertEqual(len(model.tasks), 1)\n self.assertEqual(model.tasks[0], model.GetTask())\n self.assertEqual(model.tasks[0], model.SampleTask(None))\n\n def testExponentialMovingAverage(self):\n p = base_model.SingleTaskModel.Params()\n p.task = BaseTaskTest.TestParams()\n p.task.input = base_input_generator.BaseSequenceInputGenerator.Params()\n p.train.ema_decay = 0.9\n model = p.Instantiate()\n model._task.CreateChild('a',\n layers.BatchNormLayer.Params().Set(name='a', dim=1))\n model._task._train_op = tf.no_op()\n model._task.ApplyExponentialMovingAverage(model.ema)\n with tf.variable_scope('', reuse=True):\n beta = tf.get_variable('a/beta/var')\n mean = tf.get_variable('a/moving_mean/var')\n self.assertIsNotNone(model.ema.average(beta))\n self.assertIsNone(model.ema.average(mean))\n\n\nclass MultiTaskModelTest(test_utils.TestCase):\n\n def testInit(self):\n p = base_model.MultiTaskModel.Params()\n p.name = 'MultiTaskModel'\n p0 = BaseTaskTest.TestParams()\n p0.train.learner = (learner.Learner.Params().Set(name='loss'))\n p1 = BaseTaskTest.TestParams()\n p1.train.learner = (learner.Learner.Params().Set(name='loss'))\n\n p.input = base_model_params.MultiTaskModelParams().Train()\n p.input.Define('a',\n base_input_generator.BaseSequenceInputGenerator.Params(), '')\n p.input.Define('b',\n base_input_generator.BaseSequenceInputGenerator.Params(), '')\n\n p.task_params = 
hyperparams.Params()\n p.task_params.Define('a', p0, '')\n p.task_params.Define('b', p1, '')\n\n p.task_probs = hyperparams.Params()\n p.task_probs.Define('a', 0.5, '')\n p.task_probs.Define('b', 0.5, '')\n\n model = p.Instantiate()\n self.assertEqual(len(model.tasks), 2)\n self.assertEqual(set(model.task_names), {'a', 'b'})\n self.assertEqual(set(model.tasks), {model.GetTask('a'), model.GetTask('b')})\n self.assertEqual(model.params.task_params.a, model.GetTask('a').params)\n self.assertEqual(model.params.task_params.b, model.GetTask('b').params)\n\n def _setUpTestSampleTask(self):\n np.random.seed(_NUMPY_RANDOM_SEED)\n\n # define and initialize tasks, model and params\n p = base_model.MultiTaskModel.Params()\n p.name = 'MultiTaskModel'\n p0 = BaseTaskTest.TestParams()\n p1 = BaseTaskTest.TestParams()\n\n p.input = base_model_params.MultiTaskModelParams().Train()\n p.input.Define('a',\n base_input_generator.BaseSequenceInputGenerator.Params(), '')\n p.input.Define('b',\n base_input_generator.BaseSequenceInputGenerator.Params(), '')\n\n p.task_params = hyperparams.Params()\n p.task_params.Define('a', p0, '')\n p.task_params.Define('b', p1, '')\n\n return p\n\n def _testSampleTaskHelper(self, p):\n model = p.Instantiate()\n\n task_to_id = {model.children['a']: 'a', model.children['b']: 'b'}\n task_counts = {'a': 0, 'b': 0}\n\n # initialize tensorflow graph and global step\n with self.session() as sess:\n tf.global_variables_initializer().run()\n global_step = sess.run(model.global_step)\n for _ in range(100):\n task = model.SampleTask(global_step)\n task_counts[task_to_id[task]] += 1\n\n self.assertEqual(task_counts['a'], 83)\n self.assertEqual(task_counts['b'], 17)\n\n def testSampleTaskSpecifiedWithoutScheduler(self):\n \"\"\"Expected distribution: 'a': 0.8 , 'b': 0.2.\"\"\"\n p = self._setUpTestSampleTask()\n\n p.task_probs = hyperparams.Params()\n p.task_probs.Define('a', 0.8, '')\n p.task_probs.Define('b', 0.2, '')\n\n self._testSampleTaskHelper(p)\n\n def testSampleTask(self):\n \"\"\"Expected distribution: 'a': 0.8 , 'b': 0.2.\"\"\"\n p = self._setUpTestSampleTask()\n\n p.task_schedule = task_scheduler.ConstantScheduler.Params()\n p.task_schedule.task_probs = [('a', 0.8), ('b', 0.2)]\n\n self._testSampleTaskHelper(p)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Lint as: python2, python3\n# -*- coding: utf-8 -*-\n# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tokenizer_ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom lingvo.core import ops\nfrom lingvo.core import test_helper\nfrom lingvo.core import test_utils\nimport tensorflow as tf\n\n\nclass TokenizerOpsTest(test_utils.TestCase):\n\n def testLabelsToTokenId(self):\n with self.session(use_gpu=False) as sess:\n token_ids, target_ids, paddings = sess.run(\n ops.ascii_to_token_id([\n 'hElLo', 'sIr<epsilon>', 'What a <unk> day', 'america\\'s',\n '<noise> early', '1:00 AM', '<text_only>morning'\n ],\n append_eos=True,\n maxlen=10))\n self.assertAllEqual(token_ids, [\n [1, 12, 9, 16, 16, 19, 2, 2, 2, 2],\n [1, 23, 13, 22, 73, 2, 2, 2, 2, 2],\n [1, 27, 12, 5, 24, 3, 5, 3, 0, 3],\n [1, 5, 17, 9, 22, 13, 7, 5, 32, 23],\n [1, 4, 3, 9, 5, 22, 16, 29, 2, 2],\n [1, 40, 34, 39, 39, 3, 5, 17, 2, 2],\n [1, 74, 17, 19, 22, 18, 13, 18, 11, 2],\n ])\n self.assertAllEqual(\n target_ids,\n [[12, 9, 16, 16, 19, 2, 2, 2, 2, 2], [23, 13, 22, 73, 2, 2, 2, 2, 2, 2],\n [27, 12, 5, 24, 3, 5, 3, 0, 3, 2], [5, 17, 9, 22, 13, 7, 5, 32, 23, 2],\n [4, 3, 9, 5, 22, 16, 29, 2, 2, 2], [40, 34, 39, 39, 3, 5, 17, 2, 2, 2],\n [74, 17, 19, 22, 18, 13, 18, 11, 2, 2]])\n self.assertAllEqual(\n paddings,\n [[0, 0, 0, 0, 0, 0, 1, 1, 1, 1], [0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\n ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1]])\n\n def testLabelsToTokenIdAppendEOSFalse(self):\n with self.session(use_gpu=False) as sess:\n token_ids, target_ids, paddings = sess.run(\n ops.ascii_to_token_id([\n 'hElLo', 'sIr<epsilon>', 'What a <unk> day', 'america\\'s',\n '<noise> early', '1:00 AM', '100%'\n ],\n append_eos=False,\n maxlen=10))\n self.assertAllEqual(\n token_ids,\n [[1, 12, 9, 16, 16, 19, 2, 2, 2, 2], [1, 23, 13, 22, 73, 2, 2, 2, 2, 2],\n [1, 27, 12, 5, 24, 3, 5, 3, 0, 3], [1, 5, 17, 9, 22, 13, 7, 5, 32, 23],\n [1, 4, 3, 9, 5, 22, 16, 29, 2, 2], [1, 40, 34, 39, 39, 3, 5, 17, 2, 2],\n [1, 40, 39, 39, 52, 2, 2, 2, 2, 2]])\n self.assertAllEqual(\n target_ids,\n [[12, 9, 16, 16, 19, 2, 2, 2, 2, 2], [23, 13, 22, 73, 2, 2, 2, 2, 2, 2],\n [27, 12, 5, 24, 3, 5, 3, 0, 3, 2], [5, 17, 9, 22, 13, 7, 5, 32, 23, 2],\n [4, 3, 9, 5, 22, 16, 29, 2, 2, 2], [40, 34, 39, 39, 3, 5, 17, 2, 2, 2],\n [40, 39, 39, 52, 2, 2, 2, 2, 2, 2]])\n self.assertAllEqual(\n paddings,\n [[0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [0, 0, 0, 0, 1, 1, 1, 1, 1, 1], [\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 1\n ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1],\n [0, 0, 0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])\n\n def testLabelsToTokenIdNoPadToMaxlen(self):\n with self.session(use_gpu=False) as sess:\n token_ids, target_ids, paddings = sess.run(\n ops.ascii_to_token_id([\n 'hElLo', 'sIr<epsilon>', 'What a <unk> 
day', 'america\\'s',\n '<noise> early', '1:00 AM', '<text_only>morning'\n ],\n append_eos=True,\n maxlen=20,\n pad_to_maxlen=False))\n self.assertAllEqual(token_ids, [\n [1, 12, 9, 16, 16, 19, 2, 2, 2, 2, 2, 2, 2],\n [1, 23, 13, 22, 73, 2, 2, 2, 2, 2, 2, 2, 2],\n [1, 27, 12, 5, 24, 3, 5, 3, 0, 3, 8, 5, 29],\n [1, 5, 17, 9, 22, 13, 7, 5, 32, 23, 2, 2, 2],\n [1, 4, 3, 9, 5, 22, 16, 29, 2, 2, 2, 2, 2],\n [1, 40, 34, 39, 39, 3, 5, 17, 2, 2, 2, 2, 2],\n [1, 74, 17, 19, 22, 18, 13, 18, 11, 2, 2, 2, 2],\n ])\n self.assertAllEqual(target_ids, [\n [12, 9, 16, 16, 19, 2, 2, 2, 2, 2, 2, 2, 2],\n [23, 13, 22, 73, 2, 2, 2, 2, 2, 2, 2, 2, 2],\n [27, 12, 5, 24, 3, 5, 3, 0, 3, 8, 5, 29, 2],\n [5, 17, 9, 22, 13, 7, 5, 32, 23, 2, 2, 2, 2],\n [4, 3, 9, 5, 22, 16, 29, 2, 2, 2, 2, 2, 2],\n [40, 34, 39, 39, 3, 5, 17, 2, 2, 2, 2, 2, 2],\n [74, 17, 19, 22, 18, 13, 18, 11, 2, 2, 2, 2, 2],\n ])\n self.assertAllEqual(paddings, [\n [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1],\n [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1],\n ])\n\n def testIdToToken(self):\n with self.session(use_gpu=False) as sess:\n token_ids = [[12, 9, 16, 16, 19, 2, 2, 2, 2,\n 2], [23, 13, 22, 73, 2, 2, 2, 2, 2,\n 2], [27, 12, 5, 24, 3, 5, 3, 0, 3,\n 2], [5, 17, 9, 22, 13, 7, 5, 32, 23, 2],\n [4, 3, 9, 5, 22, 16, 29, 2, 2,\n 2], [40, 34, 39, 39, 3, 5, 17, 2, 2,\n 2], [52, 2, 2, 2, 2, 2, 2, 2, 2, 2]]\n seq_lens = [5, 4, 9, 9, 7, 7, 1]\n tokens = sess.run(ops.id_to_ascii(token_ids, seq_lens))\n\n self.assertEqual(tokens.tolist(), [\n b'hello', b'sir<epsilon>', b'what a <unk> ', b\"america's\",\n b'<noise> early', b'1:00 am', b'%'\n ])\n\n def testStrToVocabToken(self):\n vocab = test_helper.test_src_dir_path('core/ops/testdata/test_vocab.txt')\n with self.session(use_gpu=False) as sess:\n token_ids, target_ids, paddings = sess.run(\n ops.str_to_vocab_tokens([\n 'a b c d e',\n '<epsilon> <S> </S> <UNK>',\n 'øut über ♣ 愤青 ←',\n ],\n append_eos=True,\n maxlen=10,\n vocab_filepath=vocab))\n self.assertEqual(\n token_ids.tolist(),\n [[1, 5, 6, 7, 8, 9, 2, 2, 2, 2], [1, 0, 1, 2, 3, 2, 2, 2, 2, 2],\n [1, 10, 11, 12, 13, 3, 2, 2, 2, 2]])\n self.assertEqual(\n target_ids.tolist(),\n [[5, 6, 7, 8, 9, 2, 2, 2, 2, 2], [0, 1, 2, 3, 2, 2, 2, 2, 2, 2],\n [10, 11, 12, 13, 3, 2, 2, 2, 2, 2]])\n self.assertEqual(paddings.tolist(),\n [[0., 0., 0., 0., 0., 0., 1., 1., 1., 1.], [\n 0., 0., 0., 0., 0., 1., 1., 1., 1., 1.\n ], [0., 0., 0., 0., 0., 0., 1., 1., 1., 1.]])\n\n def testStrToVocabTokenAppendEOSFalse(self):\n vocab = test_helper.test_src_dir_path('core/ops/testdata/test_vocab.txt')\n with self.session(use_gpu=False) as sess:\n token_ids, target_ids, paddings = sess.run(\n ops.str_to_vocab_tokens([\n 'a b c d e',\n '<epsilon> <S> </S> <UNK>',\n 'øut über ♣ 愤青 ←',\n ],\n append_eos=False,\n maxlen=10,\n vocab_filepath=vocab))\n self.assertEqual(\n token_ids.tolist(),\n [[1, 5, 6, 7, 8, 9, 2, 2, 2, 2], [1, 0, 1, 2, 3, 2, 2, 2, 2, 2],\n [1, 10, 11, 12, 13, 3, 2, 2, 2, 2]])\n self.assertEqual(\n target_ids.tolist(),\n [[5, 6, 7, 8, 9, 2, 2, 2, 2, 2], [0, 1, 2, 3, 2, 2, 2, 2, 2, 2],\n [10, 11, 12, 13, 3, 2, 2, 2, 2, 2]])\n self.assertEqual(paddings.tolist(),\n [[0., 0., 0., 0., 0., 1., 1., 1., 1., 1.], [\n 0., 0., 0., 0., 1., 1., 1., 1., 1., 1.\n ], [0., 0., 0., 0., 0., 1., 1., 1., 1., 1.]])\n\n def testStrToVocabTokenTruncates(self):\n vocab = 
test_helper.test_src_dir_path('core/ops/testdata/test_vocab.txt')\n with self.session(use_gpu=False) as sess:\n token_ids, target_ids, paddings = sess.run(\n ops.str_to_vocab_tokens(['a b c d e ' * 1000],\n append_eos=True,\n maxlen=5,\n vocab_filepath=vocab))\n self.assertEqual(token_ids.tolist(), [[1, 5, 6, 7, 8]])\n self.assertEqual(target_ids.tolist(), [[5, 6, 7, 8, 9]])\n self.assertEqual(paddings.tolist(), [[0., 0., 0., 0., 0.]])\n\n def testStrToVocabTokenNoPadToMaxlen(self):\n vocab = test_helper.test_src_dir_path('core/ops/testdata/test_vocab.txt')\n with self.session(use_gpu=False) as sess:\n token_ids, target_ids, paddings = sess.run(\n ops.str_to_vocab_tokens([\n 'a b c d e',\n '<epsilon> <S> </S> <UNK>',\n 'øut über ♣ 愤青 ←',\n ],\n append_eos=True,\n maxlen=10,\n pad_to_maxlen=False,\n vocab_filepath=vocab))\n self.assertEqual(\n token_ids.tolist(),\n [[1, 5, 6, 7, 8, 9], [1, 0, 1, 2, 3, 2], [1, 10, 11, 12, 13, 3]])\n self.assertEqual(\n target_ids.tolist(),\n [[5, 6, 7, 8, 9, 2], [0, 1, 2, 3, 2, 2], [10, 11, 12, 13, 3, 2]])\n self.assertEqual(paddings.tolist(),\n [[0., 0., 0., 0., 0., 0.], [0., 0., 0., 0., 0., 1.],\n [0., 0., 0., 0., 0., 0.]])\n\n def testStrToVocabTokenCustomDelimiter(self):\n custom_delimiter = '_'\n vocab = test_helper.test_src_dir_path('core/ops/testdata/test_vocab.txt')\n with self.session(use_gpu=False) as sess:\n token_ids, target_ids, paddings = sess.run(\n ops.str_to_vocab_tokens([custom_delimiter.join('abcde')],\n append_eos=True,\n maxlen=8,\n vocab_filepath=vocab,\n delimiter=custom_delimiter))\n self.assertEqual(token_ids.tolist(), [[1, 5, 6, 7, 8, 9, 2, 2]])\n self.assertEqual(target_ids.tolist(), [[5, 6, 7, 8, 9, 2, 2, 2]])\n self.assertEqual(paddings.tolist(), [[0., 0., 0., 0., 0., 0., 1., 1.]])\n\n def testStrToVocabTokenSplitToCharacters(self):\n custom_delimiter = ''\n vocab = test_helper.test_src_dir_path('core/ops/testdata/test_vocab.txt')\n with self.session(use_gpu=False) as sess:\n token_ids, target_ids, paddings = sess.run(\n ops.str_to_vocab_tokens(['abcde'],\n append_eos=True,\n maxlen=8,\n vocab_filepath=vocab,\n delimiter=custom_delimiter))\n self.assertEqual(token_ids.tolist(), [[1, 5, 6, 7, 8, 9, 2, 2]])\n self.assertEqual(target_ids.tolist(), [[5, 6, 7, 8, 9, 2, 2, 2]])\n self.assertEqual(paddings.tolist(), [[0., 0., 0., 0., 0., 0., 1., 1.]])\n\n def testNgramIdToToken(self):\n vocab = test_helper.test_src_dir_path('core/ops/testdata/test_ngrams.txt')\n with self.session(use_gpu=False):\n ngram_ids = [[14, 11, 6, 24, 7, 3, 13, 82, 2, 2],\n [57, 3, 73, 17, 22, 9, 2, 2, 2, 2]]\n lengths = [8, 6]\n scripts = ops.ngram_id_to_token(\n ngram_ids, lengths, ngram_vocab_filepath=vocab)\n scripts_expected = [b'pn?o\"{twe', b'gh{rtlcr']\n self.assertEqual(scripts_expected, scripts.eval().tolist())\n\n def testNgramIdToTokenSeparator(self):\n vocab = test_helper.test_src_dir_path('core/ops/testdata/test_ngrams.txt')\n with self.session(use_gpu=False):\n ngram_ids = [[14, 11, 6, 24, 7, 3, 13, 82, 2, 2],\n [57, 3, 73, 17, 22, 9, 2, 2, 2, 2]]\n lengths = [8, 6]\n scripts = ops.ngram_id_to_token(\n ngram_ids, lengths, ngram_vocab_filepath=vocab, ngram_separator='.')\n scripts_expected = [b'p.n.?.o.\".{.t.we', b'gh.{.rt.l.c.r']\n self.assertEqual(scripts_expected, scripts.eval().tolist())\n\n def testBpeTokenization(self):\n word_vocab = test_helper.test_src_dir_path(\n 'core/ops/testdata/bpe_words.vocab')\n code_vocab = test_helper.test_src_dir_path(\n 'core/ops/testdata/bpe_codes.vocab')\n sentences = [\n 'GIVE ME A PENNY', 'THEY LIVED 
ALONE', 'THEY GIVE ME A PENNY ALONE'\n ]\n expected_sentences = [\n b'GIVE ME A PENNY </s> ',\n b'THEY LIVED ALONE </s> ',\n b'THEY GIVE ME A PENNY ',\n ]\n expected_token_ids = [\n [27, 9, 30, 14, 28, 14, 52, 11, 4, 6, 6, 10, 2, 2, 2],\n [16, 4, 10, 12, 9, 30, 24, 7, 12, 49, 14, 2, 2, 2, 2],\n [16, 4, 10, 27, 9, 30, 14, 28, 14, 52, 11, 4, 6, 6, 10],\n ]\n with self.session(use_gpu=False):\n label_tensor = tf.constant(sentences)\n _, token_ids, paddings = ops.bpe_words_to_ids(\n label_tensor, tokenization_filepath=word_vocab, maxlen=15)\n seq_lens = tf.to_int32(tf.round(tf.reduce_sum(1 - paddings, axis=-1)))\n\n target_string = ops.bpe_ids_to_words(\n token_ids, seq_lengths=seq_lens, vocab_filepath=code_vocab)\n self.assertEqual(expected_sentences, target_string.eval().tolist())\n self.assertEqual(expected_token_ids, token_ids.eval().tolist())\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "numpy.random.seed" ], [ "tensorflow.reduce_sum", "tensorflow.constant", "tensorflow.test.main" ] ]
MilesCranmer/bifrost
[ "951dd4a449850d22cfd74f4db13ecf806fe5cc30" ]
[ "python/bifrost/dtype.py" ]
[ "\n# Copyright (c) 2016, The Bifrost Authors. All rights reserved.\n# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of The Bifrost Authors nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY\n# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY\n# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\n\ni: signed integer\nu: unsigned integer\nf: floating point\nci: complex signed integer\ncu: complex unsigned integer\ncf: complex floating pointer\n\ni4: 4-bit signed integer\nf16: 16-bit floating point\nci4: 4+4-bit complex signed integer\ncf32: 32+32-bit complex floating point\n\n\"\"\"\n\nfrom libbifrost import _bf\n\nimport numpy as np\n\ndef split_name_nbit(dtype_str):\n\t\"\"\"Splits a dtype string into (name, nbit)\"\"\"\n\tfor i,char in enumerate(dtype_str):\n\t\tif char.isdigit():\n\t\t\tbreak\n\tname = dtype_str[:i]\n\tnbit = int(dtype_str[i:])\n\treturn name, nbit\n\n# Custom dtypes to represent additional complex types\nci8 = np.dtype([('re',np.int8), ('im',np.int8)])\nci16 = np.dtype([('re',np.int16), ('im',np.int16)])\nci32 = np.dtype([('re',np.int32), ('im',np.int32)])\ncf16 = np.dtype([('re',np.float16), ('im',np.float16)])\ndef to_complex64(q):\n\treal_type = q.dtype['re']\n\treturn q.view(real_type).astype(np.float32).view(np.complex64)\ndef from_complex64(f, dtype):\n\treal_type = dtype['re']\n\treturn f.view(np.float32).astype(real_type).view(dtype)\n\ndef numpy2bifrost(dtype):\n\tif dtype == np.int8: return _bf.BF_DTYPE_I8\n\telif dtype == np.int16: return _bf.BF_DTYPE_I16\n\telif dtype == np.int32: return _bf.BF_DTYPE_I32\n\telif dtype == np.uint8: return _bf.BF_DTYPE_U8\n\telif dtype == np.uint16: return _bf.BF_DTYPE_U16\n\telif dtype == np.uint32: return _bf.BF_DTYPE_U32\n\telif dtype == np.float16: return _bf.BF_DTYPE_F16\n\telif dtype == np.float32: return _bf.BF_DTYPE_F32\n\telif dtype == np.float64: return _bf.BF_DTYPE_F64\n\telif dtype == np.float128: return _bf.BF_DTYPE_F128\n\telif dtype == ci8: return _bf.BF_DTYPE_CI8\n\telif dtype == ci16: return _bf.BF_DTYPE_CI16\n\telif dtype == ci32: return _bf.BF_DTYPE_CI32\n\telif dtype == cf16: return _bf.BF_DTYPE_CF16\n\telif dtype == np.complex64: return _bf.BF_DTYPE_CF32\n\telif dtype == np.complex128: return 
_bf.BF_DTYPE_CF64\n\telif dtype == np.complex256: return _bf.BF_DTYPE_CF128\n\telse: raise ValueError(\"Unsupported dtype: \" + str(dtype))\n\ndef name_nbit2numpy(name, nbit):\n\tif name == 'i':\n\t\tif nbit == 8: return np.int8\n\t\telif nbit == 16: return np.int16\n\t\telif nbit == 32: return np.int32\n\t\telif nbit == 64: return np.int64\n\t\telse: raise TypeError(\"Invalid signed integer type size: %i\" % nbit)\n\telif name == 'u':\n\t\tif nbit == 8: return np.uint8\n\t\telif nbit == 16: return np.uint16\n\t\telif nbit == 32: return np.uint32\n\t\telif nbit == 64: return np.uint64\n\t\telse: raise TypeError(\"Invalid unsigned integer type size: %i\" % nbit)\n\telif name == 'f':\n\t\tif nbit == 16: return np.float16\n\t\telif nbit == 32: return np.float32\n\t\telif nbit == 64: return np.float64\n\t\telif nbit == 128: return np.float128\n\t\telse: raise TypeError(\"Invalid floating-point type size: %i\" % nbit)\n\telif name == 'ci':\n\t\tif nbit == 8: return ci8\n\t\telif nbit == 16: return ci16\n\t\telif nbit == 32: return ci32\n\t#elif name in set(['ci', 'cu']):\n\t\t## Note: This gives integer types in place of proper complex types\n\t\t#return name_nbit2numpy(name[1:], nbit*2)\n\telif name == 'cf':\n\t\tif nbit == 16: return cf16\n\t\telif nbit == 32: return np.complex64\n\t\telif nbit == 64: return np.complex128\n\t\telif nbit == 128: return np.complex256\n\t\telse: raise TypeError(\"Invalid complex floating-point type size: %i\" % nbit)\n\telse:\n\t\traise TypeError(\"Invalid type name: \" + name)\ndef string2numpy(dtype_str):\n\treturn name_nbit2numpy(*split_name_nbit(dtype_str))\n\ndef numpy2string(dtype):\n\tif dtype == np.int8: return 'i8'\n\telif dtype == np.int16: return 'i16'\n\telif dtype == np.int32: return 'i32'\n\telif dtype == np.int64: return 'i64'\n\telif dtype == np.uint8: return 'u8'\n\telif dtype == np.uint16: return 'u16'\n\telif dtype == np.uint32: return 'u32'\n\telif dtype == np.uint64: return 'u64'\n\telif dtype == np.float16: return 'f16'\n\telif dtype == np.float32: return 'f32'\n\telif dtype == np.float64: return 'f64'\n\telif dtype == np.float128: return 'f128'\n\telif dtype == np.complex64: return 'cf32'\n\telif dtype == np.complex128: return 'cf64'\n\telif dtype == np.complex256: return 'cf128'\n\telse: raise TypeError(\"Unsupported dtype: \" + str(dtype))\n" ]
[ [ "numpy.dtype" ] ]
acctouhou/Prediction_of_battery
[ "c7b1f4ccb11ddf416d1026c0a528ff2ef15eb842" ]
[ "1_Predicting/predict.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 26 00:06:46 2019\n\n@author: Acc\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nfrom sklearn import preprocessing\nimport tensorflow as tf\nfrom tensorflow.keras import backend as K\nfrom tqdm import tqdm\n\n\ndata_dir='dataset'\nmodel_dir='pretrained'\n\ndef norm(data):\n a= preprocessing.StandardScaler().fit(data)\n d=a.transform(data)\n m=a.mean_\n s=a.scale_\n return m,s\ndef mish(x):\n return x * K.tanh(K.softplus(x)) \n\ndef mae(x,y):\n return np.abs(x-y).mean()\ndef feature_selector(model,x,norm):\n normalized_data=(np.transpose(x,(0,2,1))-norm[0])/norm[1]\n return model.predict(normalized_data,batch_size=128)\ndef concat_data(x1,x2,x3):\n normalized_data=(np.array(x3)-summary_norm[0])/summary_norm[1]\n return np.hstack((x1,x2,normalized_data))\ndef re_norm(cell_feature):\n log1=[]\n log2=[]\n for i in range(len(cell_feature)):\n len_=len(cell_feature['%d'%(i)])-100\n for k in range(len_):\n for j in range(0,50,1): \n log1.append(np.float32(k))\n log2.append(np.float32(eol_data[i]-k))\n log1=np.float32(norm(np.array(log1).reshape(-1,1)))\n log2=np.float32(norm(np.array(log2).reshape(-1,1)))\n return log1,log2\ndef process2predict(cell_feature):\n x_in=[]\n y_in=[]\n for i in range(len(cell_feature)):\n col1=[]\n col2=[]\n len_=len(cell_feature['%d'%(i)])-100 \n for k in range(len_):\n for j in range(0,50,1): \n temp=cell_feature['%d'%(i)][k:(j+k+1)]\n col1.append(np.float32(np.pad(temp, ((0,50-j-1),(0,0)), 'edge')))\n col2.append(np.float32(((eol_data[i]-k))-rul_norm[0])/rul_norm[1])\n col2.append((np.float32(k)-s_norm[0])/s_norm[1])\n x_in.append(col1)\n y_in.append(col2)\n return x_in,y_in\n\n\neol_data = np.load('%s/battery_EoL.npy'%(data_dir),allow_pickle='TRUE')\nbattery_id = np.load('%s/index_battery.npy'%(data_dir),allow_pickle='TRUE')\ncharge_data=np.load('%s/charge_data.npy'%(data_dir),allow_pickle='TRUE').tolist()\ndischarge_data=np.load('%s/discharge_data.npy'%(data_dir),allow_pickle='TRUE').tolist()\nsummary_data=np.load('%s/summary_data.npy'%(data_dir),allow_pickle='TRUE').tolist()\ncharge_norm=np.load('%s/charge_norm.npy'%(data_dir),allow_pickle='TRUE').tolist()\ndischarge_norm=np.load('%s/discharge_norm.npy'%(data_dir),allow_pickle='TRUE').tolist()\nsummary_norm=np.load('%s/summary_norm.npy'%(data_dir),allow_pickle='TRUE').tolist()\nfeature_selector_ch=tf.keras.models.load_model('%s/feature_selector_ch.h5'%(model_dir), compile=False)\nfeature_selector_dis=tf.keras.models.load_model('%s/feature_selector_dis.h5'%(model_dir), compile=False,custom_objects={'mish':mish})\npredictor=tf.keras.models.load_model('%s/predictor.h5'%(model_dir), compile=False,custom_objects={'mish':mish})\nindex=np.load('%s/index_battery.npy'%(data_dir))\n\ncell_feature={}\n\n\n\nfor i in tqdm(range(len(charge_data))):\n charge_feature=feature_selector(feature_selector_ch,\n charge_data[i],charge_norm)\n discharge_feature=feature_selector(feature_selector_dis,\n discharge_data[i],discharge_norm)\n cell_feature['%d'%(i)]=concat_data(charge_feature,discharge_feature,\n summary_data[i]) \ns_norm,rul_norm=re_norm(cell_feature)\nx_in,y_in=process2predict(cell_feature,s_norm,rul_norm)\ntf.keras.backend.clear_session()\nin_x1,in_x2=[x_in[i] for i in index[17:]],[x_in[j] for j in index[:17]]\nin_x2=np.vstack(in_x2).reshape(-1,50,12)\nin_x1=np.vstack(in_x1).reshape(-1,50,12)\nin_y1,in_y2=[y_in[i] for i in index[17:]],[y_in[j] for j in 
index[:17]]\nin_y2=np.vstack(in_y2).reshape(-1,2)\nin_y1=np.vstack(in_y1).reshape(-1,2)\n\npredict_renorm=np.stack((rul_norm,s_norm)).reshape(2,2)\n\np1=predictor.predict(in_x1,batch_size=256)*predict_renorm[:,1]+predict_renorm[:,0]\np2=predictor.predict(in_x2,batch_size=256)*predict_renorm[:,1]+predict_renorm[:,0]\n\nans1=in_y1*predict_renorm[:,1]+predict_renorm[:,0]\nans2=in_y2*predict_renorm[:,1]+predict_renorm[:,0]\n\nprint('training_RUL_mae:%.3f'%(mae(p1[:,0],ans1[:,0])))\nprint('training_S_mae:%.3f'%(mae(p1[:,1],ans1[:,1])))\nprint('testing_RUL_mae:%.3f'%(mae(p2[:,0],ans2[:,0])))\nprint('testing_S_rmae:%.3f'%(mae(p2[:,1],ans2[:,1])))\n" ]
[ [ "tensorflow.keras.models.load_model", "numpy.hstack", "numpy.abs", "numpy.pad", "numpy.stack", "tensorflow.keras.backend.softplus", "tensorflow.keras.backend.clear_session", "numpy.float32", "numpy.transpose", "numpy.load", "sklearn.preprocessing.StandardScaler", "numpy.array", "numpy.vstack" ] ]
Ostyk/unet-plus-plus
[ "924edd8b90856650da2f040fa2ae2db6fcda18b1" ]
[ "train.py" ]
[ "import argparse\nimport os\nfrom collections import OrderedDict\nfrom glob import glob\n\nimport pandas as pd\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport torch.optim as optim\nimport yaml\nfrom albumentations.augmentations import transforms\nfrom albumentations.core.composition import Compose, OneOf\nfrom sklearn.model_selection import train_test_split\nfrom torch.optim import lr_scheduler\nfrom tqdm import tqdm\n\nimport archs\nimport losses\nfrom dataset import Dataset\nfrom metrics import iou_score\nfrom utils import AverageMeter, str2bool\n\nARCH_NAMES = archs.__all__\nLOSS_NAMES = losses.__all__\nLOSS_NAMES.append('BCEWithLogitsLoss')\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--name', default=None,\n help='model name: (default: arch+timestamp)')\n parser.add_argument('--epochs', default=100, type=int, metavar='N',\n help='number of total epochs to run')\n parser.add_argument('-b', '--batch_size', default=16, type=int,\n metavar='N', help='mini-batch size (default: 16)')\n \n # model\n parser.add_argument('--arch', '-a', metavar='ARCH', default='NestedUNet',\n choices=ARCH_NAMES,\n help='model architecture: ' +\n ' | '.join(ARCH_NAMES) +\n ' (default: NestedUNet)')\n parser.add_argument('--deep_supervision', default=False, type=str2bool)\n parser.add_argument('--input_channels', default=3, type=int,\n help='input channels')\n parser.add_argument('--num_classes', default=1, type=int,\n help='number of classes')\n parser.add_argument('--input_w', default=384, type=int,\n help='image width')\n parser.add_argument('--input_h', default=384, type=int,\n help='image height')\n \n # loss\n parser.add_argument('--loss', default='BCEDiceLoss',\n choices=LOSS_NAMES,\n help='loss: ' +\n ' | '.join(LOSS_NAMES) +\n ' (default: BCEDiceLoss)')\n \n # dataset\n parser.add_argument('--dataset', default='dsb2018_96',\n help='dataset name')\n parser.add_argument('--img_ext', default='.png',\n help='image file extension')\n parser.add_argument('--mask_ext', default='.png',\n help='mask file extension')\n\n # optimizer\n parser.add_argument('--optimizer', default='SGD',\n choices=['Adam', 'SGD'],\n help='loss: ' +\n ' | '.join(['Adam', 'SGD']) +\n ' (default: Adam)')\n parser.add_argument('--lr', '--learning_rate', default=1e-3, type=float,\n metavar='LR', help='initial learning rate')\n parser.add_argument('--momentum', default=0.9, type=float,\n help='momentum')\n parser.add_argument('--weight_decay', default=1e-4, type=float,\n help='weight decay')\n parser.add_argument('--nesterov', default=False, type=str2bool,\n help='nesterov')\n\n # scheduler\n parser.add_argument('--scheduler', default='CosineAnnealingLR',\n choices=['CosineAnnealingLR', 'ReduceLROnPlateau', 'MultiStepLR', 'ConstantLR'])\n parser.add_argument('--min_lr', default=1e-5, type=float,\n help='minimum learning rate')\n parser.add_argument('--factor', default=0.1, type=float)\n parser.add_argument('--patience', default=2, type=int)\n parser.add_argument('--milestones', default='1,2', type=str)\n parser.add_argument('--gamma', default=2/3, type=float)\n parser.add_argument('--early_stopping', default=-1, type=int,\n metavar='N', help='early stopping (default: -1)')\n \n parser.add_argument('--num_workers', default=4, type=int)\n\n config = parser.parse_args()\n\n return config\n\n\ndef train(config, train_loader, model, criterion, optimizer):\n avg_meters = {'loss': AverageMeter(),\n 'iou': AverageMeter()}\n\n model.train()\n\n pbar = 
tqdm(total=len(train_loader))\n for input, target, _ in train_loader:\n input = input.cuda()\n target = target.cuda()\n\n # compute output\n if config['deep_supervision']:\n outputs = model(input)\n loss = 0\n for output in outputs:\n loss += criterion(output, target)\n loss /= len(outputs)\n iou = iou_score(outputs[-1], target)\n else:\n output = model(input)\n loss = criterion(output, target)\n iou = iou_score(output, target)\n\n # compute gradient and do optimizing step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n avg_meters['loss'].update(loss.item(), input.size(0))\n avg_meters['iou'].update(iou, input.size(0))\n\n postfix = OrderedDict([\n ('loss', avg_meters['loss'].avg),\n ('iou', avg_meters['iou'].avg),\n ])\n pbar.set_postfix(postfix)\n pbar.update(1)\n pbar.close()\n\n return OrderedDict([('loss', avg_meters['loss'].avg),\n ('iou', avg_meters['iou'].avg)])\n\n\ndef validate(config, val_loader, model, criterion):\n avg_meters = {'loss': AverageMeter(),\n 'iou': AverageMeter()}\n\n # switch to evaluate mode\n model.eval()\n\n with torch.no_grad():\n pbar = tqdm(total=len(val_loader))\n for input, target, _ in val_loader:\n input = input.cuda()\n target = target.cuda()\n\n # compute output\n if config['deep_supervision']:\n outputs = model(input)\n loss = 0\n for output in outputs:\n loss += criterion(output, target)\n loss /= len(outputs)\n iou = iou_score(outputs[-1], target)\n else:\n output = model(input)\n loss = criterion(output, target)\n iou = iou_score(output, target)\n\n avg_meters['loss'].update(loss.item(), input.size(0))\n avg_meters['iou'].update(iou, input.size(0))\n\n postfix = OrderedDict([\n ('loss', avg_meters['loss'].avg),\n ('iou', avg_meters['iou'].avg),\n ])\n pbar.set_postfix(postfix)\n pbar.update(1)\n pbar.close()\n\n return OrderedDict([('loss', avg_meters['loss'].avg),\n ('iou', avg_meters['iou'].avg)])\n\n\ndef main():\n config = vars(parse_args())\n\n if config['name'] is None:\n if config['deep_supervision']:\n config['name'] = '%s_%s_wDS' % (config['dataset'], config['arch'])\n else:\n config['name'] = '%s_%s_woDS' % (config['dataset'], config['arch'])\n os.makedirs('models/%s' % config['name'], exist_ok=True)\n\n print('-' * 20)\n for key in config:\n print('%s: %s' % (key, config[key]))\n print('-' * 20)\n\n with open('models/%s/config.yml' % config['name'], 'w') as f:\n yaml.dump(config, f)\n\n # define loss function (criterion)\n if config['loss'] == 'BCEWithLogitsLoss':\n criterion = nn.BCEWithLogitsLoss().cuda()\n else:\n criterion = losses.__dict__[config['loss']]().cuda()\n\n cudnn.benchmark = True\n\n # create model\n print(\"=> creating model %s\" % config['arch'])\n model = archs.__dict__[config['arch']](config['num_classes'],\n config['input_channels'],\n config['deep_supervision'])\n\n model = model.cuda()\n\n params = filter(lambda p: p.requires_grad, model.parameters())\n if config['optimizer'] == 'Adam':\n optimizer = optim.Adam(\n params, lr=config['lr'], weight_decay=config['weight_decay'])\n elif config['optimizer'] == 'SGD':\n optimizer = optim.SGD(params, lr=config['lr'], momentum=config['momentum'],\n nesterov=config['nesterov'], weight_decay=config['weight_decay'])\n else:\n raise NotImplementedError\n\n if config['scheduler'] == 'CosineAnnealingLR':\n scheduler = lr_scheduler.CosineAnnealingLR(\n optimizer, T_max=config['epochs'], eta_min=config['min_lr'])\n elif config['scheduler'] == 'ReduceLROnPlateau':\n scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, factor=config['factor'], 
patience=config['patience'],\n verbose=1, min_lr=config['min_lr'])\n elif config['scheduler'] == 'MultiStepLR':\n scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[int(e) for e in config['milestones'].split(',')], gamma=config['gamma'])\n elif config['scheduler'] == 'ConstantLR':\n scheduler = None\n else:\n raise NotImplementedError\n\n # Data loading code\n# img_ids = glob(os.path.join('inputs', config['dataset'], 'images', '*' + config['img_ext']))\n# img_ids = [os.path.splitext(os.path.basename(p))[0] for p in img_ids]\n\n# train_img_ids, val_img_ids = train_test_split(img_ids, test_size=0.2, random_state=41)\n\n train_transform = Compose([\n transforms.RandomRotate90(),\n transforms.Flip(),\n OneOf([\n transforms.HueSaturationValue(),\n transforms.RandomBrightness(),\n transforms.RandomContrast(),\n ], p=1),\n transforms.Resize(config['input_h'], config['input_w']),\n transforms.Normalize(),\n ])\n\n val_transform = Compose([\n transforms.Resize(config['input_h'], config['input_w']),\n transforms.Normalize(),\n ])\n\n# train_dataset = Dataset(\n# img_ids=train_img_ids,\n# img_dir=os.path.join('inputs', config['dataset'], 'images'),\n# mask_dir=os.path.join('inputs', config['dataset'], 'masks'),\n# img_ext=config['img_ext'],\n# mask_ext=config['mask_ext'],\n# num_classes=config['num_classes'],\n# transform=train_transform)\n# val_dataset = Dataset(\n# img_ids=val_img_ids,\n# img_dir=os.path.join('inputs', config['dataset'], 'images'),\n# mask_dir=os.path.join('inputs', config['dataset'], 'masks'),\n# img_ext=config['img_ext'],\n# mask_ext=config['mask_ext'],\n# num_classes=config['num_classes'],\n# transform=val_transform)\n\n train_dataset = Dataset(\n root = 'inputs/'+config['dataset'],\n subset = 'train',\n num_classes=1,\n transform=train_transform)\n \n val_dataset = Dataset(\n root = 'inputs/'+config['dataset'],\n subset = 'val',\n num_classes=1,\n transform=val_transform)\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset,\n batch_size=config['batch_size'],\n shuffle=True,\n num_workers=config['num_workers'],\n drop_last=True)\n val_loader = torch.utils.data.DataLoader(\n val_dataset,\n batch_size=config['batch_size'],\n shuffle=False,\n num_workers=config['num_workers'],\n drop_last=False)\n\n log = OrderedDict([\n ('epoch', []),\n ('lr', []),\n ('loss', []),\n ('iou', []),\n ('val_loss', []),\n ('val_iou', []),\n ])\n\n best_iou = 0\n trigger = 0\n for epoch in range(config['epochs']):\n print('Epoch [%d/%d]' % (epoch, config['epochs']))\n\n # train for one epoch\n train_log = train(config, train_loader, model, criterion, optimizer)\n # evaluate on validation set\n val_log = validate(config, val_loader, model, criterion)\n\n if config['scheduler'] == 'CosineAnnealingLR':\n scheduler.step()\n elif config['scheduler'] == 'ReduceLROnPlateau':\n scheduler.step(val_log['loss'])\n\n print('loss %.4f - iou %.4f - val_loss %.4f - val_iou %.4f'\n % (train_log['loss'], train_log['iou'], val_log['loss'], val_log['iou']))\n\n log['epoch'].append(epoch)\n log['lr'].append(config['lr'])\n log['loss'].append(train_log['loss'])\n log['iou'].append(train_log['iou'])\n log['val_loss'].append(val_log['loss'])\n log['val_iou'].append(val_log['iou'])\n\n pd.DataFrame(log).to_csv('models/%s/log.csv' %\n config['name'], index=False)\n\n trigger += 1\n\n if val_log['iou'] > best_iou:\n torch.save(model.state_dict(), 'models/%s/model.pth' %\n config['name'])\n best_iou = val_log['iou']\n print(\"=> saved best model\")\n trigger = 0\n\n # early stopping\n if 
config['early_stopping'] >= 0 and trigger >= config['early_stopping']:\n print(\"=> early stopping\")\n break\n\n torch.cuda.empty_cache()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.optim.Adam", "torch.optim.lr_scheduler.ReduceLROnPlateau", "torch.optim.lr_scheduler.CosineAnnealingLR", "torch.utils.data.DataLoader", "torch.cuda.empty_cache", "pandas.DataFrame", "torch.nn.BCEWithLogitsLoss", "torch.no_grad", "torch.optim.SGD" ] ]
rogersheu/AllLeague-NBA-Predictions
[ "3675277e283ed48b4f0ab6a87b6403e8c29d287e" ]
[ "scripts/daily_database_update.py" ]
[ "import sqlite3\nfrom os import listdir\n\nimport pandas as pd\n\nfrom transfer_data import pick_path\n\n\ndef database_pipeline(path):\n connection = sqlite3.connect(\"./baseData/allPlayerStats.db\")\n\n cursor = connection.cursor()\n\n # See this for various ways to import CSV into sqlite using Python. Pandas used here because files are not prohibitively large.\n # https://stackoverflow.com/questions/2887878/importing-a-csv-file-into-a-sqlite3-database-table-using-python\n\n print(\"SQL scripts starting...\")\n # Drop old tables, might not be necessary since we're dropping them\n sql_file = open(\"./scripts/SQL/drop_old_tables.sql\")\n try:\n sql_as_string = sql_file.read()\n cursor.executescript(sql_as_string)\n sql_file.close()\n except Exception:\n pass\n\n # Decide whether to have user pick path or just set it automatically...\n for fileName in listdir(path):\n if fileName.endswith('.csv'): # Avoid any accidents\n df = pd.read_csv(f'{path}/{fileName}')\n df.to_sql(\n f'{fileName.replace(\".csv\",\"\").split(\"_\")[0]}', connection, if_exists='replace', index=False)\n try:\n date = f'{fileName.replace(\".csv\",\"\").split(\"_\")[1]}'\n except Exception:\n pass\n\n # Make changes to tables\n sql_file = open(\"./scripts/SQL/prep_tables_for_extraction.sql\")\n try:\n sql_as_string = sql_file.read()\n cursor.executescript(sql_as_string)\n except Exception:\n pass\n\n sql_file.close()\n\n # Extract this season's qualified players\n sql_file = open(\"./scripts/SQL/players2022_dbeaver.sql\")\n df_output = pd.read_sql_query(sql_file.read(), connection)\n sql_file.close()\n # sql_as_string = sql_file.read()\n # cursor.executescript(sql_as_string)\n print(df_output)\n df_output.to_csv(f'{path}/stats_{date}.csv', index=False)\n\n print(\"SQL scripts complete.\")\n\n\ndef main():\n data_path = pick_path()\n database_pipeline(data_path)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "pandas.read_csv" ] ]
tonyshao5/Tensorflow-up
[ "f8f8fce9436c40cad298f6211db2be3a18480bad", "f8f8fce9436c40cad298f6211db2be3a18480bad" ]
[ "tflib/data/disk_image.py", "tflib/data/dataset.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport multiprocessing\n\nimport tensorflow as tf\nfrom tflib.data.dataset import batch_dataset, Dataset\n\n\n_N_CPU = multiprocessing.cpu_count()\n\n\ndef disk_image_batch_dataset(img_paths, batch_size, labels=None, prefetch_batch=_N_CPU + 1, drop_remainder=True, filter=None,\n map_func=None, num_threads=_N_CPU, shuffle=True, buffer_size=4096, repeat=-1):\n \"\"\"Disk image batch dataset.\n\n This function is suitable for jpg and png files\n\n img_paths: string list or 1-D tensor, each of which is an iamge path\n labels: label list/tuple_of_list or tensor/tuple_of_tensor, each of which is a corresponding label\n \"\"\"\n if labels is None:\n dataset = tf.data.Dataset.from_tensor_slices(img_paths)\n elif isinstance(labels, tuple):\n dataset = tf.data.Dataset.from_tensor_slices((img_paths,) + tuple(labels))\n else:\n dataset = tf.data.Dataset.from_tensor_slices((img_paths, labels))\n\n def parse_func(path, *label):\n img = tf.read_file(path)\n img = tf.image.decode_png(img, 3)\n return (img,) + label\n\n if map_func:\n def map_func_(*args):\n return map_func(*parse_func(*args))\n else:\n map_func_ = parse_func\n\n # dataset = dataset.map(parse_func, num_parallel_calls=num_threads) is slower\n\n dataset = batch_dataset(dataset, batch_size, prefetch_batch, drop_remainder, filter,\n map_func_, num_threads, shuffle, buffer_size, repeat)\n\n return dataset\n\n\nclass DiskImageData(Dataset):\n \"\"\"DiskImageData.\n\n This function is suitable for jpg and png files\n\n img_paths: string list or 1-D tensor, each of which is an iamge path\n labels: label list or tensor, each of which is a corresponding label\n \"\"\"\n\n def __init__(self, img_paths, batch_size, labels=None, prefetch_batch=_N_CPU + 1, drop_remainder=True, filter=None,\n map_func=None, num_threads=_N_CPU, shuffle=True, buffer_size=4096, repeat=-1, sess=None):\n super(DiskImageData, self).__init__()\n dataset = disk_image_batch_dataset(img_paths, batch_size, labels, prefetch_batch, drop_remainder, filter,\n map_func, num_threads, shuffle, buffer_size, repeat)\n self._bulid(dataset, sess)\n", "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport multiprocessing\n\nimport tensorflow as tf\nimport tensorflow.contrib.eager as tfe\nfrom tflib.utils import session\n\n\n_N_CPU = multiprocessing.cpu_count()\n\n\ndef batch_dataset(dataset, batch_size, prefetch_batch=_N_CPU + 1, drop_remainder=True, filter=None,\n map_func=None, num_threads=_N_CPU, shuffle=True, buffer_size=4096, repeat=-1):\n if filter:\n dataset = dataset.filter(filter)\n\n if map_func:\n dataset = dataset.map(map_func, num_parallel_calls=num_threads)\n\n if shuffle:\n dataset = dataset.shuffle(buffer_size)\n\n if drop_remainder:\n dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))\n else:\n dataset = dataset.batch(batch_size)\n\n dataset = dataset.repeat(repeat).prefetch(prefetch_batch)\n\n return dataset\n\n\nclass Dataset(object):\n\n def __init__(self):\n self._dataset = None\n self._iterator = None\n self._batch_op = None\n self._sess = None\n\n self._is_eager = tf.executing_eagerly()\n self._eager_iterator = None\n\n def __del__(self):\n if self._sess:\n self._sess.close()\n\n def __iter__(self):\n return self\n\n def __next__(self):\n try:\n b = self.get_next()\n except:\n raise StopIteration\n else:\n return b\n\n next = __next__\n\n def get_next(self):\n if 
self._is_eager:\n return self._eager_iterator.get_next()\n else:\n return self._sess.run(self._batch_op)\n\n def reset(self, feed_dict={}):\n if self._is_eager:\n self._eager_iterator = tfe.Iterator(self._dataset)\n else:\n self._sess.run(self._iterator.initializer, feed_dict=feed_dict)\n\n def _bulid(self, dataset, sess=None):\n self._dataset = dataset\n\n if self._is_eager:\n self._eager_iterator = tfe.Iterator(dataset)\n else:\n self._iterator = dataset.make_initializable_iterator()\n self._batch_op = self._iterator.get_next()\n if sess:\n self._sess = sess\n else:\n self._sess = session()\n\n try:\n self.reset()\n except:\n pass\n\n @property\n def dataset(self):\n return self._dataset\n\n @property\n def iterator(self):\n return self._iterator\n\n @property\n def batch_op(self):\n return self._batch_op\n" ]
[ [ "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.image.decode_png", "tensorflow.read_file" ], [ "tensorflow.executing_eagerly", "tensorflow.contrib.eager.Iterator", "tensorflow.contrib.data.batch_and_drop_remainder" ] ]
machanic/TangentAttack
[ "17c1a8e93f9bbd03e209e8650631af744a0ff6b8", "17c1a8e93f9bbd03e209e8650631af744a0ff6b8", "17c1a8e93f9bbd03e209e8650631af744a0ff6b8" ]
[ "adversarial_defense/model/feature_defense_model.py", "biased_boundary_attack/utils/util.py", "QEBATangentAttack/utils.py" ]
[ "import glob\nimport os\nimport pretrainedmodels\nimport torch\nfrom torch import nn\nfrom torchvision import models as torch_models\nimport cifar_models as models\nfrom adversarial_defense.model.denoise_resnet import DenoiseResNet50, DenoiseResNet101, DenoiseResNet152\nfrom adversarial_defense.model.pcl_resnet import PrototypeConformityLossResNet\nfrom cifar_models_myself import Conv3, DenseNet121, DenseNet169, DenseNet201, GoogLeNet, MobileNet, MobileNetV2, \\\n ResNet18, \\\n ResNet34, ResNet50, ResNet101, ResNet152, PNASNetA, PNASNetB, EfficientNetB0, DPN26, DPN92, ResNeXt29_2x64d, \\\n ResNeXt29_4x64d, ResNeXt29_8x64d, ResNeXt29_32x4d, SENet18, ShuffleNetG2, ShuffleNetG3, vgg11, vgg13, vgg16, vgg19, \\\n PreActResNet18, PreActResNet34, PreActResNet50, PreActResNet101, PreActResNet152, wideresnet28, wideresnet34, \\\n wideresnet40, carlinet, wideresnet28drop, wideresnet34drop, wideresnet40drop\nfrom cifar_models_myself.miscellaneous import Identity\nfrom config import pretrained_cifar_model_conf, IN_CHANNELS, IMAGE_SIZE, CLASS_NUM, PROJECT_PATH\nfrom cifar_models_myself.efficient_densenet import EfficientDenseNet\nfrom cifar_models_myself.ghostnet import ghost_net\nfrom tiny_imagenet_models.densenet import densenet161, densenet121, densenet169, densenet201\nfrom tiny_imagenet_models.resnext import resnext101_32x4d, resnext101_64x4d\nimport torchvision.models as vision_models\nfrom tiny_imagenet_models.inception import inception_v3\nfrom tiny_imagenet_models.wrn import tiny_imagenet_wrn\n\n\nclass FeatureDefenseModel(nn.Module):\n \"\"\"\n A StandardModel object wraps a cnn model.\n This model always accept standard image: in [0, 1] range, RGB order, un-normalized, NCHW format\n \"\"\"\n def __init__(self, dataset, arch, no_grad=True):\n super(FeatureDefenseModel, self).__init__()\n # init cnn model\n self.in_channels = IN_CHANNELS[dataset]\n self.dataset = dataset\n if \"denoise\" in arch.lower():\n # CIFAR-100@ResNet50_with_denoise_NonLocal_Filter_3.pth.tar\n trained_model_path = \"{root}/train_pytorch_model/adversarial_train/feature_denoise/{dataset}@{arch}_NonLocal_Filter_3.pth.tar\".format(root=PROJECT_PATH, dataset=dataset, arch=arch)\n assert os.path.exists(trained_model_path), \"{} does not exist!\".format(trained_model_path)\n elif dataset.startswith(\"CIFAR\"):\n trained_model_path = \"{root}/train_pytorch_model/real_image_model/{dataset}-pretrained/{arch}/checkpoint.pth.tar\".format(root=PROJECT_PATH, dataset=dataset, arch=arch)\n assert os.path.exists(trained_model_path), \"{} does not exist!\".format(trained_model_path)\n elif dataset == \"TinyImageNet\":\n arch = arch.replace(\"resnet-\", \"resnet\")\n trained_model_path = \"{root}/train_pytorch_model/real_image_model/{dataset}@{arch}@*.pth.tar\".format(root=PROJECT_PATH, dataset=dataset, arch=arch)\n trained_model_path_list = list(glob.glob(trained_model_path))\n assert len(trained_model_path_list)>0, \"{} does not exist!\".format(trained_model_path)\n trained_model_path = trained_model_path_list[0]\n else:\n trained_model_path = \"{root}/train_pytorch_model/real_image_model/{dataset}-pretrained/checkpoints/{arch}*.pth\".format(\n root=PROJECT_PATH, dataset=dataset, arch=arch)\n trained_model_path_ls = list(glob.glob(trained_model_path))\n assert trained_model_path_ls, \"{} does not exist!\".format(trained_model_path)\n trained_model_path = trained_model_path_ls[0]\n\n self.cnn = self.make_model(dataset, arch, self.in_channels, CLASS_NUM[dataset], trained_model_path=trained_model_path)\n # init cnn model 
meta-information\n self.mean = torch.FloatTensor(self.cnn.mean).view(1, self.in_channels, 1, 1).cuda()\n self.mean.requires_grad =True\n\n self.std = torch.FloatTensor(self.cnn.std).view(1, self.in_channels, 1, 1).cuda()\n self.std.requires_grad = True\n\n self.input_space = self.cnn.input_space # 'RGB' or 'GBR'\n self.input_range = self.cnn.input_range # [0, 1] or [0, 255]\n self.input_size = self.cnn.input_size\n self.no_grad = no_grad\n self.arch = arch\n\n @staticmethod\n def check_arch(arch, dataset):\n if dataset == \"ImageNet\":\n return arch in pretrainedmodels.__dict__\n elif dataset == \"TinyImageNet\":\n trained_model_path = \"{root}/train_pytorch_model/real_image_model/{dataset}@{arch}@*.pth.tar\".format(\n root=PROJECT_PATH, dataset=dataset, arch=arch)\n trained_model_path_list = list(glob.glob(trained_model_path))\n return len(trained_model_path_list) > 0\n else:\n trained_model_path = \"{root}/train_pytorch_model/real_image_model/{dataset}-pretrained/{arch}*\".format(\n root=PROJECT_PATH, dataset=dataset, arch=arch)\n trained_model_path = glob.glob(trained_model_path)\n if len(trained_model_path) > 0:\n return os.path.exists(trained_model_path[0] + \"/checkpoint.pth.tar\")\n else:\n return False\n\n\n def forward(self, x):\n # assign dropout probability\n # if hasattr(self, 'drop'):\n # self.cnn.drop = self.drop\n # channel order\n if self.input_space == 'BGR':\n x = x[:, [2, 1, 0], :, :] # pytorch does not support negative stride index (::-1) yet\n # input range\n if max(self.input_range) == 255:\n x = x * 255\n # normalization\n x = (x - self.mean.type(x.dtype).to(x.device)) / self.std.type(x.dtype).to(x.device)\n if self.no_grad:\n with torch.no_grad():\n if \"pcl\" in self.arch:\n feats128, feats256, feats1024, x = self.cnn(x)\n else:\n x = self.cnn(x)\n else:\n if \"pcl\" in self.arch:\n feats128, feats256, feats1024, x = self.cnn(x)\n else:\n x = self.cnn(x)\n\n x = x.view(x.size(0), -1)\n if \"pcl\" in self.arch:\n return feats128, feats256, feats1024, x\n return x\n\n def load_weight_from_pth_checkpoint(self, model, fname):\n raw_state_dict = torch.load(fname, map_location='cpu')\n if \"state_dict\" in raw_state_dict:\n raw_state_dict = raw_state_dict[\"state_dict\"]\n state_dict = dict()\n for key, val in raw_state_dict.items():\n new_key = key.replace('module.', '')\n state_dict[new_key] = val\n model.load_state_dict(state_dict)\n\n\n\n def construct_cifar_model(self, arch, dataset, num_classes):\n if \"denoise\" not in arch.lower():\n conf = pretrained_cifar_model_conf[dataset][arch]\n arch = arch.split(\"-\")[0]\n if arch.startswith('resnext'):\n model = models.__dict__[arch](\n cardinality=conf[\"cardinality\"],\n num_classes=num_classes,\n depth=conf[\"depth\"],\n widen_factor=conf[\"widen_factor\"],\n dropRate=conf[\"drop\"],\n )\n elif arch.startswith('densenet'):\n model = models.__dict__[arch](\n num_classes=num_classes,\n depth=conf[\"depth\"],\n growthRate=conf[\"growthRate\"],\n compressionRate=conf[\"compressionRate\"],\n dropRate=conf[\"drop\"],\n )\n elif arch.startswith('wrn'):\n model = models.__dict__[arch](\n num_classes=num_classes,\n depth=conf[\"depth\"],\n widen_factor=conf[\"widen_factor\"],\n dropRate=conf[\"drop\"],\n )\n elif arch.endswith('resnet') and \"pcl_\" not in arch and \"denoise\" not in arch:\n model = models.__dict__[arch](\n num_classes=num_classes,\n depth=conf[\"depth\"],\n block_name=conf[\"block_name\"],\n )\n elif \"pcl_resnet\" in arch:\n model = PrototypeConformityLossResNet(in_channels=IN_CHANNELS[dataset], 
depth=conf[\"depth\"], num_classes=CLASS_NUM[dataset])\n elif arch == \"DenoiseResNet50\":\n model = DenoiseResNet50(in_channels=IN_CHANNELS[dataset], num_classes=CLASS_NUM[dataset], whether_denoising=True)\n elif arch == \"DenoiseResNet101\":\n model = DenoiseResNet101(in_channels=IN_CHANNELS[dataset], num_classes=CLASS_NUM[dataset], whether_denoising=True)\n elif arch == \"DenoiseResNet152\":\n model = DenoiseResNet152(in_channels=IN_CHANNELS[dataset], num_classes=CLASS_NUM[dataset], whether_denoising=True)\n else:\n model = models.__dict__[arch](num_classes=num_classes)\n return model\n\n def make_model(self, dataset, arch, in_channel, num_classes, trained_model_path=None):\n \"\"\"\n Make model, and load pre-trained weights.\n :param dataset: cifar10 or imagenet\n :param arch: arch name, e.g., alexnet_bn\n :return: model (in cpu and training mode)\n \"\"\"\n if dataset in ['CIFAR-10',\"CIFAR-100\", \"MNIST\",\"FashionMNIST\"]:\n assert trained_model_path is not None and os.path.exists(trained_model_path), \"Pretrained weight model file {} does not exist!\".format(trained_model_path)\n if arch == 'gdas':\n model = models.gdas(in_channel, num_classes)\n model.mean = [125.3 / 255, 123.0 / 255, 113.9 / 255]\n model.std = [63.0 / 255, 62.1 / 255, 66.7 / 255]\n model.input_space = 'RGB'\n model.input_range = [0, 1]\n model.input_size = [in_channel, IMAGE_SIZE[dataset][0], IMAGE_SIZE[dataset][1]]\n elif arch == 'pyramidnet272':\n model = models.pyramidnet272(in_channel, num_classes)\n model.mean = [0.49139968, 0.48215841, 0.44653091]\n model.std = [0.24703223, 0.24348513, 0.26158784]\n model.input_space = 'RGB'\n model.input_range = [0, 1]\n model.input_size = [in_channel, IMAGE_SIZE[dataset][0], IMAGE_SIZE[dataset][1]]\n else:\n model = self.construct_cifar_model(arch, dataset, num_classes) #\n model.mean = [0.4914, 0.4822, 0.4465]\n model.std = [0.2023, 0.1994, 0.2010]\n model.input_space = 'RGB'\n model.input_range = [0, 1]\n model.input_size = [in_channel, IMAGE_SIZE[dataset][0], IMAGE_SIZE[dataset][1]]\n # self.load_weight_from_pth_checkpoint(model, trained_model_path)\n elif dataset == \"TinyImageNet\":\n model = MetaLearnerModelBuilder.construct_tiny_imagenet_model(arch, dataset)\n model.input_space = 'RGB'\n model.input_range = [0, 1]\n model.mean = [0.4914, 0.4822, 0.4465] # if \"defense_resnet\" not in arch and \"denoise\" not in arch: [0,0,0] . 
[1,1,1]\n model.std = [0.2023, 0.1994, 0.2010]\n model.input_size = [in_channel,IMAGE_SIZE[dataset][0], IMAGE_SIZE[dataset][1]]\n # model.load_state_dict(torch.load(trained_model_path, map_location=lambda storage, location: storage)[\"state_dict\"])\n elif dataset == 'ImageNet':\n os.environ[\"TORCH_HOME\"] = \"{}/train_pytorch_model/real_image_model/ImageNet-pretrained\".format(PROJECT_PATH)\n model = pretrainedmodels.__dict__[arch](num_classes=1000, pretrained=\"imagenet\")\n return model\n\n\nclass MetaLearnerModelBuilder(object):\n @staticmethod\n def construct_cifar_model(arch, dataset):\n if arch == \"conv3\":\n network = Conv3(IN_CHANNELS[dataset], IMAGE_SIZE[dataset], CLASS_NUM[dataset])\n elif arch == \"densenet121\":\n network = DenseNet121(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"densenet169\":\n network = DenseNet169(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"densenet201\":\n network = DenseNet201(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"googlenet\":\n network = GoogLeNet(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"mobilenet\":\n network = MobileNet(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"mobilenet_v2\":\n network = MobileNetV2(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"ghost_net\":\n network = ghost_net(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"resnet18\":\n network = ResNet18(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"resnet34\":\n network = ResNet34(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"resnet50\":\n network = ResNet50(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"resnet101\":\n network = ResNet101(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"resnet152\":\n network = ResNet152(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"pnasnetA\":\n network = PNASNetA(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"pnasnetB\":\n network = PNASNetB(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"efficientnet\":\n network = EfficientNetB0(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"dpn26\":\n network = DPN26(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"dpn92\":\n network = DPN92(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"resnext29_2\":\n network = ResNeXt29_2x64d(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"resnext29_4\":\n network = ResNeXt29_4x64d(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"resnext29_8\":\n network = ResNeXt29_8x64d(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"resnext29_32\":\n network = ResNeXt29_32x4d(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"senet18\":\n network = SENet18(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"shufflenet_G2\":\n network = ShuffleNetG2(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"shufflenet_G3\":\n network = ShuffleNetG3(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"vgg11\":\n network = vgg11(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"vgg13\":\n network = vgg13(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"vgg16\":\n network = vgg16(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"vgg19\":\n network = vgg19(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"preactresnet18\":\n network = PreActResNet18(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"preactresnet34\":\n network = PreActResNet34(IN_CHANNELS[dataset], 
CLASS_NUM[dataset])\n elif arch == \"preactresnet50\":\n network = PreActResNet50(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"preactresnet101\":\n network = PreActResNet101(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"preactresnet152\":\n network = PreActResNet152(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"wideresnet28\":\n network = wideresnet28(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"wideresnet28drop\":\n network = wideresnet28drop(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"wideresnet34\":\n network = wideresnet34(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"wideresnet34drop\":\n network = wideresnet34drop(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"wideresnet40\":\n network = wideresnet40(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"wideresnet40drop\":\n network = wideresnet40drop(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"carlinet\":\n network = carlinet(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == 'efficient_densenet':\n depth = 40\n block_config = [(depth - 4) // 6 for _ in range(3)]\n network = EfficientDenseNet(IN_CHANNELS[dataset], block_config=block_config,\n num_classes=CLASS_NUM[dataset], small_inputs=dataset != \"ImageNet\", efficient=False)\n return network\n\n @staticmethod\n def construct_imagenet_model(arch, dataset):\n os.environ[\"TORCH_HOME\"] = \"{}/train_pytorch_model/real_image_model/ImageNet-pretrained\".format(PROJECT_PATH)\n if arch == 'efficient_densenet':\n depth = 40\n block_config = [(depth - 4) // 6 for _ in range(3)]\n return EfficientDenseNet(IN_CHANNELS[dataset],block_config=block_config, num_classes=CLASS_NUM[dataset], small_inputs=False, efficient=False)\n elif arch == \"ghost_net\":\n network = ghost_net(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n return network\n model = vision_models.__dict__[arch](pretrained=False)\n return model\n\n @staticmethod\n def construct_tiny_imagenet_model(arch, dataset):\n if not arch.startswith(\"densenet\") and not arch.startswith(\"resnext\") and arch in torch_models.__dict__:\n network = torch_models.__dict__[arch](pretrained=False)\n num_classes = CLASS_NUM[dataset]\n if arch.startswith(\"resnet\"):\n num_ftrs = network.fc.in_features\n network.fc = nn.Linear(num_ftrs, num_classes)\n elif arch.startswith(\"densenet\"):\n if arch == \"densenet161\":\n network = densenet161(pretrained=False)\n elif arch == \"densenet121\":\n network = densenet121(pretrained=False)\n elif arch == \"densenet169\":\n network = densenet169(pretrained=False)\n elif arch == \"densenet201\":\n network = densenet201(pretrained=False)\n elif arch == \"resnext32_4\":\n network = resnext101_32x4d(pretrained=None)\n elif arch == \"resnext64_4\":\n network = resnext101_64x4d(pretrained=None)\n elif arch == \"ghost_net\":\n network = ghost_net(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch.startswith(\"inception\"):\n network = inception_v3(pretrained=False)\n elif arch == \"WRN-28-10-drop\":\n network = tiny_imagenet_wrn(in_channels=IN_CHANNELS[dataset],depth=28,num_classes=CLASS_NUM[dataset],widen_factor=10, dropRate=0.3)\n elif arch == \"WRN-40-10-drop\":\n network = tiny_imagenet_wrn(in_channels=IN_CHANNELS[dataset], depth=40, num_classes=CLASS_NUM[dataset],\n widen_factor=10, dropRate=0.3)\n elif arch.startswith(\"vgg\"):\n network.avgpool = Identity()\n network.classifier[0] = nn.Linear(512 * 2 * 2, 4096) # 64 /2**5 = 2\n network.classifier[-1] = nn.Linear(4096, num_classes)\n elif 
\"pcl_resnet\" in arch:\n network = PrototypeConformityLossResNet(in_channels=IN_CHANNELS[dataset], depth=pretrained_cifar_model_conf[dataset][arch][\"depth\"], num_classes=CLASS_NUM[dataset])\n elif arch == \"DenoiseResNet50\":\n network = DenoiseResNet50(in_channels=IN_CHANNELS[dataset], num_classes=CLASS_NUM[dataset], whether_denoising=True)\n elif arch == \"DenoiseResNet101\":\n network = DenoiseResNet101(in_channels=IN_CHANNELS[dataset], num_classes=CLASS_NUM[dataset], whether_denoising=True)\n elif arch == \"DenoiseResNet152\":\n network = DenoiseResNet152(in_channels=IN_CHANNELS[dataset], num_classes=CLASS_NUM[dataset], whether_denoising=True)\n return network\n\n\n\n", "import random\n\nimport numpy as np\nimport torch\nimport glog as log\n\ndef line_search_to_boundary(bb_model, x_orig, x_start, label, is_targeted):\n \"\"\"\n Binary search along a line between start and original image in order to find the decision boundary.\n :param bb_model: The (black-box) model.\n :param x_orig: The original image to attack.\n :param x_start: The starting image (which fulfills the adversarial criterion)\n :param is_targeted: true if the attack is targeted.\n :param label: the target label if targeted, or the correct label if untargeted.\n :return: A point next to the decision boundary (but still adversarial)\n \"\"\"\n\n eps = 0.5 # Stop when decision boundary is closer than this (in L2 distance)\n i = 0\n\n x1 = x_start.float()\n x2 = x_orig.float()\n diff = x2 - x1\n while torch.norm(diff) > eps:\n i += 1\n\n x_candidate = x1 + 0.5 * diff\n if (torch.argmax(bb_model(x_candidate),dim=1) == label) == is_targeted:\n x1 = x_candidate\n else:\n x2 = x_candidate\n\n diff = x2 - x1\n\n log.info(\"Found decision boundary after {} queries.\".format(i))\n return x1\n\n\ndef find_closest_img(bb_model, X_orig, X_targets, label, is_targeted):\n \"\"\"\n From a list of potential starting images, finds the closest to the original.\n Before returning, this method makes sure that the image fulfills the adversarial condition (is actually classified as the target label).\n :param bb_model: The (black-box) model.\n :param X_orig: The original image to attack.\n :param X_targets: List of images that fulfill the adversarial criterion (i.e. target class in the targeted case)\n :param is_targeted: true if the attack is targeted.\n :param label: the target label if targeted, or the correct label if untargeted.\n :return: the closest image (in L2 distance) to the original that also fulfills the adversarial condition.\n \"\"\"\n\n dists = torch.empty(len(X_targets), dtype=torch.float32)\n for i in range(len(X_targets)):\n d_l2 = torch.norm((X_targets[i, ...] 
- X_orig))\n dists[i] = d_l2\n\n indices = torch.argsort(dists)\n for index in indices:\n X_target = X_targets[index]\n pred_clsid = torch.argmax(bb_model(X_target),dim=1)\n if (pred_clsid == label) == is_targeted:\n log.info(\"Found an image of the target class, d_l2={:.3f}.\".format(dists[index]))\n return X_target\n\n log.info(\"Image of target class is wrongly classified by model, skipping.\")\n\n raise ValueError(\"Could not find an image of the target class that was correctly classified by the model!\")\n\n\ndef sample_hypersphere(n_samples, sample_shape, radius, sample_gen=None, seed=None):\n \"\"\"\n Uniformly sample the surface of a L2-hypersphere.\n Uniform picking: create a n-dimensional normal distribution and then normalize it to the desired radius.\n See http://mathworld.wolfram.com/HyperspherePointPicking.html\n :param n_samples: number of image samples to generate.\n :param sample_shape: shape of a single image sample.\n :param radius: radius(=eps) of the hypersphere.\n :param sample_gen: If provided, retrieves random numbers from this generator.\n :param seed: seed for the random generator. Cannot be used with the sample generator.\n :return: Batch of image samples, shape: (n_samples,) + sample_shape\n \"\"\"\n\n if sample_gen is not None:\n assert seed is None, \"Can't provide individual seeds if using the multi-threaded generator.\"\n assert sample_shape == sample_gen.shape, \"sample_gen shape is {}, sample_shape is {}\".format(sample_gen.shape, sample_shape)\n\n # Get precalculated samples from the generator\n gauss = torch.empty(n_samples, np.prod(sample_shape).item()).float()\n for i in range(n_samples):\n gauss[i] = torch.from_numpy(sample_gen.get_normal()).view(-1)\n else:\n if seed is not None:\n np.random.seed(seed)\n random.seed(seed)\n gauss = torch.from_numpy(np.random.normal(size=(n_samples, np.prod(sample_shape))))\n\n # Norm to 1\n norm = torch.linalg.norm(gauss, ord=2, dim=1)\n perturbation = (gauss / torch.unsqueeze(norm,1)) * radius\n perturbation = perturbation.view([n_samples] + sample_shape)\n return perturbation\n", "\"\"\"\nProvides classes to measure the distance between inputs.\n\nDistances\n---------\n\n.. autosummary::\n :nosignatures:\n\n MeanSquaredDistance\n MeanAbsoluteDistance\n Linfinity\n L0\n\nAliases\n-------\n\n.. autosummary::\n :nosignatures:\n\n MSE\n MAE\n Linf\n\nBase class\n----------\n\nTo implement a new distance, simply subclass the :class:`Distance` class and\nimplement the :meth:`_calculate` method.\n\n.. autosummary::\n :nosignatures:\n\n Distance\n\n\"\"\"\nfrom __future__ import division\nimport sys\nimport abc\nimport torch\nabstractmethod = abc.abstractmethod\n\nif sys.version_info >= (3, 4):\n ABC = abc.ABC\nelse: # pragma: no cover\n ABC = abc.ABCMeta('ABC', (), {})\n\nimport functools\nfrom numbers import Number\n\nfrom torch.nn import functional as F\nimport numpy as np\n\n\n\[email protected]_ordering\nclass Distance(ABC):\n \"\"\"Base class for distances.\n\n This class should be subclassed when implementing\n new distances. 
Subclasses must implement _calculate.\n\n \"\"\"\n\n def __init__(\n self,\n reference=None,\n other=None,\n bounds=None,\n value=None):\n\n if value is not None:\n # alternative constructor\n assert isinstance(value, Number)\n assert reference is None\n assert other is None\n assert bounds is None\n self.reference = None\n self.other = None\n self._bounds = None\n self._value = value\n self._gradient = None\n else:\n # standard constructor\n self.reference = reference\n self.other = other\n self._bounds = bounds\n self._value, self._gradient = self._calculate()\n\n assert self._value is not None\n\n @property\n def value(self):\n return self._value\n\n @property\n def gradient(self):\n return self._gradient\n\n @abstractmethod\n def _calculate(self):\n \"\"\"Returns distance and gradient of distance w.r.t. to self.other\"\"\"\n raise NotImplementedError\n\n def name(self):\n return self.__class__.__name__\n\n def __str__(self):\n return '{} = {:.6e}'.format(self.name(), self._value)\n\n def __repr__(self):\n return self.__str__()\n\n def __eq__(self, other):\n if other.__class__ != self.__class__:\n raise TypeError('Comparisons are only possible between the same distance types.')\n return self.value == other.value\n\n def __lt__(self, other):\n if other.__class__ != self.__class__:\n raise TypeError('Comparisons are only possible between the same distance types.')\n return self.value < other.value\n\n\nclass MeanSquaredDistance(Distance):\n \"\"\"Calculates the mean squared error between two inputs.\n\n \"\"\"\n\n def _calculate(self):\n min_, max_ = self._bounds\n n = self.reference.numel()\n f = n * (max_ - min_)**2\n\n diff = self.other - self.reference\n value = torch.dot(diff.view(-1), diff.view(-1)).item() / f\n\n # calculate the gradient only when needed\n self._g_diff = diff\n self._g_f = f\n gradient = None\n return value, gradient\n\n @property\n def gradient(self):\n if self._gradient is None:\n self._gradient = self._g_diff / (self._g_f / 2)\n return self._gradient\n\n def __str__(self):\n return 'normalized MSE = {:.2e}'.format(self._value)\n\n\nMSE = MeanSquaredDistance\n\n\nclass MeanAbsoluteDistance(Distance):\n \"\"\"Calculates the mean absolute error between two inputs.\n\n \"\"\"\n\n def _calculate(self):\n min_, max_ = self._bounds\n diff = (self.other - self.reference) / (max_ - min_)\n value = torch.mean(torch.abs(diff)).type(torch.float64)\n n = self.reference.size\n gradient = 1 / n * torch.sign(diff) / (max_ - min_)\n return value, gradient\n\n def __str__(self):\n return 'normalized MAE = {:.2e}'.format(self._value)\n\n\nMAE = MeanAbsoluteDistance\n\n\nclass Linfinity(Distance):\n \"\"\"Calculates the L-infinity norm of the difference between two inputs.\n\n \"\"\"\n\n def _calculate(self):\n min_, max_ = self._bounds\n diff = (self.other - self.reference) / (max_ - min_)\n value = torch.max(torch.abs(diff)).type(torch.float64)\n gradient = None\n return value, gradient\n\n @property\n def gradient(self):\n raise NotImplementedError\n\n def __str__(self):\n return 'normalized Linf distance = {:.2e}'.format(self._value)\n\n\nLinf = Linfinity\n\n\nclass L0(Distance):\n \"\"\"Calculates the L0 norm of the difference between two inputs.\n\n \"\"\"\n\n def _calculate(self):\n diff = self.other - self.reference\n value = torch.sum(diff != 0)\n gradient = None\n return value, gradient\n\n @property\n def gradient(self):\n raise NotImplementedError\n\n def __str__(self):\n return 'L0 distance = {}'.format(self._value)\n\n\n\n\n\"\"\"\nProvides classes that define what is 
adversarial.\n\nCriteria\n--------\n\nWe provide criteria for untargeted and targeted adversarial attacks.\n\n.. autosummary::\n :nosignatures:\n\n Misclassification\n TopKMisclassification\n OriginalClassProbability\n ConfidentMisclassification\n\n.. autosummary::\n :nosignatures:\n\n TargetClass\n TargetClassProbability\n\nExamples\n--------\n\nUntargeted criteria:\n\n>>> from foolbox.criteria import Misclassification\n>>> criterion1 = Misclassification()\n\n>>> from foolbox.criteria import TopKMisclassification\n>>> criterion2 = TopKMisclassification(k=5)\n\nTargeted criteria:\n\n>>> from foolbox.criteria import TargetClass\n>>> criterion3 = TargetClass(22)\n\n>>> from foolbox.criteria import TargetClassProbability\n>>> criterion4 = TargetClassProbability(22, p=0.99)\n\nCriteria can be combined to create a new criterion:\n\n>>> criterion5 = criterion2 & criterion3\n\n\"\"\"\n\n\n\nclass Criterion(ABC):\n \"\"\"Base class for criteria that define what is adversarial.\n\n The :class:`Criterion` class represents a criterion used to\n determine if predictions for an image are adversarial given\n a reference label. It should be subclassed when implementing\n new criteria. Subclasses must implement is_adversarial.\n\n \"\"\"\n\n def name(self):\n \"\"\"Returns a human readable name that uniquely identifies\n the criterion with its hyperparameters.\n\n Returns\n -------\n str\n Human readable name that uniquely identifies the criterion\n with its hyperparameters.\n\n Notes\n -----\n Defaults to the class name but subclasses can provide more\n descriptive names and must take hyperparameters into account.\n\n \"\"\"\n return self.__class__.__name__\n\n @abstractmethod\n def is_adversarial(self, predictions, label):\n \"\"\"Decides if predictions for an image are adversarial given\n a reference label.\n\n Parameters\n ----------\n predictions : :class:`numpy.ndarray`\n A vector with the pre-softmax predictions for some image.\n label : int\n The label of the unperturbed reference image.\n\n Returns\n -------\n bool\n True if an image with the given predictions is an adversarial\n example when the ground-truth class is given by label, False\n otherwise.\n\n \"\"\"\n raise NotImplementedError\n\n def __and__(self, other):\n return CombinedCriteria(self, other)\n\n\nclass CombinedCriteria(Criterion):\n \"\"\"Meta criterion that combines several criteria into a new one.\n\n Considers inputs as adversarial that are considered adversarial\n by all sub-criteria that are combined by this criterion.\n\n Instead of using this class directly, it is possible to combine\n criteria like this: criteria1 & criteria2\n\n Parameters\n ----------\n *criteria : variable length list of :class:`Criterion` instances\n List of sub-criteria that will be combined.\n\n Notes\n -----\n This class uses lazy evaluation of the criteria in the order they\n are passed to the constructor.\n\n \"\"\"\n\n def __init__(self, *criteria):\n super(CombinedCriteria, self).__init__()\n self._criteria = criteria\n\n def name(self):\n \"\"\"Concatenates the names of the given criteria in alphabetical order.\n\n If a sub-criterion is itself a combined criterion, its name is\n first split into the individual names and the names of the\n sub-sub criteria is used instead of the name of the sub-criterion.\n This is done recursively to ensure that the order and the hierarchy\n of the criteria does not influence the name.\n\n Returns\n -------\n str\n The alphabetically sorted names of the sub-criteria concatenated\n using double underscores 
between them.\n\n \"\"\"\n names = (criterion.name() for criterion in self._criteria)\n return '__'.join(sorted(names))\n\n def is_adversarial(self, predictions, label):\n for criterion in self._criteria:\n if not criterion.is_adversarial(predictions, label):\n # lazy evaluation\n return False\n return True\n\n\nclass Misclassification(Criterion):\n \"\"\"Defines adversarials as inputs for which the predicted class\n is not the original class.\n\n See Also\n --------\n :class:`TopKMisclassification`\n\n Notes\n -----\n Uses `numpy.argmax` to break ties.\n\n \"\"\"\n\n def name(self):\n return 'Top1Misclassification'\n\n def is_adversarial(self, predictions, label):\n top1 = torch.argmax(predictions).item()\n return top1 != label\n\n\nclass ConfidentMisclassification(Criterion):\n \"\"\"Defines adversarials as inputs for which the probability\n of any class other than the original is above a given threshold.\n\n Parameters\n ----------\n p : float\n The threshold probability. If the probability of any class\n other than the original is at least p, the image is\n considered an adversarial. It must satisfy 0 <= p <= 1.\n\n \"\"\"\n\n def __init__(self, p):\n super(ConfidentMisclassification, self).__init__()\n assert 0 <= p <= 1\n self.p = p\n\n def name(self):\n return '{}-{:.04f}'.format(self.__class__.__name__, self.p)\n\n def is_adversarial(self, predictions, label):\n top1 = torch.argmax(predictions)\n probabilities = F.softmax(predictions)\n return (torch.max(probabilities) >= self.p) and (top1 != label)\n\n\nclass TopKMisclassification(Criterion):\n \"\"\"Defines adversarials as inputs for which the original class is\n not one of the top k predicted classes.\n\n For k = 1, the :class:`Misclassification` class provides a more\n efficient implementation.\n\n Parameters\n ----------\n k : int\n Number of top predictions to which the reference label is\n compared to.\n\n See Also\n --------\n :class:`Misclassification` : Provides a more effcient implementation\n for k = 1.\n\n Notes\n -----\n Uses `numpy.argsort` to break ties.\n\n \"\"\"\n\n def __init__(self, k):\n super(TopKMisclassification, self).__init__()\n self.k = k\n\n def name(self):\n return 'Top{}Misclassification'.format(self.k)\n\n def is_adversarial(self, predictions, label):\n topk = torch.argsort(predictions)[-self.k:]\n return label not in topk\n\n\nclass TargetClass(Criterion):\n \"\"\"Defines adversarials as inputs for which the predicted class\n is the given target class.\n\n Parameters\n ----------\n target_class : int\n The target class that needs to be predicted for an image\n to be considered an adversarial.\n\n Notes\n -----\n Uses `numpy.argmax` to break ties.\n\n \"\"\"\n\n def __init__(self, target_class=None):\n super(TargetClass, self).__init__()\n self._target_class = target_class\n\n def target_class(self):\n return self._target_class\n\n def name(self):\n return '{}-{}'.format(self.__class__.__name__, self.target_class())\n\n def is_adversarial(self, predictions, label=None):\n top1 = torch.argmax(predictions,dim=-1).item()\n return top1 == self.target_class() # target class 其实是true label\n\n\nclass OriginalClassProbability(Criterion):\n \"\"\"Defines adversarials as inputs for which the probability\n of the original class is below a given threshold.\n\n This criterion alone does not guarantee that the class\n predicted for the adversarial image is not the original class\n (unless p < 1 / number of classes). 
Therefore, it should usually\n be combined with a classifcation criterion.\n\n Parameters\n ----------\n p : float\n The threshold probability. If the probability of the\n original class is below this threshold, the image is\n considered an adversarial. It must satisfy 0 <= p <= 1.\n\n \"\"\"\n\n def __init__(self, p):\n super(OriginalClassProbability, self).__init__()\n assert 0 <= p <= 1\n self.p = p\n\n def name(self):\n return '{}-{:.04f}'.format(self.__class__.__name__, self.p)\n\n def is_adversarial(self, predictions, label):\n probabilities = F.softmax(predictions)\n return probabilities[label] < self.p\n\n\nclass TargetClassProbability(Criterion):\n \"\"\"Defines adversarials as inputs for which the probability\n of a given target class is above a given threshold.\n\n If the threshold is below 0.5, this criterion does not guarantee\n that the class predicted for the adversarial image is not the\n original class. In that case, it should usually be combined with\n a classification criterion.\n\n Parameters\n ----------\n target_class : int\n The target class for which the predicted probability must\n be above the threshold probability p, otherwise the image\n is not considered an adversarial.\n p : float\n The threshold probability. If the probability of the\n target class is above this threshold, the image is\n considered an adversarial. It must satisfy 0 <= p <= 1.\n\n \"\"\"\n\n def __init__(self, target_class, p):\n super(TargetClassProbability, self).__init__()\n self._target_class = target_class\n assert 0 <= p <= 1\n self.p = p\n\n def target_class(self):\n return self._target_class\n\n def name(self):\n return '{}-{}-{:.04f}'.format(\n self.__class__.__name__, self.target_class(), self.p)\n\n def is_adversarial(self, predictions, label):\n probabilities = softmax(predictions)\n return probabilities[self.target_class()] > self.p\n" ]
[ [ "torch.FloatTensor", "torch.nn.Linear", "torch.no_grad", "torch.load" ], [ "torch.norm", "numpy.random.seed", "torch.unsqueeze", "torch.linalg.norm", "numpy.prod", "torch.argsort" ], [ "torch.abs", "torch.nn.functional.softmax", "torch.max", "torch.sign", "torch.sum", "torch.argsort", "torch.argmax" ] ]
miksu/edward2
[ "973acdb23701f320ebaee8a56fc44d4414acfa4e", "973acdb23701f320ebaee8a56fc44d4414acfa4e" ]
[ "edward2/tensorflow/initializers.py", "baselines/cifar10/variational_inference.py" ]
[ "# coding=utf-8\n# Copyright 2019 The Edward2 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Initializers.\n\nThis module extends `tf.keras.initializers` with the notion of \"trainable\ninitializers\", where initializers to weights and biases in `tf.keras.layers` may\nthemselves carry parameters. For example, consider a weight initializer which\nreturns a variational distribution: this is reified as an `ed.RandomVariable`\nparameterized by `tf.Variables`.\n\nOne subtlety is how `tf.keras.constraints` are used on the parameters of\ntrainable initializers. Typically, Keras constraints are used with projected\ngradient descent, where one performs unconstrained optimization and then applies\na projection (the constraint) after each gradient update. To stay in line with\nprobabilistic literature, trainable initializers apply constraints on the\n`tf.Variables` themselves (i.e., a constrained parameterization) and do not\napply projections during optimization.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\n\nfrom edward2.tensorflow import constraints\nfrom edward2.tensorflow import generated_random_variables\nfrom edward2.tensorflow import regularizers\nimport six\nimport tensorflow.compat.v1 as tf1\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\n\n\n# From `tensorflow/python/ops/init_ops.py`\ndef _compute_fans(shape):\n \"\"\"Computes the number of input and output units for a weight shape.\n\n Args:\n shape: Integer shape tuple or TF tensor shape.\n\n Returns:\n A tuple of scalars (fan_in, fan_out).\n \"\"\"\n if len(shape) < 1: # Just to avoid errors for constants.\n fan_in = fan_out = 1\n elif len(shape) == 1:\n fan_in = fan_out = shape[0]\n elif len(shape) == 2:\n fan_in = shape[0]\n fan_out = shape[1]\n else:\n # Assuming convolution kernels (2D, 3D, or more).\n # kernel shape: (..., input_depth, depth)\n receptive_field_size = 1.\n for dim in shape[:-2]:\n receptive_field_size *= dim\n fan_in = shape[-2] * receptive_field_size\n fan_out = shape[-1] * receptive_field_size\n if isinstance(fan_in, tf1.Dimension):\n fan_in = fan_in.value\n if isinstance(fan_out, tf1.Dimension):\n fan_out = fan_out.value\n return fan_in, fan_out\n\n\nclass ScaledNormalStdDev(tf.keras.initializers.VarianceScaling):\n \"\"\"Initializer capable of adapting its scale to the shape of weights tensors.\n\n This initializes the standard deviation parameter of a Trainable Normal\n distribution with a scale based on the shape of the weights tensor.\n Additionally, A small amount of noise will be added to break weigh symmetry.\n\n With `distribution=\"truncated_normal\" or \"untruncated_normal\"`, the standard\n deviation (after truncation, if used) is `stddev = sqrt(scale / n)`, where n\n is:\n - number of input units in the weight tensor, if mode = \"fan_in\"\n - number of output units, if mode = \"fan_out\"\n - average of the numbers of input and output units, if mode = \"fan_avg\"\n 
\"\"\"\n\n def __init__(self,\n scale=1.0,\n mode='fan_in',\n distribution='untruncated_normal',\n seed=None):\n \"\"\"Constructs the initializer.\n\n Args:\n scale: Scaling factor (positive float).\n mode: One of \"fan_in\", \"fan_out\", \"fan_avg\".\n distribution: Random distribution to use. One of \"truncated_normal\", or\n \"untruncated_normal\".\n seed: A Python integer. Used to create random seeds. See\n `tf.set_random_seed`\n for behavior.\n\n Raises:\n ValueError: In case of an invalid value for the \"scale\", mode\" or\n \"distribution\" arguments.\n \"\"\"\n distribution = distribution.lower()\n if distribution not in {'truncated_normal', 'untruncated_normal'}:\n raise ValueError('Invalid `distribution` argument:', distribution)\n super(ScaledNormalStdDev, self).__init__(scale=scale, mode=mode,\n distribution=distribution,\n seed=seed)\n\n def __call__(self, shape, dtype=None):\n if dtype is None:\n dtype = self.dtype\n scale = self.scale\n scale_shape = shape\n fan_in, fan_out = _compute_fans(scale_shape)\n if self.mode == 'fan_in':\n scale /= max(1., fan_in)\n elif self.mode == 'fan_out':\n scale /= max(1., fan_out)\n else:\n scale /= max(1., (fan_in + fan_out) / 2.)\n if self.distribution == 'truncated_normal':\n # constant from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)\n stddev = math.sqrt(scale) / .87962566103423978\n else: # self.distribution == 'untruncated_normal':\n stddev = math.sqrt(scale)\n return tf.random.truncated_normal(shape, mean=stddev, stddev=stddev*0.1,\n dtype=dtype, seed=self.seed)\n\n\nclass TrainableHalfCauchy(tf.keras.layers.Layer):\n \"\"\"Half-Cauchy distribution initializer with trainable parameters.\"\"\"\n\n def __init__(self,\n loc_initializer=tf.keras.initializers.TruncatedNormal(\n stddev=1e-5),\n scale_initializer=tf.keras.initializers.TruncatedNormal(\n mean=-3., stddev=0.1),\n loc_regularizer=None,\n scale_regularizer=None,\n loc_constraint=None,\n scale_constraint='softplus',\n seed=None,\n **kwargs):\n \"\"\"Constructs the initializer.\"\"\"\n super(TrainableHalfCauchy, self).__init__(**kwargs)\n self.loc_initializer = get(loc_initializer)\n self.scale_initializer = get(scale_initializer)\n self.loc_regularizer = regularizers.get(loc_regularizer)\n self.scale_regularizer = regularizers.get(scale_regularizer)\n self.loc_constraint = constraints.get(loc_constraint)\n self.scale_constraint = constraints.get(scale_constraint)\n self.seed = seed\n\n def build(self, shape, dtype=None):\n if dtype is None:\n dtype = self.dtype\n\n self.loc = self.add_weight(\n 'loc',\n shape=shape,\n initializer=self.loc_initializer,\n regularizer=self.loc_regularizer,\n constraint=None,\n dtype=dtype,\n trainable=True)\n self.scale = self.add_weight(\n 'scale',\n shape=shape,\n initializer=self.scale_initializer,\n regularizer=self.scale_regularizer,\n constraint=None,\n dtype=dtype,\n trainable=True)\n self.built = True\n\n def __call__(self, shape, dtype=None):\n if not self.built:\n self.build(shape, dtype)\n loc = self.loc\n if self.loc_constraint:\n loc = self.loc_constraint(loc)\n scale = self.scale\n if self.scale_constraint:\n scale = self.scale_constraint(scale)\n return generated_random_variables.Independent(\n generated_random_variables.HalfCauchy(loc=loc,\n scale=scale).distribution,\n reinterpreted_batch_ndims=len(shape))\n\n def get_config(self):\n return {\n 'loc_initializer':\n serialize(self.loc_initializer),\n 'scale_initializer':\n serialize(self.scale_initializer),\n 'loc_regularizer':\n 
regularizers.serialize(self.loc_regularizer),\n 'scale_regularizer':\n regularizers.serialize(self.scale_regularizer),\n 'loc_constraint':\n constraints.serialize(self.loc_constraint),\n 'scale_constraint':\n constraints.serialize(self.scale_constraint),\n 'seed': self.seed,\n }\n\n\nclass TrainableNormal(tf.keras.layers.Layer):\n \"\"\"Random normal op as an initializer with trainable mean and stddev.\"\"\"\n\n def __init__(self,\n mean_initializer=tf.keras.initializers.TruncatedNormal(\n stddev=1e-5),\n stddev_initializer=tf.keras.initializers.TruncatedNormal(\n mean=-3., stddev=0.1),\n mean_regularizer=None,\n stddev_regularizer=None,\n mean_constraint=None,\n stddev_constraint='softplus',\n seed=None,\n **kwargs):\n \"\"\"Constructs the initializer.\"\"\"\n super(TrainableNormal, self).__init__(**kwargs)\n self.mean_initializer = get(mean_initializer)\n self.stddev_initializer = get(stddev_initializer)\n self.mean_regularizer = regularizers.get(mean_regularizer)\n self.stddev_regularizer = regularizers.get(stddev_regularizer)\n self.mean_constraint = constraints.get(mean_constraint)\n self.stddev_constraint = constraints.get(stddev_constraint)\n self.seed = seed\n\n def build(self, shape, dtype=None):\n if dtype is None:\n dtype = self.dtype\n\n self.mean = self.add_weight(\n 'mean',\n shape=shape,\n initializer=self.mean_initializer,\n regularizer=self.mean_regularizer,\n constraint=None,\n dtype=dtype,\n trainable=True)\n self.stddev = self.add_weight(\n 'stddev',\n shape=shape,\n initializer=self.stddev_initializer,\n regularizer=self.stddev_regularizer,\n constraint=None,\n dtype=dtype,\n trainable=True)\n self.built = True\n\n def __call__(self, shape, dtype=None):\n if not self.built:\n self.build(shape, dtype)\n mean = self.mean\n if self.mean_constraint:\n mean = self.mean_constraint(mean)\n stddev = self.stddev\n if self.stddev_constraint:\n stddev = self.stddev_constraint(stddev)\n return generated_random_variables.Independent(\n generated_random_variables.Normal(loc=mean, scale=stddev).distribution,\n reinterpreted_batch_ndims=len(shape))\n\n def get_config(self):\n return {\n 'mean_initializer':\n serialize(self.mean_initializer),\n 'stddev_initializer':\n serialize(self.stddev_initializer),\n 'mean_regularizer':\n regularizers.serialize(self.mean_regularizer),\n 'stddev_regularizer':\n regularizers.serialize(self.stddev_regularizer),\n 'mean_constraint':\n constraints.serialize(self.mean_constraint),\n 'stddev_constraint':\n constraints.serialize(self.stddev_constraint),\n 'seed': self.seed,\n }\n\n\nclass TrainableHeNormal(TrainableNormal):\n \"\"\"Trainable normal initialized per He et al. 2015, given a ReLU nonlinearity.\n\n The distribution is initialized to a Normal scaled by `sqrt(2 / fan_in)`,\n where `fan_in` is the number of input units. A ReLU nonlinearity is assumed\n for this initialization scheme.\n\n References:\n He K, Zhang X, Ren S, Sun J. Delving deep into rectifiers: Surpassing\n human-level performance on imagenet classification. In Proceedings of the\n IEEE international conference on computer vision 2015 (pp. 
1026-1034).\n https://arxiv.org/abs/1502.01852\n \"\"\"\n\n def __init__(self, seed=None):\n super(TrainableHeNormal, self).__init__(\n mean_initializer=tf.keras.initializers.he_normal(seed),\n seed=seed)\n\n def get_config(self):\n return {\n 'seed': self.seed,\n }\n\n\nclass TrainableGlorotNormal(TrainableNormal):\n \"\"\"Trainable normal initialized per Glorot and Bengio, 2010.\n\n The distribution is initialized to a Normal scaled by `sqrt(2 / fan_in +\n fan_out)`, where `fan_in` is the number of input units and `fan_out` is the\n number of output units.\n\n References:\n Glorot X, Bengio Y. Understanding the difficulty of training deep\n feedforward neural networks. In Proceedings of the thirteenth international\n conference on artificial intelligence and statistics 2010 Mar 31 (pp.\n 249-256). http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf\n \"\"\"\n\n def __init__(self, seed=None):\n super(TrainableGlorotNormal, self).__init__(\n mean_initializer=tf.keras.initializers.GlorotNormal(seed),\n seed=seed)\n\n def get_config(self):\n return {\n 'seed': self.seed,\n }\n\n\nclass RandomSign(tf.keras.initializers.Initializer):\n \"\"\"Initializer that generates tensors initialized to +/- 1.\n\n Attributes:\n probs: probability of +1.\n dtype: tensorflow dtype.\n seed: A Python integer. Used to create random seeds. See\n `tf.set_random_seed`\n \"\"\"\n\n def __init__(self, probs=1.0, seed=None, dtype=tf.float32):\n self.probs = probs\n self.seed = seed\n self.dtype = dtype\n\n def __call__(self, shape, dtype=None, partition_info=None):\n if dtype is None:\n dtype = self.dtype\n bernoulli = tfp.distributions.Bernoulli(probs=self.probs,\n dtype=dtype)\n return 2. * bernoulli.sample(shape, self.seed) - 1.\n\n def get_config(self):\n return {\n 'dtype': self.dtype.name,\n 'seed': self.seed,\n 'probs': self.probs\n }\n\n\n# Compatibility aliases, following tf.keras\n\n# pylint: disable=invalid-name\nscaled_normal_std_dev = ScaledNormalStdDev\ntrainable_half_cauchy = TrainableHalfCauchy\ntrainable_normal = TrainableNormal\ntrainable_he_normal = TrainableHeNormal\ntrainable_glorot_normal = TrainableGlorotNormal\nrandom_sign = RandomSign\n# pylint: enable=invalid-name\n\n# Utility functions, following tf.keras\n\n\ndef serialize(initializer):\n return tf.keras.utils.serialize_keras_object(initializer)\n\n\ndef deserialize(config, custom_objects=None):\n return tf.keras.utils.deserialize_keras_object(\n config,\n module_objects=globals(),\n custom_objects=custom_objects,\n printable_module_name='initializers')\n\n\ndef get(identifier, value=None):\n \"\"\"Getter for loading from strings; falls back to Keras as needed.\"\"\"\n if value is None:\n value = identifier\n if identifier is None:\n return None\n elif isinstance(identifier, dict):\n try:\n return deserialize(identifier)\n except ValueError:\n pass\n elif isinstance(identifier, six.string_types):\n config = {'class_name': str(identifier), 'config': {}}\n try:\n return deserialize(config)\n except ValueError:\n pass\n elif callable(identifier):\n return identifier\n return tf.keras.initializers.get(value)\n", "# coding=utf-8\n# Copyright 2019 The Edward2 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# 
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Variational inference for ResNet-20 on CIFAR-10.\n\nThis script performs variational inference with a few notable techniques:\n\n1. Normal prior whose mean is tied at the variational posterior's. This makes\n the KL penalty only penalize the weight posterior's standard deviation and\n not its mean. The prior's standard deviation is a fixed hyperparameter.\n2. Fully factorized normal variational distribution (Blundell et al., 2015).\n3. Flipout for lower-variance gradients in convolutional layers (Wen et al.,\n 2018) but only applied to the second convolution in each residual block\n (Ovadia et al., 2019).\n4. Variational dropout (local reparameterization) for lower-variance gradients\n in dense layers (Kingma et al., 2015).\n5. Replace batch normalization with SELU activation and fixup initialization\n (Ovadia et al., 2019; Heek and Kalchbrenner, 2019).\n6. Learning rate schedule from [Keras' CIFAR-10 example](\n https://keras.io/examples/cifar10_resnet/).\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nimport edward2 as ed\nfrom edward2.baselines.cifar10 import utils\n\nfrom six.moves import range\nimport tensorflow.compat.v2 as tf\nimport tensorflow_datasets as tfds\n\nflags.DEFINE_integer('seed', 42, 'Random seed.')\nflags.DEFINE_string('output_dir', None, 'Output directory.')\nflags.DEFINE_integer('train_epochs', 200, 'Number of training epochs.')\n# TODO(trandustin): batch size, init_learning_rate, and prior_stddev are from\n# Ovadia et al.(2019). 
Tune this given changes.\nflags.DEFINE_integer('batch_size', 107, 'Batch size.')\nflags.DEFINE_float('init_learning_rate', 0.001189, 'Learning rate.')\nflags.DEFINE_float('prior_stddev', 0.127, 'Fixed stddev for weight prior.')\nflags.DEFINE_boolean('batch_norm', False, 'Whether to apply batchnorm.')\nFLAGS = flags.FLAGS\n\n\nclass NormalKLDivergenceWithTiedMean(tf.keras.regularizers.Regularizer):\n \"\"\"KL with normal prior whose mean is fixed at the variational posterior's.\"\"\"\n\n def __init__(self, stddev=1., scale_factor=1.):\n \"\"\"Constructs regularizer.\"\"\"\n self.stddev = stddev\n self.scale_factor = scale_factor\n\n def __call__(self, x):\n \"\"\"Computes regularization given an ed.Normal random variable as input.\"\"\"\n if not isinstance(x, ed.RandomVariable):\n raise ValueError('Input must be an ed.RandomVariable.')\n prior = ed.Independent(\n ed.Normal(loc=x.distribution.mean(), scale=self.stddev).distribution,\n reinterpreted_batch_ndims=len(x.distribution.event_shape))\n regularization = x.distribution.kl_divergence(prior.distribution)\n return self.scale_factor * regularization\n\n def get_config(self):\n return {\n 'stddev': self.stddev,\n 'scale_factor': self.scale_factor,\n }\n\n\ndef resnet_layer(inputs,\n filters,\n kernel_size=3,\n strides=1,\n activation=None,\n depth=20,\n batch_norm=True,\n bayesian=False,\n prior_stddev=1.,\n dataset_size=None):\n \"\"\"2D Convolution-Batch Normalization-Activation stack builder.\n\n Args:\n inputs: tf.Tensor.\n filters: Number of filters for Conv2D.\n kernel_size: Kernel dimensions for Conv2D.\n strides: Stride dimensinons for Conv2D.\n activation: tf.keras.activations.Activation.\n depth: ResNet depth.\n batch_norm: Whether to apply batch normalization.\n bayesian: Whether to apply Bayesian layers.\n prior_stddev: Standard deviation of weight priors.\n dataset_size: Total number of examples in an epoch.\n\n Returns:\n tf.Tensor.\n \"\"\"\n if bayesian:\n def fixup_init(shape, dtype=None):\n \"\"\"Simplified form of fixup initialization (Zhang et al., 2019).\"\"\"\n return (tf.keras.initializers.he_normal()(shape, dtype=dtype) *\n depth**(-1/4))\n conv = ed.layers.Conv2DVariationalDropout(\n filters,\n kernel_size=kernel_size,\n strides=strides,\n padding='same',\n kernel_initializer=ed.initializers.TrainableNormal(\n mean_initializer=fixup_init),\n kernel_regularizer=NormalKLDivergenceWithTiedMean(\n stddev=prior_stddev, scale_factor=1./dataset_size))\n else:\n conv = tf.keras.layers.Conv2D(\n filters,\n kernel_size=kernel_size,\n strides=strides,\n padding='same',\n kernel_initializer='he_normal',\n kernel_regularizer=tf.keras.regularizers.l2(1e-4))\n\n x = inputs\n logging.info('Applying conv layer.')\n x = conv(x)\n if batch_norm:\n x = tf.keras.layers.BatchNormalization()(x)\n if activation is not None:\n x = tf.keras.layers.Activation(activation)(x)\n return x\n\n\ndef resnet_v1(input_shape,\n depth,\n num_classes,\n batch_norm,\n prior_stddev,\n dataset_size):\n \"\"\"Builds ResNet v1.\n\n Args:\n input_shape: tf.Tensor.\n depth: ResNet depth.\n num_classes: Number of output classes.\n batch_norm: Whether to apply batch normalization.\n prior_stddev: Standard deviation of weight priors.\n dataset_size: Total number of examples in an epoch.\n\n Returns:\n tf.keras.Model.\n \"\"\"\n num_res_blocks = (depth - 2) // 6\n filters = 16\n if (depth - 2) % 6 != 0:\n raise ValueError('depth must be 6n+2 (e.g. 
20, 32, 44).')\n\n layer = functools.partial(resnet_layer,\n depth=depth,\n dataset_size=dataset_size,\n prior_stddev=prior_stddev)\n\n logging.info('Starting ResNet build.')\n inputs = tf.keras.layers.Input(shape=input_shape)\n x = layer(inputs,\n filters=filters,\n activation='selu')\n for stack in range(3):\n for res_block in range(num_res_blocks):\n logging.info('Starting ResNet stack #%d block #%d.', stack, res_block)\n strides = 1\n if stack > 0 and res_block == 0: # first layer but not first stack\n strides = 2 # downsample\n y = layer(x,\n filters=filters,\n strides=strides,\n activation='selu',\n batch_norm=batch_norm)\n y = layer(y,\n filters=filters,\n activation=None,\n batch_norm=batch_norm,\n bayesian=True)\n if stack > 0 and res_block == 0: # first layer but not first stack\n # linear projection residual shortcut connection to match changed dims\n x = layer(x,\n filters=filters,\n kernel_size=1,\n strides=strides,\n activation=None,\n batch_norm=False)\n x = tf.keras.layers.add([x, y])\n x = tf.keras.layers.Activation('selu')(x)\n filters *= 2\n\n # v1 does not use BN after last shortcut connection-ReLU\n x = tf.keras.layers.AveragePooling2D(pool_size=8)(x)\n x = tf.keras.layers.Flatten()(x)\n x = ed.layers.DenseVariationalDropout(\n num_classes,\n kernel_initializer='trainable_he_normal',\n kernel_regularizer=NormalKLDivergenceWithTiedMean(\n stddev=prior_stddev,\n scale_factor=1./dataset_size))(x)\n\n outputs = tf.keras.layers.Lambda(\n lambda inputs: ed.Categorical(logits=inputs))(x)\n return tf.keras.models.Model(inputs=inputs, outputs=outputs)\n\n\ndef get_metrics(model, dataset_size):\n \"\"\"Get metrics for the model.\"\"\"\n\n def negative_log_likelihood(y_true, y_pred):\n y_true = tf.squeeze(y_true)\n return -y_pred.distribution.log_prob(y_true)\n\n def accuracy(y_true, y_pred):\n \"\"\"Accuracy.\"\"\"\n del y_pred # unused arg\n y_true = tf.squeeze(y_true)\n return tf.equal(tf.argmax(input=model.output.distribution.logits, axis=1),\n tf.cast(y_true, tf.int64))\n\n def log_marginal(y_true, y_pred):\n \"\"\"Log-marginal likelihood.\"\"\"\n del y_pred # unused arg\n y_true = tf.squeeze(y_true)\n return model.output.distribution.log_prob(y_true)\n\n def kl(y_true, y_pred):\n \"\"\"KL divergence.\"\"\"\n del y_true, y_pred # unused arg\n return sum(model.losses) * dataset_size\n\n def elbo(y_true, y_pred):\n \"\"\"Evidence lower bound.\"\"\"\n return log_marginal(y_true, y_pred) * dataset_size - kl(y_true, y_pred)\n\n return negative_log_likelihood, accuracy, log_marginal, kl, elbo\n\n\ndef main(argv):\n del argv # unused arg\n tf.io.gfile.makedirs(FLAGS.output_dir)\n tf.random.set_seed(FLAGS.seed)\n\n dataset_size = 40000\n dataset_train, ds_info = utils.load_dataset(tfds.Split.TRAIN, with_info=True)\n dataset_test = utils.load_dataset(tfds.Split.TEST)\n dataset_train = dataset_train.repeat().shuffle(10 * FLAGS.batch_size).batch(\n FLAGS.batch_size)\n validation_steps = 100\n dataset_test = dataset_test.take(FLAGS.batch_size * validation_steps).repeat(\n ).batch(FLAGS.batch_size)\n\n model = resnet_v1(input_shape=ds_info.features['image'].shape,\n depth=20,\n num_classes=ds_info.features['label'].num_classes,\n batch_norm=FLAGS.batch_norm,\n prior_stddev=FLAGS.prior_stddev,\n dataset_size=dataset_size)\n negative_log_likelihood, accuracy, log_marginal, kl, elbo = get_metrics(\n model, dataset_size)\n\n model.compile(tf.keras.optimizers.Adam(FLAGS.init_learning_rate),\n loss=negative_log_likelihood,\n metrics=[elbo, log_marginal, kl, accuracy])\n logging.info('Model 
input shape: %s', model.input_shape)\n logging.info('Model output shape: %s', model.output_shape)\n logging.info('Model number of weights: %s', model.count_params())\n\n tensorboard_cb = tf.keras.callbacks.TensorBoard(log_dir=FLAGS.output_dir,\n write_graph=False)\n lr_scheduler = utils.make_lr_scheduler(FLAGS.init_learning_rate)\n model.fit(dataset_train,\n steps_per_epoch=dataset_size // FLAGS.batch_size,\n epochs=FLAGS.train_epochs,\n validation_data=dataset_test,\n validation_steps=validation_steps,\n callbacks=[tensorboard_cb, lr_scheduler])\n\n logging.info('Saving model to output_dir.')\n model_filename = FLAGS.output_dir + '/model.ckpt'\n model.save_weights(model_filename)\n\nif __name__ == '__main__':\n app.run(main)\n" ]
[ [ "tensorflow.compat.v2.keras.initializers.get", "tensorflow.compat.v2.keras.initializers.he_normal", "tensorflow.compat.v2.random.truncated_normal", "tensorflow.compat.v2.keras.initializers.TruncatedNormal", "tensorflow.compat.v2.keras.initializers.GlorotNormal", "tensorflow.compat.v2.keras.utils.serialize_keras_object" ], [ "tensorflow.compat.v2.keras.layers.AveragePooling2D", "tensorflow.compat.v2.keras.layers.Flatten", "tensorflow.compat.v2.keras.models.Model", "tensorflow.compat.v2.keras.layers.BatchNormalization", "tensorflow.compat.v2.keras.initializers.he_normal", "tensorflow.compat.v2.cast", "tensorflow.compat.v2.io.gfile.makedirs", "tensorflow.compat.v2.keras.optimizers.Adam", "tensorflow.compat.v2.random.set_seed", "tensorflow.compat.v2.keras.layers.add", "tensorflow.compat.v2.squeeze", "tensorflow.compat.v2.keras.layers.Input", "tensorflow.compat.v2.argmax", "tensorflow.compat.v2.keras.layers.Activation", "tensorflow.compat.v2.keras.regularizers.l2", "tensorflow.compat.v2.keras.callbacks.TensorBoard" ] ]
3778/icd-prediction-mimic
[ "fb8dfc3140e6cf690690b04eddc735f4f20612cf" ]
[ "MIMIC_train_baselines.py" ]
[ "# Copyright 2020, 37.78 Tecnologia Ltda.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n## Train Logistic Regression and Constant models\n\nimport argparse\nimport tensorflow as tf\n\nfrom constants import DATA_DIR, SAVE_DIR\nimport datasets\nimport feature_extraction as fx\nimport model_functions as fun\nimport utils\n\ndef main(args):\n\n save_path = SAVE_DIR + args.MODEL_NAME\n\n # Clear session\n tf.keras.backend.clear_session()\n\n # Load data and embeddings\n mimic = datasets.MIMIC_Dataset()\n mimic.load_preprocessed()\n mimic.split()\n\n # Get model class\n model = utils.get_model(args)\n\n if args.MODEL_NAME == 'lr':\n tfidf = fx.TFIDF(args)\n tfidf.fit(mimic)\n tfidf.transform(mimic)\n\n # Instantiate callback\n f1_callback = fun.f1_callback_save(model, validation_data=(tfidf.x_val, mimic.y_val),\n best_name= save_path)\n\n callbacks = [f1_callback] \n\n \n # Fit\n model.fit(tfidf.x_train, mimic.y_train, validation_data=(tfidf.x_val, mimic.y_val), callbacks=callbacks)\n\n\n # Save model state after last epoch\n if args.save_last_epoch:\n model.save_model(f'{save_path}ep{args.epochs}')\n\n # Restore weights from the best epoch based on F1 val with optimized threshold\n model = utils.get_model(args, load_path = save_path)\n\n # Predict\n y_pred_train = model.predict(tfidf.x_train)\n y_pred_val = model.predict(tfidf.x_val)\n y_pred_test = model.predict(tfidf.x_test)\n\n\n exp = fun.Experiments(y_true = [mimic.y_train, mimic.y_val, mimic.y_test],\n y_pred = [y_pred_train, y_pred_val, y_pred_test])\n\n # Compute best threshold\n exp.sweep_thresholds(subset=[0,1,0])\n\n print(f'''\n Metrics @ {exp.sweep_results['best_threshold']}''')\n # Compute metrics @ best threshold\n exp.metrics(threshold=exp.sweep_results['best_threshold']) \n\n\n elif args.MODEL_NAME == 'cte':\n\n # model.fit(mimic.y_train, most_occ_train=mimic.all_icds_train)\n model.fit(most_occ_train=mimic.all_icds_train) \n\n # Predict\n y_pred_train = model.predict(mimic.x_train, mlb=mimic.mlb)\n y_pred_val = model.predict(mimic.x_val, mlb=mimic.mlb)\n y_pred_test = model.predict(mimic.x_test, mlb=mimic.mlb)\n\n exp = fun.Experiments(y_true = [mimic.y_train, mimic.y_val, mimic.y_test],\n y_pred = [y_pred_train, y_pred_val, y_pred_test])\n\n print(f\"\"\"\n Metrics @ {args.k}\"\"\")\n # Compute metrics @ k\n exp.metrics(k=args.k) \n\n\n\ndef arg_parser():\n\n parser = argparse.ArgumentParser(description='Train model for MIMIC-III dataset and compute metrics.')\n parser.add_argument('-model', type=str, dest='MODEL_NAME', choices=['lr', 'cte'], default = 'lr',help='Model for training.')\n parser.add_argument('-epochs', type=int, dest='epochs', default=10, help='Number of epochs.')\n parser.add_argument('-tfidf_maxfeatures', type=int, dest='max_features', default=20000, help='Max features for TF-IDF.')\n parser.add_argument('-batch_size', type=int, dest='batch_size', default=32, help='Batch Size.')\n parser.add_argument('-lr', type=float, dest='lr', default=0, help='Learning Rate. 
0 for article optimized value.')\n parser.add_argument('-k', type=int, dest='k', default=15, help='Fixed k-size of predictions for Constant Model.')\n parser.add_argument('-save_last_epoch', type=bool, dest='save_last_epoch', default=False, help='Also save model state at last epoch (additionally to best epoch)')\n parser.add_argument('--verbose', type=int, dest='verbose', default=2, help='Verbose when training.')\n\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n\n args = arg_parser()\n\n main(args)" ]
[ [ "tensorflow.keras.backend.clear_session" ] ]
rosydavis/rdavis_msee_project_csun2017
[ "d23159d19b5b3ea47ddd4a0f9684477346560fc2" ]
[ "move_images.py" ]
[ "# File: move_images.py\n# Author: Rosy Davis, [email protected]\n# Last modified: 2017 Nov. 28\n#\n# A utility script to copy DWT images from a folder that keeps them placed by file name\n# (as is true of the source MP3s in the FMA dataset) to folders that split them by dataset\n# split (test, train, val) and genre (folk, hip-hop, et cetera). \n#\n# Note that this does not move the source files, but instead copies them. Wavelet image\n# files are small, and this ensures that the source images remain in place so they can be\n# reused. For example, for the FMA dataset, which has three differently-sized subsets, any\n# training image in the \"small\" dataset will also appear as a training image in the\n# \"large\" dataset. By copying instead of moving, the source image will remain at the path\n# equivalent to the path for the source audio, and can be reused if it is desirable to \n# work with both the small and the large datasets.\n\n# Parse passed-in arguments:\nimport argparse\n\n# File system utilities:\nimport os\nimport shutil\n\n# Used for error checking:\nimport numpy as np\n\n# FMA dataset utilities\nimport fma.utils as fma_utils # Utilities provided for loading and manipulating the\n\t\t\t\t\t\t\t\t\t # Free Music Archive dataset.\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"input_dir\", \n\t\t\t\t\thelp = \"Directory of images currently stored at FMA-style paths.\")\nparser.add_argument(\"output_dir\", \n\t\t\t\t\thelp = \"Directory of images to be saved in a by-class hierarchy.\")\nparser.add_argument(\"-z\", \"--size\", \n\t\t\t\t\thelp = \"Specify the dataset size to use\",\n\t\t\t\t\tchoices = [\"small\", \"medium\", \"large\"])\nparser.add_argument(\"-s\", \"--split\", \n\t\t\t\t\thelp = \"Specify the split to use\",\n\t\t\t\t\tchoices = [\"training\", \"validation\", \"test\"])\nparser.add_argument(\"-w\", \"--wavelet\", \n\t\t\t\t\thelp = \"Specify the wavelet type to use\",\n\t\t\t\t\tchoices = [\"dwt\", \"cwt\"])\n\n# By default, generate training data for small dataset:\nrequested_subset = \"small\"\nrequested_split = \"training\"\nrequested_wavelet = \"dwt\"\n\n# Override as necessary from arguments:\nargs = parser.parse_args()\ninput_dir = os.path.join(args.input_dir, '')\noutput_dir = os.path.join(args.output_dir, '')\nif args.size:\n\trequested_subset = args.size\nif args.split:\n\trequested_split = args.split\nif args.wavelet:\n\trequested_wavelet = args.wavelet\n\nif requested_split == \"training\":\n\trequested_split_path = \"train\"\nelif requested_split == \"validation\":\n\trequested_split_path = \"validation\"\nelif requested_split == \"test\":\n\trequested_split_path = \"test\"\n\n\n\n\n\n# Load the metadata files\ntracks = fma_utils.load(input_dir + 'tracks.csv')\nfeatures = fma_utils.load(input_dir + 'features.csv')\n\n# Make sure everything in features is in tracks and vice versa\nnp.testing.assert_array_equal(features.index, tracks.index)\n\n# Use the specified data subset:\nsubset = tracks['set', 'subset'] <= requested_subset\nsplit = tracks['set', 'split'] == requested_split\nrel_track_ids = tracks.loc[subset & split].index\n\ny_values = tracks.loc[subset & split, ('track', 'genre_top')]\nunique_genres = y_values.unique().categories\n\n\n\n\n\n\n# Copy files:\nfor track_id in rel_track_ids:\n\ttry:\n\t\ty_str = y_values.loc[track_id].lower()\n\texcept:\n\t\t# print(\"Skipping {}; bad genre...\".format(track_id))\n\t\tcontinue\n\t\n\ttrackstr = \"{:06d}\".format(track_id)\n\n\ttry:\n\t\tcurr_path = os.path.join(input_dir, 
\n\t\t\t\t\t\tos.path.join(requested_wavelet,\n\t\t\t\t\t\t\tos.path.join(\"noframe\",\n\t\t\t\t\t\t\t\tos.path.join(trackstr[0:3],\n\t\t\t\t\t\t\t\t\t\t\t \"{}_small.png\".format(trackstr)))))\n\t\tassert(os.path.isfile(curr_path))\n\texcept:\n\t\t# print(\"Skipping {}; file '{}' not found...\".format(track_id, curr_path))\n\t\tcontinue\n\t# print(curr_path) \n\t\n\tnew_path = os.path.join(output_dir, \n\t\t\t\t\tos.path.join(\"byclass\",\n\t\t\t\t\t\t os.path.join(requested_subset,\n\t\t\t\t\t\t\t os.path.join(requested_wavelet,\n\t\t\t\t\t\t\t\t os.path.join(requested_split_path, \n\t\t\t\t\t\t\t\t\t os.path.join(y_str, \"{}.png\".format(trackstr)))))))\n\t# print(new_path) \n\t\n\tdirectory = os.path.dirname(new_path)\n\tif not os.path.exists(directory):\n\t\tos.makedirs(directory)\n\n\tshutil.copyfile(curr_path, new_path)\n\t\t" ]
[ [ "numpy.testing.assert_array_equal" ] ]
ari-s/XpyY
[ "384500b8112a4475f2df3e736f324ab8724f66c4" ]
[ "inputfilter/csv.py" ]
[ "import numpy,csv\n\ndef csv(infile,delimiter=','):\n '''reads csv with arbitrary delimiter, returns numpy array of strings'''\n with open(infile) as f:\n rv = [ l.strip().split(delimiter) for l in f\n if l.strip() # no empty lines\n and not l.startswith('#') # no comments\n ]\n width = max(map(len,rv)) # make array rectangular\n for l in rv:\n for i in range(len(l),width):\n l.append('')\n return numpy.array(rv).transpose()\n\ndef tsv(infile):\n return csv(infile,'\\t')\n" ]
[ [ "numpy.array" ] ]
jaideepmurkute/Active-Learning---Supervised-Machine-Learning-With-Minimal-Data
[ "ba3f4e471b0a01d87848f5153f2d9f79c0eff6b1" ]
[ "mnist_fashion_lc.py" ]
[ "import sys\r\nimport os\r\n\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.linear_model import SGDClassifier\r\nfrom sklearn.model_selection import train_test_split\r\n#For sample ranking function from https://github.com/davefernig/alp\r\nfrom active_learning.active_learning import ActiveLearner\r\nfrom keras.datasets import fashion_mnist\r\n\r\nfrom collections import Counter\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom copy import deepcopy\r\n\r\n\r\ndef main():\r\n classifier_random=[LogisticRegression(solver='lbfgs',multi_class='multinomial',max_iter=1000) for i in range(10)]\r\n classifier_active = [LogisticRegression(solver='lbfgs',multi_class='multinomial',max_iter=1000) for i in range(10)]\r\n \r\n k = 0\r\n active_results = {'least_confident':[]}\r\n passive_results = []\r\n\r\n (X_train_set, y_train_set), (X_test_set, y_test_set) = fashion_mnist.load_data()\r\n\r\n x_train, x_test, y_train, y_test = train_test_split(X_train_set,y_train_set)\r\n\r\n x_train = x_train.reshape(x_train.shape[0],x_train.shape[1]*x_train.shape[2])\r\n\r\n x_test = x_test.reshape(x_test.shape[0],x_test.shape[1]*x_test.shape[2])\r\n \r\n X_labeled, X_unlabeled, y_labeled, y_oracle = train_test_split(x_train,y_train,test_size = 0.99)\r\n \r\n for model in classifier_random:\r\n model.classes_ = np.arange(10)\r\n model.fit(X_labeled, y_labeled)\r\n \r\n for model in classifier_active:\r\n model.classes_ = np.arange(10)\r\n model.fit(X_labeled, y_labeled)\r\n \r\n X_labeled_rand = deepcopy(X_labeled)\r\n y_labeled_rand = deepcopy(y_labeled)\r\n X_labeled_active = deepcopy(X_labeled)\r\n y_labeled_active = deepcopy(y_labeled)\r\n \r\n batch_size = 32\r\n \r\n new_sample_size = [32]*20\r\n\r\n seen_examples_count = 32\r\n for new_sample_size in new_sample_size:\r\n seen_examples_count = seen_examples_count + new_sample_size\r\n num_samples.append(new_sample_size)\r\n \r\n random_queries = np.random.choice(X_unlabeled.shape[0], new_sample_size, replace=False)\r\n \r\n X_labeled_rand = np.concatenate((X_labeled_rand, X_unlabeled[random_queries, :]))\r\n y_labeled_rand = np.concatenate((y_labeled_rand, y_oracle[random_queries]))\r\n \r\n predictions = []\r\n for model in classifier_random:\r\n model.fit(X_labeled_rand, y_labeled_rand)\r\n predictions.append(model.predict(X_test))\r\n\r\n prediction_stack = np.stack(predictions)\r\n commitee_decision = np.apply_along_axis(\\\r\n lambda x: Counter(x).most_common()[0][0],\\\r\n 0, prediction_stack)\r\n matches = np.sum(commitee_decision == y_test)\r\n average_accuracy = matches / np.shape(X_test)[0]\r\n passive_results.append(average_accuracy)\r\n\r\n \r\n al_obj = ActiveLearner(strategy='least_confident')\r\n for model in classifier_active:\r\n model.classes_ = np.arange(10)\r\n indexes = al_obj.rank(classifier_active, X_unlabeled, new_sample_size)\r\n \r\n X_labeled_active = np.concatenate((X_labeled_active, X_unlabeled[indexes, :]))\r\n y_labeled_active = np.concatenate((y_labeled_active, y_oracle[indexes]))\r\n\r\n predictions = []\r\n \r\n for model in classifier_active:\r\n model.fit(X_labeled_active, y_labeled_active)\r\n curr_pred = model.predict(X_test)\r\n predictions.append(curr_pred)\r\n \r\n prediction_stack = np.stack(predictions)\r\n commitee_decision = np.apply_along_axis(\\\r\n lambda x: Counter(x).most_common()[0][0],\\\r\n 0, prediction_stack)\r\n matches = np.sum(commitee_decision == y_test)\r\n average_accuracy = matches / 
np.shape(X_test)[0]\r\n active_results['least_confident'].append(average_accuracy)\r\n \r\n k = k + 1\r\n\r\n np.savetxt('./misc/random_model_accuracy.txt', passive_results)\r\n np.savetxt('./misc/active_model_accuracy.txt', active_results['least_confident'])\r\n \r\n\r\nif __name__ == '__main__':\r\n main()\r\n \r\n" ]
[ [ "sklearn.linear_model.LogisticRegression", "numpy.random.choice", "numpy.arange", "sklearn.model_selection.train_test_split", "numpy.stack", "numpy.concatenate", "numpy.shape", "numpy.savetxt", "numpy.sum" ] ]
jiafeng5513/BinocularNet
[ "c26262cef69f99f9db832ec5610cc03bf50aed88", "c26262cef69f99f9db832ec5610cc03bf50aed88", "c26262cef69f99f9db832ec5610cc03bf50aed88", "c26262cef69f99f9db832ec5610cc03bf50aed88" ]
[ "comparisons/SfmLeaner_pytorch/kitti_eval/depth_evaluation_utils.py", "comparisons/depth_from_video_in_the_wild/train.py", "evision_model/models/DepthNet.py", "evision_model/DataFlow/sequence_folders.py" ]
[ "# Mostly based on the code written by Clement Godard:\n# https://github.com/mrharicot/monodepth/blob/master/utils/evaluation_utils.py\nimport numpy as np\nfrom collections import Counter\nfrom path import Path\nfrom scipy.misc import imread\nfrom tqdm import tqdm\nimport datetime\n\n\nclass test_framework_KITTI(object):\n def __init__(self, root, test_files, seq_length=3, min_depth=1e-3, max_depth=100, step=1, use_gps=True):\n self.root = root\n self.min_depth, self.max_depth = min_depth, max_depth\n self.use_gps = use_gps\n self.calib_dirs, self.gt_files, self.img_files, self.displacements, self.cams = read_scene_data(self.root,\n test_files,\n seq_length,\n step,\n self.use_gps)\n\n def __getitem__(self, i):\n tgt = imread(self.img_files[i][0]).astype(np.float32)\n depth = generate_depth_map(self.calib_dirs[i], self.gt_files[i], tgt.shape[:2], self.cams[i])\n return {'tgt': tgt,\n 'ref': [imread(img).astype(np.float32) for img in self.img_files[i][1]],\n 'path':self.img_files[i][0],\n 'gt_depth': depth,\n 'displacements': np.array(self.displacements[i]),\n 'mask': generate_mask(depth, self.min_depth, self.max_depth)\n }\n\n def __len__(self):\n return len(self.img_files)\n\n\n###############################################################################\n# EIGEN\n\ndef getXYZ(lat, lon, alt):\n \"\"\"Helper method to compute a R(3) pose vector from an OXTS packet.\n Unlike KITTI official devkit, we use sinusoidal projection (https://en.wikipedia.org/wiki/Sinusoidal_projection)\n instead of mercator as it is much simpler.\n Initially Mercator was used because it renders nicely for Odometry vizualisation, but we don't need that here.\n In order to avoid problems for potential other runs closer to the pole in the future,\n we stick to sinusoidal which keeps the distances cleaner than mercator (and that's the only thing we want here)\n See https://github.com/utiasSTARS/pykitti/issues/24\n \"\"\"\n er = 6378137. # earth radius (approx.) in meters\n scale = np.cos(lat * np.pi / 180.)\n tx = scale * lon * np.pi * er / 180.\n ty = er * lat * np.pi / 180.\n tz = alt\n t = np.array([tx, ty, tz])\n return t\n\n\ndef get_displacements_from_GPS(root, date, scene, indices, tgt_index, precision_warning_threshold=2):\n \"\"\"gets displacement magntidues between middle frame and other frames, this is, to a scaling factor\n the mean output PoseNet should have for translation. Since the scaling is the same factor for depth maps and\n for translations, it will be used to determine how much predicted depth should be multiplied to.\"\"\"\n\n first_pose = None\n displacements = []\n oxts_root = root/date/scene/'oxts'\n if len(indices) == 0:\n return 0\n reordered_indices = [indices[tgt_index]] + [*indices[:tgt_index]] + [*indices[tgt_index + 1:]]\n already_warned = False\n for index in reordered_indices:\n oxts_data = np.genfromtxt(oxts_root/'DataFlow'/'{:010d}.txt'.format(index))\n\n if not already_warned:\n position_precision = oxts_data[23]\n if position_precision > precision_warning_threshold:\n print(\"Warning for scene {} frame {} : bad position precision from oxts ({:.2f}m). 
\"\n \"You might want to get displacements from speed\".format(scene, index, position_precision))\n already_warned = True\n\n lat, lon, alt = oxts_data[:3]\n pose = getXYZ(lat, lon, alt)\n if first_pose is None:\n first_pose = pose\n else:\n displacements.append(np.linalg.norm(pose - first_pose))\n return displacements\n\n\ndef get_displacements_from_speed(root, date, scene, indices, tgt_index):\n \"\"\"get displacement magnitudes by integrating over speed values.\n Might be a good alternative if the GPS is not good enough\"\"\"\n if len(indices) == 0:\n return []\n oxts_root = root/date/scene/'oxts'\n with open(oxts_root/'timestamps.txt') as f:\n timestamps = np.array([datetime.datetime.strptime(ts[:-3], \"%Y-%m-%d %H:%M:%S.%f\").timestamp() for ts in f.read().splitlines()])\n speeds = np.zeros((len(indices), 3))\n for i, index in enumerate(indices):\n oxts_data = np.genfromtxt(oxts_root/'DataFlow'/'{:010d}.txt'.format(index))\n speeds[i] = oxts_data[[6,7,10]]\n displacements = np.zeros((len(indices), 3))\n # Perform the integration operation, using trapezoidal method\n for i0, (i1, i2) in enumerate(zip(indices, indices[1:])):\n displacements[i0 + 1] = displacements[i0] + 0.5*(speeds[i0] + speeds[i0 + 1]) * (timestamps[i1] - timestamps[i2])\n # Set the origin of displacements at tgt_index\n displacements -= displacements[tgt_index]\n # Finally, get the displacement magnitude relative to tgt and discard the middle value (which is supposed to be 0)\n displacements_mag = np.linalg.norm(displacements, axis=1)\n return np.concatenate([displacements_mag[:tgt_index], displacements_mag[tgt_index + 1:]])\n\n\ndef read_scene_data(data_root, test_list, seq_length=3, step=1, use_gps=True):\n data_root = Path(data_root)\n gt_files = []\n calib_dirs = []\n im_files = []\n cams = []\n displacements = []\n demi_length = (seq_length - 1) // 2\n shift_range = step * np.arange(-demi_length, demi_length + 1)\n\n print('getting test metadata ... 
')\n for sample in tqdm(test_list):\n tgt_img_path = data_root/sample\n date, scene, cam_id, _, index = sample[:-4].split('/')\n\n scene_length = len(tgt_img_path.parent.files('*.png'))\n\n ref_indices = shift_range + np.clip(int(index), step*demi_length, scene_length - step*demi_length - 1)\n\n ref_imgs_path = [tgt_img_path.dirname()/'{:010d}.png'.format(i) for i in ref_indices]\n vel_path = data_root/date/scene/'velodyne_points'/'DataFlow'/'{}.bin'.format(index[:10])\n\n if tgt_img_path.isfile():\n gt_files.append(vel_path)\n calib_dirs.append(data_root/date)\n im_files.append([tgt_img_path,ref_imgs_path])\n cams.append(int(cam_id[-2:]))\n\n args = (data_root, date, scene, ref_indices, demi_length)\n if use_gps:\n displacements.append(get_displacements_from_GPS(*args))\n else:\n displacements.append(get_displacements_from_speed(*args))\n else:\n print('{} missing'.format(tgt_img_path))\n\n return calib_dirs, gt_files, im_files, displacements, cams\n\n\ndef load_velodyne_points(file_name):\n # adapted from https://github.com/hunse/kitti\n points = np.fromfile(file_name, dtype=np.float32).reshape(-1, 4)\n points[:,3] = 1\n return points\n\n\ndef read_calib_file(path):\n # taken from https://github.com/hunse/kitti\n float_chars = set(\"0123456789.e+- \")\n data = {}\n with open(path, 'r') as f:\n for line in f.readlines():\n key, value = line.split(':', 1)\n value = value.strip()\n data[key] = value\n if float_chars.issuperset(value):\n # try to cast to float array\n try:\n data[key] = np.array(list(map(float, value.split(' '))))\n except ValueError:\n # casting error: DataFlow[key] already eq. value, so pass\n pass\n\n return data\n\n\ndef sub2ind(matrixSize, rowSub, colSub):\n m, n = matrixSize\n return rowSub * (n-1) + colSub - 1\n\n\ndef generate_depth_map(calib_dir, velo_file_name, im_shape, cam=2):\n # load calibration files\n cam2cam = read_calib_file(calib_dir/'calib_cam_to_cam.txt')\n velo2cam = read_calib_file(calib_dir/'calib_velo_to_cam.txt')\n velo2cam = np.hstack((velo2cam['R'].reshape(3,3), velo2cam['T'][..., np.newaxis]))\n velo2cam = np.vstack((velo2cam, np.array([0, 0, 0, 1.0])))\n\n # compute projection matrix velodyne->image plane\n R_cam2rect = np.eye(4)\n R_cam2rect[:3,:3] = cam2cam['R_rect_00'].reshape(3,3)\n P_rect = cam2cam['P_rect_0'+str(cam)].reshape(3,4)\n P_velo2im = np.dot(np.dot(P_rect, R_cam2rect), velo2cam)\n\n # load velodyne points and remove all behind image plane (approximation)\n # each row of the velodyne DataFlow is forward, left, up, reflectance\n velo = load_velodyne_points(velo_file_name)\n velo = velo[velo[:, 0] >= 0, :]\n\n # project the points to the camera\n velo_pts_im = np.dot(P_velo2im, velo.T).T\n velo_pts_im[:, :2] = velo_pts_im[:,:2] / velo_pts_im[:,-1:]\n\n # check if in bounds\n # use minus 1 to get the exact same value as KITTI matlab code\n velo_pts_im[:, 0] = np.round(velo_pts_im[:,0]) - 1\n velo_pts_im[:, 1] = np.round(velo_pts_im[:,1]) - 1\n val_inds = (velo_pts_im[:, 0] >= 0) & (velo_pts_im[:, 1] >= 0)\n val_inds = val_inds & (velo_pts_im[:,0] < im_shape[1]) & (velo_pts_im[:,1] < im_shape[0])\n velo_pts_im = velo_pts_im[val_inds, :]\n\n # project to image\n depth = np.zeros((im_shape))\n depth[velo_pts_im[:, 1].astype(np.int), velo_pts_im[:, 0].astype(np.int)] = velo_pts_im[:, 2]\n\n # find the duplicate points and choose the closest depth\n inds = sub2ind(depth.shape, velo_pts_im[:, 1], velo_pts_im[:, 0])\n dupe_inds = [item for item, count in Counter(inds).items() if count > 1]\n for dd in dupe_inds:\n pts = np.where(inds == 
dd)[0]\n x_loc = int(velo_pts_im[pts[0], 0])\n y_loc = int(velo_pts_im[pts[0], 1])\n depth[y_loc, x_loc] = velo_pts_im[pts, 2].min()\n depth[depth < 0] = 0\n return depth\n\n\ndef generate_mask(gt_depth, min_depth, max_depth):\n mask = np.logical_and(gt_depth > min_depth,\n gt_depth < max_depth)\n # crop used by Garg ECCV16 to reprocude Eigen NIPS14 results\n # if used on gt_size 370x1224 produces a crop of [-218, -3, 44, 1180]\n gt_height, gt_width = gt_depth.shape\n crop = np.array([0.40810811 * gt_height, 0.99189189 * gt_height,\n 0.03594771 * gt_width, 0.96405229 * gt_width]).astype(np.int32)\n\n crop_mask = np.zeros(mask.shape)\n crop_mask[crop[0]:crop[1],crop[2]:crop[3]] = 1\n mask = np.logical_and(mask, crop_mask)\n return mask\n", "# coding=utf-8\n# Copyright 2020 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A training loop for the various models in this directory.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport io\nimport math\nimport os\nimport random\nimport time\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\n# from depth_from_video_in_the_wild import models\nimport model\n\ngfile = tf.gfile\nMAX_TO_KEEP = 1000000 # Maximum number of checkpoints to keep.\n\nflags.DEFINE_string('data_dir', None, 'Preprocessed DataFlow.')\nflags.DEFINE_string('file_extension', 'png', 'Image DataFlow file extension.')\nflags.DEFINE_float('learning_rate', 1e-4, 'Adam learning rate.')\nflags.DEFINE_float('reconstr_weight', 0.85, 'Frame reconstruction loss weight.')\nflags.DEFINE_float('ssim_weight', 3.0, 'SSIM loss weight.')\nflags.DEFINE_float('smooth_weight', 1e-2, 'Smoothness loss weight.')\nflags.DEFINE_float('depth_consistency_loss_weight', 0.01, 'Depth consistency loss weight')\nflags.DEFINE_integer('batch_size', 4, 'The size of a sample batch')\nflags.DEFINE_integer('img_height', 128, 'Input frame height.')\nflags.DEFINE_integer('img_width', 416, 'Input frame width.')\nflags.DEFINE_integer('queue_size', 2000, 'Items in queue. Use smaller number for local debugging.')\nflags.DEFINE_integer('seed', 8964, 'Seed for random number generators.')\nflags.DEFINE_float('weight_reg', 1e-2, 'The amount of weight regularization to apply. 
'\n 'This has no effect on the ResNet-based encoder architecture.')\nflags.DEFINE_string('checkpoint_dir', None, 'Directory to save models checkpoints.')\nflags.DEFINE_integer('train_steps', int(1e6), 'Number of training steps.')\nflags.DEFINE_integer('summary_freq', 100, 'Save summaries every N steps.')\nflags.DEFINE_bool('debug', False, 'If true, one training step is performed and the results are dumped to a folder for debugging.')\nflags.DEFINE_string('input_file', 'train', 'Input file name')\nflags.DEFINE_float('rotation_consistency_weight', 1e-3, 'Weight of rotation cycle consistency loss.')\nflags.DEFINE_float('translation_consistency_weight', 1e-2, 'Weight of thanslation consistency loss.')\nflags.DEFINE_integer('foreground_dilation', 8, 'Dilation of the foreground mask (in pixels).')\nflags.DEFINE_boolean('learn_intrinsics', True, 'Whether to learn camera intrinsics.')\nflags.DEFINE_boolean('boxify', True, 'Whether to convert segmentation masks to bounding boxes.')\nflags.DEFINE_string('imagenet_ckpt', None, 'Path to an imagenet checkpoint to intialize from.')\n\nFLAGS = flags.FLAGS\nflags.mark_flag_as_required('data_dir')\nflags.mark_flag_as_required('checkpoint_dir')\n\n\ndef load(filename):\n with gfile.Open(filename) as f:\n return np.load(io.BytesIO(f.read()))\n\n\ndef _print_losses(dir1):\n for f in gfile.ListDirectory(dir1):\n if 'loss' in f:\n print('----------', f, end=' ')\n f1 = os.path.join(dir1, f)\n t1 = load(f1).astype(float)\n print(t1)\n\n\ndef main(_):\n # Fixed seed for repeatability\n seed = FLAGS.seed\n tf.set_random_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n\n if not gfile.Exists(FLAGS.checkpoint_dir):\n gfile.MakeDirs(FLAGS.checkpoint_dir)\n\n train_model = model.Model(\n boxify=FLAGS.boxify,\n data_dir=FLAGS.data_dir,\n file_extension=FLAGS.file_extension,\n is_training=True,\n foreground_dilation=FLAGS.foreground_dilation,\n learn_intrinsics=FLAGS.learn_intrinsics,\n learning_rate=FLAGS.learning_rate,\n reconstr_weight=FLAGS.rgb_weight,\n smooth_weight=FLAGS.depth_smoothing_weight,\n ssim_weight=FLAGS.ssim_weight,\n translation_consistency_weight=FLAGS.translation_consistency_weight,\n rotation_consistency_weight=FLAGS.rotation_consistency_weight,\n batch_size=FLAGS.batch_size,\n img_height=FLAGS.img_height,\n img_width=FLAGS.img_width,\n weight_reg=FLAGS.weight_reg,\n depth_consistency_loss_weight=FLAGS.depth_consistency_loss_weight,\n queue_size=FLAGS.queue_size,\n input_file=FLAGS.input_file)\n\n _train(train_model, FLAGS.checkpoint_dir, FLAGS.train_steps, FLAGS.summary_freq)\n\n if FLAGS.debug:\n _print_losses(os.path.join(FLAGS.checkpoint_dir, 'debug'))\n\n\ndef _train(train_model, checkpoint_dir, train_steps, summary_freq):\n \"\"\"Runs a trainig loop.\"\"\"\n saver = train_model.saver\n sv = tf.train.Supervisor(logdir=checkpoint_dir, save_summaries_secs=0,\n saver=None)\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n with sv.managed_session(config=config) as sess:\n logging.info('Attempting to resume training from %s...', checkpoint_dir)\n checkpoint = tf.train.latest_checkpoint(checkpoint_dir)\n logging.info('Last checkpoint found: %s', checkpoint)\n if checkpoint:\n saver.restore(sess, checkpoint)\n elif FLAGS.imagenet_ckpt:\n logging.info('Restoring pretrained weights from %s', FLAGS.imagenet_ckpt)\n train_model.imagenet_init_restorer.restore(sess, FLAGS.imagenet_ckpt)\n\n logging.info('Training...')\n start_time = time.time()\n last_summary_time = time.time()\n steps_per_epoch = 
train_model.reader.steps_per_epoch\n step = 1\n while step <= train_steps:\n fetches = {\n 'train': train_model.train_op,\n 'global_step': train_model.global_step,\n }\n if step % summary_freq == 0:\n fetches['loss'] = train_model.total_loss\n fetches['summary'] = sv.summary_op\n\n if FLAGS.debug:\n fetches.update(train_model.exports)\n\n results = sess.run(fetches)\n global_step = results['global_step']\n\n if step % summary_freq == 0:\n sv.summary_writer.add_summary(results['summary'], global_step)\n train_epoch = math.ceil(global_step / steps_per_epoch)\n train_step = global_step - (train_epoch - 1) * steps_per_epoch\n this_cycle = time.time() - last_summary_time\n last_summary_time += this_cycle\n logging.info(\n 'Epoch: [%2d] [%5d/%5d] time: %4.2fs (%ds total) loss: %.3f',\n train_epoch, train_step, steps_per_epoch, this_cycle,\n time.time() - start_time, results['loss'])\n\n if FLAGS.debug:\n debug_dir = os.path.join(checkpoint_dir, 'debug')\n if not gfile.Exists(debug_dir):\n gfile.MkDir(debug_dir)\n for name, tensor in results.iteritems():\n if name == 'summary':\n continue\n s = io.BytesIO()\n filename = os.path.join(debug_dir, name)\n np.save(s, tensor)\n with gfile.Open(filename, 'w') as f:\n f.write(s.getvalue())\n return\n\n # steps_per_epoch == 0 is intended for debugging, when we run with a\n # single image for sanity check\n if steps_per_epoch == 0 or step % steps_per_epoch == 0:\n logging.info('[*] Saving checkpoint to %s...', checkpoint_dir)\n saver.save(sess, os.path.join(checkpoint_dir, 'models'),\n global_step=global_step)\n\n # Setting step to global_step allows for training for a total of\n # train_steps even if the program is restarted during training.\n step = global_step + 1\n\n\nif __name__ == '__main__':\n app.run(main)\n", "# -*- coding: utf-8 -*-\n\"\"\"\nA network for predicting depth map.\n\nbased on \"Depth from video in the wild\", \"SfmLearner-PyTorch\" and \"struct2depth\"\n\nU-Net liekd encoder-decoder architecture\n\ncode by jiafeng5513\n\nNOTE:\n 1. TensorFlow 的默认顺序是 [N H W C], PyTorch的默认顺序是 [N C H W]\n 2. DepthNet输入一张张三通道图片,假设每张图片大小为[h,w],batch size =4,则输入张量为[4,3,h,w]\n 3. 
该文件的main函数仅供DepthNet进行shape检查\n\"\"\"\nimport torch\nimport torch.nn as nn\n\n\nclass residual_block_A(nn.Module):\n def __init__(self, channel):\n super(residual_block_A, self).__init__()\n # torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True)\n self.conv1 = nn.Sequential(torch.nn.Conv2d(in_channels=channel, out_channels=channel, kernel_size=3, stride=1,\n padding=1), nn.BatchNorm2d(channel), nn.ReLU(inplace=True))\n self.conv2 = nn.Sequential(torch.nn.Conv2d(in_channels=channel, out_channels=channel, kernel_size=3, stride=1,\n padding=1), nn.BatchNorm2d(channel))\n\n def forward(self, input_tensor):\n conv_out_1 = self.conv1(input_tensor)\n conv_out_2 = self.conv2(conv_out_1)\n pre_out = input_tensor + conv_out_2\n out = torch.nn.functional.relu(pre_out, inplace=True)\n return out\n\n\nclass residual_block_B(nn.Module):\n def __init__(self, input_channel, output_channel, stride):\n super(residual_block_B, self).__init__()\n self.input_channel = input_channel\n self.output_channel = output_channel\n self.stride = stride\n self.conv1 = nn.Sequential(torch.nn.Conv2d(in_channels=input_channel, out_channels=output_channel, kernel_size=3,\n stride=stride, padding=1), nn.BatchNorm2d(output_channel), nn.ReLU(inplace=True))\n self.conv2 = nn.Sequential(torch.nn.Conv2d(in_channels=output_channel, out_channels=output_channel, kernel_size=3,\n stride=1, padding=1), nn.BatchNorm2d(output_channel))\n self.conv3 = nn.Sequential(torch.nn.Conv2d(in_channels=input_channel, out_channels=output_channel, kernel_size=1,\n stride=stride))\n self.maxpool = nn.MaxPool2d((stride, stride), stride=(stride, stride))\n\n def forward(self, input_tensor):\n # input_tensor:[b,input_channel,h,w] 4,64,32,104\n conv_out_1 = self.conv1(input_tensor) # [b,output_channel,h/2,w/2] 4,128,16,52\n conv_out_2 = self.conv2(conv_out_1) # [b,output_channel,h/2,w/2] 4,128,16,52\n if self.input_channel==self.output_channel:\n if self.stride == 1:\n shortcut = input_tensor\n else:\n shortcut =self.maxpool(input_tensor)\n else:\n shortcut = self.conv3(input_tensor)\n pre_out = shortcut + conv_out_2\n out = torch.nn.functional.relu(pre_out, inplace=True)\n return out\n\n\nclass encoder_module(nn.Module):\n def __init__(self):\n super(encoder_module, self).__init__()\n self.conv1 = nn.Sequential(torch.nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7,\n stride=2, padding=3), nn.BatchNorm2d(64), nn.ReLU(inplace=True))\n self.maxpool = nn.MaxPool2d((3, 3), stride=(2, 2), padding=1)\n self.residual_block_A1 = residual_block_A(channel=64)\n self.residual_block_A2 = residual_block_A(channel=64)\n\n self.residual_block_first_B1 = residual_block_B(input_channel=64, output_channel=128, stride=2)\n self.residual_block_A3 = residual_block_A(channel=128)\n\n self.residual_block_first_B2 = residual_block_B(input_channel=128, output_channel=256, stride=2)\n self.residual_block_A4 = residual_block_A(channel=256)\n\n self.residual_block_first_B3 = residual_block_B(input_channel=256, output_channel=512, stride=2)\n self.residual_block_A5 = residual_block_A(channel=512)\n pass\n\n def forward(self, image):\n # image:[b,3,h,w]\n econv1 = self.conv1(image) # [b,64,h/2,w/2]\n maxpool_out = self.maxpool(econv1) # [b,64,h/4,w/4]\n residual_block_A1_out = self.residual_block_A1(maxpool_out) # [b,64,h/4,w/4]\n econv2 = self.residual_block_A2(residual_block_A1_out) # [b,64,h/4,w/4]\n\n residual_block_first_B1_out = self.residual_block_first_B1(econv2) # [b,128,h/8,w/8]\n econv3 = 
self.residual_block_A3(residual_block_first_B1_out) # [b,128,h/8,w/8]\n\n residual_block_first_B2_out = self.residual_block_first_B2(econv3) # [b,256,h/16,w/16]\n econv4 = self.residual_block_A4(residual_block_first_B2_out) # [b,256,h/16,w/16]\n\n residual_block_first_B3_out = self.residual_block_first_B3(econv4) # [b,512,h/32,w/32]\n econv5 = self.residual_block_A5(residual_block_first_B3_out) # [b,512,h/32,w/32]\n\n return econv5, econv4, econv3, econv2, econv1\n\n\nclass DepthNet(nn.Module):\n def __init__(self):\n super(DepthNet, self).__init__()\n self.encoder = encoder_module()\n self.uconv_5 = nn.Sequential(torch.nn.ConvTranspose2d(in_channels=512, out_channels=256, kernel_size=3,\n stride=2, padding=1, output_padding=1), nn.ReLU(inplace=True))\n self.conv_5 = nn.Sequential(torch.nn.Conv2d(in_channels=512, out_channels=256, kernel_size=3,\n stride=1,padding=1), nn.ReLU(inplace=True))\n self.uconv_4 = nn.Sequential(torch.nn.ConvTranspose2d(in_channels=256, out_channels=128, kernel_size=3,\n stride=2, padding=1, output_padding=1), nn.ReLU(inplace=True))\n self.conv_4 = nn.Sequential(torch.nn.Conv2d(in_channels=256, out_channels=128, kernel_size=3,\n stride=1, padding=1), nn.ReLU(inplace=True))\n self.uconv_3 = nn.Sequential(torch.nn.ConvTranspose2d(in_channels=128, out_channels=64, kernel_size=3,\n stride=2, padding=1, output_padding=1), nn.ReLU(inplace=True))\n self.conv_3 = nn.Sequential(torch.nn.Conv2d(in_channels=128, out_channels=64, kernel_size=3,\n stride=1, padding=1), nn.ReLU(inplace=True))\n self.uconv_2 = nn.Sequential(torch.nn.ConvTranspose2d(in_channels=64, out_channels=32, kernel_size=3,\n stride=2, padding=1, output_padding=1), nn.ReLU(inplace=True))\n self.conv_2 = nn.Sequential(torch.nn.Conv2d(in_channels=96, out_channels=32, kernel_size=3,\n stride=1, padding=1), nn.ReLU(inplace=True))\n self.uconv_1 = nn.Sequential(torch.nn.ConvTranspose2d(in_channels=32, out_channels=16, kernel_size=3,\n stride=2, padding=1, output_padding=1), nn.ReLU(inplace=True))\n self.conv_1 = nn.Sequential(torch.nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3,\n stride=1, padding=1), nn.ReLU(inplace=True))\n self.conv_0 = nn.Sequential(torch.nn.Conv2d(in_channels=16, out_channels=1, kernel_size=3,\n stride=1, padding=1), nn.Softplus())\n\n def forward(self, image):\n # inage: [b,3,h,w] [4, 3, 128, 416]\n econv5, econv4, econv3, econv2, econv1 = self.encoder(image)\n # econv5:[b,512,h/32,w/32]\n # econv4:[b,256,h/16,w/16] [4, 256, 8, 26]\n # econv3:[b,128,h/8,w/8] [4, 128, 16, 52]\n # econv2:[b,64,h/4,w/4] [4, 64, 32, 104]\n # econv1:[b,64,h/2,w/2] [4, 64, 64, 208]\n unconv5 = self.uconv_5(econv5) # [b,256,h/16,w/16] [4, 256, 8, 26]\n x5 = torch.cat([econv4, unconv5], dim=1) # [b,512,h/16,w/16] [4, 512, 8, 26]\n iconv5 = self.conv_5(x5) # [b,256,h/16,w/16] [4, 256, 8, 26]\n\n unconv4 = self.uconv_4(iconv5) # [b,128,h/8,w/8] [4, 128, 16, 52]\n x4 = torch.cat([econv3, unconv4], dim=1) # [b,256,h/8,w/8] [4, 256, 16, 52]\n iconv4 = self.conv_4(x4) # [b,128,h/8,w/8] [4, 128, 16, 52]\n\n unconv3 = self.uconv_3(iconv4) # [b,64,h/4,w/4] [4, 64, 32, 104]\n x3 = torch.cat([econv2, unconv3], dim=1) # [b,128,h/4,w/4] [4, 128, 32, 104]\n iconv3 = self.conv_3(x3) # [b,64,h/4,w/4] [4, 64, 32, 104]\n\n unconv2 = self.uconv_2(iconv3) # [b,32,h/2,w/2] [4, 32, 64, 208]\n x2 = torch.cat([econv1, unconv2], dim=1) # [b,96,h/2,w/2] [4, 96, 64, 208]\n iconv2 = self.conv_2(x2) # [b,32,h/2,w/2] [4, 32, 64, 208]\n\n unconv1 = self.uconv_1(iconv2) # [b,16,h,w] [4, 16, 128, 416]\n iconv1 = self.conv_1(unconv1) # 
[b,16,h,w] [4, 16, 128, 416]\n\n out = self.conv_0(iconv1) # [b,1,h,w] [4, 1, 128, 416]\n return out\n\n\nif __name__ == '__main__':\n model = DepthNet()\n model = model.cuda()\n model.eval()\n\n image = torch.randn(4, 3, 128, 416) # 输入尺寸 [N C H W]\n image = image.cuda()\n with torch.no_grad():\n depth_map = model(image)\n\n print(depth_map.shape)\n", "import torch.utils.data as data\nimport numpy as np\nfrom imageio import imread\nfrom path import Path\nimport random\n\n\ndef load_as_float(path):\n return imread(path).astype(np.float32)\n\n\nclass SequenceFolder(data.Dataset):\n \"\"\"A sequence DataFlow loader where the files are arranged in this way:\n root/scene_1/0000000.jpg\n root/scene_1/0000001.jpg\n ..\n root/scene_1/cam.txt\n root/scene_2/0000000.jpg\n .\n\n transform functions must take in a list a images and a numpy array (usually intrinsics matrix)\n \"\"\"\n\n def __init__(self, root, seed=None, train=True, sequence_length=3, transform=None, target_transform=None):\n np.random.seed(seed)\n random.seed(seed)\n self.root = Path(root)\n scene_list_path = self.root/'train.txt' if train else self.root/'val.txt'\n self.scenes = [self.root/folder[:-1] for folder in open(scene_list_path)]\n self.transform = transform\n self.crawl_folders(sequence_length)\n\n def crawl_folders(self, sequence_length):\n sequence_set = [] # 把所有的图片组织称样本序列,存到这个里面\n demi_length = (sequence_length-1)//2\n shifts = list(range(-demi_length, demi_length + 1))\n shifts.pop(demi_length)\n for scene in self.scenes: # 每个场景有自己的内参\n intrinsics = np.genfromtxt(scene/'cam.txt').astype(np.float32).reshape((3, 3))\n imgs = sorted(scene.files('*.jpg')) # 场景中所有的图片\n if len(imgs) < sequence_length: # 如果这个场景的素有图片装不满一个样本,只能跳过这个场景\n continue\n for i in range(len(imgs)-sequence_length+1):\n sample = {'intrinsics': intrinsics, 'imgs': []}\n for j in range(sequence_length):\n sample['imgs'].append(imgs[i+j])\n sequence_set.append(sample)\n random.shuffle(sequence_set)\n self.samples = sequence_set\n\n def __getitem__(self, index):\n sample = self.samples[index]\n imgs = [load_as_float(img) for img in sample['imgs']]\n if self.transform is not None:\n imgs, intrinsics = self.transform(imgs, np.copy(sample['intrinsics']))\n else:\n intrinsics = np.copy(sample['intrinsics'])\n return imgs, intrinsics\n\n def __len__(self):\n return len(self.samples)\n" ]
[ [ "numpy.dot", "numpy.fromfile", "numpy.logical_and", "numpy.arange", "numpy.eye", "numpy.linalg.norm", "numpy.cos", "numpy.concatenate", "numpy.round", "scipy.misc.imread", "numpy.array", "numpy.zeros", "numpy.where" ], [ "tensorflow.compat.v1.ConfigProto", "numpy.random.seed", "numpy.save", "tensorflow.compat.v1.set_random_seed", "tensorflow.compat.v1.train.latest_checkpoint", "tensorflow.compat.v1.train.Supervisor" ], [ "torch.nn.Softplus", "torch.nn.ConvTranspose2d", "torch.cat", "torch.randn", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.functional.relu", "torch.no_grad", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ], [ "numpy.copy", "numpy.random.seed", "numpy.genfromtxt" ] ]
DFNaiff/Dissertation
[ "8db72a0e588042a582053625ec58cde6a661f2a9" ]
[ "tests_dissertation/source1d/test1a_mcmc.py" ]
[ "# -*- coding: utf-8 -*-\nimport sys\nsys.path.insert(0,\"../../src2\")\nimport math\nimport functools\nimport time\n\nimport torch\nimport numpy as np\nfrom scipy.special import gamma\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport emcee\n\nfrom source_1d_likelihood_fn import compute_log_likelihood_2\n\nnp.random.seed(100)\ntorch.manual_seed(100)\n#%%\ndef logit_t(x,a=0,b=1):\n return torch.log(((x-a)/(b-a))/(1.0-(x-a)/(b-a)))\ndef sigmoid(x,a=0,b=1):\n return (b-a)*1.0/(1.0+np.exp(-x)) + a\ndef dsigmoid(x,a=0,b=1):\n return (b-a)*np.exp(x)/((1+np.exp(x))**2)\ndef exp(x):\n return np.exp(x)\ndef dexp(x):\n return np.exp(x)\n\ndef unwarped_logjoint_np(x0,Ts,q0,rho):\n ll = compute_log_likelihood_2(x0,Ts,q0,rho)\n ll += -np.log(1+(q0/10.0)**2)\n ll += -np.log(1+(rho/0.1)**2)\n return ll\n\ndef logjoint_np(x):\n x0,Ts,q0,rho = x[0],x[1],x[2],x[3]\n ll = unwarped_logjoint_np(sigmoid(x0),sigmoid(Ts,b=0.4),\n exp(q0),exp(rho)) + \\\n np.log(dsigmoid(x0)) + np.log(dsigmoid(Ts,b=0.4)) + \\\n np.log(dexp(q0)) + np.log(dexp(rho))\n return ll\n\ncounter=0\ndef logjoint_emcee(x):\n global counter\n counter += 1\n print(counter)\n return logjoint_np(x)\n\n#%%\nndim, nwalkers = 4, 10\np0 = [np.random.rand(ndim) for i in range(nwalkers)]\n\nsampler = emcee.EnsembleSampler(nwalkers, ndim, logjoint_emcee)\nsampler.run_mcmc(p0, 10000)\nnp.savez(\"testheat_1a_emcee\",sampler=sampler)\n#%%\n" ]
[ [ "numpy.log", "numpy.savez", "numpy.random.seed", "torch.manual_seed", "torch.log", "numpy.random.rand", "numpy.exp" ] ]
Rensvandeschoot/automated-systematic-review
[ "fe06a570a806e1f14d3de5186511a04edf851cf3" ]
[ "asreview/models/embedding.py" ]
[ "import gzip\nimport io\nfrom multiprocessing import Process, Queue, cpu_count\nfrom pathlib import Path\nfrom urllib.request import urlopen\n\nimport numpy as np\n\nfrom asreview.utils import get_data_home\n\n\nEMBEDDING_EN = {\n \"url\": \"https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.en.300.vec.gz\", # noqa\n \"name\": 'fasttext.cc.en.300.vec'\n}\n\n\ndef _embedding_reader(filename, input_queue, block_size=1000):\n \"\"\" Process that reads the word embeddings from a file.\n\n Parameters\n ----------\n filename: str\n File of trained embedding vectors.\n input_queue: Queue\n Queue to store jobs in.\n block_size: int\n Number of lines for each job.\n \"\"\"\n\n with open(filename, 'r', encoding='utf-8', newline='\\n') as f:\n # Throw away the first line, since we don't care about the dimensions.\n f.readline()\n\n i_line = 0\n buffer = []\n # Read the embedding file line by line.\n for line in f:\n i_line += 1\n buffer.append(line)\n # If the buffer is full, write it to the queue.\n if i_line == block_size:\n input_queue.put(buffer)\n i_line = 0\n buffer = []\n if i_line > 0:\n input_queue.put(buffer)\n\n # Put the string \"DONE\" in the queue, to ensure that the\n # worker processes finish.\n\n input_queue.put(\"DONE\")\n\n\ndef _embedding_worker(input_queue, output_queue, emb_vec_dim, word_index=None):\n \"\"\" Process that reads the word embeddings from a file.\n\n Parameters\n ----------\n input_queue: Queue\n Queue in which the jobs are submitted.\n output_queue: Queue\n Queue to store the embedding in dictionary form.\n emb_vec_dim: int\n Dimension of each embedding vector.\n word_index: dict\n Dictionary of the sample embedding.\n \"\"\"\n\n badInput = False\n badValues = {}\n while True:\n embedding = {}\n buffer = input_queue.get()\n if buffer == \"DONE\":\n break\n\n for line in buffer:\n line = line.rstrip()\n values = line.split(' ')\n\n if len(values) != emb_vec_dim + 1:\n if not badInput:\n print(\"Error: bad input in embedding vector.\")\n badInput = True\n badValues = values\n break\n else:\n word = values[0]\n if word_index is not None and word not in word_index:\n continue\n coefs = values[1:emb_vec_dim + 1]\n\n # store the results\n embedding[word] = np.asarray(coefs, dtype=np.float32)\n output_queue.put(embedding)\n\n # We removed the \"DONE\" from the input queue, so put it back in for\n # the other processes.\n input_queue.put(\"DONE\")\n\n # Store the results in the output queue\n if badInput:\n output_queue.put({\"ErrorBadInputValues\": badValues})\n output_queue.put(\"DONE\")\n\n\ndef _embedding_aggregator(output_queue, n_worker):\n \"\"\" Process that aggregates the results of the workers.\n This should be the main/original process.\n\n Parameters\n ----------\n output_queue: Queue\n This queue is the output queue of the workers.\n n_worker: int\n The number of worker processes.\n\n Returns\n -------\n Aggregated embedding dictionary.\n \"\"\"\n\n embedding = {}\n\n num_done = 0\n while num_done < n_worker:\n new_embedding = output_queue.get()\n if new_embedding == \"DONE\":\n num_done += 1\n else:\n embedding.update(new_embedding)\n\n return embedding\n\n\ndef download_embedding(url=EMBEDDING_EN['url'], name=EMBEDDING_EN['name'],\n data_home=None, verbose=1):\n \"\"\"Download word embedding file.\n\n Download word embedding file, unzip the file and save to the\n file system.\n\n Parameters\n ----------\n url: str\n The URL of the gzipped word embedding file\n name: str\n The filename of the embedding file.\n data_home: str\n The location of the ASR 
datasets. Default `asreview.utils.get_data_home()`\n verbose: int\n The verbosity. Default 1.\n\n \"\"\"\n\n if data_home is None:\n data_home = get_data_home()\n\n out_fp = Path(data_home, name)\n\n if verbose:\n print(f'download {url}')\n\n r = urlopen(url)\n compressed_file = io.BytesIO(r.read())\n\n if verbose:\n print(f'save to {out_fp}')\n\n decompressed_file = gzip.GzipFile(fileobj=compressed_file)\n\n with open(out_fp, 'wb') as out_file:\n for line in decompressed_file:\n out_file.write(line)\n\n\ndef load_embedding(fp, word_index=None, n_jobs=None, verbose=1):\n \"\"\"Load embedding matrix from file.\n\n The embedding matrix needs to be stored in the\n FastText format.\n\n Parameters\n ----------\n fp: str\n File path of the trained embedding vectors.\n word_index: dict\n Sample word embeddings.\n n_jobs: int\n Number of processes to parse the embedding (+1 process for reading).\n verbose: int\n The verbosity. Default 1.\n\n\n Returns\n -------\n dict:\n The embedding weights stored in a dict with the word as key and\n the weights as values.\n \"\"\"\n\n # Maximum number of jobs in the queue.\n queue_size = 500\n\n # Set the number of reader processes to use.\n if n_jobs is None:\n n_jobs = 1\n elif n_jobs == -1:\n n_jobs = cpu_count()-1\n\n input_queue = Queue(queue_size)\n output_queue = Queue()\n\n with open(fp, 'r', encoding='utf-8', newline='\\n') as f:\n n_words, emb_vec_dim = list(map(int, f.readline().split(' ')))\n\n if verbose == 1:\n print(f\"Reading {n_words} vectors with {emb_vec_dim} dimensions.\")\n\n worker_procs = []\n p = Process(target=_embedding_reader, args=(fp, input_queue),\n daemon=True)\n worker_procs.append(p)\n for _ in range(n_jobs):\n p = Process(\n target=_embedding_worker,\n args=(input_queue, output_queue, emb_vec_dim, word_index),\n daemon=True)\n worker_procs.append(p)\n\n # Start workers.\n for proc in worker_procs:\n proc.start()\n embedding = _embedding_aggregator(output_queue, n_jobs)\n\n # Merge dictionaries of workers\n\n # Join workers\n for proc in worker_procs:\n proc.join()\n\n if \"ErrorBadInputValues\" in embedding:\n badValues = embedding[\"ErrorBadInputValues\"]\n raise ValueError(f\"Check embedding matrix, bad format: {badValues}\")\n\n if verbose == 1:\n print(f\"Found {len(embedding)} word vectors.\")\n\n return embedding\n\n\ndef sample_embedding(embedding, word_index, verbose=1):\n \"\"\"Sample embedding matrix\n\n Parameters\n ----------\n embedding: dict\n A dictionary with the words and embedding vectors.\n word_index: dict\n A word_index like the output of Keras Tokenizer.word_index.\n verbose: int\n The verbosity. Default 1.\n\n Returns\n -------\n (np.ndarray, list):\n The embedding weights strored in a two dimensional\n numpy array and a list with the corresponding words.\n \"\"\"\n\n n_words, emb_vec_dim = len(word_index), len(next(iter(embedding.values())))\n\n if verbose == 1:\n print(f\"Creating matrix with {n_words} vectors \"\n f\"with dimension {emb_vec_dim}.\")\n\n # n+1 because 0 is preserved in the tokenizing process.\n embedding_matrix = np.zeros((n_words + 1, emb_vec_dim))\n\n for word, i in word_index.items():\n coefs = embedding.get(word)\n if coefs is not None:\n embedding_matrix[i] = coefs\n if verbose == 1:\n print('Shape of embedding matrix: ', embedding_matrix.shape)\n\n return embedding_matrix\n" ]
[ [ "numpy.asarray", "numpy.zeros" ] ]
joyjeni/-Learn-Artificial-Intelligence-with-TensorFlow
[ "8ae05456241a3ead3dcb83dd315797380d7acacf" ]
[ "section3/snippets.py" ]
[ "import tensorflow as tf\n\n# ===============================================\n# Previously was snippets.py of: 3_2_RNNs\n# ===============================================\n\n# i = input_gate, j = new_input, f = forget_gate, o = output_gate\n# Get 4 copies of feeding [inputs, m_prev] through the \"Sigma\" diagram.\n# Note that each copy has its own distinct set of weights.\nlstm_matrix = self._linear1([inputs, m_prev])\ni, j, f, o = tf.split(\n value=lstm_matrix, num_or_size_splits=4, axis=1)\n# Feed each of the gates through a sigmoid.\ni = sigmoid(i)\nf = sigmoid(f + self._forget_bias)\no = sigmoid(o)\n\nc = f * c_prev + i * self._activation(j)\nm = o * self._activation(c)\n\nnew_state = LSTMStateTuple(c, m)\nreturn m, new_state\n\n# ===============================================\n# RNN illustration\n# ===============================================\n\nhidden_size = 32\n\n\ndef rnn_step(x, h_prev):\n # Project inputs to each have dimension hidden_size.\n combined_inputs = tf.layers.Dense(hidden_size)(tf.concat([x, h_prev], axis=1))\n # Compute the next hidden state.\n h = tf.tanh(combined_inputs)\n return h\n\n\n# ===============================================\n# Bidirectional RNNs\n# ===============================================\noutputs_tuple, final_state_tuple = tf.nn.bidirectional_dynamic_rnn(\n cell_fw=tf.nn.rnn_cell.LSTMCell(128),\n cell_bw=tf.nn.rnn_cell.LSTMCell(128),\n inputs=inputs,\n dtype=tf.float32)\n# Concatenate the forward and backward outputs.\n# Shape: (batch_size, max_seq_len, 2 * state_size)\noutputs = tf.concat(outputs_tuple, -1)\n\n# ===============================================\n# Stacked RNNs\n# ===============================================\n\n\ndef lstm_cell():\n return tf.nn.rnn_cell.LSTMCell(128)\n\n\ncell = tf.nn.rnn_cell.MultiRNNCell([\n lstm_cell() for _ in range(2)])\noutputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)\n\n" ]
[ [ "tensorflow.nn.dynamic_rnn", "tensorflow.concat", "tensorflow.nn.rnn_cell.LSTMCell", "tensorflow.layers.Dense", "tensorflow.tanh", "tensorflow.split" ] ]
colinzuo/MLAPP_Solution
[ "6d4bab23455169310547462fe2fc2cb71a915ef0" ]
[ "practice/toolbox/knn.py" ]
[ "import numpy as np\r\n\r\nfrom toolbox.sqDistance import *\r\nfrom toolbox.oneOfK import *\r\n\r\n\r\nclass KnnModel():\r\n def fit(self, X, y, K, C=None):\r\n self.X = X\r\n self.y = y\r\n self.K = K\r\n if C is not None:\r\n self.C = C\r\n else:\r\n self.C = np.size(np.unique(y))\r\n\r\n def predict(self, Xtest):\r\n yhat, yprob = knnClassify(self.X, self.y, Xtest, self.K, self.C)\r\n return yhat, yprob\r\n\r\n\r\ndef knnClassify(Xtrain, ytrain, Xtest, K, C):\r\n Ntrain = Xtrain.shape[0]\r\n Nclasses = C\r\n if K > Ntrain:\r\n print(\"reducing K = %d to Ntrain = %d\", K, Ntrain-1)\r\n K = Ntrain - 1\r\n dst = sqDistance(Xtest, Xtrain)\r\n ypred = np.zeros(Xtest.shape[0])\r\n if K == 1:\r\n closest = np.argmin(dst, axis=1)\r\n ypred = ytrain[closest]\r\n ypredProb, _ = oneOfK(ypred, Nclasses)\r\n else:\r\n closest = np.argsort(dst, axis=1)\r\n ypredProb = np.zeros((Xtest.shape[0], Nclasses))\r\n for i in range(Xtest.shape[0]):\r\n labels = ytrain[closest[i, 0:K]]\r\n hist, bin_edges = np.histogram(labels, bins=np.arange(1, Nclasses+2), density=True)\r\n ypredProb[i, :] = hist\r\n max = np.argmax(ypredProb, axis=1)\r\n ypred = max + 1\r\n ypred = ypred[:, np.newaxis]\r\n return ypred, ypredProb\r\n\r\n\r\nif __name__ == '__main__':\r\n Xtrain = np.array([[1, 2], [11, 12], [21, 22], [3, 4], [13, 14], [23, 24]])\r\n Xtest = np.array([[2, 3], [12, 13], [22, 23]])\r\n ytrain = np.array([1, 2, 3, 1, 2, 3])\r\n ypred, ypredProb = knnClassify(Xtrain, ytrain, Xtest, 1, C=3)\r\n print(\"Done\")" ]
[ [ "numpy.unique", "numpy.arange", "numpy.argmax", "numpy.argmin", "numpy.argsort", "numpy.array", "numpy.zeros" ] ]
cwaluga/singularities_dolfin
[ "dd379f71f384717a63906fd701df542a1603b03b" ]
[ "src/extrapolate.py" ]
[ "#! /usr/bin/env python\n\n\"\"\"\nExtrapolation of correction parameters.\n\"\"\"\n\n__author__ = \"Christian Waluga ([email protected])\"\n__copyright__ = \"Copyright (c) 2013 %s\" % __author__\n\nfrom dolfin import *\nfrom correction import *\nfrom meshtools import *\nfrom singular import *\nimport math\n\ndef extrapolate_gamma_least_squares(h, g, angle):\n\n from scipy.optimize import leastsq\n p = 2.0 - 2.0*pi/angle\n fitfunc = lambda c, x: c[0] + c[1]*x**p\n errfunc = lambda c, x, y: (y - fitfunc(c, x))/x\n cinit = [g[-1] , 0.0, 0.0]\n c = leastsq(errfunc, cinit, args = (h, g), full_output = 1)\n return c[0][0], lambda x: fitfunc(c[0], x)\n\n\ndef extrapolate_gamma_romberg(h, g, angle):\n\n import numpy as np\n\n N = len(h)-1\n T = np.zeros((N,N))\n \n # compute Aitken-Neville tableau\n p = 2.0 - 2.0*pi/angle\n for i in range(0, N):\n T[i,0] = (h[i]**p * g[i+1] - h[i+1]**p * g[i])/(h[i]**p - h[i+1]**p)\n for i in range(1, N):\n for k in range(1, i+1):\n T[i,k] = T[i,k-1] + (T[i,k-1] - T[i-1,k-1])/((h[i-k]/h[i])**(p+1) - 1.0)\n\n return T[N-1,N-1], T\n\n\ndef extrapolate_gamma_richardson(h, g, angle):\n\n p = 2.0 - 2.0*pi/angle\n return g[-2] + (g[-1] - g[-2])/(1.0-(h[-1]/h[-2])**p)\n\n\ndef extrapolate_gamma(corner, angle, corner_mesh, func, method, maxit, \\\n refine_method, extrapolation, start_at, maxlevel, initial_gamma):\n\n if corner_mesh.size(2) is 0: return 0.0\n \n if refine_method == 'bulirsch':\n \n # refine meshes according to Bulirsch-series (1, 1/2, 1/3, 1/4, 1/6, 1/8, ...)\n meshes = [corner_mesh,refine(corner_mesh),refine3(corner_mesh)]\n \n for i in xrange(3, maxlevel):\n meshes.append(refine(meshes[-2]))\n\n elif refine_method == 'midpoint':\n\n # refine meshes by simple subdivision (1, 1/2, 1/4, 1/8, 1/16, ...)\n meshes = [corner_mesh]\n\n for i in xrange(1, maxlevel):\n meshes.append(refine(meshes[-1]))\n\n mesh = meshes[0]\n min_angle = find_min_angle(mesh, corner)\n\n # compute gammas using one-level algorithm\n if initial_gamma is None:\n initial_gamma = evaluate_fit(corner_mesh.size(2), angle, func == math.sin)\n g = compute_gammas(meshes, angle, min_angle, corner, initial_gamma = initial_gamma, \\\n maxit = maxit, func = func, method = method)\n \n import numpy as np\n \n h = [mesh.hmin() for mesh in meshes]\n\n x = np.asarray(h)\n y = np.asarray(g)\n\n if extrapolation == 'none':\n gamma_asymptotic = g[-1] # just use value computed on the highest level\n\n elif extrapolation == 'least-squares': # extrapolate by a least-squares fit\n gamma_asymptotic, fitfunc = extrapolate_gamma_least_squares(x[start_at:], y[start_at:], angle)\n \n elif extrapolation == 'romberg':\n gamma_asymptotic, tableau = extrapolate_gamma_romberg(x[start_at:], y[start_at:], angle)\n\n elif extrapolation == 'richardson':\n gamma_asymptotic = extrapolate_gamma_richardson(x[start_at:], y[start_at:], angle)\n\n # plot gamma\n if False: # just for debugging\n gammaa, fitfunc = extrapolate_gamma_least_squares(x[start_at:], y[start_at:], angle)\n\n import pylab\n fig = pylab.figure()\n plt, = pylab.semilogx(1./x, y, 'k*')\n xx = np.linspace(h[-1], h[0], 100)\n yy = fitfunc(xx)\n pylab.ylim((min(g)-0.05,max(g)+0.05))\n plt, = pylab.semilogx(1./xx, yy, 'r-')\n plt, = pylab.semilogx(1./xx, gamma_asymptotic*np.ones((len(xx),1)), 'b-')\n plt, = pylab.semilogx(1./xx, initial_gamma*np.ones((len(xx),1)), 'g-')\n pylab.savefig('output/gamma-{0}-{1}.pdf'.format(corner[0],corner[1]))\n\n return gamma_asymptotic, (h,g)\n\n\ndef extrapolate_gammas(corners, angles, corner_meshes, method = 
'one-level-exact', maxit = 20, \\\n refine_method = 'bulirsch', extrapolation = 'least-squares', start_at = 3, \\\n maxlevel = 10, funcs = None, initial_gamma = None):\n\n g_asympt, data = [], []\n \n if funcs is None: # set all corners to Dirichlet by default\n funcs = [ math.sin for c in corners ]\n\n # for each corner, compute gamma\n for i in range(len(corners)):\n\n corner = corners[i]\n angle = angles[i]\n corner_mesh = corner_meshes[i]\n\n if method == 'fit':\n g, d = evaluate_fit(corner_mesh.size(2), angle, funcs[i] == math.sin), None\n else:\n g, d = extrapolate_gamma(corner, angle, corner_mesh, method = method, maxit = maxit, \\\n refine_method = refine_method, extrapolation = extrapolation, \\\n start_at = start_at, maxlevel = maxlevel, func = funcs[i], \\\n initial_gamma = initial_gamma)\n \n g_asympt.append(g)\n data.append(d)\n\n return g_asympt, data\n" ]
[ [ "numpy.asarray", "scipy.optimize.leastsq", "numpy.zeros", "numpy.linspace" ] ]
ananyashreyjain/astropy
[ "a8b8d4c4d2dcc9be28385600f56066cef92a38ad" ]
[ "astropy/utils/iers/tests/test_iers.py" ]
[ "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport os\nimport urllib.request\n\nimport pytest\nimport numpy as np\n\nfrom ....tests.helper import assert_quantity_allclose, catch_warnings\nfrom .. import iers\nfrom .... import units as u\nfrom ....table import QTable\nfrom ....time import Time, TimeDelta\nfrom ....utils.exceptions import AstropyWarning\n\nFILE_NOT_FOUND_ERROR = getattr(__builtins__, 'FileNotFoundError', OSError)\n\ntry:\n iers.IERS_A.open('finals2000A.all') # check if IERS_A is available\nexcept OSError:\n HAS_IERS_A = False\nelse:\n HAS_IERS_A = True\n\nIERS_A_EXCERPT = os.path.join(os.path.dirname(__file__), 'iers_a_excerpt')\n\n\nclass TestBasic():\n \"\"\"Basic tests that IERS_B returns correct values\"\"\"\n\n def test_simple(self):\n iers.IERS.close()\n assert iers.IERS.iers_table is None\n iers_tab = iers.IERS.open()\n assert iers.IERS.iers_table is not None\n assert isinstance(iers.IERS.iers_table, QTable)\n assert iers_tab['UT1_UTC'].unit is u.second\n assert iers_tab['PM_x'].unit is u.arcsecond\n assert iers_tab['PM_y'].unit is u.arcsecond\n jd1 = np.array([2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5])\n jd2 = np.array([0.49999421, 0.99997685, 0.99998843, 0., 0.5])\n ut1_utc = iers_tab.ut1_utc(jd1, jd2)\n assert isinstance(ut1_utc, u.Quantity)\n assert ut1_utc.unit is u.second\n # IERS files change at the 0.1 ms level; see gh-6981\n assert_quantity_allclose(ut1_utc, [-0.5868211, -0.5868184, -0.5868184,\n 0.4131816, 0.41328895] * u.s,\n atol=0.1*u.ms)\n # should be future-proof; surely we've moved to another planet by then\n with pytest.raises(IndexError):\n ut1_utc2, status2 = iers_tab.ut1_utc(1e11, 0.)\n # also check it returns the right status\n ut1_utc2, status2 = iers_tab.ut1_utc(jd1, jd2, return_status=True)\n assert np.all(status2 == iers.FROM_IERS_B)\n ut1_utc4, status4 = iers_tab.ut1_utc(1e11, 0., return_status=True)\n assert status4 == iers.TIME_BEYOND_IERS_RANGE\n\n # check it works via Time too\n t = Time(jd1, jd2, format='jd', scale='utc')\n ut1_utc3 = iers_tab.ut1_utc(t)\n assert_quantity_allclose(ut1_utc3, [-0.5868211, -0.5868184, -0.5868184,\n 0.4131816, 0.41328895] * u.s,\n atol=0.1*u.ms)\n\n # Table behaves properly as a table (e.g. can be sliced)\n assert len(iers_tab[:2]) == 2\n\n def test_open_filename(self):\n iers.IERS.close()\n iers.IERS.open(iers.IERS_B_FILE)\n assert iers.IERS.iers_table is not None\n assert isinstance(iers.IERS.iers_table, QTable)\n iers.IERS.close()\n with pytest.raises(FILE_NOT_FOUND_ERROR):\n iers.IERS.open('surely this does not exist')\n\n def test_open_network_url(self):\n iers.IERS_A.close()\n iers.IERS_A.open(\"file:\" + urllib.request.pathname2url(IERS_A_EXCERPT))\n assert iers.IERS_A.iers_table is not None\n assert isinstance(iers.IERS_A.iers_table, QTable)\n iers.IERS_A.close()\n\n\nclass TestIERS_AExcerpt():\n def test_simple(self):\n # Test the IERS A reader. 
It is also a regression tests that ensures\n # values do not get overridden by IERS B; see #4933.\n iers_tab = iers.IERS_A.open(IERS_A_EXCERPT)\n\n assert iers_tab['UT1_UTC'].unit is u.second\n assert 'P' in iers_tab['UT1Flag']\n assert 'I' in iers_tab['UT1Flag']\n assert 'B' in iers_tab['UT1Flag']\n assert np.all((iers_tab['UT1Flag'] == 'I') |\n (iers_tab['UT1Flag'] == 'P') |\n (iers_tab['UT1Flag'] == 'B'))\n\n assert iers_tab['dX_2000A'].unit is u.marcsec\n assert iers_tab['dY_2000A'].unit is u.marcsec\n assert 'P' in iers_tab['NutFlag']\n assert 'I' in iers_tab['NutFlag']\n assert 'B' in iers_tab['NutFlag']\n assert np.all((iers_tab['NutFlag'] == 'P') |\n (iers_tab['NutFlag'] == 'I') |\n (iers_tab['NutFlag'] == 'B'))\n\n assert iers_tab['PM_x'].unit is u.arcsecond\n assert iers_tab['PM_y'].unit is u.arcsecond\n assert 'P' in iers_tab['PolPMFlag']\n assert 'I' in iers_tab['PolPMFlag']\n assert 'B' in iers_tab['PolPMFlag']\n assert np.all((iers_tab['PolPMFlag'] == 'P') |\n (iers_tab['PolPMFlag'] == 'I') |\n (iers_tab['PolPMFlag'] == 'B'))\n\n t = Time([57053., 57054., 57055.], format='mjd')\n ut1_utc, status = iers_tab.ut1_utc(t, return_status=True)\n assert status[0] == iers.FROM_IERS_B\n assert np.all(status[1:] == iers.FROM_IERS_A)\n # These values are *exactly* as given in the table, so they should\n # match to double precision accuracy.\n assert_quantity_allclose(ut1_utc,\n [-0.4916557, -0.4925323, -0.4934373] * u.s,\n atol=0.1*u.ms)\n\n\n dcip_x,dcip_y, status = iers_tab.dcip_xy(t, return_status=True)\n assert status[0] == iers.FROM_IERS_B\n assert np.all(status[1:] == iers.FROM_IERS_A)\n # These values are *exactly* as given in the table, so they should\n # match to double precision accuracy.\n print(dcip_x)\n print(dcip_y)\n assert_quantity_allclose(dcip_x,\n [-0.086, -0.093, -0.087] * u.marcsec,\n atol=1.*u.narcsec)\n assert_quantity_allclose(dcip_y,\n [0.094, 0.081, 0.072] * u.marcsec,\n atol=1*u.narcsec)\n\n pm_x, pm_y, status = iers_tab.pm_xy(t, return_status=True)\n assert status[0] == iers.FROM_IERS_B\n assert np.all(status[1:] == iers.FROM_IERS_A)\n assert_quantity_allclose(pm_x,\n [0.003734, 0.004581, 0.004623] * u.arcsec,\n atol=0.1*u.marcsec)\n assert_quantity_allclose(pm_y,\n [0.310824, 0.313150, 0.315517] * u.arcsec,\n atol=0.1*u.marcsec)\n\n # Table behaves properly as a table (e.g. 
can be sliced)\n assert len(iers_tab[:2]) == 2\n\n\n@pytest.mark.skipif(str('not HAS_IERS_A'))\nclass TestIERS_A():\n\n def test_simple(self):\n \"\"\"Test that open() by default reads a 'finals2000A.all' file.\"\"\"\n # Ensure we remove any cached table (gh-5131).\n iers.IERS_A.close()\n iers_tab = iers.IERS_A.open()\n jd1 = np.array([2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5])\n jd2 = np.array([0.49999421, 0.99997685, 0.99998843, 0., 0.5])\n ut1_utc, status = iers_tab.ut1_utc(jd1, jd2, return_status=True)\n assert np.all(status == iers.FROM_IERS_B)\n assert_quantity_allclose(ut1_utc, [-0.5868211, -0.5868184, -0.5868184,\n 0.4131816, 0.41328895] * u.s,\n atol=0.1*u.ms)\n ut1_utc2, status2 = iers_tab.ut1_utc(1e11, 0., return_status=True)\n assert status2 == iers.TIME_BEYOND_IERS_RANGE\n\n tnow = Time.now()\n\n ut1_utc3, status3 = iers_tab.ut1_utc(tnow, return_status=True)\n assert status3 == iers.FROM_IERS_A_PREDICTION\n assert ut1_utc3 != 0.\n\n\nclass TestIERS_Auto():\n\n def setup_class(self):\n \"\"\"Set up useful data for the tests.\n \"\"\"\n self.N = 40\n self.ame = 30.0\n self.iers_a_file_1 = os.path.join(os.path.dirname(__file__), 'finals2000A-2016-02-30-test')\n self.iers_a_file_2 = os.path.join(os.path.dirname(__file__), 'finals2000A-2016-04-30-test')\n self.iers_a_url_1 = os.path.normpath('file://' + os.path.abspath(self.iers_a_file_1))\n self.iers_a_url_2 = os.path.normpath('file://' + os.path.abspath(self.iers_a_file_2))\n self.t = Time.now() + TimeDelta(10, format='jd') * np.arange(self.N)\n\n def teardown_method(self, method):\n \"\"\"Run this after every test.\n \"\"\"\n iers.IERS_Auto.close()\n\n def test_interpolate_error_formatting(self):\n \"\"\"Regression test: make sure the error message in\n IERS_Auto._check_interpolate_indices() is formatted correctly.\n \"\"\"\n with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):\n with iers.conf.set_temp('auto_max_age', self.ame):\n with pytest.raises(ValueError) as err:\n iers_table = iers.IERS_Auto.open()\n delta = iers_table.ut1_utc(self.t.jd1, self.t.jd2)\n assert str(err.value) == iers.INTERPOLATE_ERROR.format(self.ame)\n\n def test_auto_max_age_none(self):\n \"\"\"Make sure that iers.INTERPOLATE_ERROR's advice about setting\n auto_max_age = None actually works.\n \"\"\"\n with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):\n with iers.conf.set_temp('auto_max_age', None):\n iers_table = iers.IERS_Auto.open()\n delta = iers_table.ut1_utc(self.t.jd1, self.t.jd2)\n assert isinstance(delta, np.ndarray)\n assert delta.shape == (self.N,)\n assert_quantity_allclose(delta, np.array([-0.2246227]*self.N)*u.s)\n\n def test_auto_max_age_minimum(self):\n \"\"\"Check that the minimum auto_max_age is enforced.\n \"\"\"\n with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):\n with iers.conf.set_temp('auto_max_age', 5.0):\n with pytest.raises(ValueError) as err:\n iers_table = iers.IERS_Auto.open()\n delta = iers_table.ut1_utc(self.t.jd1, self.t.jd2)\n assert str(err.value) == 'IERS auto_max_age configuration value must be larger than 10 days'\n\n @pytest.mark.remote_data\n def test_no_auto_download(self):\n with iers.conf.set_temp('auto_download', False):\n t = iers.IERS_Auto.open()\n assert type(t) is iers.IERS_B\n\n @pytest.mark.remote_data\n def test_simple(self):\n\n with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):\n\n dat = iers.IERS_Auto.open()\n assert dat['MJD'][0] == 57359.0 * u.d\n assert dat['MJD'][-1] == 57539.0 * u.d\n\n # Pretend we are accessing at a time 7 days after start of 
predictive data\n predictive_mjd = dat.meta['predictive_mjd']\n dat._time_now = Time(predictive_mjd, format='mjd') + 7 * u.d\n\n # Look at times before and after the test file begins. 0.1292905 is\n # the IERS-B value from MJD=57359. The value in\n # finals2000A-2016-02-30-test has been replaced at this point.\n assert np.allclose(dat.ut1_utc(Time(50000, format='mjd').jd).value, 0.1292905)\n assert np.allclose(dat.ut1_utc(Time(60000, format='mjd').jd).value, -0.2246227)\n\n # Now pretend we are accessing at time 60 days after start of predictive data.\n # There will be a warning when downloading the file doesn't give new data\n # and an exception when extrapolating into the future with insufficient data.\n dat._time_now = Time(predictive_mjd, format='mjd') + 60 * u.d\n assert np.allclose(dat.ut1_utc(Time(50000, format='mjd').jd).value, 0.1292905)\n with catch_warnings(iers.IERSStaleWarning) as warns:\n with pytest.raises(ValueError) as err:\n dat.ut1_utc(Time(60000, format='mjd').jd)\n assert 'interpolating from IERS_Auto using predictive values' in str(err)\n assert len(warns) == 1\n assert 'IERS_Auto predictive values are older' in str(warns[0].message)\n\n # Warning only if we are getting return status\n with catch_warnings(iers.IERSStaleWarning) as warns:\n dat.ut1_utc(Time(60000, format='mjd').jd, return_status=True)\n assert len(warns) == 1\n assert 'IERS_Auto predictive values are older' in str(warns[0].message)\n\n # Now set auto_max_age = None which says that we don't care how old the\n # available IERS-A file is. There should be no warnings or exceptions.\n with iers.conf.set_temp('auto_max_age', None):\n with catch_warnings(iers.IERSStaleWarning) as warns:\n dat.ut1_utc(Time(60000, format='mjd').jd)\n assert not warns\n\n # Now point to a later file with same values but MJD increased by\n # 60 days and see that things work. dat._time_now is still the same value\n # as before, i.e. right around the start of predictive values for the new file.\n # (In other words this is like downloading the latest file online right now).\n with iers.conf.set_temp('iers_auto_url', self.iers_a_url_2):\n\n # Look at times before and after the test file begins. This forces a new download.\n assert np.allclose(dat.ut1_utc(Time(50000, format='mjd').jd).value, 0.1292905)\n assert np.allclose(dat.ut1_utc(Time(60000, format='mjd').jd).value, -0.3)\n\n # Now the time range should be different.\n assert dat['MJD'][0] == 57359.0 * u.d\n assert dat['MJD'][-1] == (57539.0 + 60) * u.d\n" ]
[ [ "numpy.all", "numpy.arange", "numpy.array" ] ]
huangyuyao/bevutils
[ "24e5c4954b17ed58e27697447ab667c65f59b7e0" ]
[ "bevutils/layers/perspective_transformer.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\nfrom ..functional import epipolar as E\n\nclass PerspectiveTransformerLayer(nn.Module):\n\n def __init__(self, bv_size, pv_size, intrinsics, translate_z = -10.0, rotation_order='xyz', device='cuda:0', dtype=torch.float32):\n '''\n `translate_z` is a hyperparameter to be chose in range (-Inf, 1.0), the perspective view will be roughly scaled (1-translate_z) times.\n '''\n super(PerspectiveTransformerLayer, self).__init__()\n self.dtype = dtype\n self.dev = torch.device(device) if device else None\n self.rot_order = rotation_order\n self.bv_size, self.pv_size = bv_size, pv_size\n self.register_buffer('intrinsics', self._prepare_intrinsics(intrinsics))\n self.register_buffer('inv_intrinsics', torch.inverse(self.intrinsics))\n self.register_buffer('n', torch.tensor([[0], [0], [1]], device=self.dev, dtype=self.dtype))\n self.register_buffer('tz', torch.tensor([translate_z], device=self.dev, dtype=self.dtype))\n self.register_buffer('bv_grid', self._prepare_coord_grid(*bv_size))\n bv_pivot, pv_pivot = self._prepare_pivots(bv_size, pv_size, self.inv_intrinsics)\n self.register_buffer('bv_pivot', bv_pivot)\n self.register_buffer('pv_pivot', pv_pivot)\n\n def _prepare_intrinsics(self, intrinsics):\n if isinstance(intrinsics, list) or isinstance(intrinsics, np.array):\n intrinsics = torch.tensor(intrinsics, requires_grad=False, device=self.dev, dtype=self.dtype)\n assert isinstance(intrinsics, torch.Tensor)\n assert intrinsics.shape == (3, 3)\n return intrinsics\n \n def _prepare_pivots(self, bv_size, pv_size, inv_intrinsics):\n bv_pivot = torch.tensor([[bv_size[1]/2.0], [bv_size[0]], [1.0]], device=self.dev, dtype=self.dtype)\n pv_pivot = torch.tensor([[pv_size[1]/2.0], [pv_size[0]], [1.0]], device=self.dev, dtype=self.dtype)\n bv_pivot = inv_intrinsics @ bv_pivot\n pv_pivot = inv_intrinsics @ pv_pivot\n return bv_pivot, pv_pivot\n\n def _prepare_coord_grid(self, H, W):\n xgrid = torch.arange(W, requires_grad=False, device=self.dev, dtype=self.dtype).repeat(H, 1).view((H, W, 1, 1))\n ygrid = torch.arange(H, requires_grad=False, device=self.dev, dtype=self.dtype).unsqueeze_(1).repeat(1, W).view(H, W, 1, 1)\n grid = torch.cat((xgrid, ygrid, torch.ones_like(xgrid, device=self.dev, dtype=self.dtype)), dim=-2)\n return grid\n\n def forward(self, pv, rx=0.0, ry=0.0, rz=0.0):\n '''\n REFERENCES:\n - Homography: refers to https://en.wikipedia.org/wiki/Homography_(computer_vision)\n - Bilinear Interpolation: refers to https://medium.com/@shanlins/spatial-transformer-networks-stn-and-its-implementation-2638d58d41f8\n '''\n B, C, Hp, Wp, Hb, Wb = *pv.shape, *self.bv_size\n # get constrained homography\n R = E.torch.make_rotation_matrix(rx, ry, rz, self.rot_order, device=pv.device, dtype=self.dtype)\n H = E.torch.make_constrained_homography(R, self.tz, self.intrinsics, self.inv_intrinsics, self.bv_pivot, self.pv_pivot)\n # get coordinates on perspective view for each grid: `pv_coord` with shape (B, Hb, Wb, 2, 1)\n bv_grid = self.bv_grid.expand(B, Hb, Wb, 3, 1)\n pv_coord = torch.matmul(H[:, None, None, :, :], bv_grid)\n pv_coord = pv_coord[:, :, :, 0:2, :] / pv_coord[:, :, :, 2:3, :]\n # gather pixels acoording to `pv_coord`\n x = pv_coord[:,None,:,:,0,0] # with shape (B, 1, Hb, Wb)\n y = pv_coord[:,None,:,:,1,0]\n mask = (~((x >= 0) & (x < Wp) & (y >= 0) & (y < Hp))).expand(B, C, Hb, Wb)\n x0 = x.clamp_(0, Wp-2).to(torch.long)\n y0 = y.clamp_(0, Hp-2).to(torch.long)\n offset_00 = y0 * Wp + x0\n offset_01 = offset_00 
+ 1\n offset_10 = offset_00 + Wp\n offset_11 = offset_10 + 1\n pv = pv.view(B, C, Hp*Wp) # with shape (B, C, Hp*Wp)\n pvmap = [\n torch.gather(pv, -1, offset_00.expand(B, C, Hb, Wb).view(B, C, Hb*Wb)),\n torch.gather(pv, -1, offset_01.expand(B, C, Hb, Wb).view(B, C, Hb*Wb)),\n torch.gather(pv, -1, offset_10.expand(B, C, Hb, Wb).view(B, C, Hb*Wb)),\n torch.gather(pv, -1, offset_11.expand(B, C, Hb, Wb).view(B, C, Hb*Wb))] # pv maps: with shape (B, C, Hb*Wb)\n # combine pv pixels\n x0, x1, y0, y1 = (x - x0.to(self.dtype)), ((x0+1).to(self.dtype) - x), (y - y0.to(self.dtype)), ((y0+1).to(self.dtype) - y)\n weights = [(x1 * y1), (x0 * y1), (x1 * y0), (x0 * y0)] # weight : with shape (B, 1, Hb, Wb)\n bvmap = sum([w.expand(B, C, Hb, Wb) * p.view(B, C, Hb, Wb) for w, p in zip(weights, pvmap)]) # bvmap with shape (B, C, Hb, Wb)\n #__import__('pdb').set_trace()\n bvmap[mask] = 0.0\n return bvmap\n" ]
[ [ "torch.inverse", "torch.tensor", "torch.matmul", "torch.arange", "torch.device", "torch.ones_like" ] ]
danielballan/suitcase-tiff
[ "eb401cd4f2f1bd637ec23c10472e0579f0cefc66" ]
[ "suitcase/tiff/tests.py" ]
[ "from . import export\nimport numpy\nfrom numpy.testing import assert_array_equal\nimport pytest\nimport tifffile\n\nexpected = numpy.ones((10, 10))\n\n\[email protected]('stack_images', [True, False])\ndef test_export(tmp_path, example_data, stack_images):\n ''' runs a test using the plan that is passed through to it\n\n ..note::\n\n Due to the `events_data` `pytest.fixture` this will run multiple tests\n each with a range of detectors and a range of event_types. see\n `suitcase.utils.conftest` for more info\n\n '''\n\n collector = example_data()\n artifacts = export(collector, tmp_path, file_prefix='',\n stack_images=stack_images)\n\n for filename in artifacts['stream_data']:\n actual = tifffile.imread(str(filename))\n if len(actual.shape) == 3:\n for img in actual:\n assert_array_equal(img, expected)\n else:\n assert_array_equal(actual, expected)\n" ]
[ [ "numpy.testing.assert_array_equal", "numpy.ones" ] ]
yjf18340/webots
[ "7c35a359848bafe81fe0229ac2ed587528f4c73e" ]
[ "projects/samples/robotbenchmark/visual_tracking/controllers/visual_tracking/visual_tracking.py" ]
[ "\"\"\"Sample Webots controller for the visual tracking benchmark.\"\"\"\n\nfrom controller import Robot, Node\nimport base64\nimport os\nimport sys\nimport tempfile\n\ntry:\n import numpy as np\nexcept ImportError:\n sys.exit(\"Warning: 'numpy' module not found. Please check the Python modules installation instructions \" +\n \"at 'https://www.cyberbotics.com/doc/guide/using-python'.\")\ntry:\n import cv2\nexcept ImportError:\n sys.exit(\"Warning: 'cv2' module not found. Please check the Python modules installation instructions \" +\n \"at 'https://www.cyberbotics.com/doc/guide/using-python'.\")\n\n\ndef cleanup():\n \"\"\"Remove device image files.\"\"\"\n # Ignore errors if file doesn't exist.\n try:\n os.remove(deviceImagePath + '/display.jpg')\n except OSError:\n pass\n try:\n os.remove(deviceImagePath + '/camera.jpg')\n except OSError:\n pass\n\n\ndef sendDeviceImage(robot, device):\n \"\"\"Send the rendering device image to the robot window.\"\"\"\n if device.getNodeType() == Node.DISPLAY:\n deviceName = 'display'\n fileName = deviceName + '.jpg'\n device.imageSave(None, deviceImagePath + '/' + fileName)\n elif device.getNodeType() == Node.CAMERA:\n deviceName = 'camera'\n fileName = deviceName + '.jpg'\n device.saveImage(deviceImagePath + '/' + fileName, 80)\n else:\n return\n with open(deviceImagePath + '/' + fileName, 'rb') as f:\n fileString = f.read()\n fileString64 = base64.b64encode(fileString).decode()\n robot.wwiSendText(\"image[\" + deviceName + \"]:data:image/jpeg;base64,\" + fileString64)\n f.close()\n\n\n# Set path to store temporary device images\ndeviceImagePath = os.getcwd()\ntry:\n imageFile = open(deviceImagePath + \"/image.jpg\", 'w')\n imageFile.close()\nexcept IOError:\n deviceImagePath = tempfile.gettempdir()\n\n# Get pointer to the robot.\nrobot = Robot()\n\n# Set the controller time step based on the current world's time step.\ntimestep = int(robot.getBasicTimeStep() * 4)\n\n# Get camera motors.\npanHeadMotor = robot.getMotor('PRM:/r1/c1/c2-Joint2:12')\ntiltHeadMotor = robot.getMotor('PRM:/r1/c1/c2/c3-Joint2:13')\n# Other camera motor not used in this controller.\n# tiltNeckMotor = robot.getMotor('PRM:/r1/c1-Joint2:11')\n\n# Initialize motors in order to use velocity control instead of position control.\npanHeadMotor.setPosition(float('+inf'))\ntiltHeadMotor.setPosition(float('+inf'))\n# Set initial motors velocity.\npanHeadMotor.setVelocity(0.0)\ntiltHeadMotor.setVelocity(0.0)\n\n# Get and enable the camera device.\ncamera = robot.getCamera('PRM:/r1/c1/c2/c3/i1-FbkImageSensor:F1')\ncamera.enable(timestep)\nwidth = camera.getWidth()\nheight = camera.getHeight()\n\n# Get the display device.\n# The display can be used to visually show the tracked position.\ndisplay = robot.getDisplay('display')\n# Show camera image in the display background.\ndisplay.attachCamera(camera)\ndisplay.setColor(0xFF0000)\n\n# Variables needed to draw the target on the display.\ntargetPoint = []\ntargetRadius = 0\n\n# Main loop: perform a simulation step until the simulation is over.\nwhile robot.step(timestep) != -1:\n # Remove previously detected blob info from the display if needed.\n if targetPoint:\n # Erase the previous drawing by setting the pixels alpha value to 0 (transparent).\n display.setAlpha(0.0)\n radius = targetRadius\n if radius < 5:\n # Minimum red dot size.\n radius = 5\n size = 2 * radius + 1\n display.fillRectangle(targetPoint[0] - radius,\n targetPoint[1] - radius, size, size)\n\n # Send the camera image to the robot window.\n # sendDeviceImage(robot, 
camera)\n\n # Get camera image.\n rawString = camera.getImage()\n\n # Create mask for yellow pixels based on the camera image.\n index = 0\n maskRGB = np.zeros([height, width], np.uint8)\n for j in range(0, height):\n for i in range(0, width):\n # Camera image pixel format\n if sys.version_info.major > 2: # Python 3 code\n b = rawString[index]\n g = rawString[index + 1]\n r = rawString[index + 2]\n else: # Python 2.7 code\n b = ord(rawString[index])\n g = ord(rawString[index + 1])\n r = ord(rawString[index + 2])\n index += 4\n # Yellow color threshold.\n if b < 50 and g > 180 and r > 180:\n maskRGB[j][i] = True\n\n # Find blobs contours in the mask.\n contours = cv2.findContours(maskRGB.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]\n\n # Only proceed if at least one blob is found.\n if not contours:\n continue\n\n # Choose the largest blob.\n blob = max(contours, key=cv2.contourArea)\n\n # Compute the minimum enclosing circle and centroid of the blob.\n ((x, y), radius) = cv2.minEnclosingCircle(blob)\n targetPoint = [int(x), int(y)]\n targetRadius = int(radius)\n\n # Show detected blob in the display: draw the circle and centroid.\n display.setAlpha(1.0)\n if targetRadius > 0:\n display.setColor(0x00FFFF)\n display.drawOval(targetPoint[0], targetPoint[1], targetRadius, targetRadius)\n display.setColor(0xFF0000)\n display.fillOval(int(targetPoint[0]), int(targetPoint[1]), 5, 5)\n # Send the display image to the robot window.\n sendDeviceImage(robot, display)\n\n # Move the head and camera in order to center the target object.\n # Compute distance in pixels between the target point and the center.\n dx = targetPoint[0] - width / 2\n dy = targetPoint[1] - height / 2\n # The speed factor 1.5 has been chosen empirically.\n panHeadMotor.setVelocity(-1.5 * dx / width)\n tiltHeadMotor.setVelocity(-1.5 * dy / height)\n\n# Cleanup code.\ncleanup()\n" ]
[ [ "numpy.zeros" ] ]
tpedelose/apls
[ "5afcadb1e75e5b2f0c0e0c8be4419251f61f23e7" ]
[ "apls/apls_utils.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 6 14:05:30 2019\n\n@author: avanetten\n\"\"\"\n\nimport osmnx_funcs\nimport numpy as np\nfrom osgeo import gdal, ogr, osr\nimport scipy.spatial\nimport geopandas as gpd\nimport rasterio as rio\nimport affine as af\nimport shapely\nimport time\nimport os\nimport sys\nimport cv2\nimport skimage\nimport subprocess\nimport matplotlib.pyplot as plt\nfrom math import sqrt, radians, cos, sin, asin\n# import logging\n\n# add apls path and import apls_tools\npath_apls_src = os.path.dirname(os.path.realpath(__file__))\nsys.path.append(path_apls_src)\n\n\n###############################################################################\ndef pixelToGeoCoord(xPix, yPix, inputRaster, sourceSR='', geomTransform='',\n targetSR=''):\n '''From spacenet geotools'''\n # If you want to gauruntee lon lat output, specify TargetSR otherwise, geocoords will be in image geo reference\n # targetSR = osr.SpatialReference()\n # targetSR.ImportFromEPSG(4326)\n # Transform can be performed at the polygon level instead of pixel level\n\n if targetSR == '':\n performReprojection = False\n targetSR = osr.SpatialReference()\n targetSR.ImportFromEPSG(4326)\n else:\n performReprojection = True\n\n if geomTransform == '':\n srcRaster = gdal.Open(inputRaster)\n geomTransform = srcRaster.GetGeoTransform()\n\n source_sr = osr.SpatialReference()\n source_sr.ImportFromWkt(srcRaster.GetProjectionRef())\n\n geom = ogr.Geometry(ogr.wkbPoint)\n xOrigin = geomTransform[0]\n yOrigin = geomTransform[3]\n pixelWidth = geomTransform[1]\n pixelHeight = geomTransform[5]\n\n xCoord = (xPix * pixelWidth) + xOrigin\n yCoord = (yPix * pixelHeight) + yOrigin\n geom.AddPoint(xCoord, yCoord)\n\n if performReprojection:\n if sourceSR == '':\n srcRaster = gdal.Open(inputRaster)\n sourceSR = osr.SpatialReference()\n sourceSR.ImportFromWkt(srcRaster.GetProjectionRef())\n coord_trans = osr.CoordinateTransformation(sourceSR, targetSR)\n geom.Transform(coord_trans)\n\n return (geom.GetX(), geom.GetY())\n\n\n###############################################################################\ndef nodes_near_point(x, y, kdtree, kd_idx_dic, x_coord='x', y_coord='y',\n n_neighbors=-1,\n radius_m=150,\n verbose=False):\n \"\"\"\n Get nodes near the given point.\n\n Notes\n -----\n if n_neighbors < 0, query based on distance,\n else just return n nearest neighbors\n\n Arguments\n ---------\n x : float\n x coordinate of point\n y: float\n y coordinate of point\n kdtree : scipy.spatial.kdtree\n kdtree of nondes in graph\n kd_idx_dic : dict\n Dictionary mapping kdtree entry to node name\n x_coord : str\n Name of x_coordinate, can be 'x' or 'lon'. Defaults to ``'x'``.\n y_coord : str\n Name of y_coordinate, can be 'y' or 'lat'. Defaults to ``'y'``.\n n_neighbors : int\n Neareast number of neighbors to return. 
If < 0, ignore.\n Defaults to ``-1``.\n radius_meters : float\n Radius to search for nearest neighbors\n Returns\n -------\n kd_idx_dic, kdtree, arr : tuple\n kd_idx_dic maps kdtree entry to node name\n kdree is the actual kdtree\n arr is the numpy array of node positions\n \"\"\"\n\n point = [x, y]\n\n # query kd tree for nodes of interest\n if n_neighbors > 0:\n node_names, idxs_refine, dists_m_refine = _query_kd_nearest(\n kdtree, kd_idx_dic, point, n_neighbors=n_neighbors)\n else:\n node_names, idxs_refine, dists_m_refine = _query_kd_ball(\n kdtree, kd_idx_dic, point, radius_m)\n\n if verbose:\n print((\"subgraph node_names:\", node_names))\n\n # get subgraph\n # G_sub = G_.subgraph(node_names)\n\n return node_names, dists_m_refine # G_sub\n\n\n###############################################################################\ndef _nodes_near_origin(G_, node, kdtree, kd_idx_dic,\n x_coord='x', y_coord='y', radius_m=150, verbose=False):\n '''Get nodes a given radius from the desired node. G_ should be the \n maximally simplified graph'''\n\n # get node coordinates\n n_props = G_.node[node]\n x0, y0 = n_props[x_coord], n_props[y_coord]\n point = [x0, y0]\n\n # query kd tree for nodes of interest\n node_names, idxs_refine, dists_m_refine = _query_kd_ball(\n kdtree, kd_idx_dic, point, radius_m)\n if verbose:\n print((\"subgraph node_names:\", node_names))\n\n # get subgraph\n # G_sub = G_.subgraph(node_names)\n\n return node_names, dists_m_refine # G_sub\n\n\n###############################################################################\ndef G_to_kdtree(G_, x_coord='x', y_coord='y', verbose=False):\n \"\"\"\n Create kd tree from node positions.\n\n Notes\n -----\n (x, y) = (lon, lat)\n kd_idx_dic maps kdtree entry to node name:\n kd_idx_dic[i] = n (n in G.nodes())\n x_coord can be in utm (meters), or longitude\n\n Arguments\n ---------\n G_ : networkx graph\n Input networkx graph, with nodes assumed to have a dictioary of\n properties that includes position\n x_coord : str\n Name of x_coordinate, can be 'x' or 'lon'. Defaults to ``'x'``.\n y_coord : str\n Name of y_coordinate, can be 'y' or 'lat'. 
Defaults to ``'y'``.\n\n Returns\n -------\n kd_idx_dic, kdtree, arr : tuple\n kd_idx_dic maps kdtree entry to node name\n kdree is the actual kdtree\n arr is the numpy array of node positions\n \"\"\"\n\n nrows = len(G_.nodes())\n ncols = 2\n kd_idx_dic = {}\n arr = np.zeros((nrows, ncols))\n # populate node array\n t1 = time.time()\n for i, n in enumerate(G_.nodes()):\n n_props = G_.nodes[n]\n if x_coord == 'lon':\n lat, lon = n_props['lat'], n_props['lon']\n x, y = lon, lat\n else:\n x, y = n_props[x_coord], n_props[y_coord]\n\n arr[i] = [x, y]\n kd_idx_dic[i] = n\n\n # now create kdtree from numpy array\n kdtree = scipy.spatial.KDTree(arr)\n if verbose:\n print(\"Time to create k-d tree:\", time.time() - t1, \"seconds\")\n return kd_idx_dic, kdtree, arr\n\n\n###############################################################################\ndef _query_kd_nearest(kdtree, kd_idx_dic, point, n_neighbors=10,\n distance_upper_bound=10000, keep_point=True):\n '''\n Query the kd-tree for neighbors\n Return nearest node names, distances, nearest node indexes\n If not keep_point, remove the origin point from the list\n '''\n\n dists_m, idxs = kdtree.query(point, k=n_neighbors,\n distance_upper_bound=distance_upper_bound)\n\n idxs_refine = list(np.asarray(idxs))\n # print(\"apls_utils.query_kd_neareast - idxs_refilne:\", idxs_refine)\n # print(\"apls_utils.query_kd_neareast - dists_m_refilne:\", dists_m)\n dists_m_refine = list(dists_m)\n node_names = [kd_idx_dic[i] for i in idxs_refine]\n\n return node_names, idxs_refine, dists_m_refine\n\n\n###############################################################################\ndef _query_kd_ball(kdtree, kd_idx_dic, point, r_meters, keep_point=True):\n '''\n Query the kd-tree for neighbors within a distance r of the point\n Return nearest node names, distances, nearest node indexes\n if not keep_point, remove the origin point from the list\n '''\n\n dists_m, idxs = kdtree.query(point, k=500, distance_upper_bound=r_meters)\n # keep only points within distance and greaater than 0?\n if not keep_point:\n f0 = np.where((dists_m <= r_meters) & (dists_m > 0))\n else:\n f0 = np.where((dists_m <= r_meters))\n idxs_refine = list(np.asarray(idxs)[f0])\n dists_m_refine = list(dists_m[f0])\n node_names = [kd_idx_dic[i] for i in idxs_refine]\n\n return node_names, idxs_refine, dists_m_refine\n\n\n###############################################################################\ndef _get_graph_extent(G_):\n '''min and max x and y'''\n xall = [G_.node[n]['x'] for n in G_.nodes()]\n yall = [G_.node[n]['y'] for n in G_.nodes()]\n xmin, xmax = np.min(xall), np.max(xall)\n ymin, ymax = np.min(yall), np.max(yall)\n dx, dy = xmax-xmin, ymax-ymin\n return xmin, xmax, ymin, ymax, dx, dy\n\n\n###############################################################################\ndef _latlon2pixel(lat, lon, input_raster='', targetsr='', geom_transform=''):\n '''\n Convert latitude, longitude coords to pixexl coords.\n From spacenet geotools\n '''\n\n sourcesr = osr.SpatialReference()\n sourcesr.ImportFromEPSG(4326)\n\n geom = ogr.Geometry(ogr.wkbPoint)\n # geom.AddPoint(lon, lat)\n geom.AddPoint(lat, lon)\n\n if targetsr == '':\n src_raster = gdal.Open(input_raster)\n targetsr = osr.SpatialReference()\n targetsr.ImportFromWkt(src_raster.GetProjectionRef())\n coord_trans = osr.CoordinateTransformation(sourcesr, targetsr)\n if geom_transform == '':\n src_raster = gdal.Open(input_raster)\n transform = src_raster.GetGeoTransform()\n else:\n transform = geom_transform\n\n x_origin = 
transform[0]\n # print(x_origin)\n y_origin = transform[3]\n # print(y_origin)\n pixel_width = transform[1]\n # print(pixel_width)\n pixel_height = transform[5]\n # print(pixel_height)\n geom.Transform(coord_trans)\n # print(geom.GetPoint())\n x_pix = (geom.GetPoint()[0] - x_origin) / pixel_width\n y_pix = (geom.GetPoint()[1] - y_origin) / pixel_height\n\n return (x_pix, y_pix)\n\n\n###############################################################################\ndef _wmp2pixel(x, y, input_raster='', targetsr='', geom_transform=''):\n '''\n Convert wmp coords to pixexl coords.\n '''\n\n sourcesr = osr.SpatialReference()\n sourcesr.ImportFromEPSG(3857)\n\n geom = ogr.Geometry(ogr.wkbPoint)\n geom.AddPoint(x, y)\n\n if targetsr == '':\n src_raster = gdal.Open(input_raster)\n targetsr = osr.SpatialReference()\n targetsr.ImportFromWkt(src_raster.GetProjectionRef())\n coord_trans = osr.CoordinateTransformation(sourcesr, targetsr)\n if geom_transform == '':\n src_raster = gdal.Open(input_raster)\n transform = src_raster.GetGeoTransform()\n else:\n transform = geom_transform\n\n x_origin = transform[0]\n # print(x_origin)\n y_origin = transform[3]\n # print(y_origin)\n pixel_width = transform[1]\n # print(pixel_width)\n pixel_height = transform[5]\n # print(pixel_height)\n geom.Transform(coord_trans)\n # print(geom.GetPoint())\n x_pix = (geom.GetPoint()[0] - x_origin) / pixel_width\n y_pix = (geom.GetPoint()[1] - y_origin) / pixel_height\n\n return (x_pix, y_pix)\n\n\n###############################################################################\ndef _set_pix_coords(G_, im_test_file=''):\n '''Get pixel coords. Update G_ and get control_points, and graph_coords'''\n\n if len(G_.nodes()) == 0:\n return G_, [], []\n\n control_points, cp_x, cp_y = [], [], []\n for n in G_.nodes():\n u_x, u_y = G_.nodes[n]['x'], G_.nodes[n]['y']\n control_points.append([n, u_x, u_y])\n lat, lon = G_.nodes[n]['lat'], G_.nodes[n]['lon']\n if len(im_test_file) > 0:\n pix_x, pix_y = _latlon2pixel(lat, lon, input_raster=im_test_file)\n else:\n print(\"set_pix_coords(): oops, no image file\")\n pix_x, pix_y = 0, 0\n # update G_\n G_.nodes[n]['pix_col'] = pix_x\n G_.nodes[n]['pix_row'] = pix_y\n G_.nodes[n]['x_pix'] = pix_x\n G_.nodes[n]['y_pix'] = pix_y\n # add to arrays\n cp_x.append(pix_x)\n cp_y.append(pix_y)\n # get line segements in pixel coords\n seg_endpoints = []\n for (u, v) in G_.edges():\n ux, uy = G_.nodes[u]['pix_col'], G_.nodes[u]['pix_row']\n vx, vy = G_.nodes[v]['pix_col'], G_.nodes[v]['pix_row']\n seg_endpoints.append([(ux, uy), (vx, vy)])\n gt_graph_coords = (cp_x, cp_y, seg_endpoints)\n\n return G_, control_points, gt_graph_coords\n\n\n###############################################################################\ndef convertTo8Bit(rasterImageName, outputRaster,\n outputPixType='Byte',\n outputFormat='GTiff',\n rescale_type='rescale',\n percentiles=[2, 98]):\n '''\n This does a relatively poor job of converting to 8bit, as opening in qgis\n the images look very different.\n rescale_type = [clip, rescale]\n if resceale, each band is rescaled to its own min and max\n if clip, scaling is done sctricly between 0 65535\n '''\n\n srcRaster = gdal.Open(rasterImageName)\n nbands = srcRaster.RasterCount\n if nbands == 3:\n cmd = ['gdal_translate', '-ot', outputPixType, '-of', outputFormat,\n '-co', '\"PHOTOMETRIC=rgb\"']\n else:\n cmd = ['gdal_translate', '-ot', outputPixType, '-of', outputFormat]\n\n for bandId in range(srcRaster.RasterCount):\n bandId = bandId+1\n band = srcRaster.GetRasterBand(bandId)\n if 
rescale_type == 'rescale':\n bmin = band.GetMinimum()\n bmax = band.GetMaximum()\n # if not exist minimum and maximum values\n if bmin is None or bmax is None:\n (bmin, bmax) = band.ComputeRasterMinMax(1)\n # else, rescale\n band_arr_tmp = band.ReadAsArray()\n bmin = np.percentile(band_arr_tmp.flatten(), percentiles[0])\n bmax = np.percentile(band_arr_tmp.flatten(), percentiles[1])\n\n else:\n bmin, bmax = 0, 65535\n\n cmd.append('-scale_{}'.format(bandId))\n cmd.append('{}'.format(bmin))\n cmd.append('{}'.format(bmax))\n cmd.append('{}'.format(0))\n cmd.append('{}'.format(255))\n\n cmd.append(rasterImageName)\n cmd.append(outputRaster)\n print(cmd)\n subprocess.call(cmd)\n\n return\n\n\n###############################################################################\n# def edit_node_props(props, new):\n# pass\n\n\n###############################################################################\ndef create_buffer_geopandas(inGDF, buffer_distance_meters=2,\n buffer_cap_style=1, dissolve_by='class',\n projectToUTM=True, verbose=False):\n \"\"\"\n Create a buffer around the lines of the geojson\n\n Arguments\n ---------\n inGDF : geodataframe\n Geodataframe from a SpaceNet geojson.\n buffer_distance_meters : float\n Width of buffer around geojson lines. Formally, this is the distance\n to each geometric object. Optional. Defaults to ``2``.\n buffer_cap_style : int\n Cap_style of buffer, see: (https://shapely.readthedocs.io/en/stable/manual.html#constructive-methods)\n Defaults to ``1`` (round).\n dissolve_by : str\n Method for differentiating rows in geodataframe, and creating unique\n mask values. Defaults to ``'class'``.\n projectToUTM : bool\n Switch to project gdf to UTM coordinates. Defaults to ``True``.\n verbose : bool\n Switch to print relevant values. 
Defaults to ``False``.\n\n Returns\n -------\n gdf_buffer : geopandas dataframe\n Dataframe created from geojson\n\n \"\"\"\n\n # inGDF = gpd.read_file(geoJsonFileName)\n if len(inGDF) == 0:\n return []\n\n # if we want a geojson instead of gdf for input\n # try:\n # inGDF = gpd.read_file(geoJsonFileName)\n # except:\n # return []\n\n # Transform gdf Roadlines into UTM so that Buffer makes sense\n if projectToUTM:\n tmpGDF = osmnx_funcs.project_gdf(inGDF, inGDF.crs)\n else:\n tmpGDF = inGDF\n\n if verbose:\n print(\"inGDF.columns:\", tmpGDF.columns)\n gdf_utm_buffer = tmpGDF.copy()\n\n # perform Buffer to produce polygons from Line Segments\n gdf_utm_buffer['geometry'] = tmpGDF.buffer(buffer_distance_meters,\n cap_style=buffer_cap_style)\n\n gdf_utm_dissolve = gdf_utm_buffer.dissolve(by=dissolve_by)\n gdf_utm_dissolve.crs = gdf_utm_buffer.crs\n if projectToUTM:\n gdf_buffer = gdf_utm_dissolve.to_crs(inGDF.crs)\n else:\n gdf_buffer = gdf_utm_dissolve\n if verbose:\n print(\"gdf_buffer['geometry'].values[0]:\",\n gdf_buffer['geometry'].values[0])\n\n # add the dissolve_by column back into final gdf, since it's now the index\n gdf_buffer[dissolve_by] = gdf_buffer.index.values\n\n return gdf_buffer\n\n\n###############################################################################\ndef _get_road_buffer(geoJson, im_vis_file, output_raster,\n buffer_meters=2, burnValue=1,\n # max_mask_val=1,\n buffer_cap_style=6,\n useSpacenetLabels=False,\n plot_file='', figsize=(11, 3), fontsize=6,\n dpi=800, show_plot=False,\n valid_road_types=set([]), verbose=False):\n '''\n Wrapper around create_buffer_geopandas(), with plots\n Get buffer around roads defined by geojson and image files\n valid_road_types serves as a filter of valid types (no filter if len==0)\n https://wiki.openstreetmap.org/wiki/Key:highway\n valid_road_types = set(['motorway', 'trunk', 'primary', 'secondary',\n 'tertiary',\n 'motorway_link', 'trunk_link', 'primary_link',\n 'secondary_link', 'tertiary_link',\n 'unclassified', 'residential', 'service' ])\n '''\n\n # get buffer\n\n # filter out roads of the wrong type\n try:\n inGDF_raw = gpd.read_file(geoJson)\n except:\n mask_gray = np.zeros(cv2.imread(im_vis_file, 0).shape)\n cv2.imwrite(output_raster, mask_gray)\n return [], []\n\n if useSpacenetLabels:\n inGDF = inGDF_raw\n # use try/except to handle empty label files\n try:\n inGDF['type'] = inGDF['road_type'].values\n inGDF['class'] = 'highway'\n inGDF['highway'] = 'highway'\n except:\n pass\n\n else:\n # filter out roads of the wrong type\n if (len(valid_road_types) > 0) and (len(inGDF_raw) > 0):\n if 'highway' in inGDF_raw.columns:\n inGDF = inGDF_raw[inGDF_raw['highway'].isin(valid_road_types)]\n # set type tag\n inGDF['type'] = inGDF['highway'].values\n inGDF['class'] = 'highway'\n else:\n inGDF = inGDF_raw[inGDF_raw['type'].isin(valid_road_types)]\n # set highway tag\n inGDF['highway'] = inGDF['type'].values\n\n if verbose:\n print(\"gdf.type:\", inGDF['type'])\n if len(inGDF) != len(inGDF_raw):\n print(\"len(inGDF), len(inGDF_raw)\",\n len(inGDF), len(inGDF_raw))\n print(\"gdf['type']:\", inGDF['type'])\n else:\n inGDF = inGDF_raw\n try:\n inGDF['type'] = inGDF['highway'].values\n inGDF['class'] = 'highway'\n except:\n pass\n\n gdf_buffer = create_buffer_geopandas(inGDF,\n buffer_distance_meters=buffer_meters,\n buffer_cap_style=buffer_cap_style,\n dissolve_by='class',\n projectToUTM=True)\n\n # make sure gdf is not null\n if len(gdf_buffer) == 0:\n mask_gray = np.zeros(cv2.imread(im_vis_file, 0).shape)\n 
cv2.imwrite(output_raster, mask_gray)\n # create label image\n else:\n gdf_to_array(gdf_buffer, im_vis_file, output_raster,\n burnValue=burnValue)\n # load mask\n mask_gray = cv2.imread(output_raster, 0)\n # mask_gray = np.clip(mask_gray, 0, max_mask_val)\n\n if plot_file:\n\n fig, (ax0, ax1, ax2, ax3) = plt.subplots(1, 4, figsize=figsize)\n\n # road lines\n try:\n gdfRoadLines = gpd.read_file(geoJson)\n gdfRoadLines.plot(ax=ax0, marker='o', color='red')\n except:\n ax0.imshow(mask_gray)\n ax0.axis('off')\n ax0.set_aspect('equal')\n ax0.set_title('Unfiltered Roads from GeoJson', fontsize=fontsize)\n\n # first show raw image\n im_vis = cv2.imread(im_vis_file, 1)\n img_mpl = cv2.cvtColor(im_vis, cv2.COLOR_BGR2RGB)\n ax1.imshow(img_mpl)\n ax1.axis('off')\n ax1.set_title('Raw Image', fontsize=fontsize)\n\n # plot mask\n ax2.imshow(mask_gray)\n ax2.axis('off')\n ax2.set_title('Roads Mask (' + str(np.round(buffer_meters))\n + ' meter buffer)', fontsize=fontsize)\n\n # plot combined\n ax3.imshow(img_mpl)\n # overlay mask\n # set zeros to nan\n z = mask_gray.astype(float)\n z[z == 0] = np.nan\n # change palette to orange\n palette = plt.cm.gray\n palette.set_over('orange', 1.0)\n ax3.imshow(z, cmap=palette, alpha=0.4,\n norm=matplotlib.colors.Normalize(vmin=0.5, vmax=0.9, clip=False))\n ax3.set_title('Raw Image + Buffered Roads', fontsize=fontsize)\n ax3.axis('off')\n\n #plt.axes().set_aspect('equal', 'datalim')\n\n # plt.tight_layout()\n plt.savefig(plot_file, dpi=dpi)\n if not show_plot:\n plt.close()\n\n return mask_gray, gdf_buffer\n\n\n##############################################################################\ndef gdf_to_array(gdf, im_file, output_raster, burnValue=150,\n mask_burn_val_key='', compress=True, NoData_value=0,\n verbose=False):\n \"\"\"\n Create buffer around geojson for desired geojson feature, save as mask\n\n Notes\n -----\n https://gis.stackexchange.com/questions/260736/how-to-burn-a-different-value-for-each-polygon-in-a-json-file-using-gdal-rasteri/260737\n\n\n Arguments\n ---------\n image_path : gdf\n Input geojson\n im_file : str\n Path to image file corresponding to gdf.\n output_raster : str\n Output path of saved mask (should end in .tif).\n burnValue : int\n Value to burn to mask. Superceded by mask_burn_val_key.\n Defaults to ``150``.\n mask_burn_val_key : str\n Column name in gdf to use for mask burning. Supercedes burnValue.\n Defaults to ``''`` (in which case burnValue is used).\n compress : bool\n Switch to compress output raster. Defaults to ``True``.\n NoData_value : int\n Value to assign array if no data exists. If this value is <0\n (e.g. -9999), a null value will show in the image. Defaults to ``0``.\n verbose : bool\n Switch to print relevant values. 
Defaults to ``False``.\n\n Returns\n -------\n None\n \"\"\"\n\n gdata = gdal.Open(im_file)\n\n # set target info\n if compress:\n target_ds = gdal.GetDriverByName('GTiff').Create(output_raster,\n gdata.RasterXSize,\n gdata.RasterYSize, 1,\n gdal.GDT_Byte,\n ['COMPRESS=LZW'])\n else:\n target_ds = gdal.GetDriverByName('GTiff').Create(output_raster,\n gdata.RasterXSize,\n gdata.RasterYSize, 1,\n gdal.GDT_Byte)\n\n target_ds.SetGeoTransform(gdata.GetGeoTransform())\n if verbose:\n print(\"gdata.GetGeoTransform():\", gdata.GetGeoTransform())\n\n # set raster info\n raster_srs = osr.SpatialReference()\n raster_srs.ImportFromWkt(gdata.GetProjectionRef())\n target_ds.SetProjection(raster_srs.ExportToWkt())\n if verbose:\n print(\"target_ds:\", target_ds)\n\n band = target_ds.GetRasterBand(1)\n band.SetNoDataValue(NoData_value)\n\n outdriver = ogr.GetDriverByName('MEMORY')\n outDataSource = outdriver.CreateDataSource('memData')\n tmp = outdriver.Open('memData', 1)\n outLayer = outDataSource.CreateLayer(\"states_extent\", raster_srs,\n geom_type=ogr.wkbMultiPolygon)\n # burn\n burnField = \"burn\"\n idField = ogr.FieldDefn(burnField, ogr.OFTInteger)\n outLayer.CreateField(idField)\n featureDefn = outLayer.GetLayerDefn()\n for j, geomShape in enumerate(gdf['geometry'].values):\n if verbose:\n print(j, \"geomshape:\", geomShape)\n outFeature = ogr.Feature(featureDefn)\n outFeature.SetGeometry(ogr.CreateGeometryFromWkt(geomShape.wkt))\n if len(mask_burn_val_key) > 0:\n burnVal = int(gdf[mask_burn_val_key].values[j])\n if verbose:\n print(\"burnVal:\", burnVal)\n else:\n burnVal = burnValue\n outFeature.SetField(burnField, burnVal)\n outLayer.CreateFeature(outFeature)\n # if verbose:\n # print (\"outFeature:\", outFeature)\n outFeature = 0\n\n if len(mask_burn_val_key) > 0:\n gdal.RasterizeLayer(target_ds, [1], outLayer,\n options=[\"ATTRIBUTE=%s\" % burnField])\n else:\n gdal.RasterizeLayer(target_ds, [1], outLayer, burn_values=[burnVal])\n\n outLayer = 0\n outDatSource = 0\n tmp = 0\n return\n\n\n###############################################################################\ndef geojson_to_arr(image_path, geojson_path, mask_path_out_gray,\n buffer_distance_meters=2, buffer_cap_style=1,\n dissolve_by='speed_mph', mask_burn_val_key='burnValue',\n min_burn_val=0, max_burn_val=255,\n verbose=False):\n \"\"\"\n Create buffer around geojson for desired geojson feature, save as mask\n\n Arguments\n ---------\n image_path : str\n Path to input image corresponding to the geojson file.\n geojson_path : str\n Path to geojson file.\n mask_path_out_gray : str\n Output path of saved mask (should end in .tif).\n buffer_distance_meters : float\n Width of buffer around geojson lines. Formally, this is the distance\n to each geometric object. Optional. Defaults to ``2``.\n buffer_cap_style : int\n Cap_style of buffer, see: (https://shapely.readthedocs.io/en/stable/manual.html#constructive-methods)\n Defaults to ``1`` (round).\n dissolve_by : str\n Method for differentiating rows in geodataframe, and creating unique\n mask values. Defaults to ``'speed_m/s'``.\n mask_burn_value : str\n Column to name burn value in geodataframe. Defaults to ``'burnValue'``.\n min_burn_val : int\n Minimum value to burn to mask. Rescale all values linearly with this\n minimum value. If <= 0, ignore. Defaultst to ``0``.\n max_burn_val : int\n Maximum value to burn to mask. Rescale all values linearly with this\n maxiumum value. If <= 0, ignore. Defaultst to ``256``.\n verbose : bool\n Switch to print relevant values. 
Defaults to ``False``.\n\n Returns\n -------\n gdf_buffer : geopandas dataframe\n Dataframe created from geojson\n \"\"\"\n\n # get gdf_buffer\n try:\n inGDF = gpd.read_file(geojson_path)\n except TypeError:\n print(\"Empty mask for path:\", geojson_path)\n # create emty mask\n h, w = cv2.imread(image_path, 0).shape[:2]\n mask_gray = np.zeros((h, w)).astype(np.uint8)\n skimage.io.imsave(mask_path_out_gray, mask_gray)\n # cv2.imwrite(mask_path_out, mask_gray)\n return []\n\n gdf_buffer = create_buffer_geopandas(\n inGDF, buffer_distance_meters=buffer_distance_meters,\n buffer_cap_style=buffer_cap_style, dissolve_by=dissolve_by,\n projectToUTM=False, verbose=verbose)\n\n if verbose:\n print(\"gdf_buffer.columns:\", gdf_buffer.columns)\n print(\"gdf_buffer:\", gdf_buffer)\n\n # set burn values\n burn_vals_raw = gdf_buffer[dissolve_by].values.astype(float)\n if verbose:\n print(\"burn_vals_raw:\", burn_vals_raw)\n if (max_burn_val > 0) and (min_burn_val >= 0):\n scale_mult = (max_burn_val - min_burn_val) / np.max(burn_vals_raw)\n # scale_mult = max_burn_val / np.max(burn_vals_raw)\n burn_vals = min_burn_val + scale_mult * burn_vals_raw\n else:\n burn_vals = burn_vals_raw\n if verbose:\n print(\"np.unique burn_vals:\", np.sort(np.unique(burn_vals)))\n gdf_buffer[mask_burn_val_key] = burn_vals\n\n # create mask\n gdf_to_array(gdf_buffer, image_path, mask_path_out_gray,\n mask_burn_val_key=mask_burn_val_key,\n verbose=verbose)\n\n return gdf_buffer\n\n\n###############################################################################\ndef _create_speed_arr(image_path, geojson_path, mask_path_out_gray,\n bin_conversion_func, mask_burn_val_key='burnValue',\n buffer_distance_meters=2, buffer_cap_style=1,\n dissolve_by='speed_m/s', bin_conversion_key='speed_mph',\n verbose=False):\n '''\n Similar to create_arr_from_geojson()\n Create buffer around geojson for speeds, use bin_conversion_func to\n assign values to the mask\n '''\n\n # get gdf_buffer\n try:\n inGDF = gpd.read_file(geojson_path)\n except:\n print(\"Empty mask for path:\", geojson_path)\n # create emty mask\n h, w = cv2.imread(image_path, 0).shape[:2]\n mask_gray = np.zeros((h, w)).astype(np.uint8)\n skimage.io.imsave(mask_path_out_gray, mask_gray)\n # cv2.imwrite(mask_path_out, mask_gray)\n return []\n\n gdf_buffer = create_buffer_geopandas(\n inGDF, buffer_distance_meters=buffer_distance_meters,\n buffer_cap_style=buffer_cap_style, dissolve_by=dissolve_by,\n projectToUTM=True, verbose=verbose)\n\n # set burn values\n speed_arr = gdf_buffer[bin_conversion_key].values\n burnVals = [bin_conversion_func(s) for s in speed_arr]\n gdf_buffer[mask_burn_val_key] = burnVals\n\n # create mask\n gdf_to_array(gdf_buffer, image_path, mask_path_out_gray,\n mask_burn_val_key=mask_burn_val_key, verbose=verbose)\n\n return gdf_buffer\n\n\n###############################################################################\ndef create_speed_gdf_v0(image_path, geojson_path, mask_path_out_gray,\n bin_conversion_func, mask_burn_val_key='burnValue',\n buffer_distance_meters=2, buffer_cap_style=1,\n dissolve_by='speed_m/s', bin_conversion_key='speed_mph',\n verbose=False):\n '''\n Create buffer around geojson for speeds, use bin_conversion_func to\n assign values to the mask\n '''\n\n # get gdf_buffer\n try:\n inGDF = gpd.read_file(geojson_path)\n except:\n print(\"Empty mask for path:\", geojson_path)\n # create emty mask\n h, w = cv2.imread(image_path, 0).shape[:2]\n mask_gray = np.zeros((h, w)).astype(np.uint8)\n skimage.io.imsave(mask_path_out_gray, mask_gray)\n 
# cv2.imwrite(mask_path_out, mask_gray)\n return []\n\n # project\n projGDF = osmnx_funcs.project_gdf(inGDF)\n if verbose:\n print(\"inGDF.columns:\", inGDF.columns)\n\n gdf_utm_buffer = projGDF.copy()\n # perform Buffer to produce polygons from Line Segments\n gdf_utm_buffer['geometry'] = gdf_utm_buffer.buffer(buffer_distance_meters,\n buffer_cap_style)\n gdf_utm_dissolve = gdf_utm_buffer.dissolve(by=dissolve_by)\n gdf_utm_dissolve.crs = gdf_utm_buffer.crs\n gdf_buffer = gdf_utm_dissolve.to_crs(inGDF.crs)\n if verbose:\n print(\"gdf_buffer['geometry'].values[0]:\",\n gdf_buffer['geometry'].values[0])\n\n # set burn values\n speed_arr = gdf_buffer[bin_conversion_key].values\n burnVals = [bin_conversion_func(s) for s in speed_arr]\n gdf_buffer[mask_burn_val_key] = burnVals\n\n # create mask\n gdf_to_array(gdf_buffer, image_path, mask_path_out_gray,\n mask_burn_val_key=mask_burn_val_key, verbose=verbose)\n\n return gdf_buffer\n\n\n###############################################################################\ndef convert_array_to_multichannel(in_arr, n_channels=7, burnValue=255,\n append_total_band=False, verbose=False):\n '''Take input array with multiple values, and make each value a unique\n channel. Assume a zero value is background, while value of 1 is the \n first channel, 2 the second channel, etc.'''\n\n h, w = in_arr.shape[:2]\n # scikit image wants it in this format by default\n out_arr = np.zeros((n_channels, h, w), dtype=np.uint8)\n #out_arr = np.zeros((h,w,n_channels), dtype=np.uint8)\n\n for band in range(n_channels):\n val = band + 1\n band_out = np.zeros((h, w), dtype=np.uint8)\n if verbose:\n print(\"band:\", band)\n band_arr_bool = np.where(in_arr == val)\n band_out[band_arr_bool] = burnValue\n out_arr[band, :, :] = band_out\n #out_arr[:,:,band] = band_out\n\n if append_total_band:\n tot_band = np.zeros((h, w), dtype=np.uint8)\n band_arr_bool = np.where(in_arr > 0)\n tot_band[band_arr_bool] = burnValue\n tot_band = tot_band.reshape(1, h, w)\n out_arr = np.concatenate((out_arr, tot_band), axis=0).astype(np.uint8)\n\n if verbose:\n print(\"out_arr.shape:\", out_arr.shape)\n return out_arr\n\n\n# Helper Functions\n###############################################################################\ndef CreateMultiBandGeoTiff(OutPath, Array):\n '''\n Author: Jake Shermeyer\n Array has shape:\n Channels, Y, X?\n '''\n driver = gdal.GetDriverByName('GTiff')\n DataSet = driver.Create(OutPath, Array.shape[2], Array.shape[1],\n Array.shape[0], gdal.GDT_Byte,\n ['COMPRESS=LZW'])\n for i, image in enumerate(Array, 1):\n DataSet.GetRasterBand(i).WriteArray(image)\n del DataSet\n\n return OutPath\n\n\n###############################################################################\ndef geomGeo2geomPixel(geom, affineObject=[], input_raster='',\n gdal_geomTransform=[]):\n '''spacenet utilities v3 geotools.py'''\n # This function transforms a shapely geometry in geospatial coordinates into pixel coordinates\n # geom must be shapely geometry\n # affineObject = rasterio.open(input_raster).affine\n # gdal_geomTransform = gdal.Open(input_raster).GetGeoTransform()\n # input_raster is path to raster to gather georectifcation information\n if not affineObject:\n if input_raster != '':\n affineObject = rio.open(input_raster).transform\n elif gdal_geomTransform != []:\n affineObject = af.Affine.from_gdal(gdal_geomTransform)\n else:\n return geom\n\n affineObjectInv = ~affineObject\n\n geomTransform = shapely.affinity.affine_transform(geom,\n [affineObjectInv.a,\n affineObjectInv.b,\n 
affineObjectInv.d,\n affineObjectInv.e,\n affineObjectInv.xoff,\n affineObjectInv.yoff]\n )\n\n return geomTransform\n\n\n###############################################################################\ndef geomPixel2geomGeo(geom, affineObject=[], input_raster='', gdal_geomTransform=[]):\n '''spacenet utilities v3 geotools.py'''\n # This function transforms a shapely geometry in pixel coordinates into geospatial coordinates\n # geom must be shapely geometry\n # affineObject = rasterio.open(input_raster).affine\n # gdal_geomTransform = gdal.Open(input_raster).GetGeoTransform()\n # input_raster is path to raster to gather georectifcation information\n if not affineObject:\n if input_raster != '':\n affineObject = rio.open(input_raster).transform\n elif gdal_geomTransform != []:\n affineObject = af.Affine.from_gdal(gdal_geomTransform)\n else:\n return geom\n\n geomTransform = shapely.affinity.affine_transform(geom,\n [affineObject.a,\n affineObject.b,\n affineObject.d,\n affineObject.e,\n affineObject.xoff,\n affineObject.yoff]\n )\n\n return geomTransform\n\n\n###############################################################################\n# Haversine formula example in Python\n# Author: Wayne Dyck\n# def distance_haversine(lat1, lon1, lat2, lon2, earth_radius_km=6371):\n# #lat1, lon1 = origin\n# #lat2, lon2 = destination\n#\n# dlat = math.radians(lat2-lat1)\n# dlon = math.radians(lon2-lon1)\n# a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(lat1)) \\\n# * math.cos(math.radians(lat2)) * math.sin(dlon/2) * math.sin(dlon/2)\n# c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))\n# d = earth_radius_km * c\n#\n# return d\n\n\n###############################################################################\ndef _haversine(lon1, lat1, lon2, lat2):\n \"\"\"\n Calculate the great circle distance between two points in m\n on the earth (specified in decimal degrees)\n http://stackoverflow.com/questions/15736995/how-can-i-\n quickly-estimate-the-distance-between-two-latitude-longitude-points\n \"\"\"\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = list(map(radians, [lon1, lat1, lon2, lat2]))\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n km = 6367 * c\n m = 1000. 
* km\n return m\n\n###############################################################################\n\n\ndef get_gsd(im_test_file):\n '''return gsd in meters'''\n srcImage = gdal.Open(im_test_file)\n geoTrans = srcImage.GetGeoTransform()\n ulX = geoTrans[0]\n ulY = geoTrans[3]\n # xDist = geoTrans[1]\n yDist = geoTrans[5]\n # rtnX = geoTrans[2]\n # rtnY = geoTrans[4]\n\n # get haversine distance\n # dx = _haversine(ulX, ulY, ulX+xDist, ulY) #haversine(lon1, lat1, lon2, lat2)\n # haversine(lon1, lat1, lon2, lat2)\n dy = _haversine(ulX, ulY, ulX, ulY+yDist)\n\n return dy # dx\n\n\n###############################################################################\ndef get_extent(srcFileImage):\n gdata = gdal.Open(srcFileImage)\n geo = gdata.GetGeoTransform()\n # data = gdata.ReadAsArray()\n\n xres = geo[1]\n yres = geo[5]\n # xmin = geo[0]\n # xmax = geo[0] + (xres * gdata.RasterXSize)\n # ymin = geo[3] + (yres * gdata.RasterYSize)\n # ymax = geo[3]\n xmin = geo[0] + xres * 0.5\n xmax = geo[0] + (xres * gdata.RasterXSize) - xres * 0.5\n ymin = geo[3] + (yres * gdata.RasterYSize) + yres * 0.5\n ymax = geo[3] - yres * 0.5\n\n return xmin, ymin, xmax, ymax\n\n\n###############################################################################\ndef get_pixel_dist_from_meters(im_test_file, len_meters):\n '''For the input image, we want a buffer or other distance in meters,\n this function determines the pixel distance by calculating the GSD'''\n gsd = get_gsd(im_test_file)\n pix_width = max(1, np.rint(len_meters/gsd))\n\n return gsd, pix_width\n\n\n###############################################################################\ndef get_unique(seq, idfun=None):\n '''https://www.peterbe.com/plog/uniqifiers-benchmark'''\n # order preserving\n if idfun is None:\n def idfun(x): return x\n seen = {}\n result = []\n for item in seq:\n marker = idfun(item)\n # in old Python versions:\n # if seen.has_key(marker)\n # but in new ones:\n if marker in seen:\n continue\n seen[marker] = 1\n result.append(item)\n return result\n\n\n###############################################################################\ndef _get_node_positions(G_, x_coord='x', y_coord='y'):\n '''Get position array for all nodes'''\n nrows = len(G_.nodes())\n ncols = 2\n arr = np.zeros((nrows, ncols))\n # populate node array\n for i, n in enumerate(G_.nodes()):\n n_props = G_.node[n]\n x, y = n_props[x_coord], n_props[y_coord]\n arr[i] = [x, y]\n return arr\n" ]
[ [ "numpy.min", "numpy.asarray", "numpy.unique", "numpy.rint", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "numpy.concatenate", "numpy.max", "numpy.round", "matplotlib.pyplot.close", "numpy.zeros", "numpy.where" ] ]
allydunham/sequence_unet
[ "e0d3d6b73ad79c596130ed6e1a58b41a4ad7e299", "e0d3d6b73ad79c596130ed6e1a58b41a4ad7e299" ]
[ "models/classifier/regularisation.py", "models/classifier/activation.py" ]
[ "\"\"\"\nExperiment testing various regularisations on the Sequence UNET model\n\"\"\"\nimport os\nimport sys\n\nimport utils\nfrom tensorflow.keras import optimizers\n\nfrom proteinnetpy.data import ProteinNetDataset, ProteinNetMap\nfrom proteinnetpy.data import make_length_filter\n\nimport metrics\nimport pn_maps\nfrom seq_unet import sequence_unet\n\ndef load_data(validation=False):\n \"\"\"\n Input data for PSSM top model\n \"\"\"\n if validation:\n pn_path = 'data/proteinnet/casp12/validation'\n else:\n pn_path = 'data/proteinnet/casp12/training_95'\n\n filter_func = make_length_filter(min_length=32, max_length=2000)\n data = ProteinNetDataset(path=pn_path, preload=False, filter_func=filter_func)\n func = pn_maps.SequenceUNETMapFunction(num_layers=6, threshold=0.01)\n return ProteinNetMap(data, func=func, static=True, filter_errors=True)\n\ndef main():\n \"\"\"Main script\"\"\"\n root = 'models/classifier/regularisation'\n if not os.path.isdir(root):\n os.mkdir(root)\n\n # dropout, kernel, batch\n regularisation = (\n (0, None, False),\n (0.05, None, False),\n (0.1, None, False),\n (0, \"l2\", False),\n (0, None, True),\n (0.05, \"l2\", False),\n (0.05, None, True),\n (0.05, \"l2\", True),\n (0, \"l2\", True),\n )\n\n for dropout, kernel, batch in regularisation:\n model_dir = f\"{root}/d{dropout}_{kernel}_{batch}\"\n\n if os.path.isdir(model_dir):\n print(f\"Model {model_dir} already exists, skipping\", file=sys.stderr)\n continue\n\n model = sequence_unet(filters=32, kernel_size=5, num_layers=6, dropout=dropout,\n kernel_regulariser=kernel, batch_normalisation=batch)\n\n optimiser = optimizers.Adam(lr=0.01, epsilon=0.01)\n loss = metrics.masked_binary_crossentropy\n acc = metrics.masked_accuracy\n model.compile(optimizer=optimiser, loss=loss, metrics=[acc])\n\n # Create sample train script\n command = utils.model_bsub(f\"reg_d{dropout}_{kernel}_{batch}\", model_dir,\n ram=10000, epochs=150, validation_epochs=1,\n checkpoint=None, big_job=True, save_format='tf')\n\n # Use this to setup a model directory for the experiment(s)\n utils.make_experiment_dir(model, model_dir, load_data, command, save_format='tf')\n\nif __name__ == \"__main__\":\n # No argparse as these scripts serve as the config for experiments\n main()\n", "\"\"\"\nExperiment testing various activation functions on the Sequence UNET model\n\"\"\"\nimport os\nimport sys\n\nimport utils\nfrom tensorflow.keras import optimizers\n\nfrom proteinnetpy.data import ProteinNetDataset, ProteinNetMap\nfrom proteinnetpy.data import make_length_filter\n\nimport metrics\nimport pn_maps\nfrom seq_unet import sequence_unet\n\ndef load_data(validation=False):\n \"\"\"\n Input data for PSSM top model\n \"\"\"\n if validation:\n pn_path = 'data/proteinnet/casp12/validation'\n else:\n pn_path = 'data/proteinnet/casp12/training_95'\n\n filter_func = make_length_filter(min_length=32, max_length=2000)\n data = ProteinNetDataset(path=pn_path, preload=False, filter_func=filter_func)\n func = pn_maps.SequenceUNETMapFunction(num_layers=6, threshold=0.01)\n return ProteinNetMap(data, func=func, static=True, filter_errors=True)\n\ndef main():\n \"\"\"Main script\"\"\"\n root = 'models/classifier/activation'\n if not os.path.isdir(root):\n os.mkdir(root)\n\n activations = [\n \"relu\",\n \"elu\",\n \"tanh\",\n \"hard_sigmoid\",\n \"swish\"\n ]\n\n for activation in activations:\n model_dir = f\"{root}/{activation}\"\n\n if os.path.isdir(model_dir):\n print(f\"Model {model_dir} already exists, skipping\", file=sys.stderr)\n continue\n\n model = 
sequence_unet(filters=32, kernel_size=5, num_layers=6, conv_activation=activation,\n batch_normalisation=True, dropout=0.05)\n\n optimiser = optimizers.Adam(lr=0.01, epsilon=0.01)\n loss = metrics.masked_binary_crossentropy\n acc = metrics.masked_accuracy\n model.compile(optimizer=optimiser, loss=loss, metrics=[acc])\n\n # Create sample train script\n command = utils.model_bsub(f\"{activation}\", model_dir,\n ram=10000, epochs=150, validation_epochs=1,\n checkpoint=None, big_job=True, save_format='tf')\n\n # Use this to setup a model directory for the experiment(s)\n utils.make_experiment_dir(model, model_dir, load_data, command, save_format='tf')\n\nif __name__ == \"__main__\":\n # No argparse as these scripts serve as the config for experiments\n main()\n" ]
[ [ "tensorflow.keras.optimizers.Adam" ], [ "tensorflow.keras.optimizers.Adam" ] ]
eragasa/pypospack
[ "21cdecaf3b05c87acc532d992be2c04d85bfbc22", "21cdecaf3b05c87acc532d992be2c04d85bfbc22", "21cdecaf3b05c87acc532d992be2c04d85bfbc22", "21cdecaf3b05c87acc532d992be2c04d85bfbc22" ]
[ "tests/pyposmat/visualization/Pyposmat3DScatterWithProjections/dev__contours.py", "tests/pyposmat/visualization/Pyposmat3DScatterWithProjections/make_2d_scatter_plot.py", "pypospack/pyposmat/engines/mc_sampler_iterate.py", "pypospack/pyposmat/data/pca_analysis.py" ]
[ "from mpl_toolkits.mplot3d import axes3d\nimport matplotlib.pyplot as plt,numpy as np\nplt.clf()\nfig = plt.figure(1)\nax = fig.gca(projection='3d')\nX, Y, Z = axes3d.get_test_data(0.05)\nax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3)\ncset = ax.contourf(X, Y, Z, zdir='z', offset=-100,\n levels=np.linspace(-100,100,1200),cmap=plt.cm.jet)\ncset = ax.contourf(X, Y, Z, zdir='x', offset=-40, cmap=plt.cm.jet)\ncset = ax.contourf(X, Y, Z, zdir='y', offset=40, cmap=plt.cm.jet)\nax.set_xlabel('X')\nax.set_xlim(-40, 40)\nax.set_ylabel('Y')\nax.set_ylim(-40, 40)\nax.set_zlabel('Z')\nax.set_zlim(-100, 100) \n\nfig.savefig('withcontours.eps')\n\n", "import copy,os\nfrom collections import OrderedDict\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d.axes3d import Axes3D\nfrom matplotlib import cm\n\nfrom pypospack.pyposmat.data import PyposmatDataFile\nfrom pypospack.pyposmat.data import PyposmatConfigurationFile\nfrom pypospack.pyposmat.visualization import PyposmatDatafileVisualization\n\nclass Pyposmat2DScatterPlot(PyposmatDatafileVisualization):\n\n def __init__(self):\n PyposmatDatafileVisualization.__init__(self)\n\n self.is_pareto = False\n\n def plot(self,x_name,y_name,filename=None):\n fig = plt.figure(figsize=plt.figaspect(1))\n ax= fig.add_subplot(1,1,1)\n\n x = self.df[x_name]\n y = self.df[y_name]\n\n ax.scatter(x,y)\n\n self.set_labels(ax,x_name,y_name)\n\n self.set_axes_limits(ax)\n\n if filename is None:\n plt.show()\n else:\n fig.savefig(filename)\n\n def set_axes_limits(self,ax):\n\n if self.is_pareto:\n ax.set_xlim(left=0)\n ax.set_ylim(left=0)\n\n def get_latex_label(self,name):\n return name\n\n def set_labels(self,ax,x_name,y_name):\n x_latex_label = self.get_latex_label(x_name)\n y_latex_label = self.get_latex_label(y_name)\n\n ax.set_xlabel(x_name)\n ax.set_ylabel(y_name)\n\n\nif __name__ == \"__main__\":\n fn_config = \"pyposmat.config.in\"\n fn_results = \"pyposmat.kde.out\"\n myplot = Pyposmat2DScatterPlot()\n myplot.read_datafile(filename=fn_results)\n myplot.read_configuration(filename=fn_config)\n myplot.is_pareto = True\n myplot.plot(\n x_name='Ni_fcc.c11.abserr',\n y_name='Ni_fcc.c12.abserr'\n )\n", "\"\"\"\n\nVERSION HISTORY:\n 2017 - EJR, original release\n 08/2018 = RSU, EJR, added cluster sampling functionality and logging\n\nTODO:\n 08/2018 - the pyposmat.results.*.out needs to have the sim_ids cleaned up so we can track information regarding rank_id, cluster_id, and iteration_id. This is at approximately line 442. 
EJR&RSU\n\"\"\"\n\nimport os,shutil,sys\nimport numpy as np\nfrom mpi4py import MPI\nimport pandas as pd\nfrom collections import OrderedDict\n\n# --- pyposmat data format imports\nfrom pypospack.pyposmat.data import PyposmatConfigurationFile\nfrom pypospack.pyposmat.data import PyposmatDataFile\nfrom pypospack.pyposmat.data import PyposmatBadParametersFile\nfrom pypospack.pyposmat.data import PyposmatLogFile\n\n# --- pyposmat analysis algorithms\nfrom pypospack.pyposmat.data import PyposmatDataAnalyzer\n\n# --- pyposmat data samplers\nfrom pypospack.pyposmat.engines import PyposmatMonteCarloSampler\nfrom pypospack.pyposmat.engines import PyposmatFileSampler\nfrom pypospack.pyposmat.engines import PyposmatClusterSampler\n\n# --- pyposmat error classes\nfrom pypospack.exceptions import PyposmatSamplingTypeError\n\nclass PyposmatIterativeSampler(object):\n \"\"\" Iterative Sampler which wraps multiple simulation algorithms.\n\n This class wraps multiple simulation algorithms so that they can be run in an iterative manner.\n Since this class has so many configuration options, the attributes of this class is set\n by a YAML based configuration file. The class PyposmatConfigurationFile aids in the creation\n and reading of these options. These attributes are public and be set programmatically within\n a script.\n\n Notes:\n config_fn = 'data/pyposmat.config.in'\n\n engine = PyposmatIterativeSampler(configuration_filename=config_fn)\n engine.read_configuration_file()\n engine.run_all()\n\n Args:\n configuration_filename(str): the filename of the YAML configuration file\n is_restart(bool,optional): When set to True, this argument controls the restart behavior\n of this class. By default, is set to False\n is_auto(bool,optional): When set to True, this agument will automatically configure the\n class. 
By default this is set to False, mostly because this software is currently in\n development, and this necessary to to write integration testing\n log_fn(str,optional): This the filename path where to set logging, by default it is set\n as `pyposmat.log` contained in the configurable data directory\n log_to_stdout(bool,optional): When set to True, all log messages will be directed to\n standard out as well as the log file\n\n Attributes:\n mpi_comm(MPI.Intracomm)\n mpi_rank(int)\n mpi_size(int)\n mpi_nprocs(int)\n i_iteration(int)\n n_iterations(int)\n rv_seed(int)\n rv_seeds(np.ndarray)\n configuration_filename = configuration(filename)\n configuration(PyposmatConfigurationFile)\n mc_sampler(PyposmatMonteCarloSampler)\n root_directory(str)\n data_directory(str)\n is_restart(bool)\n start_iteration=0\n \n \"\"\"\n\n parameter_sampling_types = ['parametric','kde','from_file','kde_w_clusters']\n\n def __init__(self,\n configuration_filename,\n is_restart=False,\n is_auto=False,\n log_fn=None,\n log_to_stdout=True):\n\n # formats should not contain a trailing end line chracter\n self.SECTION_HEADER_FORMAT = \"\\n\".join([80*'=',\"{:^80}\",80*\"=\"])\n self.RANK_DIR_FORMAT = 'rank_{}'\n\n self.mpi_comm = None\n self.mpi_rank = None\n self.mpi_size = None\n self.mpi_nprocs = None\n self.i_iteration = None\n self.rv_seed = None\n self.rv_seeds = None\n\n self.configuration_filename = configuration_filename\n self.configuration = None\n self.mc_sampler = None\n\n self.root_directory = os.getcwd()\n self.data_directory = 'data'\n self.is_restart = is_restart\n self.start_iteration = 0\n\n self.log_fn = log_fn\n self.log_to_stdout = log_to_stdout\n self.o_log = None\n self.initialize_logger(log_fn=log_fn,log_to_stdout=log_to_stdout)\n\n if self.is_restart:\n self.delete_mpi_rank_directories()\n\n @property\n def structure_directory(self):\n if self.configuration is None:\n return None\n else:\n d = self.configuration.structures['structure_directory']\n\n if not os.path.isabs(d):\n d = os.path.join(self.root_directory,d)\n\n return d\n\n @property\n def n_iterations(self):\n if self.configuration is None:\n return None\n else:\n return self.configuration.n_iterations\n\n @property\n def qoi_names(self):\n if self.configuration is None:\n return None\n else:\n return self.configuration.qoi_names\n\n @property\n def error_names(self):\n if self.configuration is None:\n return None\n else:\n return self.configuration.error_names\n\n def delete_mpi_rank_directories(self):\n if self.mpi_rank == 0:\n self.log('Deleting previous rank directories')\n mpi_rank_directories = [d for d in os.listdir(self.root_directory) if d.startswith('rank_')]\n for d in mpi_rank_directories:\n try:\n shutil.rmtree(os.path.join(self.root_directory,d))\n except:\n raise\n MPI.COMM_WORLD.Barrier()\n\n def determine_last_iteration_completed(self):\n\n for i in range(self.n_iterations):\n results_fn = os.path.join(self.data_directory,'pyposmat.results.{}.out'.format(i))\n kde_fn = os.path.join(self.data_directory,'pyposmat.kde.{}.out'.format(i+1))\n\n if os.path.isfile(results_fn) and os.path.isfile(kde_fn):\n if self.mpi_rank == 0:\n self.log('iteration {}: is complete'.format(i))\n self.start_iteration = i+1\n else:\n self.start_iteration = i\n break\n\n MPI.COMM_WORLD.Barrier()\n return self.start_iteration\n\n def run_all(self):\n \"\"\"runs all iterations\n\n This method runs all iterations\n\n \"\"\"\n self.setup_mpi_environment()\n\n self.initialize_data_directory()\n\n self.start_iteration = 0\n\n if self.is_restart:\n 
self.determine_last_iteration_completed()\n\n if self.mpi_rank == 0:\n self.log(\"starting at simulation: {}\".format(self.start_iteration))\n MPI.COMM_WORLD.Barrier()\n\n for i in range(self.start_iteration,self.n_iterations):\n self.i_iteration = i\n\n # log iteration information\n self.log_iteration_information(i_iteration=i)\n\n self.run_simulations(i)\n MPI.COMM_WORLD.Barrier()\n\n\n if self.mpi_rank == 0:\n self.log(\"ALL SIMULATIONS COMPLETE FOR ALL RANKS\")\n self.log(\"MERGING FILES\")\n self.merge_data_files(i)\n self.merge_error_files(i)\n MPI.COMM_WORLD.Barrier()\n\n if self.mpi_rank == 0:\n self.log(\"ANALYZE RESULTS\")\n self.analyze_results(i)\n MPI.COMM_WORLD.Barrier()\n\n if self.mpi_rank == 0:\n self.log(80*'-')\n self.log('JOBCOMPLETE')\n\n def initialize_sampler(self,config_fn,results_fn,mpi_rank=None,mpi_size=None,o_log=None):\n \"\"\" initialize the sampling object \n\n This method initializes the `mc_sampler` attribute with a sampler.\n\n Note:\n This breakout is part of a larger effort within PYPOSPACK, to have \n more object-oriented approach for parametric sampling. The goal \n eventually is to implement an instance of PyposmatBaseSampler, and \n allow users of this software library to be able to extend this \n software by simply extending the base class.\n Args:\n config_fn(str): path to the configuration file\n results_fn(str): path to the results file\n mpi_rank(int,optional): the MPI rank of executing this method\n mpi_size(int,optional): the size of the MPI execution group\n o_log(PyposmatLogFile,str,optional): the log file. If a string is \n passed, then the sampling class will initialize a separate log \n file with the string of path created. If a log file object is \n passed, then sampling object will use that instance of the \n object to log information. By defaut, it will pass the \n attribute, `o_log`.\n \"\"\"\n \n assert type(config_fn) is str\n assert type(results_fn) is str\n assert type(mpi_rank) in [type(None),int]\n assert type(mpi_size) in [type(None),int]\n assert type(o_log) in [type(None),PyposmatLogFile,str]\n\n # check to see if the paths provided are absolute paths\n assert os.path.isabs(config_fn)\n assert os.path.isabs(results_fn)\n\n if mpi_rank is None: mpi_rank = self.mpi_rank\n if mpi_size is None: mpi_size = self.mpi_size\n\n self.mc_sampler = PyposmatMonteCarloSampler(\n filename_in = config_fn,\n filename_out = results_fn,\n mpi_rank = mpi_rank,\n mpi_size = mpi_size,\n o_log=o_log)\n self.mc_sampler.create_base_directories()\n self.mc_sampler.read_configuration_file()\n # we have to be able to find the structure directory\n self.mc_sampler.configuration.structures['structure_directory'] = self.structure_directory\n self.mc_sampler.configure_qoi_manager()\n self.mc_sampler.configure_task_manager()\n self.mc_sampler.configure_pyposmat_datafile_out()\n self.mc_sampler.configure_pyposmat_badparameters_file() \n\n self.log_more_iteration_information()\n\n def initialize_file_sampler(self,\n config_fn,\n results_fn,\n i_iteration=0,\n mpi_rank=None,\n mpi_size=None,\n o_log=None):\n \"\"\" initialize the sampling object \n\n This method initializes the `mc_sampler` attribute with a sampler.\n\n Note:\n This breakout is part of a larger effort within PYPOSPACK, to have \n more object-oriented approach for parametric sampling. 
The goal \n eventually is to implement an instance of PyposmatBaseSampler, and \n allow users of this software library to be able to extend this \n software by simply extending the base class.\n Args:\n config_fn(str): path to the configuration file\n results_fn(str): path to the results file\n i_iteration(int,optional): the iteration to sample the file from,\n by default this is set to zero.\n mpi_rank(int,optional): the MPI rank of executing this method\n mpi_size(int,optional): the size of the MPI execution group\n o_log(PyposmatLogFile,str,optional): the log file. If a string is \n passed, then the sampling class will initialize a separate log \n file with the string of path created. If a log file object is \n passed, then sampling object will use that instance of the \n object to log information. By defaut, it will pass the \n attribute, `o_log`.\n \"\"\"\n\n assert type(config_fn) is str\n assert type(results_fn) is str\n assert type(mpi_rank) in [type(None),int]\n assert type(mpi_size) in [type(None),int]\n assert type(o_log) in [type(None),PyposmatLogFile,str]\n\n # check to see if the paths provided are absolute paths\n assert os.path.isabs(config_fn)\n assert os.path.isabs(results_fn)\n\n if mpi_rank is None: mpi_rank = self.mpi_rank\n if mpi_size is None: mpi_size = self.mpi_size\n\n # get the absolute path of the datafile we are sampling from\n data_in_fn = None\n if os.path.isabs(self.configuration.sampling_type[i_iteration]['file']):\n data_in_fn = self.configuration.sampling_type[i_iteration]['file']\n else:\n data_in_fn = os.path.join(\n self.root_directory,\n self.configuration.sampling_type[i_iteration]['file']\n )\n\n \n data_out_fn = results_fn\n\n self.mc_sampler = PyposmatFileSampler(\n config_fn = config_fn,\n data_in_fn = data_in_fn,\n data_out_fn = data_out_fn,\n mpi_rank = mpi_rank,\n mpi_size = mpi_size,\n o_log=o_log,\n fullauto=False)\n\n self.mc_sampler.create_base_directories()\n self.mc_sampler.read_configuration_file()\n \n # we have to be able to find the structure directory\n self.mc_sampler.configuration.structures['structure_directory'] = self.structure_directory\n self.mc_sampler.configure_qoi_manager()\n self.mc_sampler.configure_task_manager()\n self.mc_sampler.configure_datafile_out()\n self.mc_sampler.configure_pyposmat_badparameters_file() \n\n self.log_more_iteration_information()\n\n def initialize_rank_directory(self):\n \"\"\" create the rank directory\n\n This method defines the rank directory as an absolute path and stores it in\n the attribute `rank_directory`. 
If a current directory exists there, then\n it is deleted with alll it's contents and then recreated.\n\n \"\"\"\n rank_directory = os.path.join(self.root_directory,self.RANK_DIR_FORMAT.format(self.mpi_rank))\n \n # find the directory, delete it and it's constants and then recreates ot\n if os.path.isdir(rank_directory):\n shutil.rmtree(rank_directory)\n os.mkdir(rank_directory)\n\n self.rank_directory = rank_directory\n\n def run_simulations(self,i_iteration):\n \"\"\" run simulation for a single iteration\n\n Each rank is given a different execution context so that the disk IO \n don't conflict\n \"\"\"\n self.initialize_rank_directory()\n config_filename = self.configuration_filename\n results_filename = os.path.join(self.rank_directory,'pyposmat.results.out')\n bad_parameters_filename = os.path.join(self.rank_directory,'pyposmat.badparameters.out')\n\n # change execution context for this rank\n os.chdir(self.rank_directory)\n\n # set random seed\n self.determine_rv_seeds()\n self.log_random_seeds(i_iteration=i_iteration)\n\n sampling_type = self.configuration.sampling_type[i_iteration]['type']\n if self.mpi_rank == 0:\n self.log(\"sampling_type={}\".format(sampling_type))\n MPI.COMM_WORLD.Barrier()\n\n # <----- parameter sampling type ---------------------------------------\n if sampling_type== 'parametric':\n self.initialize_sampler(\n config_fn=config_filename,\n results_fn=results_filename,\n mpi_rank=self.mpi_rank,\n mpi_size=self.mpi_size,\n o_log=self.o_log)\n\n self.run_parametric_sampling(i_iteration=i_iteration)\n \n # <----- kde sampling sampling type ---------------------------------------\n elif sampling_type == 'kde':\n self.initialize_sampler(\n config_fn=config_filename,\n results_fn=results_filename,\n mpi_rank=self.mpi_rank,\n mpi_size=self.mpi_size,\n o_log=self.o_log)\n \n self.run_kde_sampling(i_iteration=i_iteration)\n\n # <----- sampling from a file type ---------------------------------------\n # get parameters from file\n elif sampling_type == 'from_file':\n\n self.initialize_file_sampler(\n config_fn=config_filename,\n results_fn=results_filename,\n mpi_rank=self.mpi_rank,\n mpi_size=self.mpi_size,\n o_log=self.o_log)\n \n self.run_file_sampling(i_iteration=i_iteration)\n\n # <----- kde with clusters sampling type ---------------------------------------\n elif sampling_type == 'kde_w_clusters':\n cluster_fn = \"pyposmat.cluster.{}.out\".format(i_iteration)\n pyposmat_datafile_in = os.path.join(\n self.root_directory,\n self.data_directory,\n cluster_fn\n )\n \n _config_filename = os.path.join(\n self.root_directory,\n self.configuration_filename)\n\n # determine number of sims for this rank\n _mc_n_samples = _mc_config['n_samples_per_cluster']\n _n_samples_per_rank = int(_mc_n_samples / self.mpi_size)\n if _mc_n_samples % self.mpi_size > self.mpi_rank:\n _n_samples_per_rank += 1\n\n # initialize sampling object\n o = PyposmatClusterSampler(o_logger=self.log,\n mpi_rank=self.mpi_rank,\n\t\t\t\t mpi_comm=self.mpi_comm,\n\t\t\t\t mpi_size=self.mpi_size)\n o.create_base_directories()\n o.read_configuration_file(filename=_config_filename)\n # check to see if clustered data file exists\n if self.mpi_rank == 0:\n if not os.path.isfile(pyposmat_datafile_in):\n kde_fn = \"pyposmat.kde.{}.out\".format(i_iteration)\n kde_fn = os.path.join(\n self.root_directory,\n self.data_directory,\n kde_fn\n )\n o.write_cluster_file(filename=kde_fn, i_iteration=i_iteration)\n MPI.COMM_WORLD.Barrier()\n\n o.configure_pyposmat_datafile_in(filename=pyposmat_datafile_in)\n # fix relative 
path to structure databae folder\n _structure_dir = o.configuration.structures['structure_directory']\n o.configuration.structures['structure_directory'] = \\\n os.path.join('..',_structure_dir)\n # finish the rest of the initialization\n o.configure_qoi_manager()\n o.configure_task_manager()\n o.configure_pyposmat_datafile_out()\n MPI.COMM_WORLD.Barrier()\n\n # run simulations\n o.run_simulations(i_iteration=i_iteration,\n n_samples=_mc_n_samples,\n filename=pyposmat_datafile_in)\n MPI.COMM_WORLD.Barrier()\n else:\n error_dict = OrderedDict([\n ('i_iteration',i_iteration),\n ('sampling_type',sampling_type)]\n )\n m = \"unknown parameter sampling type: {}\".format(sampling_type)\n m += \"the valid sampling types are: {}\".format(\",\".join(self.parameter_sampling_types))\n raise PyposmatSamplingTypeError(m,error_dict)\n \n # return to root directory\n os.chdir(self.root_directory)\n\n def initialize_data_directory(self,data_directory=None):\n \"\"\" determine the absolute path of the data directory and create it\n\n This method sets the `data_directory` attribute of the class and creates\n the `data directory` if the data directory already exists.\n\n Args:\n data_directory(str):the path of the data directory, the path can be \n expressed in either a relative path, or an absolute path\n Returns:\n (str) the absolute path of the data directory\n Raises:\n OSError: if the directory is not able to be created\n \n \"\"\"\n\n assert type(data_directory) in [type(None),str]\n assert type(self.data_directory) in [type(None),str]\n\n # determine the data directory path\n if data_directory is None:\n if self.data_directory is None:\n self.data_directory = os.path.join(self.root_directory,'data')\n else:\n if os.path.isabs(self.data_directory):\n self.data_directory = data_directory\n else:\n self.data_directory = os.path.join(self.root_directory,self.data_directory)\n elif os.path.isabs(data_directory):\n # absolute path\n self.data_directory = data_directory\n else:\n # create a absolute path from the relative path\n self.data_directory = os.path.join(self.root_directory,data_directory)\n self.data_directory = os.path.abspath(self.data_directory)\n\n # create data directory\n if self.mpi_rank == 0:\n try:\n os.mkdir(self.data_directory)\n self.log('created the data directory.')\n self.log('\\tdata_directory;{}'.format(self.data_directory))\n except FileExistsError as e:\n self.log('attempted to create data directory, directory already exists.')\n self.log('\\tdata_directory:{}'.format(self.data_directory))\n except OSError as e:\n self.log('attempted to create data directory, cannot create directory.')\n self.log('\\tdata_directory:{}'.format(self.data_directory))\n MPI.COMM_WORLD.Barrier()\n\n def run_parametric_sampling(self,i_iteration):\n \"\"\" run parametric sampling \n\n Args:\n i_iteration(int): what iteration of the sampling is happening\n \"\"\"\n\n assert type(i_iteration) is int\n assert type(self.mc_sampler) is PyposmatMonteCarloSampler\n\n self.mc_sampler.run_simulations(\n i_iteration=i_iteration,\n n_samples=self.determine_number_of_samples_per_rank(i_iteration=i_iteration))\n\n def run_kde_sampling(self,i_iteration):\n \"\"\" run kde sampling\n\n Args:\n i_iteration(int): what iteration of the sampling is happening\n \"\"\"\n is_debug = False\n\n assert type(i_iteration) is int\n assert type(self.mc_sampler) is PyposmatMonteCarloSampler\n\n \n kde_filename = os.path.join(\n self.data_directory,\n 'pyposmat.kde.{}.out'.format(i_iteration)\n )\n n_samples_per_rank = 
self.determine_number_of_samples_per_rank(i_iteration=i_iteration)\n\n if is_debug:\n print('cwd:{}'.format(os.getcwd()))\n print('mpi_rank:{},kde_filename:{}'.format(self.mpi_rank,kde_filename))\n print('n_samples_per_rank:{}'.format(n_samples_per_rank))\n\n self.mc_sampler.run_simulations(\n i_iteration=i_iteration,\n n_samples=n_samples_per_rank,\n filename=kde_filename\n )\n\n def run_file_sampling(self,i_iteration):\n \"\"\" run file sampling\n\n Args:\n i_iteration(int): the iteration which to sampling for\n \"\"\"\n assert type(i_iteration) is int\n assert type(self.mc_sampler) is PyposmatFileSampler\n\n\n if 'file' in self.configuration.sampling_type[i_iteration]:\n filename = os.path.join(self.root_directory,\n self.configuration.sampling_type[i_iteration]['file'])\n else:\n if os.path.isabs(self.data_directory):\n filename = os.path.join(self.data_directory,\n 'pyposmat.kde.{}.out'.format(i_iteration))\n else:\n filename = os.path,join(self.root_directory,\n self.data_directory,\n 'pyposmat.kde.{}.out'.format(i_iteration))\n\n if self.mpi_rank == 0:\n self.log(80*'-')\n self.log('{:^80}'.format('file sampling'))\n self.log(80*'-')\n self.log('filename_in:{}'.format(filename))\n MPI.COMM_WORLD.Barrier()\n\n self.mc_sampler.run_simulations(\n i_iteration=i_iteration,\n n_samples=self.determine_number_of_samples_per_rank(i_iteration=i_iteration),\n filename=filename)\n\n\n def determine_number_of_samples_per_rank(self,i_iteration,N_samples=None):\n \"\"\" determine the number of samples per rank\n\n The total number of samples needs to be broken up between the ranks, but roughly\n divided the work evenly.\n\n Args:\n i_iteration(int): which iteration we are in the simulation\n N_samples(int,optional): the total number of samples we are using for \n this iteration. 
If a number is provided, it will override \n the number of simulations specified in the configuration file.\n Returns:\n (int): the number of samples for this rank\n \"\"\"\n\n assert type(i_iteration) is int\n assert type(N_samples) in [type(None),int]\n assert type(self.configuration) is PyposmatConfigurationFile\n\n if N_samples is None:\n N_samples = self.configuration.sampling_type[i_iteration]['n_samples']\n \n N_samples_per_rank = int(N_samples/self.mpi_size)\n if N_samples%self.mpi_size > self.mpi_rank:\n N_samples_per_rank += 1\n\n return N_samples_per_rank\n\n def initialize_logger(self,log_fn=None,log_to_stdout=None):\n \"\"\"initialize log object\n \n Args:\n log_fn(str,optional)\n\n \"\"\"\n\n assert type(log_fn) in [type(None),str]\n assert type(log_to_stdout) in [type(None),bool]\n\n if log_fn is None:\n self.log_fn = os.path.join(self.root_directory, self.data_directory, 'pyposmat.log')\n else:\n self.log_fn = log_fn\n self.o_log = PyposmatLogFile(filename=self.log_fn)\n \n self.log_to_stdout = log_to_stdout\n\n def setup_mpi_environment(self):\n self.mpi_comm = MPI.COMM_WORLD\n self.mpi_rank = self.mpi_comm.Get_rank()\n self.mpi_size = self.mpi_comm.Get_size()\n self.mpi_procname = MPI.Get_processor_name()\n self.log_mpi_environment()\n \n # random seed management\n def determine_rv_seeds(self,seed=None,i_iteration=None):\n \"\"\" set the random variable seed across simulations \n \n Args:\n seed(int,optional)=a seed to determine the rest of the seeds for\n different ranks and iterations.\n \"\"\"\n RAND_INT_LOW = 0\n RAND_INT_HIGH = 2147483647\n\n assert type(seed) in [type(None),int]\n assert type(i_iteration) in [type(None),int]\n\n if type(i_iteration) is type(None):\n i_iteration = self.i_iteration\n\n # set the seed attribute\n if type(seed) is int:\n self.rv_seed == seed\n\n \n # set the seed attribute, if the seed attribute is none\n if self.rv_seed is None:\n self.rv_seed = np.random.randint(\n low=RAND_INT_LOW,\n high=RAND_INT_HIGH)\n \n # if the rv_seed was determined in the script, then all ranks will\n # have the same rv_seed attribute\n np.random.seed(self.rv_seed)\n\n # each rank, will need it's own seed. So we sample from the freshly\n # generated random number generator, which is identical across ranks\n self.rv_seeds = np.random.randint(\n low=0,\n high=2147483647,\n size=(int(self.mpi_size),self.n_iterations)\n )\n\n # now restart the seed for this rank\n np.random.seed(self.rv_seeds[self.mpi_rank,i_iteration])\n\n # logging methods \n def log(self,s):\n if self.log_to_stdout: \n print(s)\n if self.o_log is not None: \n self.o_log.write(s)\n\n def log_iteration_information(self,i_iteration):\n \"\"\"log iteration information\n \n Args:\n i_iteration_id(int):the iteration number\n Returns:\n (str) the log string\n \"\"\"\n if self.mpi_rank == 0:\n s = self.SECTION_HEADER_FORMAT.format(\n 'Begin Iteration {}/{}'.format(i_iteration+1,self.n_iterations))\n self.log(s)\n MPI.COMM_WORLD.Barrier()\n \n #if self.mpi_rank == 0:\n # return \"\\n\".join(s)\n \n def log_more_iteration_information(self): \n #TODO: this logging needs to go into a separate logging method. 
-EJR\n if self.mpi_rank == 0:\n self.mc_sampler.print_structure_database()\n self.mc_sampler.print_sampling_configuration()\n if self.mpi_rank == 0 and self.i_iteration == 0:\n self.mc_sampler.print_initial_parameter_distribution()\n if self.mpi_rank == 0:\n self.log(80*'-')\n MPI.COMM_WORLD.Barrier()\n\n def log_mpi_environment(self):\n if self.mpi_rank == 0:\n m = [self.SECTION_HEADER_FORMAT.format('MPI communication information')]\n \n m += ['mpi_size={}'.format(self.mpi_size)]\n\n MPI.COMM_WORLD.Barrier()\n\n def log_random_seeds(self,i_iteration):\n if self.mpi_rank == 0:\n self.log(80*'-')\n self.log('{:^80}'.format('GENERATED RANDOM SEEDS'))\n self.log(80*'-')\n self.log('global_seed:{}'.format(str(self.rv_seed)))\n self.log('seeds_for_this_iteration:')\n self.log('{:^8} {:^8}'.format('rank','seed'))\n self.log('{} {}'.format(8*'-',8*'-'))\n MPI.COMM_WORLD.Barrier()\n for i_rank in range(self.mpi_size):\n if self.mpi_rank == i_rank:\n self.log('{:^8} {:>10}'.format(i_rank,self.rv_seeds[i_rank,i_iteration]))\n MPI.COMM_WORLD.Barrier()\n\n def get_results_dict(self):\n rd = OrderedDict()\n rd['mpi'] = OrderedDict()\n rd['mpi']['size'] = self.mpi_size\n\n\n def analyze_data_directories(self,data_dir=None):\n _d = data_dir\n i = 0\n contents = []\n if not os.path.exists(_d): return i, contents\n if not os.path.isdir(_d): return i, contents\n\n while True:\n kde_fn = os.path.join(_d,\"pyposmat.kde.{}.out\".format(i))\n if os.path.exists(kde_fn):\n contents.append(kde_fn)\n else:\n if i > 0:\n contents.append(results_fn)\n break\n\n results_fn = os.path.join(_d,\"pyposmat.results.{}.out\".format(i))\n if os.path.exists(results_fn): pass\n else:break\n i = i + 1\n\n return i, contents\n\n def analyze_rank_directories(self,root_dir=None):\n i = 0\n contents = []\n\n if root_dir is None:\n _d = self.root_directory\n else:\n _d = root_directory\n\n while True:\n rank_dir = os.path.join(_d,\"rank_{}\".format(i))\n if not os.path.exists(rank_dir): \n break\n if not os.path.isdir(rank_dir): \n break\n\n rank_fn = os.path.join(\"rank_{}\".format(i),\"pyposmat.results.out\")\n if not os.path.exists(os.path.join(_d,rank_fn)):\n break\n if not os.path.isfile(os.path.join(_d,rank_fn)):\n break\n else:\n contents.append(rank_fn)\n i = i + 1\n return i, contents\n\n def find_initial_parameters_file(self):\n if 'file' in self.configuration.sampling_type[0]:\n _init_fn =os.path.join(\n self.root_directory,\n self.configuration.sampling_type[0]['file']\n )\n if os.path.exists(_init_fn):\n if os.path.isfile(_init_fn):\n return _init_fn\n else:\n return None\n \n def merge_data_files(self,i_iteration,last_datafile_fn=None,new_datafile_fn=None):\n \"\"\" merge the pyposmat data files\n\n Args:\n i_iteration(int): the current iteration which just finished\n last_datafile_fn(str,optional): the filename of the last dataset in the data directory.\n new_datafile_fn(str,optional): where to output the file results \n \"\"\"\n\n if last_datafile_fn is None:\n last_datafile_fn = os.path.join(self.data_directory,\n 'pyposmat.kde.{}.out'.format(i_iteration))\n\n if new_datafile_fn is None:\n new_datafile_fn = os.path.join(self.data_directory,\n 'pyposmat.results.{}.out'.format(i_iteration))\n\n data_dir = self.data_directory\n rank_dirs = [v for v in os.listdir(self.root_directory) if v.startswith('rank_')]\n filenames = [os.path.join(self.root_directory,v,'pyposmat.results.out') for v in rank_dirs]\n\n data = None\n for i,v in enumerate(filenames):\n data_new = None\n if i == 0:\n data = PyposmatDataFile()\n 
data.read(filename=v)\n else:\n data_new = PyposmatDataFile()\n data_new.read(filename=v)\n\n data.df = pd.concat([data.df,data_new.df])\n\n nrows = len(data.df)\n \n if self.configuration.sampling_type[i_iteration]['type'] == 'from_file':\n pass\n else:\n sim_id_fmt = '{:0>2}_{:0>6}'\n sim_id_str = [sim_id_fmt.format(i_iteration,i) for i in range(nrows)]\n data.df['sim_id'] = [sim_id_fmt.format(i_iteration,i) for i in range(nrows)]\n\n if self.configuration.sampling_type[i_iteration]['type'] == \"from_file\":\n data_new = PyposmatDataFile()\n data_new.read(filename=filenames[0])\n data_new.df = data.df\n data_new.write(filename=new_datafile_fn)\n else:\n self.log(\"merging with candidates from previous simulations\")\n self.log(\"\\tfilename:{}\".format(last_datafile_fn))\n data_old = PyposmatDataFile()\n try:\n data_old.read(filename=last_datafile_fn)\n data_old.df = pd.concat([data_old.df,data.df])\n data_old.write(filename=new_datafile_fn)\n except FileNotFoundError as e:\n if i_iteration == 0:\n data.write(filename=new_datafile_fn)\n else:\n raise\n\n def merge_error_files(self,i_iteration):\n \"\"\" merge the pyposmat data files\n\n Args:\n i_iteration(int): the current iteration which just finished\n last_datafile_fn(str,optional): the filename of the last dataset in the data directory.\n new_datafile_fn(str,optional): where to output the file results \n \"\"\"\n\n badparameters_fn = os.path.join(self.data_directory,'pyposmat.badparameters.out')\n\n data_dir = self.data_directory\n rank_dirs = [v for v in os.listdir(self.root_directory) if v.startswith('rank_')]\n filenames = [os.path.join(self.root_directory,v,'pyposmat.badparameters.out') for v in rank_dirs]\n\n # consolidate rank directories\n badparameters_new = None\n badparameters_next = None\n for i,v in enumerate(filenames):\n if badparameters_new is None:\n try:\n badparameters_new = PyposmatBadParametersFile(o_config=self.configuration)\n badparameters_new.read(filename=v)\n except FileNotFoundError as e:\n self.log(\"no bad parameters file at {}\".format(v))\n\n else:\n try:\n badparameters_next = PyposmatBadParametersFile(o_config=self.configuration)\n badparameters_next.read(filename=v)\n badparameters_new.df = pd.concat([badparameters_new.df,badparameters_next.df])\n except FileNotFoundError as e:\n self.log(\"no bad parameters file as {}\".format(v))\n\n # determine the sim_id for bad parameters of the sim_id\n if badparameters_new.df is None:\n # no previous bad paramters found\n # TODO: need to implement something here to deal with bad parameters\n pass\n\n else:\n nrows = len(badparameters_new.df)\n sim_id_fmt = '{:0>2}_{:0>6}'\n sim_id_str = [sim_id_fmt.format(i_iteration,i) for i in range(nrows)]\n badparameters_new.df['sim_id'] = sim_id_str\n\n if self.configuration.sampling_type[i_iteration]['type'] == \"from_file\":\n badparameters_new.write(filename=badparameters_fn)\n\n else:\n self.log(\"merging with bad candidates from previous simulations\")\n self.log(\"\\tfilename:{}\".format(badparameters_fn))\n badparameters = PyposmatBadParametersFile(o_config=self.configuration)\n\n try:\n badparameters.read(filename=badparameters_fn)\n badparameters.df = pd.concat([badparameters.df,badparameters_new.df])\n badparameters.write(filename=badparameters_fn)\n except FileNotFoundError as e:\n if i_iteration == 0:\n badparameters_new.write(filename=badparameters_fn)\n else:\n raise\n\n def analyze_results(self,\n i_iteration,\n data_fn=None,\n config_fn=None,\n kde_fn=None,\n analysis_fn=None):\n \"\"\" analyze the 
results of the simulation\n\n this method analyzes the results of the simulation, and does post simulation\n tasks, such as filtering by qoi performance, pareto optimization, etc.\n\n Args:\n data_fn(str): the path of the data file. By default this is set to none \n where the the file will be determine by i_iteration and internal \n attributes\n config_fn(str): the path of the data file. By default this is set to none \n where the the file will be determine by i_iteration and internal \n attributes\n kde_fn(str): the path of the data file. By default this is set to none \n where the the file will be determine by i_iteration and internal \n attributes\n \"\"\"\n\n if data_fn is None:\n data_fn = os.path.join(\\\n self.root_directory,\n self.data_directory,\n 'pyposmat.results.{}.out'.format(i_iteration))\n if config_fn is None:\n config_fn = os.path.join(\\\n self.root_directory,\n self.configuration_filename)\n if kde_fn is None:\n kde_fn = os.path.join(\\\n self.root_directory,\n self.data_directory,\n 'pyposmat.kde.{}.out'.format(i_iteration+1))\n if analysis_fn is None:\n analysis_fn = os.path.join(\n self.root_directory,\n self.data_directory,\n 'pyposmat.analysis.out')\n\n data_analyzer = PyposmatDataAnalyzer()\n data_analyzer.initialize_configuration(config_fn=config_fn)\n\n data_analyzer.analyze_results_data(i_iteration,filename=data_fn)\n\n assert isinstance(data_analyzer.results_statistics, OrderedDict)\n\n if os.path.isfile(analysis_fn):\n data_analyzer.read_analysis_file(filename=analysis_fn)\n\n self.log(data_analyzer.str__results_descriptive_statistics(\n statistics=data_analyzer.results_statistics\n )\n )\n self.log(\n data_analyzer.str__qoi_filtering_summary()\n )\n\n data_analyzer.write_kde_file(filename=kde_fn)\n data_analyzer.analyze_kde_data(i_iteration,filename=kde_fn)\n\n assert isinstance(data_analyzer.kde_statistics,OrderedDict)\n self.log(data_analyzer.str__kde_descriptive_statistics(\n statistics=data_analyzer.kde_statistics\n )\n )\n\n data_analyzer.update_analysis(i_iteration)\n data_analyzer.write_analysis_file(filename=analysis_fn)\n\n def read_configuration_file(self,filename=None):\n\n assert type(filename) in [type(None),str]\n assert type(self.configuration_filename) in [type(None),str]\n \n if filename is not None:\n self.configuration_filename = filename\n\n if not os.path.isabs(self.configuration_filename):\n self.configuration_filename = os.path.abspath(self.configuration_filename)\n\n self.configuration = PyposmatConfigurationFile()\n self.configuration.read(filename=self.configuration_filename)\n \n if self.mpi_rank == 0:\n self._write_parameter_names()\n self._write_qoi_names()\n self._write_error_names()\n\n def _write_parameter_names(self,parameter_names=None):\n if parameter_names is None: _parameter_names = self.parameter_names\n else: _parameter_names = parameter_names\n\n s = [80*'-']\n s += ['{:^80}'.format('PARAMETER_NAMES')]\n s += [80*'-']\n s += [p for p in _parameter_names]\n\n self.log(\"\\n\".join(s))\n \n def _write_qoi_names(self,qoi_names=None):\n if qoi_names is None: _qoi_names = self.qoi_names\n else: _qoi_names = qoi_names\n\n s = [80*'-']\n s += ['{:^80}'.format('QOI_NAMES')]\n s += [80*'-']\n s += [p for p in _qoi_names]\n\n self.log(\"\\n\".join(s))\n\n def _write_error_names(self,error_names = None):\n if error_names is None: _error_names = self.error_names\n else: _error_names = error_names\n\n s = [80*'-']\n s += ['{:^80}'.format('ERROR_NAMES')]\n s += [80*'-']\n s += [p for p in _error_names]\n\n 
self.log(\"\\n\".join(s))\n\n\n\n\nif __name__ == \"__main__\":\n import Ni__eam__morse_exp_universal as Ni_eam\n\n #------------------------------------------------------------------------------\n # WRITE CONFIGURATION FILE\n #------------------------------------------------------------------------------\n Ni_eam_configuration = PyposmatConfigurationFile()\n Ni_eam_configuration.qois = Ni_eam.Ni_qoi_db.qois\n Ni_eam_configuration.potential = Ni_eam.Ni_eam_potential_formalism\n Ni_eam_configuration.structures = Ni_eam.Ni_structure_db\n Ni_eam_configuration.sampling_type = Ni_eam.Ni_eam_sampling\n Ni_eam_configuration.sampling_distribution =Ni_eam.Ni_eam_parameter_distribution\n Ni_eam_configuration.write(filename='pypospack.config.in')\n Ni_eam_configuration.read(filename='pypospack.config.in')\n\n pypospack_filename_in = 'pypospack.config.in'\n pyposmat_app = PyposmatIterativeSampler(\n configuration_filename = pypospack_filename_in)\n pyposmat_app.read_configuration_file()\n #pyposmat_app.read_configuration_file(filename=pyposmat_configuration_filename)\n pyposmat_app.run_all()\n", "from collections import OrderedDict\nimport pandas as pd\nfrom sklearn.decomposition import PCA, FastICA, KernelPCA\nfrom sklearn.cross_decomposition import CCA\nfrom pypospack.pyposmat.data.pipeline import BasePipeSegment\n\n\nclass PyposmatPcaAnalysis(BasePipeSegment):\n def __init__(self):\n super().__init__()\n\n def transform_pca(self, abs_cols=None, cols=None, clusters=None, kwargs=None):\n # process arg: cols, clusters\n df = self.select_data(cols=cols, clusters=clusters)\n # process arg: abs_cols\n if abs_cols is not None:\n df = df[abs_cols]\n # process arg: kwargs\n kwargs = self.process_kwargs('pca', kwargs)\n o_pca = PCA(**kwargs)\n\n arr = o_pca.fit_transform(df)\n nrows, ncols = arr.shape\n pca_cols = [\"pca_{}\".format(i) for i in range(ncols)]\n self.pca_names = pca_cols\n pca_df = pd.DataFrame(data=arr, columns=pca_cols)\n self.df = pd.concat([self.df, pca_df], axis=1)\n\n def transform_ica(self, abs_cols=None, cols=None, clusters=None, kwargs=None):\n # process arg: cols, clusters\n df = self.select_data(cols=cols, clusters=clusters)\n # process arg: abs_cols\n if abs_cols is not None:\n df = df[abs_cols]\n # process arg: kwargs\n kwargs = self.process_kwargs('ica', kwargs)\n o_ica = FastICA(**kwargs)\n\n arr = o_ica.fit_transform(df)\n nrows, ncols = arr.shape\n ica_cols = [\"ica_{}\".format(i) for i in range(ncols)]\n self.pca_names = ica_cols\n ica_df = pd.DataFrame(data=arr, columns=ica_cols)\n self.df = pd.concat([self.df, ica_df], axis=1)\n\n def transform_cca(self, abs_cols=None, cols=None, clusters=None, kwargs=None):\n # process arg: cols, clusters\n df = self.select_data(cols=cols, clusters=clusters)\n # process arg: abs_cols\n if abs_cols is not None:\n df = df[abs_cols]\n # process arg: kwargs\n kwargs = self.process_kwargs('cca', kwargs)\n o_cca = CCA(**kwargs)\n\n arr = o_cca.fit_transform(df)\n nrows, ncols = arr.shape\n cca_cols = [\"cca_{}\".format(i) for i in range(ncols)]\n self.pca_names = cca_cols\n cca_df = pd.DataFrame(data=arr, columns=cca_cols)\n self.df = pd.concat([self.df, cca_df], axis=1)\n\n def transform_kernel_pca(self, abs_cols=None, cols=None, clusters=None, kwargs=None):\n # process arg: cols, clusters\n df = self.select_data(cols=cols, clusters=clusters)\n # process arg: abs_cols\n if abs_cols is not None:\n df = df[abs_cols]\n # process arg: kwargs\n kwargs = self.process_kwargs('kernel_pca', kwargs)\n o_kernel_pca = KernelPCA(**kwargs)\n\n arr = 
o_kernel_pca.fit_transform(df)\n nrows, ncols = arr.shape\n kernel_pca_cols = [\"kernel_pca_{}\".format(i) for i in range(ncols)]\n self.pca_names = kernel_pca_cols\n kernel_pca_df = pd.DataFrame(data=arr, columns=kernel_pca_cols)\n self.df = pd.concat([self.df, kernel_pca_df], axis=1)\n" ]
[ [ "matplotlib.pyplot.clf", "numpy.linspace", "matplotlib.pyplot.figure" ], [ "matplotlib.pyplot.figaspect", "matplotlib.pyplot.show" ], [ "pandas.concat", "numpy.random.seed", "numpy.random.randint" ], [ "pandas.concat", "sklearn.cross_decomposition.CCA", "sklearn.decomposition.FastICA", "pandas.DataFrame", "sklearn.decomposition.KernelPCA", "sklearn.decomposition.PCA" ] ]
eugene-yang/libact
[ "d86b7b850560138defb7be51986bfafd3d45f81b", "d86b7b850560138defb7be51986bfafd3d45f81b", "d86b7b850560138defb7be51986bfafd3d45f81b" ]
[ "libact/query_strategies/multiclass/hierarchical_sampling.py", "libact/query_strategies/query_by_committee.py", "libact/query_strategies/multilabel/multilabel_with_auxiliary_learner.py" ]
[ "\"\"\" Hierarchical Sampling for Active Learning (HS)\n\nThis module contains a class that implements Hierarchical Sampling for Active\nLearning (HS).\n\n\"\"\"\nfrom __future__ import division\n\nimport numpy as np\nfrom sklearn.cluster import AgglomerativeClustering\n\nfrom libact.base.interfaces import QueryStrategy\nfrom libact.utils import inherit_docstring_from, seed_random_state, zip\n\nNO_NODE = -1\nNO_LABEL = -1\n\n\nclass HierarchicalSampling(QueryStrategy):\n\n \"\"\"Hierarchical Sampling for Active Learning (HS)\n\n HS is an active learning scheme that exploits cluster structure in data.\n The original C++ implementation by the authors can be found at:\n http://www.cs.columbia.edu/~djhsu/code/HS.tar.gz\n\n Parameters\n ----------\n classes: list\n List of distinct classes in data.\n\n active_selecting: {True, False}, optional (default=True)\n False (random selecting): sample weight of a pruning is its number of\n unseen leaves.\n True (active selecting): sample weight of a pruning is its weighted\n error bound.\n\n subsample_qs: {:py:class:`libact.base.interfaces.query_strategies`, None}, optional (default=None)\n Subsample query strategy used to sample a node in the selected pruning.\n RandomSampling is used if None.\n\n random_state : {int, np.random.RandomState instance, None}, optional (default=None)\n If int or None, random_state is passed as parameter to generate\n np.random.RandomState instance. if np.random.RandomState instance,\n random_state is the random number generate.\n\n Attributes\n ----------\n m : int\n number of nodes\n\n classes: list\n List of distinct classes in data.\n\n n : int\n number of leaf nodes\n\n num_class : int\n number of classes\n\n parent : np.array instance, shape = (m)\n parent indices\n\n left_child : np.array instance, shape = (m)\n left child indices\n\n right_child : np.array instance, shape = (m)\n right child indices\n\n size : np.array instance, shape = (m)\n number of leaves in subtree\n\n depth : np.array instance, shape = (m)\n maximum depth in subtree\n\n count : np.array instance, shape = (m, num_class)\n node class label counts\n\n total : np.array instance, shape = (m)\n total node class labels seen (total[i] = Sum_j count[i][j])\n\n lower_bound : np.array instance, shape = (m, num_class)\n upper bounds on true node class label counts\n\n upper_bound : np.array instance, shape = (m, num_class)\n lower bounds on true node class label counts\n\n admissible: np.array instance, shape = (m, num_class)\n flag indicating if (node,label) is admissible\n\n best_label: np.array instance, shape = (m)\n best admissible label\n\n random_states\\_ : np.random.RandomState instance\n The random number generator using.\n\n\n Examples\n --------\n Here is an example of declaring a HierarchicalSampling query_strategy\n object:\n\n .. code-block:: python\n\n from libact.query_strategies import UncertaintySampling\n from libact.query_strategies.multiclass import HierarchicalSampling\n\n sub_qs = UncertaintySampling(\n dataset, method='sm', model=SVM(decision_function_shape='ovr'))\n\n qs = HierarchicalSampling(\n dataset, # Dataset object\n dataset.get_num_of_labels(),\n active_selecting=True,\n subsample_qs=sub_qs\n )\n\n\n References\n ----------\n\n .. [1] Sanjoy Dasgupta and Daniel Hsu. 
\"Hierarchical sampling for active\n learning.\" ICML 2008.\n \"\"\"\n\n def __init__(self, dataset, classes, active_selecting=True,\n subsample_qs=None, random_state=None):\n super(HierarchicalSampling, self).__init__(dataset)\n X = np.array(next(zip(*self.dataset.get_entries())))\n cluster = AgglomerativeClustering()\n cluster.fit(X)\n childrens = cluster.children_\n\n if subsample_qs is not None:\n if not isinstance(subsample_qs, QueryStrategy):\n raise TypeError(\"subsample_qs has to be a QueryStrategy\")\n self.sub_qs = subsample_qs\n else:\n self.sub_qs = None\n\n self.active_selecting = active_selecting\n self.random_state_ = seed_random_state(random_state)\n self.n = len(childrens) + 1\n self.m = self.n * 2 - 1\n self.num_class = len(classes)\n self.classes = list(classes)\n self.class_id = dict(zip(self.classes, range(self.num_class)))\n\n self.parent = np.full(self.m, NO_NODE, dtype=int)\n self.size = np.zeros(self.m, dtype=int)\n self.depth = np.zeros(self.m, dtype=int)\n for i, (left_child, right_child) in enumerate(childrens):\n parent = i + self.n\n self.parent[left_child] = parent\n self.parent[right_child] = parent\n self.left_child = np.concatenate([np.full(self.n, NO_NODE), childrens[:,0]]).astype(int)\n self.right_child = np.concatenate([np.full(self.n, NO_NODE), childrens[:,1]]).astype(int)\n\n for i in range(self.n):\n node = i\n cur_depth = 0\n while node != NO_NODE:\n assert node >= 0 and node < self.m\n self.size[node] += 1\n self.depth[node] = max(self.depth[node], cur_depth)\n cur_depth += 1\n node = self.parent[node]\n\n self.count = np.zeros((self.m, self.num_class), dtype=int)\n self.total = np.zeros(self.m, dtype=int)\n self.upper_bound = np.ones((self.m, self.num_class), dtype=float)\n self.lower_bound = np.zeros((self.m, self.num_class), dtype=float)\n self.admissible = np.zeros((self.m, self.num_class), dtype=bool)\n self.best_label = np.full(self.m, NO_LABEL, dtype=int)\n self.split = np.zeros(self.m, dtype=bool)\n self.cost = self.size.copy()\n\n self.prunings = [self.m-1]\n\n for i, entry in enumerate(self.dataset.data):\n if entry[1] != None:\n self.update(i, entry[1])\n\n @inherit_docstring_from(QueryStrategy)\n def update(self, entry_id, label):\n if label not in self.class_id:\n raise ValueError(\n 'Unknown class of entry %d: %s, expected: %s' %\n (entry_id, label, list(self.class_id.keys()))\n )\n class_id = self.class_id[label]\n root_pruning = self._find_root_pruning(entry_id)\n self._update(entry_id, class_id, root_pruning)\n self._prune_node(root_pruning)\n\n @inherit_docstring_from(QueryStrategy)\n def make_query(self):\n pruning = self._select_pruning()\n if self.sub_qs is None:\n ask_id = int(self._sample_node(pruning))\n else:\n _, scores = self.sub_qs.make_query(return_score=True)\n leaves = set(self._find_leaves(pruning))\n leaf_scores = [(score, node) for node, score in scores if node in leaves]\n ask_id = max(leaf_scores)[1]\n return ask_id\n\n def report_entry_label(self, entry_id):\n \"\"\"\n Return the best label of the asked entry.\n\n Parameters\n ----------\n entry_id : int\n The index of the sample to ask.\n\n Returns\n -------\n label: object\n The best label of the given sample.\n \"\"\"\n\n pruning = self._find_root_pruning(entry_id)\n return self.classes[self._best_label(pruning)]\n\n def report_all_label(self):\n \"\"\"\n Return the best label of the asked entry.\n\n Parameters\n ----------\n\n Returns\n -------\n labels: list of object, shape=(m)\n The best label of all samples.\n \"\"\"\n\n labels = 
np.empty(len(self.dataset), dtype=int)\n for pruning in self.prunings:\n best_label = self._best_label(pruning)\n leaves = self._find_leaves(pruning)\n labels[leaves] = best_label\n return labels\n\n def _best_label(self, pruning):\n if self.best_label[pruning] != NO_LABEL:\n return self.best_label[pruning]\n if self.parent[pruning] != NO_NODE:\n return self.best_label[self.parent[pruning]]\n return 0 # default label is 0 if no admissble label for root\n\n def _find_root_pruning(self, entry_id):\n node = entry_id\n while node != NO_NODE and node not in self.prunings:\n node = self.parent[node]\n return node\n\n def _find_leaves(self, node):\n if node == NO_NODE:\n return []\n if self.size[node] == 1:\n return [node]\n return (self._find_leaves(self.left_child[node]) +\n self._find_leaves(self.right_child[node]))\n\n def _select_pruning(self):\n if self.active_selecting:\n sample_weight = []\n for pruning in self.prunings:\n best_label = self.best_label[pruning]\n if best_label == NO_LABEL:\n w = self.size[pruning]\n else:\n w = self.size[pruning] - self.lower_bound[pruning][best_label]\n sample_weight.append(w)\n else:\n sample_weight = self.size[self.prunings] - self.total[self.prunings]\n sample_weight = sample_weight / sum(sample_weight)\n return self.random_state_.choice(self.prunings, p=sample_weight)\n\n def _sample_node(self, node):\n num_unseen_leaves = self.size[node] - self.total[node]\n if num_unseen_leaves == 0:\n return NO_NODE\n if self.size[node] == 1:\n return node\n assert self.left_child[node] != NO_NODE and self.right_child[node] != NO_NODE\n p_left = (self.size[self.left_child[node]] - self.total[self.left_child[node]]) / num_unseen_leaves\n if self.random_state_.rand() < p_left:\n return self._sample_node(self.left_child[node])\n else:\n return self._sample_node(self.right_child[node])\n\n def _update(self, entry_id, label, root_pruning):\n node = entry_id\n while node != NO_NODE:\n self.count[node, label] += 1\n self.total[node] += 1\n assert self.total[node] <= self.size[node]\n\n for l in range(self.num_class):\n frac = self.count[node, l] / self.total[node]\n delta = self._get_delta(frac, node)\n mean = frac * self.size[node]\n err = delta * self.size[node]\n self.lower_bound[node][l] = max(self.count[node][l], mean - err)\n self.upper_bound[node][l] = min(self.size[node] - (self.total[node] - self.count[node, l]), mean + err)\n\n max_count = 0\n for l in range(self.num_class):\n self.admissible[node, l] = True\n for k in range(self.num_class):\n if l != k and self.lower_bound[node, l] <= 2 * self.upper_bound[node, k] - self.size[node]:\n self.admissible[node, l] = False\n if self.admissible[node, l] and self.count[node, l] > max_count:\n max_count = self.count[node, l]\n self.best_label[node] = l\n\n if self.best_label[node] != NO_LABEL:\n basic_cost = self.size[node] - self.lower_bound[node][self.best_label[node]]\n else:\n basic_cost = self.size[node]\n\n if self.size[node] == 1:\n self.cost[node] = basic_cost\n else:\n split_cost = self.cost[self.left_child[node]] + self.cost[self.right_child[node]]\n if split_cost < basic_cost and self.best_label[node] != NO_LABEL:\n self.cost[node] = split_cost\n self.split[node] = True\n else:\n self.cost[node] = basic_cost\n\n if node != root_pruning:\n node = self.parent[node]\n else:\n break\n\n def _prune_node(self, root_pruning):\n self.prunings.remove(root_pruning)\n node_set = [root_pruning]\n while len(node_set) > 0:\n node = node_set.pop()\n if self.split[node]:\n node_set.append(self.left_child[node])\n 
node_set.append(self.right_child[node])\n else:\n self.prunings.append(node)\n\n def _get_delta(self, frac, node):\n fs_corr = 1.0 - self.total[node] / self.size[node]\n return fs_corr / self.total[node] + \\\n np.sqrt(fs_corr * frac * (1. - frac) / self.total[node])\n", "\"\"\"Query by committee\n\nThis module contains a class that implements Query by committee active learning\nalgorithm.\n\"\"\"\nfrom __future__ import division\n\nimport logging\nimport math\n\nimport numpy as np\n\nfrom libact.base.dataset import Dataset\nfrom libact.base.interfaces import QueryStrategy, ProbabilisticModel\nimport libact.models\nfrom libact.utils import inherit_docstring_from, seed_random_state, zip\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass QueryByCommittee(QueryStrategy):\n\n r\"\"\"Query by committee\n\n Parameters\n ----------\n models : list of :py:mod:`libact.models` instances or str\n This parameter accepts a list of initialized libact Model instances,\n or class names of libact Model classes to determine the models to be\n included in the committee to vote for each unlabeled instance.\n\n disagreement : ['vote', 'kl_divergence'], optional (default='vote')\n Sets the method for measuring disagreement between models.\n 'vote' represents vote entropy.\n kl_divergence requires models being ProbabilisticModel\n\n random_state : {int, np.random.RandomState instance, None}, optional (default=None)\n If int or None, random_state is passed as parameter to generate\n np.random.RandomState instance. if np.random.RandomState instance,\n random_state is the random number generate.\n\n Attributes\n ----------\n students : list, shape = (len(models))\n A list of the model instances used in this algorithm.\n\n random_states\\_ : np.random.RandomState instance\n The random number generator using.\n\n Examples\n --------\n Here is an example of declaring a QueryByCommittee query_strategy object:\n\n .. code-block:: python\n\n from libact.query_strategies import QueryByCommittee\n from libact.models import LogisticRegression\n\n qs = QueryByCommittee(\n dataset, # Dataset object\n models=[\n LogisticRegression(C=1.0),\n LogisticRegression(C=0.1),\n ],\n )\n\n\n References\n ----------\n .. [1] Seung, H. Sebastian, Manfred Opper, and Haim Sompolinsky. \"Query by\n committee.\" Proceedings of the fifth annual workshop on\n Computational learning theory. 
ACM, 1992.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(QueryByCommittee, self).__init__(*args, **kwargs)\n\n self.disagreement = kwargs.pop('disagreement', 'vote')\n\n\n models = kwargs.pop('models', None)\n if models is None:\n raise TypeError(\n \"__init__() missing required keyword-only argument: 'models'\"\n )\n elif not models:\n raise ValueError(\"models list is empty\")\n\n if self.disagreement == 'kl_divergence':\n for model in models:\n if not isinstance(model, ProbabilisticModel):\n raise TypeError(\n \"Given disagreement set as 'kl_divergence', all models\"\n \"should be ProbabilisticModel.\"\n )\n\n\n random_state = kwargs.pop('random_state', None)\n self.random_state_ = seed_random_state(random_state)\n\n self.students = list()\n for model in models:\n if isinstance(model, str):\n self.students.append(getattr(libact.models, model)())\n else:\n self.students.append(model)\n self.n_students = len(self.students)\n self.teach_students()\n\n def _vote_disagreement(self, votes):\n \"\"\"\n Return the disagreement measurement of the given number of votes.\n It uses the vote vote to measure the disagreement.\n\n Parameters\n ----------\n votes : list of int, shape==(n_samples, n_students)\n The predictions that each student gives to each sample.\n\n Returns\n -------\n disagreement : list of float, shape=(n_samples)\n The vote entropy of the given votes.\n \"\"\"\n ret = []\n for candidate in votes:\n ret.append(0.0)\n lab_count = {}\n for lab in candidate:\n lab_count[lab] = lab_count.setdefault(lab, 0) + 1\n\n # Using vote entropy to measure disagreement\n for lab in lab_count.keys():\n ret[-1] -= lab_count[lab] / self.n_students * \\\n math.log(float(lab_count[lab]) / self.n_students)\n\n return ret\n\n def _kl_divergence_disagreement(self, proba):\n \"\"\"\n Calculate the Kullback-Leibler (KL) divergence disaagreement measure.\n\n Parameters\n ----------\n proba : array-like, shape=(n_samples, n_students, n_class)\n\n Returns\n -------\n disagreement : list of float, shape=(n_samples)\n The kl_divergence of the given probability.\n \"\"\"\n n_students = np.shape(proba)[1]\n consensus = np.mean(proba, axis=1) # shape=(n_samples, n_class)\n # average probability of each class across all students\n consensus = np.tile(consensus, (n_students, 1, 1)).transpose(1, 0, 2)\n kl = np.sum(proba * np.log(proba / consensus), axis=2)\n return np.mean(kl, axis=1)\n\n def _labeled_uniform_sample(self, sample_size):\n \"\"\"sample labeled entries uniformly\"\"\"\n labeled_entries = self.dataset.get_labeled_entries()\n samples = [labeled_entries[\n self.random_state_.randint(0, len(labeled_entries))\n ]for _ in range(sample_size)]\n return Dataset(*zip(*samples))\n\n def teach_students(self):\n \"\"\"\n Train each model (student) with the labeled data using bootstrap\n aggregating (bagging).\n \"\"\"\n dataset = self.dataset\n for student in self.students:\n bag = self._labeled_uniform_sample(int(dataset.len_labeled()))\n while bag.get_num_of_labels() != dataset.get_num_of_labels():\n bag = self._labeled_uniform_sample(int(dataset.len_labeled()))\n LOGGER.warning('There is student receiving only one label,'\n 're-sample the bag.')\n student.train(bag)\n\n @inherit_docstring_from(QueryStrategy)\n def update(self, entry_id, label):\n # Train each model with newly updated label.\n self.teach_students()\n\n @inherit_docstring_from(QueryStrategy)\n def make_query(self):\n dataset = self.dataset\n unlabeled_entry_ids, X_pool = zip(*dataset.get_unlabeled_entries())\n\n if self.disagreement 
== 'vote':\n # Let the trained students vote for unlabeled data\n votes = np.zeros((len(X_pool), len(self.students)))\n for i, student in enumerate(self.students):\n votes[:, i] = student.predict(X_pool)\n\n vote_entropy = self._vote_disagreement(votes)\n ask_idx = self.random_state_.choice(\n np.where(np.isclose(vote_entropy, np.max(vote_entropy)))[0])\n\n elif self.disagreement == 'kl_divergence':\n proba = []\n for student in self.students:\n proba.append(student.predict_proba(X_pool))\n proba = np.array(proba).transpose(1, 0, 2).astype(float)\n\n avg_kl = self._kl_divergence_disagreement(proba)\n ask_idx = self.random_state_.choice(\n np.where(np.isclose(avg_kl, np.max(avg_kl)))[0])\n\n return unlabeled_entry_ids[ask_idx]\n", "\"\"\"Multi-label Active Learning with Auxiliary Learner\n\"\"\"\nimport copy\n\nimport numpy as np\n\nfrom libact.base.dataset import Dataset\nfrom libact.base.interfaces import QueryStrategy, ContinuousModel\nfrom libact.utils import inherit_docstring_from, seed_random_state, zip\nfrom libact.models import LogisticRegression, SVM\nfrom libact.models.multilabel import BinaryRelevance, DummyClf\n\n\nclass MultilabelWithAuxiliaryLearner(QueryStrategy):\n r\"\"\"Multi-label Active Learning with Auxiliary Learner\n\n Parameters\n ----------\n major_learner : :py:mod:`libact.base.interfaces.Model` object instance\n The major multilabel learner. This learner should be the model to be\n used to solve the problem.\n\n auxiliary_learner : :py:mod:`libact.models.multilabel` object instance\n The auxiliary multilabel learner.\n For criterion 'shlr' and 'mmr', it is required to support predict_real\n or predict_proba.\n\n criterion : ['hlr', 'shlr', 'mmr'], optional(default='hlr')\n The criterion for estimating the difference between major_learner and\n auxiliary_learner.\n hlr, hamming loss reduction\n shlr, soft hamming loss reduction\n mmr, maximum margin reduction\n\n b : float\n parameter for criterion shlr.\n It sets the score to be clipped between [-b, b] to remove influence of\n extreme margin values.\n\n random_state : {int, np.random.RandomState instance, None}, optional (default=None)\n If int or None, random_state is passed as parameter to generate\n np.random.RandomState instance. if np.random.RandomState instance,\n random_state is the random number generate.\n\n Attributes\n ----------\n\n Examples\n --------\n Here is an example of declaring a multilabel with auxiliary learner\n query_strategy object:\n\n .. code-block:: python\n\n from libact.query_strategies.multilabel import MultilabelWithAuxiliaryLearner\n from libact.models.multilabel import BinaryRelevance\n from libact.models import LogisticRegression, SVM\n\n qs = MultilabelWithAuxiliaryLearner(\n dataset,\n major_learner=BinaryRelevance(LogisticRegression())\n auxiliary_learner=BinaryRelevance(SVM())\n )\n\n References\n ----------\n .. [1] Hung, Chen-Wei, and Hsuan-Tien Lin. \"Multi-label Active Learning\n\t with Auxiliary Learner.\" ACML. 
2011.\n \"\"\"\n\n def __init__(self, dataset, major_learner, auxiliary_learner,\n criterion='hlr', b=1., random_state=None):\n super(MultilabelWithAuxiliaryLearner, self).__init__(dataset)\n\n self.n_labels = len(self.dataset.data[0][1])\n\n self.major_learner = major_learner\n self.auxiliary_learner = auxiliary_learner\n\n self.b = b\n\n self.random_state_ = seed_random_state(random_state)\n\n self.criterion = criterion\n if self.criterion not in ['hlr', 'shlr', 'mmr']:\n raise TypeError(\n \"supported criterion are ['hlr', 'shlr', 'mmr'], the given \"\n \"one is: \" + self.criterion\n )\n\n @inherit_docstring_from(QueryStrategy)\n def make_query(self):\n dataset = self.dataset\n labeled_pool, Y = zip(*dataset.get_labeled_entries())\n unlabeled_entry_ids, X_pool = zip(*dataset.get_unlabeled_entries())\n\n major_clf = copy.deepcopy(self.major_learner)\n major_clf.train(dataset)\n aux_clf = copy.deepcopy(self.auxiliary_learner)\n aux_clf.train(dataset)\n\n if self.criterion == 'hlr':\n major_pred = major_clf.predict(X_pool)\n aux_pred = aux_clf.predict(X_pool)\n score = np.abs(major_pred - aux_pred).mean(axis=1)\n elif self.criterion in ['mmr', 'shlr']:\n major_pred = major_clf.predict(X_pool) * 2 - 1\n\n if 'predict_real' in dir(aux_clf):\n aux_pred = aux_clf.predict_real(X_pool)\n elif 'predict_proba' in dir(aux_clf):\n aux_pred = aux_clf.predict_proba(X_pool) * 2 - 1\n else:\n raise AttributeError(\"aux_learner did not support either\"\n \"'predict_real' or 'predict_proba'\"\n \"method\")\n\n loss = (major_pred * aux_pred).mean(axis=1)\n if self.criterion == 'mmr':\n score = (1. - major_pred * aux_pred) / 2.\n score = np.sum(score, axis=1)\n elif self.criterion == 'shlr':\n b = self.b\n score = (b - np.clip(major_pred * aux_pred, -b, b)) / 2. / b\n score = np.sum(score, axis=1)\n else:\n raise TypeError(\n \"supported criterion are ['hlr', 'shlr', 'mmr'], the given \"\n \"one is: \" + self.criterion\n )\n\n ask_id = self.random_state_.choice(np.where(score == np.max(score))[0])\n\n return unlabeled_entry_ids[ask_id]\n" ]
[ [ "numpy.sqrt", "numpy.ones", "numpy.full", "sklearn.cluster.AgglomerativeClustering", "numpy.zeros" ], [ "numpy.log", "numpy.tile", "numpy.max", "numpy.shape", "numpy.mean", "numpy.array" ], [ "numpy.max", "numpy.sum", "numpy.abs", "numpy.clip" ] ]
saltastroops/imephu
[ "0c302a73d01fe3ad018e7adf4b91e0beaecc6709" ]
[ "tests/conftest.py" ]
[ "\"\"\"pytest configuration.\"\"\"\nimport io\nimport pathlib\nimport time\nfrom unittest import mock\n\nimport numpy as np\nimport pytest\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\nfrom typer.testing import CliRunner\n\nimport imephu\nfrom imephu.annotation.general import TextAnnotation\nfrom imephu.cli import app\nfrom imephu.salt.finder_chart import FinderChart\n\nrunner = CliRunner()\n\n\[email protected](autouse=True)\ndef no_http_requests(monkeypatch):\n \"\"\"Prevent any real HTTP requests.\n\n Taken (with wording slightly adapted) from\n https://blog.jerrycodes.com/no-http-requests/.\n \"\"\"\n\n def urlopen_mock(self, method, url, *args, **kwargs):\n raise RuntimeError(\n f\"The test was about to make a {method} request to \"\n f\"{self.scheme}://{self.host}{url}\"\n )\n\n monkeypatch.setattr(\n \"urllib3.connectionpool.HTTPConnectionPool.urlopen\", urlopen_mock\n )\n\n\[email protected]()\ndef check_finder(file_regression):\n \"\"\"\n Return a function for checking finder charts.\n\n The finder chart is saved as a png, and the png is compared against a previously\n saved version. If no version exists already, the file is saved and the test fails.\n The saved file should be put under version control.\n\n If the saved png and the previously saved version differ, the test fails.\n\n In case you need to update the saved files, run ``pytest`` with the\n ``--force-regen`` flag.\n\n Parameters\n ----------\n file_regression: file regression fixture\n The file regression fixture from the pytest-regressions plugin.\n\n Returns\n -------\n function\n The function for checking a finder chart.\n \"\"\"\n\n def _check_finder(finder_chart):\n np.random.seed(0)\n try:\n contents = io.BytesIO()\n finder_chart.save(contents, format=\"png\")\n file_regression.check(contents.getvalue(), binary=True, extension=\".png\")\n finally:\n np.random.seed()\n\n return _check_finder\n\n\[email protected]()\ndef check_cli(fits_file, tmp_path_factory, file_regression):\n \"\"\"\n Return a function for checking the command line interface.\n\n Parameters\n ----------\n tmp_path_factory: fixture for creating a temporary directory\n Temporary directory.\n file_regression: fixture for regression checking\n Fixture for file regression checking.\n\n Returns\n -------\n function\n Function for checking the command line interface.\n \"\"\"\n\n def _check_cli(\n instrument_yaml,\n fits_source_yaml=\"fits-source:\\n image-survey: POSS2/UKSTU Red\",\n ):\n configuration = f\"\"\"\\\n{fits_source_yaml}\ntelescope: SALT\npi-family-name: Doe\nproposal-code: 2022-1-SCI-042\nposition-angle: 30d\ntarget:\n name: Magrathea\n ra: 0h 40m 00s\n dec: -60d\n magnitude-range:\n bandpass: V\n minimum: 17\n maximum: 17.3\n{instrument_yaml}\n\"\"\"\n np.random.seed(0)\n try:\n tmp = tmp_path_factory.mktemp(f\"finder-chart-{time.time_ns()}\")\n config = tmp / \"config.yaml\"\n config.write_text(configuration)\n output = tmp / \"finder_chart.png\"\n with mock.patch.object(\n imephu.cli, \"load_fits\", autospec=True\n ) as mock_load_fits:\n fits = fits_file.read_bytes()\n mock_load_fits.return_value = io.BytesIO(fits)\n runner.invoke(app, [\"--config\", config, \"--out\", output])\n finder_chart = output.read_bytes()\n file_regression.check(finder_chart, binary=True, extension=\".png\")\n finally:\n np.random.seed()\n\n return _check_cli\n\n\[email protected]()\ndef fits_file():\n \"\"\"\n Return the path of an example FITS file.\n\n The FITS file whose path is returned shows a 10 arcsecond by 10 arcsecond 
sky area\n centered on the right ascension 10 degrees and the declination -60 degrees.\n\n Returns\n -------\n `pathlib.Path`\n The path to the example FITS file.\n \"\"\"\n return pathlib.Path(__file__).parent / \"data\" / \"ra10_dec-60.fits\"\n\n\[email protected]()\ndef fits_file2():\n \"\"\"\n Return the path of an example FITS file.\n\n The FITS file whose path is returned shows a 10 arcsecond by 10 arcsecond sky area\n centered on the right ascension 9.75 degrees and the declination -60 degrees.\n\n Returns\n -------\n `pathlib.Path`\n The path to the example FITS file.\n \"\"\"\n return pathlib.Path(__file__).parent / \"data\" / \"ra9.75_dec-60.fits\"\n\n\[email protected]()\ndef fits_center():\n \"\"\"Return the sky coordinates for the center of the example FITS file.\"\"\"\n return SkyCoord(ra=10 * u.deg, dec=-60 * u.deg)\n\n\[email protected]()\ndef fits_center2():\n \"\"\"Return the sky coordinates for the center of the example FITS file.\"\"\"\n return SkyCoord(ra=9.75 * u.deg, dec=-60 * u.deg)\n\n\[email protected]()\ndef mos_mask_xml():\n \"\"\"Return a function for generating XML describing a MOS mask.\"\"\"\n\n def _mask_xml(center, position_angle, reference_stars, slits):\n xml = f\"\"\"\\\n<?xml version=\"1.0\" ?>\n<slitmask>\n<header>\n<parameter name=\"VERSION\" value=\"1.1\" />\n<parameter name=\"PROPOSALCODE\" value=\"INDEF\" />\n<parameter name=\"MASKNUM\" value=\"0\" />\n<parameter name=\"PI\" value=\"INDEF\" />\n<parameter name=\"CREATOR\" value=\"Someone\" />\n<parameter name=\"ROTANGLE\" value=\"{position_angle.to_value(u.deg)}\" />\n<parameter name=\"CENTERRA\" value=\"{center.ra.to_value(u.deg)}\" />\n<parameter name=\"CENTERDEC\" value=\"{center.dec.to_value(u.deg)}\" />\n<parameter name=\"EQUINOX\" value=\"2000.0\" />\n<parameter name=\"NSMODE\" value=\"0\" />\n<parameter name=\"COOSYS\" value=\"RADEC\" />\n<parameter name=\"VALIDATED\" value=\"FALSE\" />\n<parameter name=\"SPECLENGTH\" value=\"12400\" />\n<parameter name=\"SPECOFFSET\" value=\"0\" />\n<parameter name=\"SPECPOLSPLIT\" value=\"0\" />\n<parameter name=\"SPECHEIGHT\" value=\"0\" />\n</header>\n\"\"\"\n id = 1\n for star in reference_stars:\n xml += f\"\"\"\n <refstar\n id=\"{id}\"\n xce=\"{star.ra.to_value(u.deg)}\"\n yce=\"{star.dec.to_value(u.deg)}\"\n radius=\"0.5\" mag=\"0.0\"\n />\"\"\"\n id += 1\n\n for slit in slits:\n xml += f\"\"\"\n <slit\n id=\"{id}\"\n xce=\"{slit.center.ra.to_value(u.deg)}\"\n yce=\"{slit.center.dec.to_value(u.deg)}\"\n width=\"{slit.width.to_value(u.arcsec)}\"\n length=\"{slit.height.to_value(u.arcsec)}\"\n tilt=\"{slit.tilt.to_value(u.deg)}\"\n priority=\"1.0\"\n mag=\"0.0\"\n />\"\"\"\n\n xml += \"</slitmask>\"\n\n return xml\n\n return _mask_xml\n\n\[email protected]()\ndef mock_from_survey(fits_file, fits_file2):\n \"\"\"Return a fixture for mocking getting a finder chart from an image survey.\n\n This fixture mocks the ``from_survey`` method of the\n `~imephu.finder_chart.FinderChart` class. The mock method always returns a finder\n chart with the FITS image of the `fits_file` fixture when called the first time and\n a finder chart with the FITS image of the `fits_file2` fixture when called the\n second time.\n\n .. 
warning::\n\n The mock function ignores any arguments - you always get the same finder chart.\n In particular this implies that you always should use the `fits_center` fixture\n for the center of the FITS image when calling the function for the first time,\n and the fits_center2 fixture when calling it for the second time.\n \"\"\"\n with mock.patch.object(FinderChart, \"from_survey\", autospec=True) as mock_load_fits:\n mock_load_fits.side_effect = [\n FinderChart(open(fits_file, \"rb\")),\n FinderChart(open(fits_file2, \"rb\")),\n ]\n yield mock_load_fits\n\n\[email protected]()\ndef legend():\n \"\"\"Return a fixture for adding a legend to a finder chart.\"\"\"\n\n def _legend(text, wcs):\n return TextAnnotation(\n SkyCoord(ra=\"00h40m36s\", dec=\"-59d55m30s\"),\n text,\n wcs=wcs,\n color=\"blue\",\n horizontalalignment=\"left\",\n )\n\n return _legend\n" ]
[ [ "numpy.random.seed" ] ]
dieterv77/statsmodels
[ "844381797a475a01c05a4e162592a5a6e3a48032", "844381797a475a01c05a4e162592a5a6e3a48032", "844381797a475a01c05a4e162592a5a6e3a48032", "844381797a475a01c05a4e162592a5a6e3a48032" ]
[ "statsmodels/tsa/vector_ar/tests/example_svar.py", "statsmodels/tsa/statespace/sarimax.py", "statsmodels/stats/tests/test_diagnostic.py", "statsmodels/stats/mediation.py" ]
[ "import numpy as np\nimport statsmodels.api as sm\nimport pandas as pd\n\nmdatagen = sm.datasets.macrodata.load().data\nmdata = mdatagen[['realgdp','realcons','realinv']]\nnames = mdata.dtype.names\nstart = pd.datetime(1959, 3, 31)\nend = pd.datetime(2009, 9, 30)\n#qtr = pd.DatetimeIndex(start=start, end=end, freq=pd.datetools.BQuarterEnd())\nqtr = pd.DatetimeIndex(start=start, end=end, freq='BQ-MAR')\ndata = pd.DataFrame(mdata, index=qtr)\ndata = (np.log(data)).diff().dropna()\n\n#define structural inputs\nA = np.asarray([[1, 0, 0],['E', 1, 0],['E', 'E', 1]])\nB = np.asarray([['E', 0, 0], [0, 'E', 0], [0, 0, 'E']])\nA_guess = np.asarray([0.5, 0.25, -0.38])\nB_guess = np.asarray([0.5, 0.1, 0.05])\nmymodel = SVAR(data, svar_type='AB', A=A, B=B, freq='Q')\nres = mymodel.fit(maxlags=3, maxiter=10000, maxfun=10000, solver='bfgs')\nres.irf(periods=30).plot(impulse='realgdp', plot_stderr=True,\n stderr_type='mc', repl=100)\n", "\"\"\"\nSARIMAX Model\n\nAuthor: Chad Fulton\nLicense: Simplified-BSD\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\nfrom statsmodels.compat.python import long\n\nfrom warnings import warn\n\nimport numpy as np\nfrom .kalman_filter import KalmanFilter\nfrom .mlemodel import MLEModel, MLEResults, MLEResultsWrapper\nfrom .tools import (\n companion_matrix, diff, is_invertible, constrain_stationary_univariate,\n unconstrain_stationary_univariate, solve_discrete_lyapunov,\n prepare_exog\n)\nfrom statsmodels.tools.tools import Bunch\nfrom statsmodels.tools.data import _is_using_pandas\nfrom statsmodels.tsa.tsatools import lagmat\nfrom statsmodels.tools.decorators import cache_readonly\nfrom statsmodels.tools.sm_exceptions import ValueWarning\nimport statsmodels.base.wrapper as wrap\n\n\nclass SARIMAX(MLEModel):\n r\"\"\"\n Seasonal AutoRegressive Integrated Moving Average with eXogenous regressors\n model\n\n Parameters\n ----------\n endog : array_like\n The observed time-series process :math:`y`\n exog : array_like, optional\n Array of exogenous regressors, shaped nobs x k.\n order : iterable or iterable of iterables, optional\n The (p,d,q) order of the model for the number of AR parameters,\n differences, and MA parameters. `d` must be an integer\n indicating the integration order of the process, while\n `p` and `q` may either be an integers indicating the AR and MA\n orders (so that all lags up to those orders are included) or else\n iterables giving specific AR and / or MA lags to include. Default is\n an AR(1) model: (1,0,0).\n seasonal_order : iterable, optional\n The (P,D,Q,s) order of the seasonal component of the model for the\n AR parameters, differences, MA parameters, and periodicity.\n `d` must be an integer indicating the integration order of the process,\n while `p` and `q` may either be an integers indicating the AR and MA\n orders (so that all lags up to those orders are included) or else\n iterables giving specific AR and / or MA lags to include. `s` is an\n integer giving the periodicity (number of periods in season), often it\n is 4 for quarterly data or 12 for monthly data. Default is no seasonal\n effect.\n trend : str{'n','c','t','ct'} or iterable, optional\n Parameter controlling the deterministic trend polynomial :math:`A(t)`.\n Can be specified as a string where 'c' indicates a constant (i.e. a\n degree zero component of the trend polynomial), 't' indicates a\n linear trend with time, and 'ct' is both. 
Can also be specified as an\n iterable defining the polynomial as in `numpy.poly1d`, where\n `[1,1,0,1]` would denote :math:`a + bt + ct^3`. Default is to not\n include a trend component.\n measurement_error : boolean, optional\n Whether or not to assume the endogenous observations `endog` were\n measured with error. Default is False.\n time_varying_regression : boolean, optional\n Used when an explanatory variables, `exog`, are provided provided\n to select whether or not coefficients on the exogenous regressors are\n allowed to vary over time. Default is False.\n mle_regression : boolean, optional\n Whether or not to use estimate the regression coefficients for the\n exogenous variables as part of maximum likelihood estimation or through\n the Kalman filter (i.e. recursive least squares). If\n `time_varying_regression` is True, this must be set to False. Default\n is True.\n simple_differencing : boolean, optional\n Whether or not to use partially conditional maximum likelihood\n estimation. If True, differencing is performed prior to estimation,\n which discards the first :math:`s D + d` initial rows but results in a\n smaller state-space formulation. If False, the full SARIMAX model is\n put in state-space form so that all datapoints can be used in\n estimation. Default is False.\n enforce_stationarity : boolean, optional\n Whether or not to transform the AR parameters to enforce stationarity\n in the autoregressive component of the model. Default is True.\n enforce_invertibility : boolean, optional\n Whether or not to transform the MA parameters to enforce invertibility\n in the moving average component of the model. Default is True.\n hamilton_representation : boolean, optional\n Whether or not to use the Hamilton representation of an ARMA process\n (if True) or the Harvey representation (if False). Default is False.\n **kwargs\n Keyword arguments may be used to provide default values for state space\n matrices or for Kalman filtering options. See `Representation`, and\n `KalmanFilter` for more details.\n\n Attributes\n ----------\n measurement_error : boolean\n Whether or not to assume the endogenous\n observations `endog` were measured with error.\n state_error : boolean\n Whether or not the transition equation has an error component.\n mle_regression : boolean\n Whether or not the regression coefficients for\n the exogenous variables were estimated via maximum\n likelihood estimation.\n state_regression : boolean\n Whether or not the regression coefficients for\n the exogenous variables are included as elements\n of the state space and estimated via the Kalman\n filter.\n time_varying_regression : boolean\n Whether or not coefficients on the exogenous\n regressors are allowed to vary over time.\n simple_differencing : boolean\n Whether or not to use partially conditional maximum likelihood\n estimation.\n enforce_stationarity : boolean\n Whether or not to transform the AR parameters\n to enforce stationarity in the autoregressive\n component of the model.\n enforce_invertibility : boolean\n Whether or not to transform the MA parameters\n to enforce invertibility in the moving average\n component of the model.\n hamilton_representation : boolean\n Whether or not to use the Hamilton representation of an ARMA process.\n trend : str{'n','c','t','ct'} or iterable\n Parameter controlling the deterministic\n trend polynomial :math:`A(t)`. 
See the class\n parameter documentation for more information.\n polynomial_ar : array\n Array containing autoregressive lag polynomial\n coefficients, ordered from lowest degree to highest.\n Initialized with ones, unless a coefficient is\n constrained to be zero (in which case it is zero).\n polynomial_ma : array\n Array containing moving average lag polynomial\n coefficients, ordered from lowest degree to highest.\n Initialized with ones, unless a coefficient is\n constrained to be zero (in which case it is zero).\n polynomial_seasonal_ar : array\n Array containing seasonal moving average lag\n polynomial coefficients, ordered from lowest degree\n to highest. Initialized with ones, unless a\n coefficient is constrained to be zero (in which\n case it is zero).\n polynomial_seasonal_ma : array\n Array containing seasonal moving average lag\n polynomial coefficients, ordered from lowest degree\n to highest. Initialized with ones, unless a\n coefficient is constrained to be zero (in which\n case it is zero).\n polynomial_trend : array\n Array containing trend polynomial coefficients,\n ordered from lowest degree to highest. Initialized\n with ones, unless a coefficient is constrained to be\n zero (in which case it is zero).\n k_ar : int\n Highest autoregressive order in the model, zero-indexed.\n k_ar_params : int\n Number of autoregressive parameters to be estimated.\n k_diff : int\n Order of intergration.\n k_ma : int\n Highest moving average order in the model, zero-indexed.\n k_ma_params : int\n Number of moving average parameters to be estimated.\n seasonal_periods : int\n Number of periods in a season.\n k_seasonal_ar : int\n Highest seasonal autoregressive order in the model, zero-indexed.\n k_seasonal_ar_params : int\n Number of seasonal autoregressive parameters to be estimated.\n k_seasonal_diff : int\n Order of seasonal intergration.\n k_seasonal_ma : int\n Highest seasonal moving average order in the model, zero-indexed.\n k_seasonal_ma_params : int\n Number of seasonal moving average parameters to be estimated.\n k_trend : int\n Order of the trend polynomial plus one (i.e. the constant polynomial\n would have `k_trend=1`).\n k_exog : int\n Number of exogenous regressors.\n\n Notes\n -----\n The SARIMA model is specified :math:`(p, d, q) \\times (P, D, Q)_s`.\n\n .. math::\n\n \\phi_p (L) \\tilde \\phi_P (L^s) \\Delta^d \\Delta_s^D y_t = A(t) +\n \\theta_q (L) \\tilde \\theta_Q (L^s) \\zeta_t\n\n In terms of a univariate structural model, this can be represented as\n\n .. math::\n\n y_t & = u_t + \\eta_t \\\\\n \\phi_p (L) \\tilde \\phi_P (L^s) \\Delta^d \\Delta_s^D u_t & = A(t) +\n \\theta_q (L) \\tilde \\theta_Q (L^s) \\zeta_t\n\n where :math:`\\eta_t` is only applicable in the case of measurement error\n (although it is also used in the case of a pure regression model, i.e. if\n p=q=0).\n\n In terms of this model, regression with SARIMA errors can be represented\n easily as\n\n .. math::\n\n y_t & = \\beta_t x_t + u_t \\\\\n \\phi_p (L) \\tilde \\phi_P (L^s) \\Delta^d \\Delta_s^D u_t & = A(t) +\n \\theta_q (L) \\tilde \\theta_Q (L^s) \\zeta_t\n\n this model is the one used when exogenous regressors are provided.\n\n Note that the reduced form lag polynomials will be written as:\n\n .. math::\n\n \\Phi (L) \\equiv \\phi_p (L) \\tilde \\phi_P (L^s) \\\\\n \\Theta (L) \\equiv \\theta_q (L) \\tilde \\theta_Q (L^s)\n\n If `mle_regression` is True, regression coefficients are treated as\n additional parameters to be estimated via maximum likelihood. 
Otherwise\n they are included as part of the state with a diffuse initialization.\n In this case, however, with approximate diffuse initialization, results\n can be sensitive to the initial variance.\n\n This class allows two different underlying representations of ARMA models\n as state space models: that of Hamilton and that of Harvey. Both are\n equivalent in the sense that they are analytical representations of the\n ARMA model, but the state vectors of each have different meanings. For\n this reason, maximum likelihood does not result in identical parameter\n estimates and even the same set of parameters will result in different\n loglikelihoods.\n\n The Harvey representation is convenient because it allows integrating\n differencing into the state vector to allow using all observations for\n estimation.\n\n In this implementation of differenced models, the Hamilton representation\n is not able to accomodate differencing in the state vector, so\n `simple_differencing` (which performs differencing prior to estimation so\n that the first d + sD observations are lost) must be used.\n\n Many other packages use the Hamilton representation, so that tests against\n Stata and R require using it along with simple differencing (as Stata\n does).\n\n Detailed information about state space models can be found in [1]_. Some\n specific references are:\n\n - Chapter 3.4 describes ARMA and ARIMA models in state space form (using\n the Harvey representation), and gives references for basic seasonal\n models and models with a multiplicative form (for example the airline\n model). It also shows a state space model for a full ARIMA process (this\n is what is done here if `simple_differencing=False`).\n - Chapter 3.6 describes estimating regression effects via the Kalman filter\n (this is performed if `mle_regression` is False), regression with\n time-varying coefficients, and regression with ARMA errors (recall from\n above that if regression effects are present, the model estimated by this\n class is regression with SARIMA errors).\n - Chapter 8.4 describes the application of an ARMA model to an example\n dataset. A replication of this section is available in an example\n IPython notebook in the documentation.\n\n References\n ----------\n .. [1] Durbin, James, and Siem Jan Koopman. 
2012.\n Time Series Analysis by State Space Methods: Second Edition.\n Oxford University Press.\n \"\"\"\n\n def __init__(self, endog, exog=None, order=(1, 0, 0),\n seasonal_order=(0, 0, 0, 0), trend=None,\n measurement_error=False, time_varying_regression=False,\n mle_regression=True, simple_differencing=False,\n enforce_stationarity=True, enforce_invertibility=True,\n hamilton_representation=False, **kwargs):\n\n # Model parameters\n self.seasonal_periods = seasonal_order[3]\n self.measurement_error = measurement_error\n self.time_varying_regression = time_varying_regression\n self.mle_regression = mle_regression\n self.simple_differencing = simple_differencing\n self.enforce_stationarity = enforce_stationarity\n self.enforce_invertibility = enforce_invertibility\n self.hamilton_representation = hamilton_representation\n\n # Save given orders\n self.order = order\n self.seasonal_order = seasonal_order\n\n # Enforce non-MLE coefficients if time varying coefficients is\n # specified\n if self.time_varying_regression and self.mle_regression:\n raise ValueError('Models with time-varying regression coefficients'\n ' must integrate the coefficients as part of the'\n ' state vector, so that `mle_regression` must'\n ' be set to False.')\n\n # Lag polynomials\n # Assume that they are given from lowest degree to highest, that all\n # degrees except for the constant are included, and that they are\n # boolean vectors (0 for not included, 1 for included).\n if isinstance(order[0], (int, long, np.integer)):\n self.polynomial_ar = np.r_[1., np.ones(order[0])]\n else:\n self.polynomial_ar = np.r_[1., order[0]]\n if isinstance(order[2], (int, long, np.integer)):\n self.polynomial_ma = np.r_[1., np.ones(order[2])]\n else:\n self.polynomial_ma = np.r_[1., order[2]]\n # Assume that they are given from lowest degree to highest, that the\n # degrees correspond to (1*s, 2*s, ..., P*s), and that they are\n # boolean vectors (0 for not included, 1 for included).\n if isinstance(seasonal_order[0], (int, long, np.integer)):\n self.polynomial_seasonal_ar = np.r_[\n 1., # constant\n ([0] * (self.seasonal_periods - 1) + [1]) * seasonal_order[0]\n ]\n else:\n self.polynomial_seasonal_ar = np.r_[\n 1., [0] * self.seasonal_periods * len(seasonal_order[0])\n ]\n for i in range(len(seasonal_order[0])):\n tmp = (i + 1) * self.seasonal_periods\n self.polynomial_seasonal_ar[tmp] = seasonal_order[0][i]\n if isinstance(seasonal_order[2], (int, long, np.integer)):\n self.polynomial_seasonal_ma = np.r_[\n 1., # constant\n ([0] * (self.seasonal_periods - 1) + [1]) * seasonal_order[2]\n ]\n else:\n self.polynomial_seasonal_ma = np.r_[\n 1., [0] * self.seasonal_periods * len(seasonal_order[2])\n ]\n for i in range(len(seasonal_order[2])):\n tmp = (i + 1) * self.seasonal_periods\n self.polynomial_seasonal_ma[tmp] = seasonal_order[2][i]\n\n # Deterministic trend polynomial\n self.trend = trend\n if trend is None or trend == 'n':\n self.polynomial_trend = np.ones((0))\n elif trend == 'c':\n self.polynomial_trend = np.r_[1]\n elif trend == 't':\n self.polynomial_trend = np.r_[0, 1]\n elif trend == 'ct':\n self.polynomial_trend = np.r_[1, 1]\n else:\n self.polynomial_trend = (np.array(trend) > 0).astype(int)\n\n # Model orders\n # Note: k_ar, k_ma, k_seasonal_ar, k_seasonal_ma do not include the\n # constant term, so they may be zero.\n # Note: for a typical ARMA(p,q) model, p = k_ar_params = k_ar - 1 and\n # q = k_ma_params = k_ma - 1, although this may not be true for models\n # with arbitrary log polynomials.\n self.k_ar = 
int(self.polynomial_ar.shape[0] - 1)\n self.k_ar_params = int(np.sum(self.polynomial_ar) - 1)\n self.k_diff = int(order[1])\n self.k_ma = int(self.polynomial_ma.shape[0] - 1)\n self.k_ma_params = int(np.sum(self.polynomial_ma) - 1)\n\n self.k_seasonal_ar = int(self.polynomial_seasonal_ar.shape[0] - 1)\n self.k_seasonal_ar_params = (\n int(np.sum(self.polynomial_seasonal_ar) - 1)\n )\n self.k_seasonal_diff = int(seasonal_order[1])\n self.k_seasonal_ma = int(self.polynomial_seasonal_ma.shape[0] - 1)\n self.k_seasonal_ma_params = (\n int(np.sum(self.polynomial_seasonal_ma) - 1)\n )\n\n # Make internal copies of the differencing orders because if we use\n # simple differencing, then we will need to internally use zeros after\n # the simple differencing has been performed\n self._k_diff = self.k_diff\n self._k_seasonal_diff = self.k_seasonal_diff\n\n # We can only use the Hamilton representation if differencing is not\n # performed as a part of the state space\n if (self.hamilton_representation and not (self.simple_differencing or\n self._k_diff == self._k_seasonal_diff == 0)):\n raise ValueError('The Hamilton representation is only available'\n ' for models in which there is no differencing'\n ' integrated into the state vector. Set'\n ' `simple_differencing` to True or set'\n ' `hamilton_representation` to False')\n\n # Note: k_trend is not the degree of the trend polynomial, because e.g.\n # k_trend = 1 corresponds to the degree zero polynomial (with only a\n # constant term).\n self.k_trend = int(np.sum(self.polynomial_trend))\n\n # Model order\n # (this is used internally in a number of locations)\n self._k_order = max(self.k_ar + self.k_seasonal_ar,\n self.k_ma + self.k_seasonal_ma + 1)\n if self._k_order == 1 and self.k_ar + self.k_seasonal_ar == 0:\n # Handle time-varying regression\n if self.time_varying_regression:\n self._k_order = 0\n\n # Exogenous data\n (self.k_exog, exog) = prepare_exog(exog)\n\n # Redefine mle_regression to be true only if it was previously set to\n # true and there are exogenous regressors\n self.mle_regression = (\n self.mle_regression and exog is not None and self.k_exog > 0\n )\n # State regression is regression with coefficients estiamted within\n # the state vector\n self.state_regression = (\n not self.mle_regression and exog is not None and self.k_exog > 0\n )\n # If all we have is a regression (so k_ar = k_ma = 0), then put the\n # error term as measurement error\n if self.state_regression and self._k_order == 0:\n self.measurement_error = True\n\n # Number of states\n k_states = self._k_order\n if not self.simple_differencing:\n k_states += (self.seasonal_periods * self._k_seasonal_diff +\n self._k_diff)\n if self.state_regression:\n k_states += self.k_exog\n\n # Number of diffuse states\n k_diffuse_states = k_states\n if self.enforce_stationarity:\n k_diffuse_states -= self._k_order\n\n # Number of positive definite elements of the state covariance matrix\n k_posdef = int(self._k_order > 0)\n # Only have an error component to the states if k_posdef > 0\n self.state_error = k_posdef > 0\n if self.state_regression and self.time_varying_regression:\n k_posdef += self.k_exog\n\n # Diffuse initialization can be more sensistive to the variance value\n # in the case of state regression, so set a higher than usual default\n # variance\n if self.state_regression:\n kwargs.setdefault('initial_variance', 1e10)\n\n # Number of parameters\n self.k_params = (\n self.k_ar_params + self.k_ma_params +\n self.k_seasonal_ar_params + self.k_seasonal_ar_params +\n 
self.k_trend +\n self.measurement_error + 1\n )\n if self.mle_regression:\n self.k_params += self.k_exog\n\n # We need to have an array or pandas at this point\n self.orig_endog = endog\n self.orig_exog = exog\n if not _is_using_pandas(endog, None):\n endog = np.asanyarray(endog)\n\n # Update the differencing dimensions if simple differencing is applied\n self.orig_k_diff = self._k_diff\n self.orig_k_seasonal_diff = self._k_seasonal_diff\n if (self.simple_differencing and\n (self._k_diff > 0 or self._k_seasonal_diff > 0)):\n self._k_diff = 0\n self._k_seasonal_diff = 0\n\n # Internally used in several locations\n self._k_states_diff = (\n self._k_diff + self.seasonal_periods * self._k_seasonal_diff\n )\n\n # Set some model variables now so they will be available for the\n # initialize() method, below\n self.nobs = len(endog)\n self.k_states = k_states\n self.k_posdef = k_posdef\n\n # By default, do not calculate likelihood while it is controlled by\n # diffuse initial conditions.\n kwargs.setdefault('loglikelihood_burn', k_diffuse_states)\n\n # Initialize the statespace\n super(SARIMAX, self).__init__(\n endog, exog=exog, k_states=k_states, k_posdef=k_posdef, **kwargs\n )\n\n # Set as time-varying model if we have time-trend or exog\n if self.k_exog > 0 or len(self.polynomial_trend) > 1:\n self.ssm._time_invariant = False\n\n # Handle kwargs specified initialization\n if self.ssm.initialization is not None:\n self._manual_initialization = True\n\n # Initialize the fixed components of the statespace model\n self.ssm['design'] = self.initial_design\n self.ssm['state_intercept'] = self.initial_state_intercept\n self.ssm['transition'] = self.initial_transition\n self.ssm['selection'] = self.initial_selection\n\n # If we are estimating a simple ARMA model, then we can use a faster\n # initialization method (unless initialization was already specified).\n if k_diffuse_states == 0 and not self._manual_initialization:\n self.initialize_stationary()\n\n # update _init_keys attached by super\n self._init_keys += ['order', 'seasonal_order', 'trend',\n 'measurement_error', 'time_varying_regression',\n 'mle_regression', 'simple_differencing',\n 'enforce_stationarity', 'enforce_invertibility',\n 'hamilton_representation'] + list(kwargs.keys())\n # TODO: I think the kwargs or not attached, need to recover from ???\n\n def _get_init_kwds(self):\n kwds = super(SARIMAX, self)._get_init_kwds()\n\n for key, value in kwds.items():\n if value is None and hasattr(self.ssm, key):\n kwds[key] = getattr(self.ssm, key)\n\n return kwds\n\n def prepare_data(self):\n endog, exog = super(SARIMAX, self).prepare_data()\n\n # Perform simple differencing if requested\n if (self.simple_differencing and\n (self.orig_k_diff > 0 or self.orig_k_seasonal_diff > 0)):\n # Save the original length\n orig_length = endog.shape[0]\n # Perform simple differencing\n endog = diff(endog.copy(), self.orig_k_diff,\n self.orig_k_seasonal_diff, self.seasonal_periods)\n if exog is not None:\n exog = diff(exog.copy(), self.orig_k_diff,\n self.orig_k_seasonal_diff, self.seasonal_periods)\n\n # Reset the ModelData datasets and cache\n self.data.endog, self.data.exog = (\n self.data._convert_endog_exog(endog, exog))\n\n # Reset indexes, if provided\n new_length = self.data.endog.shape[0]\n if self.data.row_labels is not None:\n self.data._cache['row_labels'] = (\n self.data.row_labels[orig_length - new_length:])\n if self._index is not None:\n if self._index_generated:\n self._index = self._index[:-(orig_length - new_length)]\n else:\n self._index 
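# --- Illustrative sketch (pure numpy, toy series) ---
# What `simple_differencing=True` does in prepare_data above: the data are
# differenced once per regular order and once per seasonal order before being
# handed to the state space, losing d + D*s leading observations.
import numpy as np

y = np.arange(1.0, 25.0) ** 2            # toy series, seasonal period s = 4
seasonal_diff = y[4:] - y[:-4]           # D = 1 seasonal difference
regular_diff = np.diff(seasonal_diff)    # d = 1 regular difference
print(len(y) - len(regular_diff))        # 5 == d + D * s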
= self._index[orig_length - new_length:]\n\n # Reset the nobs\n self.nobs = endog.shape[0]\n\n # Cache the arrays for calculating the intercept from the trend\n # components\n time_trend = np.arange(1, self.nobs + 1)\n self._trend_data = np.zeros((self.nobs, self.k_trend))\n i = 0\n for k in self.polynomial_trend.nonzero()[0]:\n if k == 0:\n self._trend_data[:, i] = np.ones(self.nobs,)\n else:\n self._trend_data[:, i] = time_trend**k\n i += 1\n\n return endog, exog\n\n def initialize(self):\n \"\"\"\n Initialize the SARIMAX model.\n\n Notes\n -----\n These initialization steps must occur following the parent class\n __init__ function calls.\n \"\"\"\n super(SARIMAX, self).initialize()\n\n # Internal flag for whether the default mixed approximate diffuse /\n # stationary initialization has been overridden with a user-supplied\n # initialization\n self._manual_initialization = False\n\n # Cache the indexes of included polynomial orders (for update below)\n # (but we do not want the index of the constant term, so exclude the\n # first index)\n self._polynomial_ar_idx = np.nonzero(self.polynomial_ar)[0][1:]\n self._polynomial_ma_idx = np.nonzero(self.polynomial_ma)[0][1:]\n self._polynomial_seasonal_ar_idx = np.nonzero(\n self.polynomial_seasonal_ar\n )[0][1:]\n self._polynomial_seasonal_ma_idx = np.nonzero(\n self.polynomial_seasonal_ma\n )[0][1:]\n\n # Save the indices corresponding to the reduced form lag polynomial\n # parameters in the transition and selection matrices so that they\n # don't have to be recalculated for each update()\n start_row = self._k_states_diff\n end_row = start_row + self.k_ar + self.k_seasonal_ar\n col = self._k_states_diff\n if not self.hamilton_representation:\n self.transition_ar_params_idx = (\n np.s_['transition', start_row:end_row, col]\n )\n else:\n self.transition_ar_params_idx = (\n np.s_['transition', col, start_row:end_row]\n )\n\n start_row += 1\n end_row = start_row + self.k_ma + self.k_seasonal_ma\n col = 0\n if not self.hamilton_representation:\n self.selection_ma_params_idx = (\n np.s_['selection', start_row:end_row, col]\n )\n else:\n self.design_ma_params_idx = (\n np.s_['design', col, start_row:end_row]\n )\n\n # Cache indices for exog variances in the state covariance matrix\n if self.state_regression and self.time_varying_regression:\n idx = np.diag_indices(self.k_posdef)\n self._exog_variance_idx = ('state_cov', idx[0][-self.k_exog:],\n idx[1][-self.k_exog:])\n\n def initialize_known(self, initial_state, initial_state_cov):\n self._manual_initialization = True\n self.ssm.initialize_known(initial_state, initial_state_cov)\n initialize_known.__doc__ = KalmanFilter.initialize_known.__doc__\n\n def initialize_approximate_diffuse(self, variance=None):\n self._manual_initialization = True\n self.ssm.initialize_approximate_diffuse(variance)\n initialize_approximate_diffuse.__doc__ = (\n KalmanFilter.initialize_approximate_diffuse.__doc__\n )\n\n def initialize_stationary(self):\n self._manual_initialization = True\n self.ssm.initialize_stationary()\n initialize_stationary.__doc__ = (\n KalmanFilter.initialize_stationary.__doc__\n )\n\n def initialize_state(self, variance=None, complex_step=False):\n \"\"\"\n Initialize state and state covariance arrays in preparation for the\n Kalman filter.\n\n Parameters\n ----------\n variance : float, optional\n The variance for approximating diffuse initial conditions. 
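# --- Illustrative sketch (pure numpy, standalone version of the loop above) ---
# The cached `_trend_data` matrix holds one column per retained trend term:
# a column of ones for the constant and powers of t for higher-order terms.
# For trend='ct' the trend polynomial is [1, 1]:
import numpy as np

nobs = 5
polynomial_trend = np.array([1, 1])                     # constant + drift
time_trend = np.arange(1, nobs + 1)
trend_data = np.zeros((nobs, int(polynomial_trend.sum())))
for i, k in enumerate(polynomial_trend.nonzero()[0]):
    trend_data[:, i] = np.ones(nobs) if k == 0 else time_trend ** k
print(trend_data)     # columns: [1, 1, 1, 1, 1] and [1, 2, 3, 4, 5]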
Default\n can be found in the Representation class documentation.\n\n Notes\n -----\n Initializes the ARMA component of the state space to the typical\n stationary values and the other components as approximate diffuse.\n\n Can be overridden be calling one of the other initialization methods\n before fitting the model.\n \"\"\"\n # Check if a manual initialization has already been specified\n if self._manual_initialization:\n return\n\n # If we're not enforcing stationarity, then we can't initialize a\n # stationary component\n if not self.enforce_stationarity:\n self.initialize_approximate_diffuse(variance)\n return\n\n # Otherwise, create the initial state and state covariance matrix\n # as from a combination of diffuse and stationary components\n\n # Create initialized non-stationary components\n if variance is None:\n variance = self.ssm.initial_variance\n\n dtype = self.ssm.transition.dtype\n initial_state = np.zeros(self.k_states, dtype=dtype)\n initial_state_cov = np.eye(self.k_states, dtype=dtype) * variance\n\n # Get the offsets (from the bottom or bottom right of the vector /\n # matrix) for the stationary component.\n if self.state_regression:\n start = -(self.k_exog + self._k_order)\n end = -self.k_exog if self.k_exog > 0 else None\n else:\n start = -self._k_order\n end = None\n\n # Add in the initialized stationary components\n if self._k_order > 0:\n transition = self.ssm['transition', start:end, start:end, 0]\n\n # Initial state\n # In the Harvey representation, if we have a trend that\n # is put into the state intercept and means we have a non-zero\n # unconditional mean\n if not self.hamilton_representation and self.k_trend > 0:\n initial_intercept = (\n self['state_intercept', self._k_states_diff, 0])\n initial_mean = (initial_intercept /\n (1 - np.sum(transition[:, 0])))\n initial_state[self._k_states_diff] = initial_mean\n _start = self._k_states_diff + 1\n _end = _start + transition.shape[0] - 1\n initial_state[_start:_end] = transition[1:, 0] * initial_mean\n\n # Initial state covariance\n selection_stationary = self.ssm['selection', start:end, :, 0]\n selected_state_cov_stationary = np.dot(\n np.dot(selection_stationary, self.ssm['state_cov', :, :, 0]),\n selection_stationary.T)\n initial_state_cov_stationary = solve_discrete_lyapunov(\n transition, selected_state_cov_stationary,\n complex_step=complex_step)\n\n initial_state_cov[start:end, start:end] = (\n initial_state_cov_stationary)\n\n self.ssm.initialize_known(initial_state, initial_state_cov)\n\n @property\n def initial_design(self):\n \"\"\"Initial design matrix\"\"\"\n # Basic design matrix\n design = np.r_[\n [1] * self._k_diff,\n ([0] * (self.seasonal_periods - 1) + [1]) * self._k_seasonal_diff,\n [1] * self.state_error, [0] * (self._k_order - 1)\n ]\n\n if len(design) == 0:\n design = np.r_[0]\n\n # If we have exogenous regressors included as part of the state vector\n # then the exogenous data is incorporated as a time-varying component\n # of the design matrix\n if self.state_regression:\n if self._k_order > 0:\n design = np.c_[\n np.reshape(\n np.repeat(design, self.nobs),\n (design.shape[0], self.nobs)\n ).T,\n self.exog\n ].T[None, :, :]\n else:\n design = self.exog.T[None, :, :]\n return design\n\n @property\n def initial_state_intercept(self):\n \"\"\"Initial state intercept vector\"\"\"\n # TODO make this self.k_trend > 1 and adjust the update to take\n # into account that if the trend is a constant, it is not time-varying\n if self.k_trend > 0:\n state_intercept = np.zeros((self.k_states, 
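# --- Illustrative sketch (uses scipy's solver; the source relies on the
# --- statsmodels tools version, which also supports complex-step differentiation) ---
# The stationary block of the initial state covariance solves the discrete
# Lyapunov equation T P T' - P + R Q R' = 0.  For an AR(1) this reduces to the
# familiar sigma2 / (1 - phi**2):
import numpy as np
from scipy.linalg import solve_discrete_lyapunov

phi, sigma2 = 0.8, 2.0
P0 = solve_discrete_lyapunov(np.array([[phi]]), np.array([[sigma2]]))
print(P0)                        # [[5.5555...]]
print(sigma2 / (1 - phi ** 2))   # 5.5555...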
self.nobs))\n else:\n state_intercept = np.zeros((self.k_states,))\n return state_intercept\n\n @property\n def initial_transition(self):\n \"\"\"Initial transition matrix\"\"\"\n transition = np.zeros((self.k_states, self.k_states))\n\n # Exogenous regressors component\n if self.state_regression:\n start = -self.k_exog\n # T_\\beta\n transition[start:, start:] = np.eye(self.k_exog)\n\n # Autoregressive component\n start = -(self.k_exog + self._k_order)\n end = -self.k_exog if self.k_exog > 0 else None\n else:\n # Autoregressive component\n start = -self._k_order\n end = None\n\n # T_c\n if self._k_order > 0:\n transition[start:end, start:end] = companion_matrix(self._k_order)\n if self.hamilton_representation:\n transition[start:end, start:end] = np.transpose(\n companion_matrix(self._k_order)\n )\n\n # Seasonal differencing component\n # T^*\n if self._k_seasonal_diff > 0:\n seasonal_companion = companion_matrix(self.seasonal_periods).T\n seasonal_companion[0, -1] = 1\n for d in range(self._k_seasonal_diff):\n start = self._k_diff + d * self.seasonal_periods\n end = self._k_diff + (d + 1) * self.seasonal_periods\n\n # T_c^*\n transition[start:end, start:end] = seasonal_companion\n\n # i\n for i in range(d + 1, self._k_seasonal_diff):\n transition[start, end + self.seasonal_periods - 1] = 1\n\n # \\iota\n transition[start, self._k_states_diff] = 1\n\n # Differencing component\n if self._k_diff > 0:\n idx = np.triu_indices(self._k_diff)\n # T^**\n transition[idx] = 1\n # [0 1]\n if self.seasonal_periods > 0:\n start = self._k_diff\n end = self._k_states_diff\n transition[:self._k_diff, start:end] = (\n ([0] * (self.seasonal_periods - 1) + [1]) *\n self._k_seasonal_diff)\n # [1 0]\n column = self._k_states_diff\n transition[:self._k_diff, column] = 1\n\n return transition\n\n @property\n def initial_selection(self):\n \"\"\"Initial selection matrix\"\"\"\n if not (self.state_regression and self.time_varying_regression):\n if self.k_posdef > 0:\n selection = np.r_[\n [0] * (self._k_states_diff),\n [1] * (self._k_order > 0), [0] * (self._k_order - 1),\n [0] * ((1 - self.mle_regression) * self.k_exog)\n ][:, None]\n\n if len(selection) == 0:\n selection = np.zeros((self.k_states, self.k_posdef))\n else:\n selection = np.zeros((self.k_states, 0))\n else:\n selection = np.zeros((self.k_states, self.k_posdef))\n # Typical state variance\n if self._k_order > 0:\n selection[0, 0] = 1\n # Time-varying regression coefficient variances\n for i in range(self.k_exog, 0, -1):\n selection[-i, -i] = 1\n return selection\n\n @property\n def _res_classes(self):\n return {'fit': (SARIMAXResults, SARIMAXResultsWrapper)}\n\n @staticmethod\n def _conditional_sum_squares(endog, k_ar, polynomial_ar, k_ma,\n polynomial_ma, k_trend=0, trend_data=None):\n k = 2 * k_ma\n r = max(k + k_ma, k_ar)\n\n k_params_ar = 0 if k_ar == 0 else len(polynomial_ar.nonzero()[0]) - 1\n k_params_ma = 0 if k_ma == 0 else len(polynomial_ma.nonzero()[0]) - 1\n\n residuals = None\n if k_ar + k_ma + k_trend > 0:\n # If we have MA terms, get residuals from an AR(k) model to use\n # as data for conditional sum of squares estimates of the MA\n # parameters\n if k_ma > 0:\n Y = endog[k:]\n X = lagmat(endog, k, trim='both')\n params_ar = np.linalg.pinv(X).dot(Y)\n residuals = Y - np.dot(X, params_ar)\n\n # Run an ARMA(p,q) model using the just computed residuals as data\n Y = endog[r:]\n\n X = np.empty((Y.shape[0], 0))\n if k_trend > 0:\n if trend_data is None:\n raise ValueError('Trend data must be provided if'\n ' `k_trend` > 0.')\n X = 
np.c_[X, trend_data[:(-r if r > 0 else None), :]]\n if k_ar > 0:\n cols = polynomial_ar.nonzero()[0][1:] - 1\n X = np.c_[X, lagmat(endog, k_ar)[r:, cols]]\n if k_ma > 0:\n cols = polynomial_ma.nonzero()[0][1:] - 1\n X = np.c_[X, lagmat(residuals, k_ma)[r-k:, cols]]\n\n # Get the array of [ar_params, ma_params]\n params = np.linalg.pinv(X).dot(Y)\n residuals = Y - np.dot(X, params)\n\n # Default output\n params_trend = []\n params_ar = []\n params_ma = []\n params_variance = []\n\n # Get the params\n offset = 0\n if k_trend > 0:\n params_trend = params[offset:k_trend + offset]\n offset += k_trend\n if k_ar > 0:\n params_ar = params[offset:k_params_ar + offset]\n offset += k_params_ar\n if k_ma > 0:\n params_ma = params[offset:k_params_ma + offset]\n offset += k_params_ma\n if residuals is not None:\n params_variance = (residuals[k_params_ma:]**2).mean()\n\n return (params_trend, params_ar, params_ma,\n params_variance)\n\n @property\n def start_params(self):\n \"\"\"\n Starting parameters for maximum likelihood estimation\n \"\"\"\n\n # Perform differencing if necessary (i.e. if simple differencing is\n # false so that the state-space model will use the entire dataset)\n trend_data = self._trend_data\n if not self.simple_differencing and (\n self._k_diff > 0 or self._k_seasonal_diff > 0):\n endog = diff(self.endog, self._k_diff,\n self._k_seasonal_diff, self.seasonal_periods)\n if self.exog is not None:\n exog = diff(self.exog, self._k_diff,\n self._k_seasonal_diff, self.seasonal_periods)\n else:\n exog = None\n trend_data = trend_data[:endog.shape[0], :]\n else:\n endog = self.endog.copy()\n exog = self.exog.copy() if self.exog is not None else None\n endog = endog.squeeze()\n\n # Although the Kalman filter can deal with missing values in endog,\n # conditional sum of squares cannot\n if np.any(np.isnan(endog)):\n mask = ~np.isnan(endog).squeeze()\n endog = endog[mask]\n if exog is not None:\n exog = exog[mask]\n if trend_data is not None:\n trend_data = trend_data[mask]\n\n # Regression effects via OLS\n params_exog = []\n if self.k_exog > 0:\n params_exog = np.linalg.pinv(exog).dot(endog)\n endog = endog - np.dot(exog, params_exog)\n if self.state_regression:\n params_exog = []\n\n # Non-seasonal ARMA component and trend\n (params_trend, params_ar, params_ma,\n params_variance) = self._conditional_sum_squares(\n endog, self.k_ar, self.polynomial_ar, self.k_ma,\n self.polynomial_ma, self.k_trend, trend_data\n )\n\n # If we have estimated non-stationary start parameters but enforce\n # stationarity is on, raise an error\n invalid_ar = (\n self.k_ar > 0 and\n self.enforce_stationarity and\n not is_invertible(np.r_[1, -params_ar])\n )\n if invalid_ar:\n raise ValueError('Non-stationary starting autoregressive'\n ' parameters found with `enforce_stationarity`'\n ' set to True.')\n\n # If we have estimated non-invertible start parameters but enforce\n # invertibility is on, raise an error\n invalid_ma = (\n self.k_ma > 0 and\n self.enforce_invertibility and\n not is_invertible(np.r_[1, params_ma])\n )\n if invalid_ma:\n raise ValueError('non-invertible starting MA parameters found'\n ' with `enforce_invertibility` set to True.')\n\n # Seasonal Parameters\n _, params_seasonal_ar, params_seasonal_ma, params_seasonal_variance = (\n self._conditional_sum_squares(\n endog, self.k_seasonal_ar, self.polynomial_seasonal_ar,\n self.k_seasonal_ma, self.polynomial_seasonal_ma\n )\n )\n\n # If we have estimated non-stationary start parameters but enforce\n # stationarity is on, raise an error\n 
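# --- Illustrative sketch (simulated toy data; mirrors the pinv-based OLS used
# --- in _conditional_sum_squares above) ---
# Conditional-sum-of-squares starting values for a pure AR model are just an
# OLS regression of y_t on its own lags:
import numpy as np

rng = np.random.RandomState(0)
e = rng.normal(size=500)
y = np.zeros(500)
for t in range(2, 500):                       # AR(2) with phi = (0.6, -0.2)
    y[t] = 0.6 * y[t - 1] - 0.2 * y[t - 2] + e[t]

Y = y[2:]
X = np.column_stack([y[1:-1], y[:-2]])        # lags 1 and 2
params_ar = np.linalg.pinv(X).dot(Y)
print(params_ar)                              # roughly [ 0.6, -0.2]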
invalid_seasonal_ar = (\n self.k_seasonal_ar > 0 and\n self.enforce_stationarity and\n not is_invertible(np.r_[1, -params_seasonal_ar])\n )\n if invalid_seasonal_ar:\n raise ValueError('Non-stationary starting autoregressive'\n ' parameters found with `enforce_stationarity`'\n ' set to True.')\n\n # If we have estimated non-invertible start parameters but enforce\n # invertibility is on, raise an error\n invalid_seasonal_ma = (\n self.k_seasonal_ma > 0 and\n self.enforce_invertibility and\n not is_invertible(np.r_[1, params_seasonal_ma])\n )\n if invalid_seasonal_ma:\n raise ValueError('non-invertible starting seasonal moving average'\n ' parameters found with `enforce_invertibility`'\n ' set to True.')\n\n # Variances\n params_exog_variance = []\n if self.state_regression and self.time_varying_regression:\n # TODO how to set the initial variance parameters?\n params_exog_variance = [1] * self.k_exog\n if self.state_error and params_variance == []:\n if not params_seasonal_variance == []:\n params_variance = params_seasonal_variance\n elif self.k_exog > 0:\n params_variance = np.inner(endog, endog)\n else:\n params_variance = np.inner(endog, endog) / self.nobs\n params_measurement_variance = 1 if self.measurement_error else []\n\n # Combine all parameters\n return np.r_[\n params_trend,\n params_exog,\n params_ar,\n params_ma,\n params_seasonal_ar,\n params_seasonal_ma,\n params_exog_variance,\n params_measurement_variance,\n params_variance\n ]\n\n @property\n def endog_names(self, latex=False):\n \"\"\"Names of endogenous variables\"\"\"\n diff = ''\n if self.k_diff > 0:\n if self.k_diff == 1:\n diff = '\\Delta' if latex else 'D'\n else:\n diff = ('\\Delta^%d' if latex else 'D%d') % self.k_diff\n\n seasonal_diff = ''\n if self.k_seasonal_diff > 0:\n if self.k_seasonal_diff == 1:\n seasonal_diff = (('\\Delta_%d' if latex else 'DS%d') %\n (self.seasonal_periods))\n else:\n seasonal_diff = (('\\Delta_%d^%d' if latex else 'D%dS%d') %\n (self.k_seasonal_diff, self.seasonal_periods))\n endog_diff = self.simple_differencing\n if endog_diff and self.k_diff > 0 and self.k_seasonal_diff > 0:\n return (('%s%s %s' if latex else '%s.%s.%s') %\n (diff, seasonal_diff, self.data.ynames))\n elif endog_diff and self.k_diff > 0:\n return (('%s %s' if latex else '%s.%s') %\n (diff, self.data.ynames))\n elif endog_diff and self.k_seasonal_diff > 0:\n return (('%s %s' if latex else '%s.%s') %\n (seasonal_diff, self.data.ynames))\n else:\n return self.data.ynames\n\n params_complete = [\n 'trend', 'exog', 'ar', 'ma', 'seasonal_ar', 'seasonal_ma',\n 'exog_variance', 'measurement_variance', 'variance'\n ]\n\n @property\n def param_terms(self):\n \"\"\"\n List of parameters actually included in the model, in sorted order.\n\n TODO Make this an OrderedDict with slice or indices as the values.\n \"\"\"\n model_orders = self.model_orders\n # Get basic list from model orders\n params = [\n order for order in self.params_complete\n if model_orders[order] > 0\n ]\n # k_exog may be positive without associated parameters if it is in the\n # state vector\n if 'exog' in params and not self.mle_regression:\n params.remove('exog')\n\n return params\n\n @property\n def param_names(self):\n \"\"\"\n List of human readable parameter names (for parameters actually\n included in the model).\n \"\"\"\n params_sort_order = self.param_terms\n model_names = self.model_names\n return [\n name for param in params_sort_order for name in model_names[param]\n ]\n\n @property\n def model_orders(self):\n \"\"\"\n The orders of each of the 
polynomials in the model.\n \"\"\"\n return {\n 'trend': self.k_trend,\n 'exog': self.k_exog,\n 'ar': self.k_ar,\n 'ma': self.k_ma,\n 'seasonal_ar': self.k_seasonal_ar,\n 'seasonal_ma': self.k_seasonal_ma,\n 'reduced_ar': self.k_ar + self.k_seasonal_ar,\n 'reduced_ma': self.k_ma + self.k_seasonal_ma,\n 'exog_variance': self.k_exog if (\n self.state_regression and self.time_varying_regression) else 0,\n 'measurement_variance': int(self.measurement_error),\n 'variance': int(self.state_error),\n }\n\n @property\n def model_names(self):\n \"\"\"\n The plain text names of all possible model parameters.\n \"\"\"\n return self._get_model_names(latex=False)\n\n @property\n def model_latex_names(self):\n \"\"\"\n The latex names of all possible model parameters.\n \"\"\"\n return self._get_model_names(latex=True)\n\n def _get_model_names(self, latex=False):\n names = {\n 'trend': None,\n 'exog': None,\n 'ar': None,\n 'ma': None,\n 'seasonal_ar': None,\n 'seasonal_ma': None,\n 'reduced_ar': None,\n 'reduced_ma': None,\n 'exog_variance': None,\n 'measurement_variance': None,\n 'variance': None,\n }\n\n # Trend\n if self.k_trend > 0:\n trend_template = 't_%d' if latex else 'trend.%d'\n names['trend'] = []\n for i in self.polynomial_trend.nonzero()[0]:\n if i == 0:\n names['trend'].append('intercept')\n elif i == 1:\n names['trend'].append('drift')\n else:\n names['trend'].append(trend_template % i)\n\n # Exogenous coefficients\n if self.k_exog > 0:\n names['exog'] = self.exog_names\n\n # Autoregressive\n if self.k_ar > 0:\n ar_template = '$\\\\phi_%d$' if latex else 'ar.L%d'\n names['ar'] = []\n for i in self.polynomial_ar.nonzero()[0][1:]:\n names['ar'].append(ar_template % i)\n\n # Moving Average\n if self.k_ma > 0:\n ma_template = '$\\\\theta_%d$' if latex else 'ma.L%d'\n names['ma'] = []\n for i in self.polynomial_ma.nonzero()[0][1:]:\n names['ma'].append(ma_template % i)\n\n # Seasonal Autoregressive\n if self.k_seasonal_ar > 0:\n seasonal_ar_template = (\n '$\\\\tilde \\\\phi_%d$' if latex else 'ar.S.L%d'\n )\n names['seasonal_ar'] = []\n for i in self.polynomial_seasonal_ar.nonzero()[0][1:]:\n names['seasonal_ar'].append(seasonal_ar_template % i)\n\n # Seasonal Moving Average\n if self.k_seasonal_ma > 0:\n seasonal_ma_template = (\n '$\\\\tilde \\\\theta_%d$' if latex else 'ma.S.L%d'\n )\n names['seasonal_ma'] = []\n for i in self.polynomial_seasonal_ma.nonzero()[0][1:]:\n names['seasonal_ma'].append(seasonal_ma_template % i)\n\n # Reduced Form Autoregressive\n if self.k_ar > 0 or self.k_seasonal_ar > 0:\n reduced_polynomial_ar = reduced_polynomial_ar = -np.polymul(\n self.polynomial_ar, self.polynomial_seasonal_ar\n )\n ar_template = '$\\\\Phi_%d$' if latex else 'ar.R.L%d'\n names['reduced_ar'] = []\n for i in reduced_polynomial_ar.nonzero()[0][1:]:\n names['reduced_ar'].append(ar_template % i)\n\n # Reduced Form Moving Average\n if self.k_ma > 0 or self.k_seasonal_ma > 0:\n reduced_polynomial_ma = np.polymul(\n self.polynomial_ma, self.polynomial_seasonal_ma\n )\n ma_template = '$\\\\Theta_%d$' if latex else 'ma.R.L%d'\n names['reduced_ma'] = []\n for i in reduced_polynomial_ma.nonzero()[0][1:]:\n names['reduced_ma'].append(ma_template % i)\n\n # Exogenous variances\n if self.state_regression and self.time_varying_regression:\n exog_var_template = '$\\\\sigma_\\\\text{%s}^2$' if latex else 'var.%s'\n names['exog_variance'] = [\n exog_var_template % exog_name for exog_name in self.exog_names\n ]\n\n # Measurement error variance\n if self.measurement_error:\n meas_var_tpl = (\n 
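# --- Illustrative sketch (pure numpy, made-up coefficients) ---
# The reduced-form names above come from multiplying the regular and seasonal
# lag polynomials.  Because both are stored lowest-degree-first, np.polymul
# (plain convolution) gives the right coefficients either way:
import numpy as np

polynomial_ar = np.r_[1., -0.5]                     # 1 - 0.5 L
polynomial_seasonal_ar = np.r_[1., 0, 0, 0, -0.3]   # 1 - 0.3 L^4
reduced = np.polymul(polynomial_ar, polynomial_seasonal_ar)
print(reduced)   # [ 1.  -0.5  0.  0.  -0.3  0.15]  -> ar.R.L1, ar.R.L4, ar.R.L5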
'$\\\\sigma_\\\\eta^2$' if latex else 'var.measurement_error'\n )\n names['measurement_variance'] = [meas_var_tpl]\n\n # State variance\n if self.state_error:\n var_tpl = '$\\\\sigma_\\\\zeta^2$' if latex else 'sigma2'\n names['variance'] = [var_tpl]\n\n return names\n\n def transform_params(self, unconstrained):\n \"\"\"\n Transform unconstrained parameters used by the optimizer to constrained\n parameters used in likelihood evaluation.\n\n Used primarily to enforce stationarity of the autoregressive lag\n polynomial, invertibility of the moving average lag polynomial, and\n positive variance parameters.\n\n Parameters\n ----------\n unconstrained : array_like\n Unconstrained parameters used by the optimizer.\n\n Returns\n -------\n constrained : array_like\n Constrained parameters used in likelihood evaluation.\n\n Notes\n -----\n If the lag polynomial has non-consecutive powers (so that the\n coefficient is zero on some element of the polynomial), then the\n constraint function is not onto the entire space of invertible\n polynomials, although it only excludes a very small portion very close\n to the invertibility boundary.\n \"\"\"\n unconstrained = np.array(unconstrained, ndmin=1)\n constrained = np.zeros(unconstrained.shape, unconstrained.dtype)\n\n start = end = 0\n\n # Retain the trend parameters\n if self.k_trend > 0:\n end += self.k_trend\n constrained[start:end] = unconstrained[start:end]\n start += self.k_trend\n\n # Retain any MLE regression coefficients\n if self.mle_regression:\n end += self.k_exog\n constrained[start:end] = unconstrained[start:end]\n start += self.k_exog\n\n # Transform the AR parameters (phi) to be stationary\n if self.k_ar_params > 0:\n end += self.k_ar_params\n if self.enforce_stationarity:\n constrained[start:end] = (\n constrain_stationary_univariate(unconstrained[start:end])\n )\n else:\n constrained[start:end] = unconstrained[start:end]\n start += self.k_ar_params\n\n # Transform the MA parameters (theta) to be invertible\n if self.k_ma_params > 0:\n end += self.k_ma_params\n if self.enforce_invertibility:\n constrained[start:end] = (\n -constrain_stationary_univariate(unconstrained[start:end])\n )\n else:\n constrained[start:end] = unconstrained[start:end]\n start += self.k_ma_params\n\n # Transform the seasonal AR parameters (\\tilde phi) to be stationary\n if self.k_seasonal_ar > 0:\n end += self.k_seasonal_ar_params\n if self.enforce_stationarity:\n constrained[start:end] = (\n constrain_stationary_univariate(unconstrained[start:end])\n )\n else:\n constrained[start:end] = unconstrained[start:end]\n start += self.k_seasonal_ar_params\n\n # Transform the seasonal MA parameters (\\tilde theta) to be invertible\n if self.k_seasonal_ma_params > 0:\n end += self.k_seasonal_ma_params\n if self.enforce_invertibility:\n constrained[start:end] = (\n -constrain_stationary_univariate(unconstrained[start:end])\n )\n else:\n constrained[start:end] = unconstrained[start:end]\n start += self.k_seasonal_ma_params\n\n # Transform the standard deviation parameters to be positive\n if self.state_regression and self.time_varying_regression:\n end += self.k_exog\n constrained[start:end] = unconstrained[start:end]**2\n start += self.k_exog\n if self.measurement_error:\n constrained[start] = unconstrained[start]**2\n start += 1\n end += 1\n if self.state_error:\n constrained[start] = unconstrained[start]**2\n # start += 1\n # end += 1\n\n return constrained\n\n def untransform_params(self, constrained):\n \"\"\"\n Transform constrained parameters used in likelihood 
evaluation\n to unconstrained parameters used by the optimizer\n\n Used primarily to reverse enforcement of stationarity of the\n autoregressive lag polynomial and invertibility of the moving average\n lag polynomial.\n\n Parameters\n ----------\n constrained : array_like\n Constrained parameters used in likelihood evaluation.\n\n Returns\n -------\n constrained : array_like\n Unconstrained parameters used by the optimizer.\n\n Notes\n -----\n If the lag polynomial has non-consecutive powers (so that the\n coefficient is zero on some element of the polynomial), then the\n constraint function is not onto the entire space of invertible\n polynomials, although it only excludes a very small portion very close\n to the invertibility boundary.\n \"\"\"\n constrained = np.array(constrained, ndmin=1)\n unconstrained = np.zeros(constrained.shape, constrained.dtype)\n\n start = end = 0\n\n # Retain the trend parameters\n if self.k_trend > 0:\n end += self.k_trend\n unconstrained[start:end] = constrained[start:end]\n start += self.k_trend\n\n # Retain any MLE regression coefficients\n if self.mle_regression:\n end += self.k_exog\n unconstrained[start:end] = constrained[start:end]\n start += self.k_exog\n\n # Transform the AR parameters (phi) to be stationary\n if self.k_ar_params > 0:\n end += self.k_ar_params\n if self.enforce_stationarity:\n unconstrained[start:end] = (\n unconstrain_stationary_univariate(constrained[start:end])\n )\n else:\n unconstrained[start:end] = constrained[start:end]\n start += self.k_ar_params\n\n # Transform the MA parameters (theta) to be invertible\n if self.k_ma_params > 0:\n end += self.k_ma_params\n if self.enforce_invertibility:\n unconstrained[start:end] = (\n unconstrain_stationary_univariate(-constrained[start:end])\n )\n else:\n unconstrained[start:end] = constrained[start:end]\n start += self.k_ma_params\n\n # Transform the seasonal AR parameters (\\tilde phi) to be stationary\n if self.k_seasonal_ar > 0:\n end += self.k_seasonal_ar_params\n if self.enforce_stationarity:\n unconstrained[start:end] = (\n unconstrain_stationary_univariate(constrained[start:end])\n )\n else:\n unconstrained[start:end] = constrained[start:end]\n start += self.k_seasonal_ar_params\n\n # Transform the seasonal MA parameters (\\tilde theta) to be invertible\n if self.k_seasonal_ma_params > 0:\n end += self.k_seasonal_ma_params\n if self.enforce_invertibility:\n unconstrained[start:end] = (\n unconstrain_stationary_univariate(-constrained[start:end])\n )\n else:\n unconstrained[start:end] = constrained[start:end]\n start += self.k_seasonal_ma_params\n\n # Untransform the standard deviation\n if self.state_regression and self.time_varying_regression:\n end += self.k_exog\n unconstrained[start:end] = constrained[start:end]**0.5\n start += self.k_exog\n if self.measurement_error:\n unconstrained[start] = constrained[start]**0.5\n start += 1\n end += 1\n if self.state_error:\n unconstrained[start] = constrained[start]**0.5\n # start += 1\n # end += 1\n\n return unconstrained\n\n def update(self, params, transformed=True, complex_step=False):\n \"\"\"\n Update the parameters of the model\n\n Updates the representation matrices to fill in the new parameter\n values.\n\n Parameters\n ----------\n params : array_like\n Array of new parameters.\n transformed : boolean, optional\n Whether or not `params` is already transformed. If set to False,\n `transform_params` is called. 
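# --- Illustrative sketch (made-up unconstrained values; the helpers used here
# --- are the public functions in statsmodels.tsa.statespace.tools) ---
# transform_params/untransform_params above delegate to these helpers, which map
# arbitrary optimizer values into the stationarity region and back:
import numpy as np
from statsmodels.tsa.statespace.tools import (
    constrain_stationary_univariate, unconstrain_stationary_univariate,
    is_invertible)

u = np.array([2.0, -1.5])                    # arbitrary optimizer values
c = constrain_stationary_univariate(u)
print(is_invertible(np.r_[1, -c]))           # True: 1 - c_1 L - c_2 L^2 is stationary
print(np.allclose(unconstrain_stationary_univariate(c), u))   # True (round trip)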
Default is True..\n\n Returns\n -------\n params : array_like\n Array of parameters.\n \"\"\"\n params = super(SARIMAX, self).update(params, transformed=transformed,\n complex_step=False)\n\n params_trend = None\n params_exog = None\n params_ar = None\n params_ma = None\n params_seasonal_ar = None\n params_seasonal_ma = None\n params_exog_variance = None\n params_measurement_variance = None\n params_variance = None\n\n # Extract the parameters\n start = end = 0\n end += self.k_trend\n params_trend = params[start:end]\n start += self.k_trend\n if self.mle_regression:\n end += self.k_exog\n params_exog = params[start:end]\n start += self.k_exog\n end += self.k_ar_params\n params_ar = params[start:end]\n start += self.k_ar_params\n end += self.k_ma_params\n params_ma = params[start:end]\n start += self.k_ma_params\n end += self.k_seasonal_ar_params\n params_seasonal_ar = params[start:end]\n start += self.k_seasonal_ar_params\n end += self.k_seasonal_ma_params\n params_seasonal_ma = params[start:end]\n start += self.k_seasonal_ma_params\n if self.state_regression and self.time_varying_regression:\n end += self.k_exog\n params_exog_variance = params[start:end]\n start += self.k_exog\n if self.measurement_error:\n params_measurement_variance = params[start]\n start += 1\n end += 1\n if self.state_error:\n params_variance = params[start]\n # start += 1\n # end += 1\n\n # Update lag polynomials\n if self.k_ar > 0:\n if self.polynomial_ar.dtype == params.dtype:\n self.polynomial_ar[self._polynomial_ar_idx] = -params_ar\n else:\n polynomial_ar = self.polynomial_ar.real.astype(params.dtype)\n polynomial_ar[self._polynomial_ar_idx] = -params_ar\n self.polynomial_ar = polynomial_ar\n\n if self.k_ma > 0:\n if self.polynomial_ma.dtype == params.dtype:\n self.polynomial_ma[self._polynomial_ma_idx] = params_ma\n else:\n polynomial_ma = self.polynomial_ma.real.astype(params.dtype)\n polynomial_ma[self._polynomial_ma_idx] = params_ma\n self.polynomial_ma = polynomial_ma\n\n if self.k_seasonal_ar > 0:\n idx = self._polynomial_seasonal_ar_idx\n if self.polynomial_seasonal_ar.dtype == params.dtype:\n self.polynomial_seasonal_ar[idx] = -params_seasonal_ar\n else:\n polynomial_seasonal_ar = (\n self.polynomial_seasonal_ar.real.astype(params.dtype)\n )\n polynomial_seasonal_ar[idx] = -params_seasonal_ar\n self.polynomial_seasonal_ar = polynomial_seasonal_ar\n\n if self.k_seasonal_ma > 0:\n idx = self._polynomial_seasonal_ma_idx\n if self.polynomial_seasonal_ma.dtype == params.dtype:\n self.polynomial_seasonal_ma[idx] = params_seasonal_ma\n else:\n polynomial_seasonal_ma = (\n self.polynomial_seasonal_ma.real.astype(params.dtype)\n )\n polynomial_seasonal_ma[idx] = params_seasonal_ma\n self.polynomial_seasonal_ma = polynomial_seasonal_ma\n\n # Get the reduced form lag polynomial terms by multiplying the regular\n # and seasonal lag polynomials\n # Note: that although the numpy np.polymul examples assume that they\n # are ordered from highest degree to lowest, whereas our are from\n # lowest to highest, it does not matter.\n if self.k_seasonal_ar > 0:\n reduced_polynomial_ar = -np.polymul(\n self.polynomial_ar, self.polynomial_seasonal_ar\n )\n else:\n reduced_polynomial_ar = -self.polynomial_ar\n if self.k_seasonal_ma > 0:\n reduced_polynomial_ma = np.polymul(\n self.polynomial_ma, self.polynomial_seasonal_ma\n )\n else:\n reduced_polynomial_ma = self.polynomial_ma\n\n # Observation intercept\n # Exogenous data with MLE estimation of parameters enters through a\n # time-varying observation intercept (is equivalent 
to simply\n # subtracting it out of the endogenous variable first)\n if self.mle_regression:\n self.ssm['obs_intercept'] = np.dot(self.exog, params_exog)[None, :]\n\n # State intercept (Harvey) or additional observation intercept\n # (Hamilton)\n # SARIMA trend enters through the a time-varying state intercept,\n # associated with the first row of the stationary component of the\n # state vector (i.e. the first element of the state vector following\n # any differencing elements)\n if self.k_trend > 0:\n data = np.dot(self._trend_data, params_trend).astype(params.dtype)\n if not self.hamilton_representation:\n self.ssm['state_intercept', self._k_states_diff, :] = data\n else:\n # The way the trend enters in the Hamilton representation means\n # that the parameter is not an ``intercept'' but instead the\n # mean of the process. The trend values in `data` are meant for\n # an intercept, and so must be transformed to represent the\n # mean instead\n if self.hamilton_representation:\n data /= np.sum(-reduced_polynomial_ar)\n\n # If we already set the observation intercept for MLE\n # regression, just add to it\n if self.mle_regression:\n self.ssm.obs_intercept += data[None, :]\n # Otherwise set it directly\n else:\n self.ssm['obs_intercept'] = data[None, :]\n\n # Observation covariance matrix\n if self.measurement_error:\n self.ssm['obs_cov', 0, 0] = params_measurement_variance\n\n # Transition matrix\n if self.k_ar > 0 or self.k_seasonal_ar > 0:\n self.ssm[self.transition_ar_params_idx] = reduced_polynomial_ar[1:]\n elif not self.ssm.transition.dtype == params.dtype:\n # This is required if the transition matrix is not really in use\n # (e.g. for an MA(q) process) so that it's dtype never changes as\n # the parameters' dtype changes. This changes the dtype manually.\n self.ssm['transition'] = self.ssm['transition'].real.astype(\n params.dtype)\n\n # Selection matrix (Harvey) or Design matrix (Hamilton)\n if self.k_ma > 0 or self.k_seasonal_ma > 0:\n if not self.hamilton_representation:\n self.ssm[self.selection_ma_params_idx] = (\n reduced_polynomial_ma[1:]\n )\n else:\n self.ssm[self.design_ma_params_idx] = reduced_polynomial_ma[1:]\n\n # State covariance matrix\n if self.k_posdef > 0:\n self.ssm['state_cov', 0, 0] = params_variance\n if self.state_regression and self.time_varying_regression:\n self.ssm[self._exog_variance_idx] = params_exog_variance\n\n # Initialize\n if not self._manual_initialization:\n self.initialize_state(complex_step=complex_step)\n\n return params\n\n\nclass SARIMAXResults(MLEResults):\n \"\"\"\n Class to hold results from fitting an SARIMAX model.\n\n Parameters\n ----------\n model : SARIMAX instance\n The fitted model instance\n\n Attributes\n ----------\n specification : dictionary\n Dictionary including all attributes from the SARIMAX model instance.\n polynomial_ar : array\n Array containing autoregressive lag polynomial coefficients,\n ordered from lowest degree to highest. Initialized with ones, unless\n a coefficient is constrained to be zero (in which case it is zero).\n polynomial_ma : array\n Array containing moving average lag polynomial coefficients,\n ordered from lowest degree to highest. Initialized with ones, unless\n a coefficient is constrained to be zero (in which case it is zero).\n polynomial_seasonal_ar : array\n Array containing seasonal autoregressive lag polynomial coefficients,\n ordered from lowest degree to highest. 
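# --- Illustrative sketch (toy data and parameter values; inspects the internal
# --- ssm representation, so indexing details may differ across versions) ---
# For a pure AR(2) in the default Harvey representation, update() writes the
# reduced-form AR coefficients into the first column of the transition matrix
# and sigma2 into the state covariance:
import numpy as np
from statsmodels.tsa.statespace.sarimax import SARIMAX

y = np.random.RandomState(0).normal(size=50)
mod = SARIMAX(y, order=(2, 0, 0))
mod.update(np.array([0.5, -0.3, 1.0]))       # [ar.L1, ar.L2, sigma2]
print(mod.polynomial_ar)                     # [ 1.  -0.5  0.3]
print(mod.ssm['transition'][:, 0, 0])        # [ 0.5 -0.3]
print(mod.ssm['state_cov'][:, :, 0])         # [[1.]]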
Initialized with ones, unless\n a coefficient is constrained to be zero (in which case it is zero).\n polynomial_seasonal_ma : array\n Array containing seasonal moving average lag polynomial coefficients,\n ordered from lowest degree to highest. Initialized with ones, unless\n a coefficient is constrained to be zero (in which case it is zero).\n polynomial_trend : array\n Array containing trend polynomial coefficients, ordered from lowest\n degree to highest. Initialized with ones, unless a coefficient is\n constrained to be zero (in which case it is zero).\n model_orders : list of int\n The orders of each of the polynomials in the model.\n param_terms : list of str\n List of parameters actually included in the model, in sorted order.\n\n See Also\n --------\n statsmodels.tsa.statespace.kalman_filter.FilterResults\n statsmodels.tsa.statespace.mlemodel.MLEResults\n \"\"\"\n def __init__(self, model, params, filter_results, cov_type='opg',\n **kwargs):\n super(SARIMAXResults, self).__init__(model, params, filter_results,\n cov_type, **kwargs)\n\n self.df_resid = np.inf # attribute required for wald tests\n\n # Save _init_kwds\n self._init_kwds = self.model._get_init_kwds()\n\n # Save model specification\n self.specification = Bunch(**{\n # Set additional model parameters\n 'seasonal_periods': self.model.seasonal_periods,\n 'measurement_error': self.model.measurement_error,\n 'time_varying_regression': self.model.time_varying_regression,\n 'simple_differencing': self.model.simple_differencing,\n 'enforce_stationarity': self.model.enforce_stationarity,\n 'enforce_invertibility': self.model.enforce_invertibility,\n 'hamilton_representation': self.model.hamilton_representation,\n\n 'order': self.model.order,\n 'seasonal_order': self.model.seasonal_order,\n\n # Model order\n 'k_diff': self.model.k_diff,\n 'k_seasonal_diff': self.model.k_seasonal_diff,\n 'k_ar': self.model.k_ar,\n 'k_ma': self.model.k_ma,\n 'k_seasonal_ar': self.model.k_seasonal_ar,\n 'k_seasonal_ma': self.model.k_seasonal_ma,\n\n # Param Numbers\n 'k_ar_params': self.model.k_ar_params,\n 'k_ma_params': self.model.k_ma_params,\n\n # Trend / Regression\n 'trend': self.model.trend,\n 'k_trend': self.model.k_trend,\n 'k_exog': self.model.k_exog,\n\n 'mle_regression': self.model.mle_regression,\n 'state_regression': self.model.state_regression,\n })\n\n # Polynomials\n self.polynomial_trend = self.model.polynomial_trend\n self.polynomial_ar = self.model.polynomial_ar\n self.polynomial_ma = self.model.polynomial_ma\n self.polynomial_seasonal_ar = self.model.polynomial_seasonal_ar\n self.polynomial_seasonal_ma = self.model.polynomial_seasonal_ma\n self.polynomial_reduced_ar = np.polymul(\n self.polynomial_ar, self.polynomial_seasonal_ar\n )\n self.polynomial_reduced_ma = np.polymul(\n self.polynomial_ma, self.polynomial_seasonal_ma\n )\n\n # Distinguish parameters\n self.model_orders = self.model.model_orders\n self.param_terms = self.model.param_terms\n start = end = 0\n for name in self.param_terms:\n if name == 'ar':\n k = self.model.k_ar_params\n elif name == 'ma':\n k = self.model.k_ma_params\n elif name == 'seasonal_ar':\n k = self.model.k_seasonal_ar_params\n elif name == 'seasonal_ma':\n k = self.model.k_seasonal_ma_params\n else:\n k = self.model_orders[name]\n end += k\n setattr(self, '_params_%s' % name, self.params[start:end])\n start += k\n\n # Handle removing data\n self._data_attr_model.extend(['orig_endog', 'orig_exog'])\n\n @cache_readonly\n def arroots(self):\n \"\"\"\n (array) Roots of the reduced form autoregressive 
lag polynomial\n \"\"\"\n return np.roots(self.polynomial_reduced_ar)**-1\n\n @cache_readonly\n def maroots(self):\n \"\"\"\n (array) Roots of the reduced form moving average lag polynomial\n \"\"\"\n return np.roots(self.polynomial_reduced_ma)**-1\n\n @cache_readonly\n def arfreq(self):\n \"\"\"\n (array) Frequency of the roots of the reduced form autoregressive\n lag polynomial\n \"\"\"\n z = self.arroots\n if not z.size:\n return\n return np.arctan2(z.imag, z.real) / (2 * np.pi)\n\n @cache_readonly\n def mafreq(self):\n \"\"\"\n (array) Frequency of the roots of the reduced form moving average\n lag polynomial\n \"\"\"\n z = self.maroots\n if not z.size:\n return\n return np.arctan2(z.imag, z.real) / (2 * np.pi)\n\n @cache_readonly\n def arparams(self):\n \"\"\"\n (array) Autoregressive parameters actually estimated in the model.\n Does not include seasonal autoregressive parameters (see\n `seasonalarparams`) or parameters whose values are constrained to be\n zero.\n \"\"\"\n return self._params_ar\n\n @cache_readonly\n def seasonalarparams(self):\n \"\"\"\n (array) Seasonal autoregressive parameters actually estimated in the\n model. Does not include nonseasonal autoregressive parameters (see\n `arparams`) or parameters whose values are constrained to be zero.\n \"\"\"\n return self._params_seasonal_ar\n\n @cache_readonly\n def maparams(self):\n \"\"\"\n (array) Moving average parameters actually estimated in the model.\n Does not include seasonal moving average parameters (see\n `seasonalmaparams`) or parameters whose values are constrained to be\n zero.\n \"\"\"\n return self._params_ma\n\n @cache_readonly\n def seasonalmaparams(self):\n \"\"\"\n (array) Seasonal moving average parameters actually estimated in the\n model. Does not include nonseasonal moving average parameters (see\n `maparams`) or parameters whose values are constrained to be zero.\n \"\"\"\n return self._params_seasonal_ma\n\n def get_prediction(self, start=None, end=None, dynamic=False, index=None,\n exog=None, **kwargs):\n \"\"\"\n In-sample prediction and out-of-sample forecasting\n\n Parameters\n ----------\n start : int, str, or datetime, optional\n Zero-indexed observation number at which to start forecasting, ie.,\n the first forecast is start. Can also be a date string to\n parse or a datetime type. Default is the the zeroth observation.\n end : int, str, or datetime, optional\n Zero-indexed observation number at which to end forecasting, ie.,\n the first forecast is start. Can also be a date string to\n parse or a datetime type. However, if the dates index does not\n have a fixed frequency, end must be an integer index if you\n want out of sample prediction. Default is the last observation in\n the sample.\n exog : array_like, optional\n If the model includes exogenous regressors, you must provide\n exactly enough out-of-sample values for the exogenous variables if\n end is beyond the last observation in the sample.\n dynamic : boolean, int, str, or datetime, optional\n Integer offset relative to `start` at which to begin dynamic\n prediction. 
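# --- Illustrative sketch (pure numpy, made-up reduced AR polynomial) ---
# arroots/arfreq above invert the roots of the lowest-degree-first polynomial,
# so stationarity corresponds to |root| > 1.  For 1 - 1.0 L + 0.5 L^2:
import numpy as np

polynomial_reduced_ar = np.array([1., -1.0, 0.5])
arroots = np.roots(polynomial_reduced_ar) ** -1
print(arroots)                                          # 1 -/+ 1j (order may vary)
print(np.abs(arroots))                                  # ~1.414 > 1 -> stationary
print(np.arctan2(arroots.imag, arroots.real) / (2 * np.pi))   # +/- 0.125 cycles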
Can also be an absolute date string to parse or a\n datetime type (these are not interpreted as offsets).\n Prior to this observation, true endogenous values will be used for\n prediction; starting with this observation and continuing through\n the end of prediction, forecasted endogenous values will be used\n instead.\n full_results : boolean, optional\n If True, returns a FilterResults instance; if False returns a\n tuple with forecasts, the forecast errors, and the forecast error\n covariance matrices. Default is False.\n **kwargs\n Additional arguments may required for forecasting beyond the end\n of the sample. See `FilterResults.predict` for more details.\n\n Returns\n -------\n forecast : array\n Array of out of sample forecasts.\n \"\"\"\n if start is None:\n start = self.model._index[0]\n\n # Handle start, end, dynamic\n _start, _end, _out_of_sample, prediction_index = (\n self.model._get_prediction_index(start, end, index, silent=True))\n\n # Handle exogenous parameters\n if _out_of_sample and (self.model.k_exog + self.model.k_trend > 0):\n # Create a new faux SARIMAX model for the extended dataset\n nobs = self.model.data.orig_endog.shape[0] + _out_of_sample\n endog = np.zeros((nobs, self.model.k_endog))\n\n if self.model.k_exog > 0:\n if exog is None:\n raise ValueError('Out-of-sample forecasting in a model'\n ' with a regression component requires'\n ' additional exogenous values via the'\n ' `exog` argument.')\n exog = np.array(exog)\n required_exog_shape = (_out_of_sample, self.model.k_exog)\n if not exog.shape == required_exog_shape:\n raise ValueError('Provided exogenous values are not of the'\n ' appropriate shape. Required %s, got %s.'\n % (str(required_exog_shape),\n str(exog.shape)))\n exog = np.c_[self.model.data.orig_exog.T, exog.T].T\n\n model_kwargs = self._init_kwds.copy()\n model_kwargs['exog'] = exog\n model = SARIMAX(endog, **model_kwargs)\n model.update(self.params)\n\n # Set the kwargs with the update time-varying state space\n # representation matrices\n for name in self.filter_results.shapes.keys():\n if name == 'obs':\n continue\n mat = getattr(model.ssm, name)\n if mat.shape[-1] > 1:\n if len(mat.shape) == 2:\n kwargs[name] = mat[:, -_out_of_sample:]\n else:\n kwargs[name] = mat[:, :, -_out_of_sample:]\n elif self.model.k_exog == 0 and exog is not None:\n warn('Exogenous array provided to predict, but additional data not'\n ' required. 
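# --- Illustrative sketch (simulated toy data; variable names are made up) ---
# Out-of-sample forecasting with a regression component requires future values
# of the exogenous variables, shaped (steps, k_exog):
import numpy as np
from statsmodels.tsa.statespace.sarimax import SARIMAX

rng = np.random.RandomState(0)
x = rng.normal(size=120)
y = 0.5 * x + rng.normal(size=120)

mod = SARIMAX(y[:100], exog=x[:100, None], order=(1, 0, 0))
res = mod.fit(disp=False)
fc = res.get_forecast(steps=20, exog=x[100:, None])   # future exog is required
print(fc.predicted_mean.shape)                        # (20,)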
`exog` argument ignored.', ValueWarning)\n\n return super(SARIMAXResults, self).get_prediction(\n start=start, end=end, dynamic=dynamic, index=index, exog=exog,\n **kwargs)\n\n def summary(self, alpha=.05, start=None):\n # Create the model name\n\n # See if we have an ARIMA component\n order = ''\n if self.model.k_ar + self.model.k_diff + self.model.k_ma > 0:\n if self.model.k_ar == self.model.k_ar_params:\n order_ar = self.model.k_ar\n else:\n order_ar = tuple(self.polynomial_ar.nonzero()[0][1:])\n if self.model.k_ma == self.model.k_ma_params:\n order_ma = self.model.k_ma\n else:\n order_ma = tuple(self.polynomial_ma.nonzero()[0][1:])\n # If there is simple differencing, then that is reflected in the\n # dependent variable name\n k_diff = 0 if self.model.simple_differencing else self.model.k_diff\n order = '(%s, %d, %s)' % (order_ar, k_diff, order_ma)\n # See if we have an SARIMA component\n seasonal_order = ''\n has_seasonal = (\n self.model.k_seasonal_ar +\n self.model.k_seasonal_diff +\n self.model.k_seasonal_ma\n ) > 0\n if has_seasonal:\n if self.model.k_ar == self.model.k_ar_params:\n order_seasonal_ar = (\n int(self.model.k_seasonal_ar / self.model.seasonal_periods)\n )\n else:\n order_seasonal_ar = (\n tuple(self.polynomial_seasonal_ar.nonzero()[0][1:])\n )\n if self.model.k_ma == self.model.k_ma_params:\n order_seasonal_ma = (\n int(self.model.k_seasonal_ma / self.model.seasonal_periods)\n )\n else:\n order_seasonal_ma = (\n tuple(self.polynomial_seasonal_ma.nonzero()[0][1:])\n )\n # If there is simple differencing, then that is reflected in the\n # dependent variable name\n k_seasonal_diff = self.model.k_seasonal_diff\n if self.model.simple_differencing:\n k_seasonal_diff = 0\n seasonal_order = ('(%s, %d, %s, %d)' %\n (str(order_seasonal_ar), k_seasonal_diff,\n str(order_seasonal_ma),\n self.model.seasonal_periods))\n if not order == '':\n order += 'x'\n model_name = (\n '%s%s%s' % (self.model.__class__.__name__, order, seasonal_order)\n )\n return super(SARIMAXResults, self).summary(\n alpha=alpha, start=start, model_name=model_name\n )\n summary.__doc__ = MLEResults.summary.__doc__\n\n\nclass SARIMAXResultsWrapper(MLEResultsWrapper):\n _attrs = {}\n _wrap_attrs = wrap.union_dicts(MLEResultsWrapper._wrap_attrs,\n _attrs)\n _methods = {}\n _wrap_methods = wrap.union_dicts(MLEResultsWrapper._wrap_methods,\n _methods)\nwrap.populate_wrapper(SARIMAXResultsWrapper, SARIMAXResults)\n", "# -*- coding: utf-8 -*-\n\"\"\"Tests for Regression Diagnostics and Specification Tests\n\nCreated on Thu Feb 09 13:19:47 2012\n\nAuthor: Josef Perktold\nLicense: BSD-3\n\ncurrently all tests are against R\n\n\"\"\"\n#import warnings\n#warnings.simplefilter(\"default\")\n# ResourceWarning doesn't exist in python 2\n#warnings.simplefilter(\"ignore\", ResourceWarning)\nimport os\n\nimport numpy as np\nimport pandas as pd\n\n# skipping some parts\nfrom distutils.version import LooseVersion\nPD_GE_17 = LooseVersion(pd.__version__) >= '0.17'\n\nfrom numpy.testing import (assert_, assert_almost_equal, assert_equal,\n assert_approx_equal, assert_allclose,\n assert_array_equal)\nimport pytest\n\nfrom statsmodels.regression.linear_model import OLS, GLSAR\nfrom statsmodels.tools.tools import add_constant\nfrom statsmodels.datasets import macrodata\n\nimport statsmodels.stats.sandwich_covariance as sw\nimport statsmodels.stats.diagnostic as smsdia\nimport json\n\n#import statsmodels.sandbox.stats.diagnostic as smsdia\nimport statsmodels.stats.outliers_influence as oi\n\ncur_dir = 
os.path.abspath(os.path.dirname(__file__))\n\ndef compare_t_est(sp, sp_dict, decimal=(14, 14)):\n assert_almost_equal(sp[0], sp_dict['statistic'], decimal=decimal[0])\n assert_almost_equal(sp[1], sp_dict['pvalue'], decimal=decimal[1])\n\n\ndef notyet_atst():\n d = macrodata.load().data\n\n realinv = d['realinv']\n realgdp = d['realgdp']\n realint = d['realint']\n endog = realinv\n exog = add_constant(np.c_[realgdp, realint])\n res_ols1 = OLS(endog, exog).fit()\n\n #growth rates\n gs_l_realinv = 400 * np.diff(np.log(d['realinv']))\n gs_l_realgdp = 400 * np.diff(np.log(d['realgdp']))\n lint = d['realint'][:-1]\n tbilrate = d['tbilrate'][:-1]\n\n endogg = gs_l_realinv\n exogg = add_constant(np.c_[gs_l_realgdp, lint])\n exogg2 = add_constant(np.c_[gs_l_realgdp, tbilrate])\n\n res_ols = OLS(endogg, exogg).fit()\n res_ols2 = OLS(endogg, exogg2).fit()\n\n #the following were done accidentally with res_ols1 in R,\n #with original Greene data\n\n params = np.array([-272.3986041341653, 0.1779455206941112,\n 0.2149432424658157])\n cov_hac_4 = np.array([1321.569466333051, -0.2318836566017612,\n 37.01280466875694, -0.2318836566017614, 4.602339488102263e-05,\n -0.0104687835998635, 37.012804668757, -0.0104687835998635,\n 21.16037144168061]).reshape(3,3, order='F')\n cov_hac_10 = np.array([2027.356101193361, -0.3507514463299015,\n 54.81079621448568, -0.350751446329901, 6.953380432635583e-05,\n -0.01268990195095196, 54.81079621448564, -0.01268990195095195,\n 22.92512402151113]).reshape(3,3, order='F')\n\n #goldfeld-quandt\n het_gq_greater = dict(statistic=13.20512768685082, df1=99, df2=98,\n pvalue=1.246141976112324e-30, distr='f')\n het_gq_less = dict(statistic=13.20512768685082, df1=99, df2=98, pvalue=1.)\n het_gq_2sided = dict(statistic=13.20512768685082, df1=99, df2=98,\n pvalue=1.246141976112324e-30, distr='f')\n\n #goldfeld-quandt, fraction = 0.5\n het_gq_greater_2 = dict(statistic=87.1328934692124, df1=48, df2=47,\n pvalue=2.154956842194898e-33, distr='f')\n\n gq = smsdia.het_goldfeldquandt(endog, exog, split=0.5)\n compare_t_est(gq, het_gq_greater, decimal=(13, 14))\n assert_equal(gq[-1], 'increasing')\n\n\n harvey_collier = dict(stat=2.28042114041313, df=199,\n pvalue=0.02364236161988260, distr='t')\n #hc = harvtest(fm, order.by=ggdp , data = list())\n harvey_collier_2 = dict(stat=0.7516918462158783, df=199,\n pvalue=0.4531244858006127, distr='t')\n\n\n\n ##################################\n\n\n\nclass TestDiagnosticG(object):\n\n @classmethod\n def setup_class(cls):\n d = macrodata.load().data\n #growth rates\n gs_l_realinv = 400 * np.diff(np.log(d['realinv']))\n gs_l_realgdp = 400 * np.diff(np.log(d['realgdp']))\n lint = d['realint'][:-1]\n tbilrate = d['tbilrate'][:-1]\n\n endogg = gs_l_realinv\n exogg = add_constant(np.c_[gs_l_realgdp, lint])\n exogg2 = add_constant(np.c_[gs_l_realgdp, tbilrate])\n exogg3 = add_constant(np.c_[gs_l_realgdp])\n\n res_ols = OLS(endogg, exogg).fit()\n res_ols2 = OLS(endogg, exogg2).fit()\n\n res_ols3 = OLS(endogg, exogg3).fit()\n\n cls.res = res_ols\n cls.res2 = res_ols2\n cls.res3 = res_ols3\n cls.endog = cls.res.model.endog\n cls.exog = cls.res.model.exog\n\n def test_basic(self):\n #mainly to check I got the right regression\n #> mkarray(fm$coefficients, \"params\")\n params = np.array([-9.48167277465485, 4.3742216647032,\n -0.613996969478989])\n\n assert_almost_equal(self.res.params, params, decimal=12)\n\n def test_hac(self):\n res = self.res\n #> nw = NeweyWest(fm, lag = 4, prewhite = FALSE, verbose=TRUE)\n #> nw2 = NeweyWest(fm, lag=10, prewhite = FALSE, 
verbose=TRUE)\n\n #> mkarray(nw, \"cov_hac_4\")\n cov_hac_4 = np.array([1.385551290884014, -0.3133096102522685,\n -0.0597207976835705, -0.3133096102522685, 0.1081011690351306,\n 0.000389440793564336, -0.0597207976835705, 0.000389440793564339,\n 0.0862118527405036]).reshape(3,3, order='F')\n\n #> mkarray(nw2, \"cov_hac_10\")\n cov_hac_10 = np.array([1.257386180080192, -0.2871560199899846,\n -0.03958300024627573, -0.2871560199899845, 0.1049107028987101,\n 0.0003896205316866944, -0.03958300024627578, 0.0003896205316866961,\n 0.0985539340694839]).reshape(3,3, order='F')\n\n cov = sw.cov_hac_simple(res, nlags=4, use_correction=False)\n bse_hac = sw.se_cov(cov)\n assert_almost_equal(cov, cov_hac_4, decimal=14)\n assert_almost_equal(bse_hac, np.sqrt(np.diag(cov)), decimal=14)\n\n cov = sw.cov_hac_simple(res, nlags=10, use_correction=False)\n bse_hac = sw.se_cov(cov)\n assert_almost_equal(cov, cov_hac_10, decimal=14)\n assert_almost_equal(bse_hac, np.sqrt(np.diag(cov)), decimal=14)\n\n\n def test_het_goldfeldquandt(self):\n #TODO: test options missing\n\n #> gq = gqtest(fm, alternative='greater')\n #> mkhtest_f(gq, 'het_gq_greater', 'f')\n het_gq_greater = dict(statistic=0.5313259064778423,\n pvalue=0.9990217851193723,\n parameters=(98, 98), distr='f')\n\n #> gq = gqtest(fm, alternative='less')\n #> mkhtest_f(gq, 'het_gq_less', 'f')\n het_gq_less = dict(statistic=0.5313259064778423,\n pvalue=0.000978214880627621,\n parameters=(98, 98), distr='f')\n\n #> gq = gqtest(fm, alternative='two.sided')\n #> mkhtest_f(gq, 'het_gq_two_sided', 'f')\n het_gq_two_sided = dict(statistic=0.5313259064778423,\n pvalue=0.001956429761255241,\n parameters=(98, 98), distr='f')\n\n\n #> gq = gqtest(fm, fraction=0.1, alternative='two.sided')\n #> mkhtest_f(gq, 'het_gq_two_sided_01', 'f')\n het_gq_two_sided_01 = dict(statistic=0.5006976835928314,\n pvalue=0.001387126702579789,\n parameters=(88, 87), distr='f')\n\n #> gq = gqtest(fm, fraction=0.5, alternative='two.sided')\n #> mkhtest_f(gq, 'het_gq_two_sided_05', 'f')\n het_gq_two_sided_05 = dict(statistic=0.434815645134117,\n pvalue=0.004799321242905568,\n parameters=(48, 47), distr='f')\n\n endogg, exogg = self.endog, self.exog\n #tests\n gq = smsdia.het_goldfeldquandt(endogg, exogg, split=0.5)\n compare_t_est(gq, het_gq_greater, decimal=(14, 14))\n assert_equal(gq[-1], 'increasing')\n\n gq = smsdia.het_goldfeldquandt(endogg, exogg, split=0.5,\n alternative='decreasing')\n compare_t_est(gq, het_gq_less, decimal=(14, 14))\n assert_equal(gq[-1], 'decreasing')\n\n gq = smsdia.het_goldfeldquandt(endogg, exogg, split=0.5,\n alternative='two-sided')\n compare_t_est(gq, het_gq_two_sided, decimal=(14, 14))\n assert_equal(gq[-1], 'two-sided')\n\n #TODO: forcing the same split as R 202-90-90-1=21\n gq = smsdia.het_goldfeldquandt(endogg, exogg, split=90, drop=21,\n alternative='two-sided')\n compare_t_est(gq, het_gq_two_sided_01, decimal=(14, 14))\n assert_equal(gq[-1], 'two-sided')\n #TODO other options ???\n\n def test_het_breusch_pagan(self):\n res = self.res\n\n bptest = dict(statistic=0.709924388395087, pvalue=0.701199952134347,\n parameters=(2,), distr='f')\n\n bp = smsdia.het_breuschpagan(res.resid, res.model.exog)\n compare_t_est(bp, bptest, decimal=(12, 12))\n\n\n\n def test_het_white(self):\n res = self.res\n\n #TODO: regressiontest, compare with Greene or Gretl or Stata\n hw = smsdia.het_white(res.resid, res.model.exog)\n hw_values = (33.503722896538441, 2.9887960597830259e-06,\n 7.7945101228430946, 1.0354575277704231e-06)\n assert_almost_equal(hw, hw_values)\n\n def 
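# --- Illustrative sketch (simulated toy regression; not one of the R-checked
# --- cases used in this test class) ---
# The diagnostics exercised above can be called directly on any fitted OLS
# results object:
import numpy as np
import statsmodels.api as sm
import statsmodels.stats.diagnostic as smsdia

rng = np.random.RandomState(12345)
x = rng.normal(size=(200, 2))
y = 1.0 + x.dot([0.5, -0.2]) + rng.normal(size=200)
res = sm.OLS(y, sm.add_constant(x)).fit()

print(smsdia.het_breuschpagan(res.resid, res.model.exog))        # (lm, lm_pval, f, f_pval)
print(smsdia.het_goldfeldquandt(y, res.model.exog, split=0.5))   # (F, pval, ordering)
print(smsdia.acorr_breusch_godfrey(res, nlags=4))                # (lm, lm_pval, f, f_pval)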
test_het_arch(self):\n #test het_arch and indirectly het_lm against R\n #> library(FinTS)\n #> at = ArchTest(residuals(fm), lags=4)\n #> mkhtest(at, 'archtest_4', 'chi2')\n archtest_4 = dict(statistic=3.43473400836259,\n pvalue=0.487871315392619, parameters=(4,),\n distr='chi2')\n\n #> at = ArchTest(residuals(fm), lags=12)\n #> mkhtest(at, 'archtest_12', 'chi2')\n archtest_12 = dict(statistic=8.648320999014171,\n pvalue=0.732638635007718, parameters=(12,),\n distr='chi2')\n\n at4 = smsdia.het_arch(self.res.resid, maxlag=4)\n at12 = smsdia.het_arch(self.res.resid, maxlag=12)\n compare_t_est(at4[:2], archtest_4, decimal=(12, 13))\n compare_t_est(at12[:2], archtest_12, decimal=(12, 13))\n\n def test_het_arch2(self):\n #test autolag options, this also test het_lm\n #unfortunately optimal lag=1 for this data\n resid = self.res.resid\n\n res1 = smsdia.het_arch(resid, maxlag=1, autolag=None, store=True)\n rs1 = res1[-1]\n\n res2 = smsdia.het_arch(resid, maxlag=5, autolag='aic', store=True)\n rs2 = res2[-1]\n\n assert_almost_equal(rs2.resols.params, rs1.resols.params, decimal=13)\n assert_almost_equal(res2[:4], res1[:4], decimal=13)\n\n #test that smallest lag, maxlag=1 works\n res3 = smsdia.het_arch(resid, maxlag=1, autolag='aic')\n assert_almost_equal(res3[:4], res1[:4], decimal=13)\n\n def test_acorr_breusch_godfrey(self):\n res = self.res\n\n #bgf = bgtest(fm, order = 4, type=\"F\")\n breuschgodfrey_f = dict(statistic=1.179280833676792,\n pvalue=0.321197487261203,\n parameters=(4,195,), distr='f')\n\n #> bgc = bgtest(fm, order = 4, type=\"Chisq\")\n #> mkhtest(bgc, \"breuschpagan_c\", \"chi2\")\n breuschgodfrey_c = dict(statistic=4.771042651230007,\n pvalue=0.3116067133066697,\n parameters=(4,), distr='chi2')\n\n bg = smsdia.acorr_breusch_godfrey(res, nlags=4)\n bg_r = [breuschgodfrey_c['statistic'], breuschgodfrey_c['pvalue'],\n breuschgodfrey_f['statistic'], breuschgodfrey_f['pvalue']]\n assert_almost_equal(bg, bg_r, decimal=13)\n\n # check that lag choice works\n bg2 = smsdia.acorr_breusch_godfrey(res, nlags=None)\n bg3 = smsdia.acorr_breusch_godfrey(res, nlags=14)\n assert_almost_equal(bg2, bg3, decimal=13)\n\n def test_acorr_ljung_box(self):\n\n #unit-test which may be useful later\n #ddof correction for fitted parameters in ARMA(p,q) fitdf=p+q\n #> bt = Box.test(residuals(fm), lag=4, type = \"Ljung-Box\", fitdf=2)\n #> mkhtest(bt, \"ljung_box_4df2\", \"chi2\")\n # ljung_box_4df2 = dict(statistic=5.23587172795227,\n # pvalue=0.0729532930400377,\n # parameters=(2,), distr='chi2')\n\n #> bt = Box.test(residuals(fm), lag=4, type = \"Box-Pierce\", fitdf=2)\n #> mkhtest(bt, \"ljung_box_bp_4df2\", \"chi2\")\n # ljung_box_bp_4df2 = dict(statistic=5.12462932741681,\n # pvalue=0.0771260128929921,\n # parameters=(2,), distr='chi2')\n\n\n res = self.res\n\n #general test\n\n #> bt = Box.test(residuals(fm), lag=4, type = \"Ljung-Box\")\n #> mkhtest(bt, \"ljung_box_4\", \"chi2\")\n ljung_box_4 = dict(statistic=5.23587172795227, pvalue=0.263940335284713,\n parameters=(4,), distr='chi2')\n\n #> bt = Box.test(residuals(fm), lag=4, type = \"Box-Pierce\")\n #> mkhtest(bt, \"ljung_box_bp_4\", \"chi2\")\n ljung_box_bp_4 = dict(statistic=5.12462932741681,\n pvalue=0.2747471266820692,\n parameters=(4,), distr='chi2')\n\n\n lb, lbpval, bp, bppval = smsdia.acorr_ljungbox(res.resid, 4,\n boxpierce=True)\n compare_t_est([lb[-1], lbpval[-1]], ljung_box_4, decimal=(13, 13))\n compare_t_est([bp[-1], bppval[-1]], ljung_box_bp_4, decimal=(13, 13))\n\n def test_acorr_ljung_box_big_default(self):\n res = self.res\n #test 
with big dataset and default lag\n\n #> bt = Box.test(residuals(fm), type = \"Ljung-Box\")\n #> mkhtest(bt, \"ljung_box_none\", \"chi2\")\n ljung_box_none = dict(statistic=51.03724531797195, pvalue=0.11334744923390,\n distr='chi2')\n\n #> bt = Box.test(residuals(fm), type = \"Box-Pierce\")\n #> mkhtest(bt, \"ljung_box_bp_none\", \"chi2\")\n ljung_box_bp_none = dict(statistic=45.12238537034000,\n pvalue=0.26638168491464,\n distr='chi2')\n lb, lbpval, bp, bppval = smsdia.acorr_ljungbox(res.resid, boxpierce=True)\n compare_t_est([lb[-1], lbpval[-1]], ljung_box_none, decimal=(13, 13))\n compare_t_est([bp[-1], bppval[-1]], ljung_box_bp_none, decimal=(13, 13))\n\n def test_acorr_ljung_box_small_default(self):\n res = self.res\n #test with small dataset and default lag\n\n #> bt = Box.test(residuals(fm), type = \"Ljung-Box\")\n #> mkhtest(bt, \"ljung_box_small\", \"chi2\")\n ljung_box_small = dict(statistic=9.61503968281915, pvalue=0.72507000996945,\n parameters=(0,), distr='chi2')\n\n #> bt = Box.test(residuals(fm), type = \"Box-Pierce\")\n #> mkhtest(bt, \"ljung_box_bp_small\", \"chi2\")\n ljung_box_bp_small = dict(statistic=7.41692150864936,\n pvalue=0.87940785887006,\n parameters=(0,), distr='chi2')\n\n lb, lbpval, bp, bppval = smsdia.acorr_ljungbox(res.resid[:30], boxpierce=True)\n compare_t_est([lb[-1], lbpval[-1]], ljung_box_small, decimal=(13, 13))\n compare_t_est([bp[-1], bppval[-1]], ljung_box_bp_small, decimal=(13, 13))\n\n\n def test_harvey_collier(self):\n\n #> hc = harvtest(fm, order.by = NULL, data = list())\n #> mkhtest_f(hc, 'harvey_collier', 't')\n harvey_collier = dict(statistic=0.494432160939874,\n pvalue=0.6215491310408242,\n parameters=(198), distr='t')\n\n #> hc2 = harvtest(fm, order.by=ggdp , data = list())\n #> mkhtest_f(hc2, 'harvey_collier_2', 't')\n harvey_collier_2 = dict(statistic=1.42104628340473,\n pvalue=0.1568762892441689,\n parameters=(198), distr='t')\n\n hc = smsdia.linear_harvey_collier(self.res)\n compare_t_est(hc, harvey_collier, decimal=(12, 12))\n\n\n def test_rainbow(self):\n #rainbow test\n #> rt = raintest(fm)\n #> mkhtest_f(rt, 'raintest', 'f')\n raintest = dict(statistic=0.6809600116739604, pvalue=0.971832843583418,\n parameters=(101, 98), distr='f')\n\n #> rt = raintest(fm, center=0.4)\n #> mkhtest_f(rt, 'raintest_center_04', 'f')\n raintest_center_04 = dict(statistic=0.682635074191527,\n pvalue=0.971040230422121,\n parameters=(101, 98), distr='f')\n\n #> rt = raintest(fm, fraction=0.4)\n #> mkhtest_f(rt, 'raintest_fraction_04', 'f')\n raintest_fraction_04 = dict(statistic=0.565551237772662,\n pvalue=0.997592305968473,\n parameters=(122, 77), distr='f')\n\n #> rt = raintest(fm, order.by=ggdp)\n #Warning message:\n #In if (order.by == \"mahalanobis\") { :\n # the condition has length > 1 and only the first element will be used\n #> mkhtest_f(rt, 'raintest_order_gdp', 'f')\n raintest_order_gdp = dict(statistic=1.749346160513353,\n pvalue=0.002896131042494884,\n parameters=(101, 98), distr='f')\n\n rb = smsdia.linear_rainbow(self.res)\n compare_t_est(rb, raintest, decimal=(13, 14))\n rb = smsdia.linear_rainbow(self.res, frac=0.4)\n compare_t_est(rb, raintest_fraction_04, decimal=(13, 14))\n\n\n def test_compare_lr(self):\n res = self.res\n res3 = self.res3 #nested within res\n #lrtest\n #lrt = lrtest(fm, fm2)\n #Model 1: ginv ~ ggdp + lint\n #Model 2: ginv ~ ggdp\n\n lrtest = dict(loglike1=-763.9752181602237, loglike2=-766.3091902020184,\n chi2value=4.66794408358942, pvalue=0.03073069384028677,\n df=(4,3,1))\n lrt = res.compare_lr_test(res3)\n 
assert_almost_equal(lrt[0], lrtest['chi2value'], decimal=11)\n assert_almost_equal(lrt[1], lrtest['pvalue'], decimal=11)\n\n waldtest = dict(fvalue=4.65216373312492, pvalue=0.03221346195239025,\n df=(199,200,1))\n\n wt = res.compare_f_test(res3)\n assert_almost_equal(wt[0], waldtest['fvalue'], decimal=11)\n assert_almost_equal(wt[1], waldtest['pvalue'], decimal=11)\n\n\n def test_compare_nonnested(self):\n res = self.res\n res2 = self.res2\n #jt = jtest(fm, lm(ginv ~ ggdp + tbilrate))\n #Estimate Std. Error t value Pr(>|t|)\n jtest = [('M1 + fitted(M2)', 1.591505670785873, 0.7384552861695823,\n 2.155182176352370, 0.032354572525314450, '*'),\n ('M2 + fitted(M1)', 1.305687653016899, 0.4808385176653064,\n 2.715438978051544, 0.007203854534057954, '**')]\n\n jt1 = smsdia.compare_j(res2, res)\n assert_almost_equal(jt1, jtest[0][3:5], decimal=13)\n\n jt2 = smsdia.compare_j(res, res2)\n assert_almost_equal(jt2, jtest[1][3:5], decimal=14)\n\n #Estimate Std. Error z value Pr(>|z|)\n coxtest = [('fitted(M1) ~ M2', -0.782030488930356, 0.599696502782265,\n -1.304043770977755, 1.922186587840554e-01, ' '),\n ('fitted(M2) ~ M1', -2.248817107408537, 0.392656854330139,\n -5.727181590258883, 1.021128495098556e-08, '***')]\n\n ct1 = smsdia.compare_cox(res, res2)\n assert_almost_equal(ct1, coxtest[0][3:5], decimal=13)\n\n ct2 = smsdia.compare_cox(res2, res)\n assert_almost_equal(ct2, coxtest[1][3:5], decimal=12)\n #TODO should be approx\n\n # Res.Df Df F Pr(>F)\n encomptest = [('M1 vs. ME', 198, -1, 4.644810213266983,\n 0.032354572525313666, '*'),\n ('M2 vs. ME', 198, -1, 7.373608843521585,\n 0.007203854534058054, '**')]\n\n # Estimate Std. Error t value\n petest = [('M1 + log(fit(M1))-fit(M2)', -229.281878354594596,\n 44.5087822087058598, -5.15139, 6.201281252449979e-07),\n ('M2 + fit(M1)-exp(fit(M2))', 0.000634664704814,\n 0.0000462387010349, 13.72583, 1.319536115230356e-30)]\n\n\n def test_cusum_ols(self):\n #R library(strucchange)\n #> sc = sctest(ginv ~ ggdp + lint, type=\"OLS-CUSUM\")\n #> mkhtest(sc, 'cusum_ols', 'BB')\n cusum_ols = dict(statistic=1.055750610401214, pvalue=0.2149567397376543,\n parameters=(), distr='BB') #Brownian Bridge\n\n k_vars=3\n cs_ols = smsdia.breaks_cusumolsresid(self.res.resid, ddof=k_vars) #\n compare_t_est(cs_ols, cusum_ols, decimal=(12, 12))\n\n def test_breaks_hansen(self):\n #> sc = sctest(ginv ~ ggdp + lint, type=\"Nyblom-Hansen\")\n #> mkhtest(sc, 'breaks_nyblom_hansen', 'BB')\n breaks_nyblom_hansen = dict(statistic=1.0300792740544484,\n pvalue=0.1136087530212015,\n parameters=(), distr='BB')\n\n bh = smsdia.breaks_hansen(self.res)\n assert_almost_equal(bh[0], breaks_nyblom_hansen['statistic'],\n decimal=13)\n #TODO: breaks_hansen doesn't return pvalues\n\n\n def test_recursive_residuals(self):\n\n reccumres_standardize = np.array([-2.151, -3.748, -3.114, -3.096,\n -1.865, -2.230, -1.194, -3.500, -3.638, -4.447, -4.602, -4.631, -3.999,\n -4.830, -5.429, -5.435, -6.554, -8.093, -8.567, -7.532, -7.079, -8.468,\n -9.320, -12.256, -11.932, -11.454, -11.690, -11.318, -12.665, -12.842,\n -11.693, -10.803, -12.113, -12.109, -13.002, -11.897, -10.787, -10.159,\n -9.038, -9.007, -8.634, -7.552, -7.153, -6.447, -5.183, -3.794, -3.511,\n -3.979, -3.236, -3.793, -3.699, -5.056, -5.724, -4.888, -4.309, -3.688,\n -3.918, -3.735, -3.452, -2.086, -6.520, -7.959, -6.760, -6.855, -6.032,\n -4.405, -4.123, -4.075, -3.235, -3.115, -3.131, -2.986, -1.813, -4.824,\n -4.424, -4.796, -4.000, -3.390, -4.485, -4.669, -4.560, -3.834, -5.507,\n -3.792, -2.427, -1.756, -0.354, 1.150, 0.586, 0.643, 
1.773, -0.830,\n -0.388, 0.517, 0.819, 2.240, 3.791, 3.187, 3.409, 2.431, 0.668, 0.957,\n -0.928, 0.327, -0.285, -0.625, -2.316, -1.986, -0.744, -1.396, -1.728,\n -0.646, -2.602, -2.741, -2.289, -2.897, -1.934, -2.532, -3.175, -2.806,\n -3.099, -2.658, -2.487, -2.515, -2.224, -2.416, -1.141, 0.650, -0.947,\n 0.725, 0.439, 0.885, 2.419, 2.642, 2.745, 3.506, 4.491, 5.377, 4.624,\n 5.523, 6.488, 6.097, 5.390, 6.299, 6.656, 6.735, 8.151, 7.260, 7.846,\n 8.771, 8.400, 8.717, 9.916, 9.008, 8.910, 8.294, 8.982, 8.540, 8.395,\n 7.782, 7.794, 8.142, 8.362, 8.400, 7.850, 7.643, 8.228, 6.408, 7.218,\n 7.699, 7.895, 8.725, 8.938, 8.781, 8.350, 9.136, 9.056, 10.365, 10.495,\n 10.704, 10.784, 10.275, 10.389, 11.586, 11.033, 11.335, 11.661, 10.522,\n 10.392, 10.521, 10.126, 9.428, 9.734, 8.954, 9.949, 10.595, 8.016,\n 6.636, 6.975])\n\n rr = smsdia.recursive_olsresiduals(self.res, skip=3, alpha=0.95)\n assert_equal(np.round(rr[5][1:], 3), reccumres_standardize) #extra zero in front\n #assert_equal(np.round(rr[3][4:], 3), np.diff(reccumres_standardize))\n assert_almost_equal(rr[3][4:], np.diff(reccumres_standardize),3)\n assert_almost_equal(rr[4][3:].std(ddof=1), 10.7242, decimal=4)\n\n #regression number, visually checked with graph from gretl\n ub0 = np.array([ 13.37318571, 13.50758959, 13.64199346, 13.77639734,\n 13.91080121])\n ub1 = np.array([ 39.44753774, 39.58194162, 39.7163455 , 39.85074937,\n 39.98515325])\n lb, ub = rr[6]\n assert_almost_equal(ub[:5], ub0, decimal=7)\n assert_almost_equal(lb[:5], -ub0, decimal=7)\n assert_almost_equal(ub[-5:], ub1, decimal=7)\n assert_almost_equal(lb[-5:], -ub1, decimal=7)\n\n #test a few values with explicit OLS\n endog = self.res.model.endog\n exog = self.res.model.exog\n params = []\n ypred = []\n for i in range(3,10):\n resi = OLS(endog[:i], exog[:i]).fit()\n ypred.append(resi.model.predict(resi.params, exog[i]))\n params.append(resi.params)\n assert_almost_equal(rr[2][3:10], ypred, decimal=12)\n assert_almost_equal(rr[0][3:10], endog[3:10] - ypred, decimal=12)\n assert_almost_equal(rr[1][2:9], params, decimal=12)\n\n def test_normality(self):\n res = self.res\n\n #> library(nortest) #Lilliefors (Kolmogorov-Smirnov) normality test\n #> lt = lillie.test(residuals(fm))\n #> mkhtest(lt, \"lilliefors\", \"-\")\n lilliefors1 = dict(statistic=0.0723390908786589,\n pvalue=0.01204113540102896, parameters=(), distr='-')\n\n #> lt = lillie.test(residuals(fm)**2)\n #> mkhtest(lt, \"lilliefors\", \"-\")\n lilliefors2 = dict(statistic=0.301311621898024,\n pvalue=1.004305736618051e-51,\n parameters=(), distr='-')\n\n #> lt = lillie.test(residuals(fm)[1:20])\n #> mkhtest(lt, \"lilliefors\", \"-\")\n lilliefors3 = dict(statistic=0.1333956004203103,\n pvalue=0.20, parameters=(), distr='-')\n\n lf1 = smsdia.lilliefors(res.resid)\n lf2 = smsdia.lilliefors(res.resid**2)\n lf3 = smsdia.lilliefors(res.resid[:20])\n\n compare_t_est(lf1, lilliefors1, decimal=(14, 14))\n compare_t_est(lf2, lilliefors2, decimal=(14, 14)) #pvalue very small\n assert_approx_equal(lf2[1], lilliefors2['pvalue'], significant=10)\n compare_t_est(lf3, lilliefors3, decimal=(14, 1))\n #R uses different approximation for pvalue in last case\n\n #> ad = ad.test(residuals(fm))\n #> mkhtest(ad, \"ad3\", \"-\")\n adr1 = dict(statistic=1.602209621518313, pvalue=0.0003937979149362316,\n parameters=(), distr='-')\n\n #> ad = ad.test(residuals(fm)**2)\n #> mkhtest(ad, \"ad3\", \"-\")\n adr2 = dict(statistic=np.inf, pvalue=np.nan, parameters=(), distr='-')\n\n #> ad = ad.test(residuals(fm)[1:20])\n #> mkhtest(ad, 
\"ad3\", \"-\")\n adr3 = dict(statistic=0.3017073732210775, pvalue=0.5443499281265933,\n parameters=(), distr='-')\n\n ad1 = smsdia.normal_ad(res.resid)\n compare_t_est(ad1, adr1, decimal=(11, 13))\n ad2 = smsdia.normal_ad(res.resid**2)\n assert_(np.isinf(ad2[0]))\n ad3 = smsdia.normal_ad(res.resid[:20])\n compare_t_est(ad3, adr3, decimal=(11, 12))\n\n\n def test_influence(self):\n res = self.res\n\n #this test is slow\n infl = oi.OLSInfluence(res)\n\n path = os.path.join(cur_dir, \"results\", \"influence_lsdiag_R.json\")\n with open(path, 'r') as fp:\n lsdiag = json.load(fp)\n\n #basic\n assert_almost_equal(np.array(lsdiag['cov.scaled']).reshape(3, 3),\n res.cov_params(), decimal=14)\n assert_almost_equal(np.array(lsdiag['cov.unscaled']).reshape(3, 3),\n res.normalized_cov_params, decimal=14)\n\n c0, c1 = infl.cooks_distance #TODO: what's c1\n\n\n assert_almost_equal(c0, lsdiag['cooks'], decimal=14)\n assert_almost_equal(infl.hat_matrix_diag, lsdiag['hat'], decimal=14)\n assert_almost_equal(infl.resid_studentized_internal,\n lsdiag['std.res'], decimal=14)\n\n #slow:\n #infl._get_all_obs() #slow, nobs estimation loop, called implicitly\n dffits, dffth = infl.dffits\n assert_almost_equal(dffits, lsdiag['dfits'], decimal=14)\n assert_almost_equal(infl.resid_studentized_external,\n lsdiag['stud.res'], decimal=14)\n\n import pandas\n fn = os.path.join(cur_dir,\"results/influence_measures_R.csv\")\n infl_r = pandas.read_csv(fn, index_col=0)\n conv = lambda s: 1 if s=='TRUE' else 0\n fn = os.path.join(cur_dir,\"results/influence_measures_bool_R.csv\")\n #not used yet:\n #infl_bool_r = pandas.read_csv(fn, index_col=0,\n # converters=dict(zip(lrange(7),[conv]*7)))\n infl_r2 = np.asarray(infl_r)\n assert_almost_equal(infl.dfbetas, infl_r2[:,:3], decimal=13)\n assert_almost_equal(infl.cov_ratio, infl_r2[:,4], decimal=14)\n #duplicates\n assert_almost_equal(dffits, infl_r2[:,3], decimal=14)\n assert_almost_equal(c0, infl_r2[:,5], decimal=14)\n assert_almost_equal(infl.hat_matrix_diag, infl_r2[:,6], decimal=14)\n\n #Note: for dffits, R uses a threshold around 0.36, mine: dffits[1]=0.24373\n #TODO: finish and check thresholds and pvalues\n '''\n R has\n >>> np.nonzero(np.asarray(infl_bool_r[\"dffit\"]))[0]\n array([ 6, 26, 63, 76, 90, 199])\n >>> np.nonzero(np.asarray(infl_bool_r[\"cov.r\"]))[0]\n array([ 4, 26, 59, 61, 63, 72, 76, 84, 91, 92, 94, 95, 108,\n 197, 198])\n >>> np.nonzero(np.asarray(infl_bool_r[\"hat\"]))[0]\n array([ 62, 76, 84, 90, 91, 92, 95, 108, 197, 199])\n '''\n\n\nclass TestDiagnosticGPandas(TestDiagnosticG):\n\n @classmethod\n def setup_class(cls):\n d = macrodata.load_pandas().data\n #growth rates\n d['gs_l_realinv'] = 400 * np.log(d['realinv']).diff()\n d['gs_l_realgdp'] = 400 * np.log(d['realgdp']).diff()\n d['lint'] = d['realint'].shift(1)\n d['tbilrate'] = d['tbilrate'].shift(1)\n\n d = d.dropna()\n cls.d = d\n endogg = d['gs_l_realinv']\n exogg = add_constant(d[['gs_l_realgdp', 'lint']])\n exogg2 = add_constant(d[['gs_l_realgdp', 'tbilrate']])\n exogg3 = add_constant(d[['gs_l_realgdp']])\n\n res_ols = OLS(endogg, exogg).fit()\n res_ols2 = OLS(endogg, exogg2).fit()\n\n res_ols3 = OLS(endogg, exogg3).fit()\n\n cls.res = res_ols\n cls.res2 = res_ols2\n cls.res3 = res_ols3\n cls.endog = cls.res.model.endog\n cls.exog = cls.res.model.exog\n\n\ndef grangertest():\n #> gt = grangertest(ginv, ggdp, order=4)\n #> gt\n #Granger causality test\n #\n #Model 1: ggdp ~ Lags(ggdp, 1:4) + Lags(ginv, 1:4)\n #Model 2: ggdp ~ Lags(ggdp, 1:4)\n\n grangertest = dict(fvalue=1.589672703015157, 
pvalue=0.178717196987075,\n df=(198,193))\n\ndef test_outlier_influence_funcs():\n #smoke test\n x = add_constant(np.random.randn(10, 2))\n y = x.sum(1) + np.random.randn(10)\n res = OLS(y, x).fit()\n out_05 = oi.summary_table(res)\n # GH3344 : Check alpha has an effect\n out_01 = oi.summary_table(res, alpha=0.01)\n assert_(np.all(out_01[1][:, 6] <= out_05[1][:, 6]))\n assert_(np.all(out_01[1][:, 7] >= out_05[1][:, 7]))\n\n res2 = OLS(y, x[:,0]).fit()\n oi.summary_table(res2, alpha=0.05)\n infl = res2.get_influence()\n infl.summary_table()\n\ndef test_influence_wrapped():\n from pandas import DataFrame\n from pandas.util.testing import assert_series_equal\n\n d = macrodata.load_pandas().data\n #growth rates\n gs_l_realinv = 400 * np.log(d['realinv']).diff().dropna()\n gs_l_realgdp = 400 * np.log(d['realgdp']).diff().dropna()\n lint = d['realint'][:-1]\n\n # re-index these because they won't conform to lint\n gs_l_realgdp.index = lint.index\n gs_l_realinv.index = lint.index\n\n data = dict(const=np.ones_like(lint), lint=lint, lrealgdp=gs_l_realgdp)\n #order is important\n exog = DataFrame(data, columns=['const','lrealgdp','lint'])\n\n res = OLS(gs_l_realinv, exog).fit()\n\n #basic\n # already tested\n #assert_almost_equal(lsdiag['cov.scaled'],\n # res.cov_params().values.ravel(), decimal=14)\n #assert_almost_equal(lsdiag['cov.unscaled'],\n # res.normalized_cov_params.values.ravel(), decimal=14)\n\n infl = oi.OLSInfluence(res)\n\n # smoke test just to make sure it works, results separately tested\n df = infl.summary_frame()\n assert_(isinstance(df, DataFrame))\n\n #this test is slow\n path = os.path.join(cur_dir, \"results\", \"influence_lsdiag_R.json\")\n with open(path, \"r\") as fp:\n lsdiag = json.load(fp)\n\n c0, c1 = infl.cooks_distance #TODO: what's c1, it's pvalues? -ss\n\n\n #NOTE: we get a hard-cored 5 decimals with pandas testing\n assert_almost_equal(c0, lsdiag['cooks'], 14)\n assert_almost_equal(infl.hat_matrix_diag, (lsdiag['hat']), 14)\n assert_almost_equal(infl.resid_studentized_internal,\n lsdiag['std.res'], 14)\n\n #slow:\n dffits, dffth = infl.dffits\n assert_almost_equal(dffits, lsdiag['dfits'], 14)\n assert_almost_equal(infl.resid_studentized_external,\n lsdiag['stud.res'], 14)\n\n import pandas\n fn = os.path.join(cur_dir,\"results/influence_measures_R.csv\")\n infl_r = pandas.read_csv(fn, index_col=0)\n conv = lambda s: 1 if s=='TRUE' else 0\n fn = os.path.join(cur_dir,\"results/influence_measures_bool_R.csv\")\n #not used yet:\n #infl_bool_r = pandas.read_csv(fn, index_col=0,\n # converters=dict(zip(lrange(7),[conv]*7)))\n infl_r2 = np.asarray(infl_r)\n #TODO: finish wrapping this stuff\n assert_almost_equal(infl.dfbetas, infl_r2[:,:3], decimal=13)\n assert_almost_equal(infl.cov_ratio, infl_r2[:,4], decimal=14)\n\n\ndef test_influence_dtype():\n # see #2148 bug when endog is integer\n y = np.ones(20)\n np.random.seed(123)\n x = np.random.randn(20, 3)\n res1 = OLS(y, x).fit()\n\n res2 = OLS(y*1., x).fit()\n cr1 = res1.get_influence().cov_ratio\n cr2 = res2.get_influence().cov_ratio\n assert_allclose(cr1, cr2, rtol=1e-14)\n # regression test for values\n cr3 = np.array(\n [ 1.22239215, 1.31551021, 1.52671069, 1.05003921, 0.89099323,\n 1.57405066, 1.03230092, 0.95844196, 1.15531836, 1.21963623,\n 0.87699564, 1.16707748, 1.10481391, 0.98839447, 1.08999334,\n 1.35680102, 1.46227715, 1.45966708, 1.13659521, 1.22799038])\n assert_almost_equal(cr1, cr3, decimal=8)\n\n\ndef test_outlier_test():\n # results from R with NA -> 1. 
Just testing interface here because\n # outlier_test is just a wrapper\n labels = ['accountant', 'pilot', 'architect', 'author', 'chemist',\n 'minister', 'professor', 'dentist', 'reporter', 'engineer',\n 'undertaker', 'lawyer', 'physician', 'welfare.worker', 'teacher',\n 'conductor', 'contractor', 'factory.owner', 'store.manager',\n 'banker', 'bookkeeper', 'mail.carrier', 'insurance.agent',\n 'store.clerk', 'carpenter', 'electrician', 'RR.engineer',\n 'machinist', 'auto.repairman', 'plumber', 'gas.stn.attendant',\n 'coal.miner', 'streetcar.motorman', 'taxi.driver',\n 'truck.driver', 'machine.operator', 'barber', 'bartender',\n 'shoe.shiner', 'cook', 'soda.clerk', 'watchman', 'janitor',\n 'policeman', 'waiter']\n #Duncan's prestige data from car\n exog = [[1.0, 62.0, 86.0], [1.0, 72.0, 76.0], [1.0, 75.0, 92.0],\n [1.0, 55.0, 90.0], [1.0, 64.0, 86.0], [1.0, 21.0, 84.0],\n [1.0, 64.0, 93.0], [1.0, 80.0, 100.0], [1.0, 67.0, 87.0],\n [1.0, 72.0, 86.0], [1.0, 42.0, 74.0], [1.0, 76.0, 98.0],\n [1.0, 76.0, 97.0], [1.0, 41.0, 84.0], [1.0, 48.0, 91.0],\n [1.0, 76.0, 34.0], [1.0, 53.0, 45.0], [1.0, 60.0, 56.0],\n [1.0, 42.0, 44.0], [1.0, 78.0, 82.0], [1.0, 29.0, 72.0],\n [1.0, 48.0, 55.0], [1.0, 55.0, 71.0], [1.0, 29.0, 50.0],\n [1.0, 21.0, 23.0], [1.0, 47.0, 39.0], [1.0, 81.0, 28.0],\n [1.0, 36.0, 32.0], [1.0, 22.0, 22.0], [1.0, 44.0, 25.0],\n [1.0, 15.0, 29.0], [1.0, 7.0, 7.0], [1.0, 42.0, 26.0],\n [1.0, 9.0, 19.0], [1.0, 21.0, 15.0], [1.0, 21.0, 20.0],\n [1.0, 16.0, 26.0], [1.0, 16.0, 28.0], [1.0, 9.0, 17.0],\n [1.0, 14.0, 22.0], [1.0, 12.0, 30.0], [1.0, 17.0, 25.0],\n [1.0, 7.0, 20.0], [1.0, 34.0, 47.0], [1.0, 8.0, 32.0]]\n endog = [ 82., 83., 90., 76., 90., 87., 93., 90., 52., 88., 57.,\n 89., 97., 59., 73., 38., 76., 81., 45., 92., 39., 34.,\n 41., 16., 33., 53., 67., 57., 26., 29., 10., 15., 19.,\n 10., 13., 24., 20., 7., 3., 16., 6., 11., 8., 41.,\n 10.]\n ndarray_mod = OLS(endog, exog).fit()\n rstudent = [3.1345185839, -2.3970223990, 2.0438046359, -1.9309187757,\n 1.8870465798, -1.7604905300, -1.7040324156, 1.6024285876,\n -1.4332485037, -1.1044851583, 1.0688582315, 1.0185271840,\n -0.9024219332, -0.9023876471, -0.8830953936, 0.8265782334,\n 0.8089220547, 0.7682770197, 0.7319491074, -0.6665962829,\n 0.5227352794, -0.5135016547, 0.5083881518, 0.4999224372,\n -0.4980818221, -0.4759717075, -0.4293565820, -0.4114056499,\n -0.3779540862, 0.3556874030, 0.3409200462, 0.3062248646,\n 0.3038999429, -0.3030815773, -0.1873387893, 0.1738050251,\n 0.1424246593, -0.1292266025, 0.1272066463, -0.0798902878,\n 0.0788467222, 0.0722556991, 0.0505098280, 0.0233215136,\n 0.0007112055]\n unadj_p = [0.003177202, 0.021170298, 0.047432955, 0.060427645, 0.066248120,\n 0.085783008, 0.095943909, 0.116738318, 0.159368890, 0.275822623,\n 0.291386358, 0.314400295, 0.372104049, 0.372122040, 0.382333561,\n 0.413260793, 0.423229432, 0.446725370, 0.468363101, 0.508764039,\n 0.603971990, 0.610356737, 0.613905871, 0.619802317, 0.621087703,\n 0.636621083, 0.669911674, 0.682917818, 0.707414459, 0.723898263,\n 0.734904667, 0.760983108, 0.762741124, 0.763360242, 0.852319039,\n 0.862874018, 0.887442197, 0.897810225, 0.899398691, 0.936713197,\n 0.937538115, 0.942749758, 0.959961394, 0.981506948, 0.999435989]\n bonf_p = [0.1429741, 0.9526634, 2.1344830, 2.7192440, 2.9811654, 3.8602354,\n 4.3174759, 5.2532243, 7.1716001, 12.4120180, 13.1123861, 14.1480133,\n 16.7446822, 16.7454918, 17.2050103, 18.5967357, 19.0453245,\n 20.1026416, 21.0763395, 22.8943818, 27.1787396, 27.4660532,\n 27.6257642, 27.8911043, 27.9489466, 28.6479487, 
30.1460253,\n 30.7313018, 31.8336506, 32.5754218, 33.0707100, 34.2442399,\n 34.3233506, 34.3512109, 38.3543568, 38.8293308, 39.9348989,\n 40.4014601, 40.4729411, 42.1520939, 42.1892152, 42.4237391,\n 43.1982627, 44.1678127, 44.9746195]\n bonf_p = np.array(bonf_p)\n bonf_p[bonf_p > 1] = 1\n sorted_labels = [\"minister\", \"reporter\", \"contractor\", \"insurance.agent\",\n \"machinist\", \"store.clerk\", \"conductor\", \"factory.owner\",\n \"mail.carrier\", \"streetcar.motorman\", \"carpenter\", \"coal.miner\",\n \"bartender\", \"bookkeeper\", \"soda.clerk\", \"chemist\", \"RR.engineer\",\n \"professor\", \"electrician\", \"gas.stn.attendant\", \"auto.repairman\",\n \"watchman\", \"banker\", \"machine.operator\", \"dentist\", \"waiter\",\n \"shoe.shiner\", \"welfare.worker\", \"plumber\", \"physician\", \"pilot\",\n \"engineer\", \"accountant\", \"lawyer\", \"undertaker\", \"barber\",\n \"store.manager\", \"truck.driver\", \"cook\", \"janitor\", \"policeman\",\n \"architect\", \"teacher\", \"taxi.driver\", \"author\"]\n\n res2 = np.c_[rstudent, unadj_p, bonf_p]\n res = oi.outlier_test(ndarray_mod, method='b', labels=labels, order=True)\n np.testing.assert_almost_equal(res.values, res2, 7)\n np.testing.assert_equal(res.index.tolist(), sorted_labels) # pylint: disable-msg=E1103\n\n data = pd.DataFrame(np.column_stack((endog, exog)),\n columns='y const var1 var2'.split(),\n index=labels)\n\n # check `order` with pandas bug in #3971\n res_pd = OLS.from_formula('y ~ const + var1 + var2 - 0', data).fit()\n\n res_outl2 = oi.outlier_test(res_pd, method='b', order=True)\n assert_almost_equal(res_outl2.values, res2, 7)\n assert_equal(res_outl2.index.tolist(), sorted_labels)\n\n if PD_GE_17:\n # pandas < 0.17 does not have sort_values method\n res_outl1 = res_pd.outlier_test(method='b')\n res_outl1 = res_outl1.sort_values(['unadj_p'], ascending=True)\n assert_almost_equal(res_outl1.values, res2, 7)\n assert_equal(res_outl1.index.tolist(), sorted_labels)\n assert_array_equal(res_outl2.index, res_outl1.index)\n\n\n # additional keywords in method\n res_outl3 = res_pd.outlier_test(method='b', order=True)\n assert_equal(res_outl3.index.tolist(), sorted_labels)\n res_outl4 = res_pd.outlier_test(method='b', order=True, cutoff=0.15)\n assert_equal(res_outl4.index.tolist(), sorted_labels[:1])\n\n\nif __name__ == '__main__':\n import pytest\n pytest.main([__file__, '-vvs', '-x', '--pdb'])\n\n #t = TestDiagnosticG()\n #t.test_basic()\n #t.test_hac()\n #t.test_acorr_breusch_godfrey()\n #t.test_acorr_ljung_box()\n #t.test_het_goldfeldquandt()\n #t.test_het_breusch_pagan()\n #t.test_het_white()\n #t.test_compare_lr()\n #t.test_compare_nonnested()\n #t.test_influence()\n\n\n ##################################################\n\n '''\n J test\n\n Model 1: ginv ~ ggdp + lint\n Model 2: ginv ~ ggdp + tbilrate\n Estimate Std. Error t value Pr(>|t|)\n M1 + fitted(M2) 1.591505670785873 0.7384552861695823 2.15518 0.0323546 *\n M2 + fitted(M1) 1.305687653016899 0.4808385176653064 2.71544 0.0072039 **\n ---\n Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1\n\n\n = lm(ginv ~ ggdp + tbilrate)\n > ct = coxtest(fm, fm3)\n > ct\n Cox test\n\n Model 1: ginv ~ ggdp + lint\n Model 2: ginv ~ ggdp + tbilrate\n Estimate Std. Error z value Pr(>|z|)\n fitted(M1) ~ M2 -0.782030488930356 0.599696502782265 -1.30404 0.19222\n fitted(M2) ~ M1 -2.248817107408537 0.392656854330139 -5.72718 1.0211e-08 ***\n ---\n Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1\n\n\n\n > et = encomptest(fm, fm3)\n > et\n Encompassing test\n\n Model 1: ginv ~ ggdp + lint\n Model 2: ginv ~ ggdp + tbilrate\n Model E: ginv ~ ggdp + lint + tbilrate\n Res.Df Df F Pr(>F)\n M1 vs. ME 198 -1 4.64481 0.0323546 *\n M2 vs. ME 198 -1 7.37361 0.0072039 **\n ---\n Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1\n\n\n > fm4 = lm(realinv ~ realgdp + realint, data=d)\n > fm5 = lm(log(realinv) ~ realgdp + realint, data=d)\n > pet = petest(fm4, fm5)\n > pet\n PE test\n\n Model 1: realinv ~ realgdp + realint\n Model 2: log(realinv) ~ realgdp + realint\n Estimate Std. Error t value\n M1 + log(fit(M1))-fit(M2) -229.281878354594596 44.5087822087058598 -5.15139\n M2 + fit(M1)-exp(fit(M2)) 0.000634664704814 0.0000462387010349 13.72583\n Pr(>|t|)\n M1 + log(fit(M1))-fit(M2) 6.2013e-07 ***\n M2 + fit(M1)-exp(fit(M2)) < 2.22e-16 ***\n ---\n Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1\n\n '''\n", "\"\"\"\nMediation analysis\n\nImplements algorithm 1 ('parametric inference') and algorithm 2\n('nonparametric inference') from:\n\nImai, Keele, Tingley (2010). A general approach to causal mediation\nanalysis. Psychological Methods 15:4, 309-334.\n\nhttp://imai.princeton.edu/research/files/BaronKenny.pdf\n\nThe algorithms are described on page 317 of the paper.\n\nIn the case of linear models with no interactions involving the\nmediator, the results should be similar or identical to the earlier\nBarron-Kenny approach.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom statsmodels.graphics.utils import maybe_name_or_idx\nimport statsmodels.compat.pandas as pdc # pragma: no cover\n\n\nclass Mediation(object):\n \"\"\"\n Conduct a mediation analysis.\n\n Parameters\n ----------\n outcome_model : statsmodels model\n Regression model for the outcome. Predictor variables include\n the treatment/exposure, the mediator, and any other variables\n of interest.\n mediator_model : statsmodels model\n Regression model for the mediator variable. Predictor\n variables include the treatment/exposure and any other\n variables of interest.\n exposure : string or (int, int) tuple\n The name or column position of the treatment/exposure\n variable. If positions are given, the first integer is the\n column position of the exposure variable in the outcome model\n and the second integer is the position of the exposure variable\n in the mediator model. If a string is given, it must be the name\n of the exposure variable in both regression models.\n mediator : string or int\n The name or column position of the mediator variable in the\n outcome regression model. If None, infer the name from the\n mediator model formula (if present).\n moderators : dict\n Map from variable names or index positions to values of\n moderator variables that are held fixed when calculating\n mediation effects. If the keys are index position they must\n be tuples `(i, j)` where `i` is the index in the outcome model\n and `j` is the index in the mediator model. 
Otherwise the\n keys must be variable names.\n outcome_fit_kwargs : dict-like\n Keyword arguments to use when fitting the outcome model.\n mediator_fit_kwargs : dict-like\n Keyword arguments to use when fitting the mediator model.\n\n Returns a ``MediationResults`` object.\n\n Notes\n -----\n The mediator model class must implement ``get_distribution``.\n\n Examples\n --------\n A basic mediation analysis using formulas:\n\n >>> import statsmodels.api as sm\n >>> import statsmodels.genmod.families.links as links\n >>> probit = links.probit\n >>> outcome_model = sm.GLM.from_formula(\"cong_mesg ~ emo + treat + age + educ + gender + income\",\n ... data, family=sm.families.Binomial(link=probit()))\n >>> mediator_model = sm.OLS.from_formula(\"emo ~ treat + age + educ + gender + income\", data)\n >>> med = Mediation(outcome_model, mediator_model, \"treat\", \"emo\").fit()\n >>> med.summary()\n\n A basic mediation analysis without formulas. This may be slightly\n faster than the approach using formulas. If there are any\n interactions involving the treatment or mediator variables this\n approach will not work, you must use formulas.\n\n >>> import patsy\n >>> outcome = np.asarray(data[\"cong_mesg\"])\n >>> outcome_exog = patsy.dmatrix(\"emo + treat + age + educ + gender + income\", data,\n ... return_type='dataframe')\n >>> probit = sm.families.links.probit\n >>> outcome_model = sm.GLM(outcome, outcome_exog, family=sm.families.Binomial(link=probit()))\n >>> mediator = np.asarray(data[\"emo\"])\n >>> mediator_exog = patsy.dmatrix(\"treat + age + educ + gender + income\", data,\n ... return_type='dataframe')\n >>> mediator_model = sm.OLS(mediator, mediator_exog)\n >>> tx_pos = [outcome_exog.columns.tolist().index(\"treat\"),\n ... mediator_exog.columns.tolist().index(\"treat\")]\n >>> med_pos = outcome_exog.columns.tolist().index(\"emo\")\n >>> med = Mediation(outcome_model, mediator_model, tx_pos, med_pos).fit()\n >>> med.summary()\n\n A moderated mediation analysis. The mediation effect is computed\n for people of age 20.\n\n >>> fml = \"cong_mesg ~ emo + treat*age + emo*age + educ + gender + income\",\n >>> outcome_model = sm.GLM.from_formula(fml, data,\n ... family=sm.families.Binomial())\n >>> mediator_model = sm.OLS.from_formula(\"emo ~ treat*age + educ + gender + income\", data)\n >>> moderators = {\"age\" : 20}\n >>> med = Mediation(outcome_model, mediator_model, \"treat\", \"emo\",\n ... moderators=moderators).fit()\n\n References\n ----------\n Imai, Keele, Tingley (2010). A general approach to causal mediation\n analysis. Psychological Methods 15:4, 309-334.\n http://imai.princeton.edu/research/files/BaronKenny.pdf\n\n Tingley, Yamamoto, Hirose, Keele, Imai (2014). mediation : R\n package for causal mediation analysis. Journal of Statistical\n Software 59:5. 
http://www.jstatsoft.org/v59/i05/paper\n \"\"\"\n\n def __init__(self, outcome_model, mediator_model, exposure, mediator=None,\n moderators=None, outcome_fit_kwargs=None, mediator_fit_kwargs=None):\n\n self.outcome_model = outcome_model\n self.mediator_model = mediator_model\n self.exposure = exposure\n self.moderators = moderators if moderators is not None else {}\n\n if mediator is None:\n self.mediator = self._guess_endog_name(mediator_model, 'mediator')\n else:\n self.mediator = mediator\n\n self._outcome_fit_kwargs = (outcome_fit_kwargs if outcome_fit_kwargs\n is not None else {})\n self._mediator_fit_kwargs = (mediator_fit_kwargs if mediator_fit_kwargs\n is not None else {})\n\n # We will be changing these so need to copy.\n self._outcome_exog = outcome_model.exog.copy()\n self._mediator_exog = mediator_model.exog.copy()\n\n # Position of the exposure variable in the mediator model.\n self._exp_pos_mediator = self._variable_pos('exposure', 'mediator')\n\n # Position of the exposure variable in the outcome model.\n self._exp_pos_outcome = self._variable_pos('exposure', 'outcome')\n\n # Position of the mediator variable in the outcome model.\n self._med_pos_outcome = self._variable_pos('mediator', 'outcome')\n\n\n def _variable_pos(self, var, model):\n if model == 'mediator':\n mod = self.mediator_model\n else:\n mod = self.outcome_model\n\n if var == 'mediator':\n return maybe_name_or_idx(self.mediator, mod)[1]\n\n exp = self.exposure\n exp_is_2 = ((len(exp) == 2) and (type(exp) != type('')))\n\n if exp_is_2:\n if model == 'outcome':\n return exp[0]\n elif model == 'mediator':\n return exp[1]\n else:\n return maybe_name_or_idx(exp, mod)[1]\n\n\n def _guess_endog_name(self, model, typ):\n if hasattr(model, 'formula'):\n return model.formula.split(\"~\")[0].strip()\n else:\n raise ValueError('cannot infer %s name without formula' % typ)\n\n\n def _simulate_params(self, result):\n \"\"\"\n Simulate model parameters from fitted sampling distribution.\n \"\"\"\n mn = result.params\n cov = result.cov_params()\n return np.random.multivariate_normal(mn, cov)\n\n\n def _get_mediator_exog(self, exposure):\n \"\"\"\n Return the mediator exog matrix with exposure set to the given\n value. Set values of moderated variables as needed.\n \"\"\"\n mediator_exog = self._mediator_exog\n if not hasattr(self.mediator_model, 'formula'):\n mediator_exog[:, self._exp_pos_mediator] = exposure\n for ix in self.moderators:\n v = self.moderators[ix]\n mediator_exog[:, ix[1]] = v\n else:\n # Need to regenerate the model exog\n df = self.mediator_model.data.frame.copy()\n df.loc[:, self.exposure] = exposure\n for vname in self.moderators:\n v = self.moderators[vname]\n df.loc[:, vname] = v\n klass = self.mediator_model.__class__\n init_kwargs = self.mediator_model._get_init_kwds()\n model = klass.from_formula(data=df, **init_kwargs)\n mediator_exog = model.exog\n\n return mediator_exog\n\n\n def _get_outcome_exog(self, exposure, mediator):\n \"\"\"\n Retun the exog design matrix with mediator and exposure set to\n the given values. 
Set values of moderated variables as\n needed.\n \"\"\"\n outcome_exog = self._outcome_exog\n if not hasattr(self.outcome_model, 'formula'):\n outcome_exog[:, self._med_pos_outcome] = mediator\n outcome_exog[:, self._exp_pos_outcome] = exposure\n for ix in self.moderators:\n v = self.moderators[ix]\n outcome_exog[:, ix[0]] = v\n else:\n # Need to regenerate the model exog\n df = self.outcome_model.data.frame.copy()\n df.loc[:, self.exposure] = exposure\n df.loc[:, self.mediator] = mediator\n for vname in self.moderators:\n v = self.moderators[vname]\n df.loc[:, vname] = v\n klass = self.outcome_model.__class__\n init_kwargs = self.outcome_model._get_init_kwds()\n model = klass.from_formula(data=df, **init_kwargs)\n outcome_exog = model.exog\n\n return outcome_exog\n\n\n def _fit_model(self, model, fit_kwargs, boot=False):\n klass = model.__class__\n init_kwargs = model._get_init_kwds()\n endog = model.endog\n exog = model.exog\n if boot:\n ii = np.random.randint(0, len(endog), len(endog))\n endog = endog[ii]\n exog = exog[ii, :]\n outcome_model = klass(endog, exog, **init_kwargs)\n return outcome_model.fit(**fit_kwargs)\n\n\n def fit(self, method=\"parametric\", n_rep=1000):\n \"\"\"\n Fit a regression model to assess mediation.\n\n Parameters\n ----------\n method : string\n Either 'parametric' or 'bootstrap'.\n n_rep : integer\n The number of simulation replications.\n\n Returns a MediationResults object.\n \"\"\"\n\n if method.startswith(\"para\"):\n # Initial fit to unperturbed data.\n outcome_result = self._fit_model(self.outcome_model, self._outcome_fit_kwargs)\n mediator_result = self._fit_model(self.mediator_model, self._mediator_fit_kwargs)\n elif not method.startswith(\"boot\"):\n raise(\"method must be either 'parametric' or 'bootstrap'\")\n\n indirect_effects = [[], []]\n direct_effects = [[], []]\n\n for iter in range(n_rep):\n\n if method == \"parametric\":\n # Realization of outcome model parameters from sampling distribution\n outcome_params = self._simulate_params(outcome_result)\n\n # Realization of mediation model parameters from sampling distribution\n mediation_params = self._simulate_params(mediator_result)\n else:\n outcome_result = self._fit_model(self.outcome_model,\n self._outcome_fit_kwargs, boot=True)\n outcome_params = outcome_result.params\n mediator_result = self._fit_model(self.mediator_model,\n self._mediator_fit_kwargs, boot=True)\n mediation_params = mediator_result.params\n\n # predicted outcomes[tm][te] is the outcome when the\n # mediator is set to tm and the outcome/exposure is set to\n # te.\n predicted_outcomes = [[None, None], [None, None]]\n for tm in 0, 1:\n mex = self._get_mediator_exog(tm)\n gen = self.mediator_model.get_distribution(mediation_params,\n mediator_result.scale,\n exog=mex)\n potential_mediator = gen.rvs(mex.shape[0])\n\n for te in 0, 1:\n oex = self._get_outcome_exog(te, potential_mediator)\n po = self.outcome_model.predict(outcome_params, oex)\n predicted_outcomes[tm][te] = po\n\n for t in 0, 1:\n indirect_effects[t].append(predicted_outcomes[1][t] - predicted_outcomes[0][t])\n direct_effects[t].append(predicted_outcomes[t][1] - predicted_outcomes[t][0])\n\n for t in 0, 1:\n indirect_effects[t] = np.asarray(indirect_effects[t]).T\n direct_effects[t] = np.asarray(direct_effects[t]).T\n\n self.indirect_effects = indirect_effects\n self.direct_effects = direct_effects\n\n rslt = MediationResults(self.indirect_effects, self.direct_effects)\n rslt.method = method\n return rslt\n\n\ndef _pvalue(vec):\n return 2 * min(sum(vec > 0), 
sum(vec < 0)) / float(len(vec))\n\n\nclass MediationResults(object):\n \"\"\"\n A class for holding the results of a mediation analysis.\n\n The following terms are used in the summary output:\n\n ACME : average causal mediated effect\n ADE : average direct effect\n \"\"\"\n\n def __init__(self, indirect_effects, direct_effects):\n\n self.indirect_effects = indirect_effects\n self.direct_effects = direct_effects\n\n indirect_effects_avg = [None, None]\n direct_effects_avg = [None, None]\n for t in 0, 1:\n indirect_effects_avg[t] = indirect_effects[t].mean(0)\n direct_effects_avg[t] = direct_effects[t].mean(0)\n\n self.ACME_ctrl = indirect_effects_avg[0]\n self.ACME_tx = indirect_effects_avg[1]\n self.ADE_ctrl = direct_effects_avg[0]\n self.ADE_tx = direct_effects_avg[1]\n self.total_effect = (self.ACME_ctrl + self.ACME_tx + self.ADE_ctrl + self.ADE_tx) / 2\n\n self.prop_med_ctrl = self.ACME_ctrl / self.total_effect\n self.prop_med_tx = self.ACME_tx / self.total_effect\n self.prop_med_avg = (self.prop_med_ctrl + self.prop_med_tx) / 2\n\n self.ACME_avg = (self.ACME_ctrl + self.ACME_tx) / 2\n self.ADE_avg = (self.ADE_ctrl + self.ADE_tx) / 2\n\n\n def summary(self, alpha=0.05):\n \"\"\"\n Provide a summary of a mediation analysis.\n \"\"\"\n\n columns = [\"Estimate\", \"Lower CI bound\", \"Upper CI bound\", \"P-value\"]\n index = [\"ACME (control)\", \"ACME (treated)\", \"ADE (control)\", \"ADE (treated)\",\n \"Total effect\", \"Prop. mediated (control)\", \"Prop. mediated (treated)\",\n \"ACME (average)\", \"ADE (average)\", \"Prop. mediated (average)\"]\n smry = pd.DataFrame(columns=columns, index=index)\n\n for i, vec in enumerate([self.ACME_ctrl, self.ACME_tx, self.ADE_ctrl, self.ADE_tx,\n self.total_effect, self.prop_med_ctrl,\n self.prop_med_tx, self.ACME_avg, self.ADE_avg,\n self.prop_med_avg]):\n\n if ((vec is self.prop_med_ctrl) or (vec is self.prop_med_tx) or\n (vec is self.prop_med_avg)):\n smry.iloc[i, 0] = np.median(vec)\n else:\n smry.iloc[i, 0] = vec.mean()\n smry.iloc[i, 1] = np.percentile(vec, 100 * alpha / 2)\n smry.iloc[i, 2] = np.percentile(vec, 100 * (1 - alpha / 2))\n smry.iloc[i, 3] = _pvalue(vec)\n\n if pdc.version < '0.17.0': # pragma: no cover\n smry = smry.convert_objects(convert_numeric=True)\n else: # pragma: no cover\n smry = smry.apply(pd.to_numeric, errors='coerce')\n\n return smry\n" ]
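The `_pvalue` helper that closes the mediation module stored above turns a vector of simulated effect draws into a two-sided p-value: twice the smaller of the two tail proportions. A minimal sketch of that same rule on synthetic draws, using only numpy; the function and variable names here are illustrative and are not part of the stored module.

import numpy as np

def two_sided_sim_pvalue(vec):
    # Twice the smaller of the two tail counts, divided by the number of
    # draws -- the same rule as the _pvalue helper in the code above.
    vec = np.asarray(vec)
    return 2 * min((vec > 0).sum(), (vec < 0).sum()) / float(len(vec))

# Toy example: 1000 simulated effect draws centred at 1.0 with unit spread.
rng = np.random.default_rng(0)
draws = rng.normal(loc=1.0, scale=1.0, size=1000)
print(two_sided_sim_pvalue(draws))  # about 2*Phi(-1) ~= 0.32 for draws like these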
[ [ "numpy.log", "numpy.asarray", "pandas.DatetimeIndex", "pandas.DataFrame", "pandas.datetime" ], [ "numpy.dot", "numpy.arctan2", "numpy.arange", "numpy.eye", "numpy.roots", "numpy.asanyarray", "numpy.repeat", "numpy.zeros", "numpy.nonzero", "numpy.polymul", "numpy.isnan", "numpy.array", "numpy.sum", "numpy.inner", "numpy.triu_indices", "numpy.ones", "numpy.linalg.pinv", "numpy.diag_indices", "numpy.empty" ], [ "numpy.diag", "numpy.testing.assert_approx_equal", "numpy.asarray", "pandas.DataFrame", "numpy.all", "numpy.round", "numpy.random.randn", "numpy.testing.assert_equal", "pandas.read_csv", "numpy.ones_like", "numpy.testing.assert_almost_equal", "numpy.diff", "numpy.column_stack", "numpy.log", "numpy.testing.assert_allclose", "numpy.array", "numpy.random.seed", "numpy.ones", "numpy.testing.assert_array_equal", "numpy.isinf" ], [ "numpy.asarray", "numpy.random.multivariate_normal", "numpy.median", "numpy.percentile", "pandas.DataFrame" ] ]
JiuShiNewBee/mypyfesom2
[ "d84adad116888f83b89813e1a86ce8a233171138" ]
[ "pyfesom2/fesom_plot_tools.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# This file is part of pyfesom2\n# Original code by Dmitry Sidorenko, 2013\n#\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ntry:\n from mpl_toolkits.basemap import Basemap\nexcept KeyError:\n # dirty hack to avoid KeyError: 'PROJ_LIB' problem with basemap\n import conda\n import os\n\n conda_file_dir = conda.__file__\n conda_dir = conda_file_dir.split(\"lib\")[0]\n proj_lib = os.path.join(os.path.join(conda_dir, \"share\"), \"proj\")\n os.environ[\"PROJ_LIB\"] = proj_lib\n\n from mpl_toolkits.basemap import Basemap\nfrom matplotlib.colors import LinearSegmentedColormap\nfrom .regriding import fesom2regular\nfrom netCDF4 import Dataset, MFDataset, num2date\nimport matplotlib as mpl\n\n# mpl.use('Qt5Agg')\n# %matplotlib inline\nimport matplotlib.pylab as plt\nimport numpy as np\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeature\nfrom cmocean import cm as cmo\nfrom matplotlib import cm\nimport sys, os\n\n# sys.path.append(os.path.join(os.path.dirname(__file__), \"../\"))\n# import pyfesom2 as pf\nfrom cartopy.util import add_cyclic_point\n\n# from scipy.interpolate import griddata\n# import scipy.spatial.qhull as qhull\n# from scipy.interpolate import LinearNDInterpolator, CloughTocher2DInterpolator\nfrom cartopy.util import add_cyclic_point\n\n# import xarray as xr\nimport shapely.vectorized\nimport joblib\nfrom .transect import *\nimport matplotlib\nfrom .ut import mask_ne\n\n\ndef ftriplot(\n mesh,\n data2,\n contours,\n cmap=[],\n oce=\"global\",\n do_cbar=True,\n mlabels=[0, 0, 0, 0],\n plabels=[0, 0, 0, 0],\n extend=\"both\",\n data_on_elem=0,\n\tblat=45,\n):\n if cmap == []:\n cmap = plt.cm.jet\n if oce == \"global\":\n data2 = np.copy(data2)\n\n elem2 = mesh.elem[mesh.no_cyclic_elem, :]\n\n if data_on_elem == 0:\n d = data2[elem2].mean(axis=1)\n else:\n data2 = data2[mesh.no_cyclic_elem]\n d = data2\n\n k = [i for (i, val) in enumerate(d) if not np.isnan(val)]\n elem2 = elem2[k, :]\n\n if data_on_elem == 1:\n data2 = data2[k]\n\n print(\"ftriplot, number of dummy points:\", len(d) - len(k))\n map = Basemap(projection=\"robin\", lon_0=0)\n x, y = map(mesh.x2, mesh.y2)\n map.drawmapboundary(fill_color=\"0.9\")\n map.drawcoastlines()\n map.drawparallels(np.arange(-90, 90, 30), labels=plabels) # [1,0,0,0]\n map.drawmeridians(\n np.arange(map.lonmin, map.lonmax + 30, 60), labels=mlabels\n ) # [0,0,0,1]\n # data2[data2>900]=np.nan\n eps = (contours.max() - contours.min()) / 50.0\n data2[data2 <= contours.min()] = contours.min() + eps\n data2[data2 >= contours.max()] = contours.max() - eps\n if data_on_elem:\n im = plt.tripcolor(x, y, elem2, facecolors=data2, cmap=cmap)\n else:\n im = plt.tricontourf(\n x, y, elem2, data2, levels=contours, cmap=cmap, extend=extend\n )\n if do_cbar:\n cbar = map.colorbar(im, \"bottom\", size=\"5%\", pad=\"2%\")\n\n # \t\tn=642155-1\n # \t\tn=83089-1\n # \t\tplt.plot(x[n-1], y[n-1], markersize=10, marker='o')\n elif oce == \"np\":\n data2 = np.copy(data2)\n elem2 = mesh.elem # [mesh.no_cyclic_elem,:]\n d = data2[elem2].mean(axis=1)\n k = [i for (i, val) in enumerate(d) if not np.isnan(val)]\n elem2 = elem2[k, :]\n print(\"ftriplot, number of dummy points:\", len(d) - len(k))\n map = Basemap(projection=\"nplaea\", boundinglat=blat, lon_0=0, resolution=\"l\")\n x, y = map(mesh.x2, mesh.y2)\n map.drawcoastlines()\n map.drawparallels(np.arange(-80.0, 81.0, 20.0), labels=plabels)\n map.drawmeridians(np.arange(-180.0, 181.0, 20.0), labels=mlabels) # [0,1,0,0]\n map.drawmapboundary(fill_color=\"0.9\")\n 
map.fillcontinents(color=\".7\", lake_color=\".7\")\n # data2[data2>900]=np.nan\n eps = (contours.max() - contours.min()) / 100.0\n data2[data2 <= contours.min()] = contours.min() + eps\n data2[data2 >= contours.max()] = contours.max() - eps\n im = plt.tricontourf(\n x, y, elem2, data2, levels=contours, cmap=cmap, extend=extend\n )\n if do_cbar:\n cbar = map.colorbar(im, \"bottom\", size=\"5%\", pad=\"2%\")\n elif oce == \"sp\":\n data2 = np.copy(data2)\n elem2 = mesh.elem # [mesh.no_cyclic_elem,:]\n d = data2[elem2].mean(axis=1)\n k = [i for (i, val) in enumerate(d) if not np.isnan(val)]\n elem2 = elem2[k, :]\n print(\"ftriplot, number of dummy points:\", len(d) - len(k))\n #map = Basemap(projection=\"splaea\", boundinglat=-20, lon_0=180, resolution=\"l\")\n map = Basemap(projection='spstere',boundinglat=blat,lon_0=0,resolution='l')\n x, y = map(mesh.x2, mesh.y2)\n map.drawcoastlines()\n map.drawparallels(np.arange(-80.0, 81.0, 20.0), labels=plabels)\n map.drawmeridians(np.arange(-180.0, 181.0, 20.0), labels=mlabels)\n map.drawmapboundary(fill_color=\"0.9\")\n map.fillcontinents(color=\".7\", lake_color=\".7\")\n # data2[data2>900]=np.nan\n eps = (contours.max() - contours.min()) / 100.0\n data2[data2 <= contours.min()] = contours.min() + eps\n data2[data2 >= contours.max()] = contours.max() - eps\n im = plt.tricontourf(\n #x, y, elem2, data2, levels=contours, cmap=cmap, extend=extend\n x, y, data2, levels=contours, cmap=cmap, extend=extend\n )\n if do_cbar:\n cbar = map.colorbar(im, \"bottom\", size=\"5%\", pad=\"2%\")\n return (im, map, cbar if (do_cbar) else False)\n\n\ndef wplot_xy(xx, yy, zz, contours, cmap=[], do_cbar=True, oce=\"global\"):\n import numpy as np\n import matplotlib.pyplot as plt\n from mpl_toolkits.basemap import Basemap\n from matplotlib.colors import LinearSegmentedColormap\n\n if cmap == []:\n cmap = plt.cm.jet\n eps = (contours.max() - contours.min()) / 100.0\n zz[zz <= contours.min()] = contours.min() + eps\n zz[zz >= contours.max()] = contours.max() - eps\n\n if oce == \"global\":\n\n map = Basemap(projection=\"robin\", lon_0=0, llcrnrlon=-180.0, urcrnrlon=180.0)\n xxx, yyy = map(xx, yy)\n\n map.drawmapboundary(fill_color=\"0.9\")\n map.drawcoastlines()\n map.fillcontinents(color=\".7\", lake_color=\".7\")\n map.drawparallels(np.arange(-90, 90, 45), labels=[1, 0, 0, 0])\n map.drawmeridians([-120.0, 0.0, 120.0], labels=[0, 0, 0, 1])\n im = plt.contourf(xxx, yyy, zz, levels=contours, cmap=cmap, extend=\"both\")\n if do_cbar:\n cbar = map.colorbar(im, \"bottom\", size=\"5%\", pad=\"2%\")\n return (im, map, cbar)\n else:\n return (im, map)\n elif oce == \"np\":\n map = Basemap(projection=\"nplaea\", boundinglat=45, lon_0=0, resolution=\"l\")\n xxx, yyy = map(xx, yy)\n\n map.drawmapboundary(fill_color=\"0.9\")\n map.drawcoastlines()\n map.fillcontinents(color=\".7\", lake_color=\".7\")\n map.drawparallels(np.arange(-80.0, 81.0, 20.0), labels=[0, 0, 0, 0])\n map.drawmeridians(\n np.arange(-180.0, 181.0, 20.0), labels=[0, 0, 0, 0]\n ) # [0,1,0,0]\n im = plt.contourf(xxx, yyy, zz, levels=contours, cmap=cmap, extend=\"both\")\n if do_cbar:\n cbar = map.colorbar(im, \"bottom\", size=\"5%\", pad=\"2%\")\n return (im, map, cbar)\n else:\n return (im, map)\n elif oce == \"sp\":\n map = Basemap(projection=\"splaea\", boundinglat=-20, lon_0=180, resolution=\"l\")\n xxx, yyy = map(xx, yy)\n\n map.drawmapboundary(fill_color=\"0.9\")\n map.drawcoastlines()\n map.fillcontinents(color=\".7\", lake_color=\".7\")\n map.drawparallels(np.arange(-80.0, 81.0, 20.0), labels=[0, 0, 0, 
0])\n map.drawmeridians(np.arange(-180.0, 181.0, 20.0), labels=[0, 0, 0, 0])\n im = plt.contourf(xxx, yyy, zz, levels=contours, cmap=cmap, extend=\"both\")\n if do_cbar:\n cbar = map.colorbar(im, \"bottom\", size=\"5%\", pad=\"2%\")\n return (im, map, cbar)\n else:\n return (im, map)\n\n\ndef wplot_yz(y, z, v, contours, cmap=[]):\n import numpy as np\n import matplotlib.pyplot as plt\n from matplotlib.colors import LinearSegmentedColormap\n\n if cmap == []:\n cmap = plt.cm.jet\n\n im = plt.contourf(y, z, v, levels=contours, cmap=cmap, extend=\"both\")\n cbar = plt.colorbar(orientation=\"horizontal\")\n plt.grid()\n return (im, cbar)\n\n\ndef movingaverage(interval, window_size):\n import numpy as np\n\n window = np.ones(int(window_size)) / float(window_size)\n ret = list(interval)\n for i in range(window_size):\n ret = ret + [ret[-1]]\n ret = np.convolve(np.array(ret), window, \"valid\")\n return ret\n\n\ndef plot(\n mesh,\n data,\n cmap=None,\n influence=80000,\n box=[-180, 180, -89, 90],\n res=[360, 180],\n interp=\"nn\",\n mapproj=\"merc\",\n levels=None,\n ptype=\"cf\",\n units=None,\n figsize=(10, 10),\n rowscol=(1, 1),\n titles=None,\n distances_path=None,\n inds_path=None,\n qhull_path=None,\n basepath=None,\n):\n \"\"\"\n Plots interpolated 2d field on the map.\n\n Parameters\n ----------\n mesh: mesh object\n FESOM2 mesh object\n data: np.array or list of np.arrays\n FESOM 2 data on nodes (for u,v,u_ice and v_ice one have to first interpolate from elements to nodes).\n Can be ether one np.ndarray or list of np.ndarrays.\n cmap: str\n Name of the colormap from cmocean package or from the standard matplotlib set.\n By default `Spectral_r` will be used.\n influence: float\n Radius of influence for interpolation, in meters.\n box: list\n Map boundaries in -180 180 -90 90 format that will be used for interpolation (default [-180 180 -89 90]).\n res: list\n Number of points along each axis that will be used for interpolation (for lon and lat),\n default [360, 180].\n interp: str\n Interpolation method. Options are 'nn' (nearest neighbor), 'idist' (inverce distance), \"linear\" and \"cubic\".\n mapproj: str\n Map projection. Options are Mercator (merc), Plate Carree (pc),\n North Polar Stereo (np), South Polar Stereo (sp), Robinson (rob)\n levels: list\n Levels for contour plot in format min max numberOfLevels.\n If not provided min/max values from data will be used with 40 levels.\n ptype: str\n Plot type. Options are contourf (\\'cf\\') and pcolormesh (\\'pcm\\')\n units: str\n Units for color bar.\n figsize: tuple\n figure size in inches\n rowscol: tuple\n number of rows and columns.\n titles: str or list\n Title of the plot (if string) or subplots (if list of strings)\n distances_path : string\n Path to the file with distances. If not provided and dumpfile=True, it will be created.\n inds_path : string\n Path to the file with inds. If not provided and dumpfile=True, it will be created.\n qhull_path : str\n Path to the file with qhull (needed for linear and cubic interpolations). If not provided and dumpfile=True, it will be created.\n basepath: str\n path where to store additional interpolation files. 
If None (default),\n the path of the mesh will be used.\n \"\"\"\n if not isinstance(data, list):\n data = [data]\n if titles:\n if not isinstance(titles, list):\n titles = [titles]\n if len(titles) != len(data):\n raise ValueError(\n \"The number of titles do not match the number of data fields, please adjust titles (or put to None)\")\n\n if (rowscol[0] * rowscol[1]) < len(data):\n raise ValueError(\n \"Number of rows*columns is smaller than number of data fields, please adjust rowcol.\")\n\n if cmap:\n if isinstance(cmap, (matplotlib.colors.Colormap)):\n colormap = cmap\n elif cmap in cmo.cmapnames:\n colormap = cmo.cmap_d[cmap]\n elif cmap in plt.cm.datad:\n colormap = plt.get_cmap(cmap)\n else:\n raise ValueError(\n \"Get unrecognised name for the colormap `{}`. Colormaps should be from standard matplotlib set of from cmocean package.\".format(\n cmap\n )\n )\n else:\n colormap = plt.get_cmap(\"Spectral_r\")\n\n radius_of_influence = influence\n\n left, right, down, up = box\n lonNumber, latNumber = res\n\n # flf = Dataset(ifile)\n lonreg = np.linspace(left, right, lonNumber)\n latreg = np.linspace(down, up, latNumber)\n lonreg2, latreg2 = np.meshgrid(lonreg, latreg)\n\n interpolated = []\n for datainstance in data:\n\n if interp == \"nn\":\n ofesom = fesom2regular(\n datainstance,\n mesh,\n lonreg2,\n latreg2,\n distances_path=distances_path,\n inds_path=inds_path,\n radius_of_influence=radius_of_influence,\n basepath=basepath,\n )\n interpolated.append(ofesom)\n elif interp == \"idist\":\n ofesom = fesom2regular(\n datainstance,\n mesh,\n lonreg2,\n latreg2,\n distances_path=distances_path,\n inds_path=inds_path,\n radius_of_influence=radius_of_influence,\n how=\"idist\",\n k=5,\n basepath=basepath,\n )\n interpolated.append(ofesom)\n elif interp == \"linear\":\n ofesom = fesom2regular(\n datainstance,\n mesh,\n lonreg2,\n latreg2,\n how=\"linear\",\n qhull_path=qhull_path,\n basepath=basepath,\n )\n interpolated.append(ofesom)\n elif interp == \"cubic\":\n ofesom = fesom2regular(\n datainstance, mesh, lonreg2, latreg2, basepath=basepath, how=\"cubic\"\n )\n interpolated.append(ofesom)\n\n # nearth = cfeature.NaturalEarthFeature(\"physical\", \"ocean\", \"50m\")\n # main_geom = [contour for contour in nearth.geometries()][0]\n\n # mask = shapely.vectorized.contains(main_geom, lonreg2, latreg2)\n # m2 = np.where(((lonreg2 == -180.0) & (latreg2 > 71.5)), True, mask)\n # m2 = np.where(\n # ((lonreg2 == -180.0) & (latreg2 < 70.95) & (latreg2 > 68.96)), True, m2\n # )\n # m2 = np.where(((lonreg2 == -180.0) & (latreg2 < 65.33)), True, m2)\n\n m2 = mask_ne(lonreg2, latreg2)\n\n # m2 = np.where(((lonreg2 == 180.)&(latreg2>71.5)), True, m2)\n # m2 = np.where(((lonreg2 == 180.)&(latreg2<70.95)&(latreg2>68.96)), True, m2)\n # m2 = np.where(((lonreg2 == 180.)&(latreg2<65.33)), True, m2)\n\n for i, interpolated_instance in enumerate(interpolated):\n interpolated[i] = np.ma.masked_where(m2, interpolated[i])\n interpolated[i] = np.ma.masked_equal(interpolated[i], 0)\n\n if mapproj == \"merc\":\n fig, ax = plt.subplots(\n rowscol[0],\n rowscol[1],\n subplot_kw=dict(projection=ccrs.Mercator()),\n constrained_layout=True,\n figsize=figsize,\n )\n elif mapproj == \"pc\":\n fig, ax = plt.subplots(\n rowscol[0],\n rowscol[1],\n subplot_kw=dict(projection=ccrs.PlateCarree()),\n constrained_layout=True,\n figsize=figsize,\n )\n elif mapproj == \"np\":\n fig, ax = plt.subplots(\n rowscol[0],\n rowscol[1],\n subplot_kw=dict(projection=ccrs.NorthPolarStereo()),\n constrained_layout=True,\n figsize=figsize,\n 
)\n elif mapproj == \"sp\":\n fig, ax = plt.subplots(\n rowscol[0],\n rowscol[1],\n subplot_kw=dict(projection=ccrs.SouthPolarStereo()),\n constrained_layout=True,\n figsize=figsize,\n )\n elif mapproj == \"rob\":\n fig, ax = plt.subplots(\n rowscol[0],\n rowscol[1],\n subplot_kw=dict(projection=ccrs.Robinson()),\n constrained_layout=True,\n figsize=figsize,\n )\n if isinstance(ax, np.ndarray):\n ax = ax.flatten()\n else:\n ax = [ax]\n\n for ind, data_int in enumerate(interpolated):\n ax[ind].set_extent([left, right, down, up], crs=ccrs.PlateCarree())\n if levels:\n mmin, mmax, nnum = levels\n nnum = int(nnum)\n else:\n mmin = np.nanmin(data_int)\n mmax = np.nanmax(data_int)\n nnum = 40\n data_levels = np.linspace(mmin, mmax, nnum)\n if ptype == \"cf\":\n data_int_cyc, lon_cyc = add_cyclic_point(data_int, coord=lonreg)\n image = ax[ind].contourf(\n lon_cyc,\n latreg,\n data_int_cyc,\n levels=data_levels,\n transform=ccrs.PlateCarree(),\n cmap=colormap,\n extend=\"both\",\n )\n elif ptype == \"pcm\":\n data_int_cyc, lon_cyc = add_cyclic_point(data_int, coord=lonreg)\n image = ax[ind].pcolormesh(\n lon_cyc,\n latreg,\n data_int_cyc,\n vmin=mmin,\n vmax=mmax,\n transform=ccrs.PlateCarree(),\n cmap=colormap,\n )\n else:\n raise ValueError(\"Inknown plot type {}\".format(ptype))\n\n # ax.coastlines(resolution = '50m',lw=0.5)\n ax[ind].add_feature(\n cfeature.GSHHSFeature(levels=[1], scale=\"low\", facecolor=\"lightgray\")\n )\n if titles:\n ax[ind].set_title(titles.pop(0), size=20)\n\n for delind in range(ind + 1, len(ax)):\n fig.delaxes(ax[delind])\n\n cb = fig.colorbar(image, orientation=\"horizontal\", ax=ax, pad=0.01, shrink=0.9)\n\n cb.ax.tick_params(labelsize=15)\n\n if units:\n cb.set_label(units, size=20)\n else:\n pass\n\n return ax\n\n\ndef plot_transect_map(\n lon_start, lat_start, lon_end, lat_end, mesh, npoints=30, view=\"w\", stock_img=False\n):\n # plt.figure(figsize=(10,10))\n lonlat = transect_get_lonlat(\n lon_start, lat_start, lon_end, lat_end, npoints=npoints\n )\n nodes = transect_get_nodes(lonlat, mesh)\n # dist = transect_get_distance(lonlat)\n\n if view == \"w\":\n ax = plt.subplot(111, projection=ccrs.Mercator(central_longitude=0))\n ax.set_extent([180, -180, -80, 90], crs=ccrs.PlateCarree())\n elif view == \"np\":\n ax = plt.subplot(111, projection=ccrs.NorthPolarStereo(central_longitude=0))\n ax.set_extent([180, -180, 60, 90], crs=ccrs.PlateCarree())\n elif view == \"sp\":\n ax = plt.subplot(111, projection=ccrs.SouthPolarStereo(central_longitude=0))\n ax.set_extent([180, -180, -90, -50], crs=ccrs.PlateCarree())\n else:\n raise ValueError(\n 'The \"{}\" is not recognized as valid view option.'.format(view)\n )\n\n ax.scatter(lonlat[:, 0], lonlat[:, 1], s=30, c=\"b\", transform=ccrs.PlateCarree())\n ax.scatter(\n mesh.x2[nodes], mesh.y2[nodes], s=30, c=\"r\", transform=ccrs.PlateCarree()\n )\n if stock_img == True:\n ax.stock_img()\n ax.coastlines(resolution=\"50m\")\n return ax\n\n\ndef plot_transect(\n data3d,\n mesh,\n lon_start,\n lat_start,\n lon_end,\n lat_end,\n npoints=30,\n maxdepth=1000,\n label=\"$^{\\circ}$C\",\n title=\"\",\n levels=None,\n cmap=cm.Spectral_r,\n ax=None,\n dist=None,\n nodes=None,\n ncols=2,\n figsize=None,\n transect_data=[],\n max_distance=1e6,\n):\n\n depth_index = ind_for_depth(maxdepth, mesh)\n if not isinstance(data3d, list):\n if ax is None:\n ax = plt.gca()\n oneplot = True\n else:\n oneplot = False\n if (type(dist) is np.ndarray) and (type(nodes) is np.ndarray):\n if not (type(transect_data) is np.ma.core.MaskedArray):\n lonlat = 
transect_get_lonlat(\n lon_start, lat_start, lon_end, lat_end, npoints=npoints\n )\n mask2d = transect_get_mask(nodes, mesh, lonlat, max_distance)\n transect_data = transect_get_data(data3d, nodes, mask2d)\n else:\n lonlat = transect_get_lonlat(\n lon_start, lat_start, lon_end, lat_end, npoints=npoints\n )\n nodes = transect_get_nodes(lonlat, mesh)\n dist = transect_get_distance(lonlat)\n # profile = transect_get_profile(nodes, mesh)\n if not (type(transect_data) is np.ma.core.MaskedArray):\n mask2d = transect_get_mask(nodes, mesh, lonlat, max_distance)\n transect_data = transect_get_data(data3d, nodes, mask2d)\n\n image = ax.contourf(\n dist,\n np.abs(mesh.zlev[:depth_index]),\n transect_data[:, :depth_index].T,\n levels=levels,\n cmap=cmap,\n extend=\"both\",\n )\n ax.invert_yaxis()\n ax.set_title(title)\n ax.set_xlabel(\"km\")\n ax.set_ylabel(\"m\")\n\n if oneplot:\n cb = plt.colorbar(image)\n cb.set_label(label)\n\n return image\n else:\n ncols = float(ncols)\n nplots = len(data3d)\n nrows = math.ceil(nplots / ncols)\n ncols = int(ncols)\n nrows = int(nrows)\n nplot = 1\n\n if not figsize:\n figsize = (8 * ncols, 2 * nrows * ncols)\n fig, ax = plt.subplots(nrows, ncols, figsize=figsize)\n ax = ax.flatten()\n for ind, data in enumerate(data3d):\n if (type(dist) is np.ndarray) and (type(nodes) is np.ndarray):\n transect_data = transect_get_data(data, nodes)\n else:\n lonlat = transect_get_lonlat(\n lon_start, lat_start, lon_end, lat_end, npoints=npoints\n )\n nodes = transect_get_nodes(lonlat, mesh)\n dist = transect_get_distance(lonlat)\n # profile = transect_get_profile(nodes, mesh)\n mask2d = transect_get_mask(nodes, mesh, lonlat, max_distance)\n transect_data = transect_get_data(data3d, nodes, max_distance)\n\n image = ax[ind].contourf(\n dist,\n np.abs(mesh.zlev[:depth_index]),\n transect_data[:, :depth_index].T,\n levels=levels,\n cmap=cmap,\n extend=\"both\",\n )\n ax[ind].invert_yaxis()\n if not isinstance(title, list):\n ax[ind].set_title(title)\n else:\n ax[ind].set_title(title[ind])\n ax[ind].set_xlabel(\"km\")\n ax[ind].set_ylabel(\"m\")\n\n cb = fig.colorbar(image, orientation=\"horizontal\", ax=ax[ind], pad=0.11)\n cb.set_label(label)\n for delind in range(ind + 1, len(ax)):\n\n fig.delaxes(ax[delind])\n\n fig.tight_layout()\n" ]
[ [ "numpy.nanmax", "matplotlib.pyplot.contourf", "numpy.linspace", "numpy.nanmin", "matplotlib.pyplot.get_cmap", "numpy.ma.masked_where", "matplotlib.pyplot.tripcolor", "matplotlib.pyplot.gca", "numpy.arange", "matplotlib.pyplot.tricontourf", "numpy.copy", "numpy.isnan", "numpy.ma.masked_equal", "numpy.array", "numpy.meshgrid", "numpy.abs", "matplotlib.pyplot.subplots", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.grid" ] ]
googleinterns/gail-dyn
[ "31c93b12d068dede0dbe69547f0b2e500374f260" ]
[ "third_party/a2c_ppo_acktr/baselines/results_plotter.py" ]
[ "# The MIT License\n#\n# Copyright (c) 2017 OpenAI (http://openai.com)\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\nimport numpy as np\nimport matplotlib\n\nmatplotlib.use('TkAgg') # Can change to 'Agg' for non-interactive mode\n\nimport matplotlib.pyplot as plt\n\nplt.rcParams['svg.fonttype'] = 'none'\n\nfrom third_party.a2c_ppo_acktr.baselines.common import plot_util\n\nX_TIMESTEPS = 'timesteps'\nX_EPISODES = 'episodes'\nX_WALLTIME = 'walltime_hrs'\nY_REWARD = 'reward'\nY_TIMESTEPS = 'timesteps'\nPOSSIBLE_X_AXES = [X_TIMESTEPS, X_EPISODES, X_WALLTIME]\nEPISODES_WINDOW = 100\nCOLORS = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'purple', 'pink',\n 'brown', 'orange', 'teal', 'coral', 'lightblue', 'lime', 'lavender', 'turquoise',\n 'darkgreen', 'tan', 'salmon', 'gold', 'darkred', 'darkblue']\n\n\ndef rolling_window(a, window):\n shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)\n strides = a.strides + (a.strides[-1],)\n return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)\n\n\ndef window_func(x, y, window, func):\n yw = rolling_window(y, window)\n yw_func = func(yw, axis=-1)\n return x[window - 1:], yw_func\n\n\ndef ts2xy(ts, xaxis, yaxis):\n if xaxis == X_TIMESTEPS:\n x = np.cumsum(ts.l.values)\n elif xaxis == X_EPISODES:\n x = np.arange(len(ts))\n elif xaxis == X_WALLTIME:\n x = ts.t.values / 3600.\n else:\n raise NotImplementedError\n if yaxis == Y_REWARD:\n y = ts.r.values\n elif yaxis == Y_TIMESTEPS:\n y = ts.l.values\n else:\n raise NotImplementedError\n return x, y\n\n\ndef plot_curves(xy_list, xaxis, yaxis, title):\n fig = plt.figure(figsize=(8, 2))\n maxx = max(xy[0][-1] for xy in xy_list)\n minx = 0\n for (i, (x, y)) in enumerate(xy_list):\n color = COLORS[i % len(COLORS)]\n plt.scatter(x, y, s=2)\n x, y_mean = window_func(x, y, EPISODES_WINDOW, np.mean) # So returns average of last EPISODE_WINDOW episodes\n plt.plot(x, y_mean, color=color)\n plt.xlim(minx, maxx)\n plt.title(title)\n plt.xlabel(xaxis)\n plt.ylabel(yaxis)\n plt.tight_layout()\n fig.canvas.mpl_connect('resize_event', lambda event: plt.tight_layout())\n plt.grid(True)\n\n\ndef split_by_task(taskpath):\n return taskpath['dirname'].split('/')[-1].split('-')[0]\n\n\ndef plot_results(dirs, num_timesteps=10e6, xaxis=X_TIMESTEPS, yaxis=Y_REWARD, title='', split_fn=split_by_task):\n results = plot_util.load_results(dirs)\n plot_util.plot_results(results, xy_fn=lambda r: ts2xy(r['monitor'], xaxis, yaxis), split_fn=split_fn,\n average_group=True, 
resample=int(1e6))\n\n\n# Example usage in jupyter-notebook\n# from third_party.a2c_ppo_acktr.baselines.results_plotter import plot_results\n# %matplotlib inline\n# plot_results(\"./log\")\n# Here ./log is a directory containing the monitor.csv files\n\ndef main():\n import argparse\n import os\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--dirs', help='List of log directories', nargs='*', default=['./log'])\n parser.add_argument('--num_timesteps', type=int, default=int(10e6))\n parser.add_argument('--xaxis', help='Varible on X-axis', default=X_TIMESTEPS)\n parser.add_argument('--yaxis', help='Varible on Y-axis', default=Y_REWARD)\n parser.add_argument('--task_name', help='Title of plot', default='Breakout')\n args = parser.parse_args()\n args.dirs = [os.path.abspath(dir) for dir in args.dirs]\n plot_results(args.dirs, args.num_timesteps, args.xaxis, args.yaxis, args.task_name)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.title", "matplotlib.pyplot.scatter", "matplotlib.use", "numpy.cumsum", "numpy.lib.stride_tricks.as_strided", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlim", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
vishalbelsare/PySyft
[ "6b2cb4ca3a54e8bb2e61d549bf7773aa955d7468", "fb04404fcfbef82fad1fb47407b35a24e9afb599", "fb04404fcfbef82fad1fb47407b35a24e9afb599" ]
[ "packages/syft/tests/syft/core/pointer/garbage_collection/gc_strategies_test.py", "packages/syft/tests/syft/core/node/domain/domain_test.py", "packages/syft/src/syft/core/tensor/smpc/share_tensor.py" ]
[ "# third party\nimport torch\n\n# syft absolute\nimport syft as sy\nfrom syft.core.pointer.garbage_collection import GCBatched\nfrom syft.core.pointer.garbage_collection import GCSimple\nfrom syft.core.pointer.garbage_collection import GarbageCollection\nfrom syft.core.pointer.garbage_collection import gc_get_default_strategy\nfrom syft.core.pointer.garbage_collection import gc_set_default_strategy\n\n\ndef test_gc_simple_strategy() -> None:\n node = sy.VirtualMachine(name=\"alice\")\n client = node.get_client()\n\n x = torch.tensor([1, 2, 3, 4])\n ptr = x.send(client, pointable=False)\n\n assert len(node.store) == 1\n\n del ptr\n\n assert len(node.store) == 0\n\n\ndef test_gc_batched_strategy_setter() -> None:\n node = sy.VirtualMachine(name=\"alice\")\n client = node.get_client()\n client.gc.gc_strategy = GCBatched(threshold=10)\n\n x = torch.tensor([1, 2, 3, 4])\n\n for _ in range(9):\n x.send(client, pointable=False)\n\n assert len(node.store) == 9\n\n x.send(client, pointable=False)\n\n assert len(node.store) == 0\n\n\ndef test_gc_batched_strategy_gc_constructor() -> None:\n # don't share a VM with other tests\n node = sy.VirtualMachine()\n client = node.get_client()\n client.gc = GarbageCollection(\"gcbatched\", 5)\n\n x = torch.tensor([1, 2, 3, 4])\n\n for _ in range(4):\n x.send(client, pointable=False)\n\n assert len(node.store) == 4\n\n x.send(client, pointable=False)\n\n assert len(node.store) == 0\n\n\ndef test_gc_change_default_gc_strategy(node: sy.VirtualMachine) -> None:\n gc_prev_strategy = gc_get_default_strategy()\n gc_set_default_strategy(\"gcbatched\")\n\n client = node.get_client()\n\n res = isinstance(client.gc.gc_strategy, GCBatched)\n\n # Revert\n gc_set_default_strategy(gc_prev_strategy)\n sy.core.pointer.garbage_collection.GC_DEFAULT_STRATEGY = GCSimple\n\n assert res\n\n\ndef test_gc_batched_delete_at_change() -> None:\n node = sy.VirtualMachine(name=\"alice\")\n client = node.get_client()\n\n # Change the strategy\n client.gc.gc_strategy = GCBatched()\n\n x = torch.tensor([1, 2, 3, 4])\n\n x.send(client, pointable=False)\n x.send(client, pointable=False)\n x.send(client, pointable=False)\n\n assert len(node.store) == 3\n\n # It should for the GCBatched to delete all the cached to-delete objs\n client.gc.gc_strategy = GCSimple()\n\n assert len(node.store) == 0\n", "# third party\nimport numpy as np\nimport pytest\nimport torch as th\n\n# syft absolute\nfrom syft.core.common.message import SyftMessage\nfrom syft.core.common.uid import UID\nfrom syft.core.node.common.node_service.request_receiver.request_receiver_messages import (\n RequestStatus,\n)\nfrom syft.core.node.domain import Domain\n\n\[email protected]\nasync def test_domain_creation() -> None:\n Domain(name=\"test domain\")\n\n\[email protected]\ndef test_domain_serde() -> None:\n\n domain_1 = Domain(name=\"domain 1\")\n domain_1_client = domain_1.get_client()\n\n tensor = th.tensor([1, 2, 3])\n _ = tensor.send(domain_1_client)\n\n\n# MADHAVA: this needs fixing\[email protected]\[email protected]\ndef test_domain_request_pending() -> None:\n domain_1 = Domain(name=\"remote domain\")\n tensor = th.tensor([1, 2, 3])\n\n domain_1_client = domain_1.get_root_client()\n data_ptr_domain_1 = tensor.send(domain_1_client)\n\n domain_2 = Domain(name=\"my domain\")\n\n data_ptr_domain_1.request(\n reason=\"I'd lke to see this pointer\",\n )\n\n requested_object = data_ptr_domain_1.id_at_location\n\n # make request\n message_request_id = domain_1_client.requests.get_request_id_from_object_id(\n 
object_id=requested_object\n )\n\n # check status\n response = data_ptr_domain_1.check_access(\n node=domain_2, request_id=message_request_id\n )\n\n assert RequestStatus.Pending == response\n\n\n# MADHAVA: this needs fixing\[email protected]\[email protected]\[email protected]\ndef test_domain_request_denied() -> None:\n domain_1 = Domain(name=\"remote domain\")\n tensor = th.tensor([1, 2, 3])\n\n domain_1_client = domain_1.get_root_client()\n data_ptr_domain_1 = tensor.send(domain_1_client)\n\n domain_2 = Domain(name=\"my domain\")\n\n data_ptr_domain_1.request(reason=\"I'd lke to see this pointer\")\n\n requested_object = data_ptr_domain_1.id_at_location\n\n # make request\n message_request_id = domain_1_client.requests.get_request_id_from_object_id(\n object_id=requested_object\n )\n\n # domain 1 client rejects request\n domain_1.requests[0].owner_client_if_available = domain_1_client\n domain_1.requests[0].deny()\n\n # check status\n response = data_ptr_domain_1.check_access(\n node=domain_2, request_id=message_request_id\n )\n\n assert RequestStatus.Rejected == response\n\n\n# MADHAVA: this needs fixing\[email protected]\[email protected]\ndef test_domain_request_accepted() -> None:\n domain_1 = Domain(name=\"remote domain\")\n tensor = th.tensor([1, 2, 3])\n\n domain_1_client = domain_1.get_root_client()\n data_ptr_domain_1 = tensor.send(domain_1_client)\n\n domain_2 = Domain(name=\"my domain\")\n\n data_ptr_domain_1.request(reason=\"I'd lke to see this pointer\")\n\n requested_object = data_ptr_domain_1.id_at_location\n\n message_request_id = domain_1_client.requests.get_request_id_from_object_id(\n object_id=requested_object\n )\n\n domain_1.requests[0].owner_client_if_available = domain_1_client\n domain_1.requests[0].accept()\n\n response = data_ptr_domain_1.check_access(\n node=domain_2, request_id=message_request_id\n )\n\n assert RequestStatus.Accepted == response\n\n\[email protected]\ndef test_domain_is_for_me_exception() -> None:\n domain_1 = Domain(name=\"remote domain\")\n\n with pytest.raises(Exception):\n msg = SyftMessage()\n domain_1.message_is_for_me(msg)\n\n\[email protected]\ndef test_object_exists_on_domain() -> None:\n\n domain = Domain(\"my domain\").get_root_client()\n x = np.array([1, 2, 3, 4]).astype(np.int32)\n uid = UID()\n ptr = np.array([1, 2, 3, 4]).astype(np.int32).send(domain)\n ptr.id_at_location = uid\n assert not ptr.exists\n ptr = x.send(domain, id_at_location_override=uid)\n assert ptr.exists\n", "# future\nfrom __future__ import annotations\n\n# stdlib\nimport functools\nfrom functools import lru_cache\nimport operator\nfrom typing import Any\nfrom typing import Callable\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\nfrom typing import Union\n\n# third party\nfrom google.protobuf.reflection import GeneratedProtocolMessageType\nimport numpy as np\nimport torch\n\n# syft absolute\n# absolute\nimport syft as sy\n\n# relative\nfrom . import utils\nfrom .... 
import logger\nfrom ....proto.core.tensor.share_tensor_pb2 import ShareTensor as ShareTensor_PB\nfrom ...common.serde.deserialize import _deserialize as deserialize\nfrom ...common.serde.serializable import serializable\nfrom ...common.serde.serialize import _serialize as serialize\nfrom ...smpc.store.crypto_store import CryptoStore\nfrom ..passthrough import PassthroughTensor # type: ignore\nfrom .party import Party\n\nMETHODS_FORWARD_ALL_SHARES = {\n \"repeat\",\n \"copy\",\n \"diagonal\",\n \"flatten\",\n \"transpose\",\n \"partition\",\n \"resize\",\n \"ravel\",\n \"compress\",\n \"reshape\",\n \"squeeze\",\n \"swapaxes\",\n \"__pos__\",\n \"__neg__\",\n \"take\",\n \"choose\",\n \"cumsum\",\n \"trace\",\n}\nINPLACE_OPS = {\"resize\", \"put\"}\nRING_SIZE_TO_OP = {\n 2: {\n \"add\": operator.xor,\n \"sub\": operator.xor,\n \"mul\": operator.and_,\n \"lt\": operator.lt,\n \"gt\": operator.gt,\n \"ge\": operator.ge,\n \"le\": operator.le,\n \"eq\": operator.eq,\n \"ne\": operator.ne,\n },\n 2\n ** 32: {\n \"add\": operator.add,\n \"sub\": operator.sub,\n \"mul\": operator.mul,\n \"lt\": operator.lt,\n \"gt\": operator.gt,\n \"ge\": operator.ge,\n \"le\": operator.le,\n \"eq\": operator.eq,\n \"ne\": operator.ne,\n },\n}\n\nCACHE_CLIENTS: Dict[Party, Any] = {}\n\n\ndef populate_store(*args: List[Any], **kwargs: Dict[Any, Any]) -> None:\n ShareTensor.crypto_store.populate_store(*args, **kwargs) # type: ignore\n\n\n@serializable()\nclass ShareTensor(PassthroughTensor):\n crypto_store = CryptoStore()\n\n __slots__ = (\n \"rank\",\n \"ring_size\",\n \"clients\", # clients connections\n \"min_value\",\n \"max_value\",\n \"generator_przs\",\n # Only ShareTensors with seed_przs could be sent over the wire\n \"seed_przs\",\n \"parties_info\",\n \"nr_parties\",\n )\n\n def __init__(\n self,\n rank: int,\n parties_info: List[Party],\n ring_size: int,\n seed_przs: int = 42,\n clients: Optional[List[Any]] = None,\n value: Optional[Any] = None,\n init_clients: bool = False,\n ) -> None:\n # TODO: Ring size needs to be changed to 2^64 (or other specific sizes)\n self.rank = rank\n self.ring_size = ring_size\n self.nr_parties = len(parties_info)\n self.parties_info = parties_info\n self.clients = []\n if clients is not None:\n self.clients = clients\n elif init_clients: # type: ignore\n self.clients = ShareTensor.login_clients(parties_info)\n\n self.min_value, self.max_value = ShareTensor.compute_min_max_from_ring(\n self.ring_size\n )\n\n # This should be set only in the deserializer\n self.generator_przs = None\n self.seed_przs = seed_przs\n super().__init__(value)\n\n @staticmethod\n def login_clients(parties_info: List[Party]) -> Any:\n clients = []\n for party_info in parties_info:\n party_info.url = party_info.url.replace(\"localhost\", \"docker-host\")\n client = CACHE_CLIENTS.get(party_info, None)\n if client is None:\n # default cache to true, here to prevent multiple logins\n # due to gevent monkey patching, context switch is done during\n # during socket connection initialization.\n CACHE_CLIENTS[party_info] = True\n # TODO: refactor to use a guest account\n client = sy.login( # nosec\n url=party_info.url,\n email=\"[email protected]\",\n password=\"changethis\",\n port=party_info.port,\n verbose=False,\n )\n base_url = client.routes[0].connection.base_url\n client.routes[0].connection.base_url = base_url.replace( # type: ignore\n \"localhost\", \"docker-host\"\n )\n CACHE_CLIENTS[party_info] = client\n clients.append(client)\n return clients\n\n def __getitem__(self, item: Union[str, int, 
slice]) -> ShareTensor:\n return ShareTensor(\n rank=self.rank,\n parties_info=self.parties_info,\n ring_size=self.ring_size,\n value=self.child[item],\n clients=self.clients,\n )\n\n def copy_tensor(self) -> ShareTensor:\n return ShareTensor(\n value=self.child,\n rank=self.rank,\n parties_info=self.parties_info,\n ring_size=self.ring_size,\n seed_przs=self.seed_przs,\n clients=self.clients,\n )\n\n @staticmethod\n @lru_cache(32)\n def compute_min_max_from_ring(ring_size: int = 2 ** 32) -> Tuple[int, int]:\n if ring_size == 2:\n min_value, max_value = 0, 1\n else:\n min_value = (-ring_size) // 2\n max_value = (ring_size) // 2 - 1\n return min_value, max_value\n\n @staticmethod\n @lru_cache(maxsize=None)\n def get_op(ring_size: int, op_str: str) -> Callable[..., Any]:\n \"\"\"Returns method attribute based on ring_size and op_str.\n Args:\n ring_size (int): Ring size\n op_str (str): Operation string.\n Returns:\n op (Callable[...,Any]): The operation method for the op_str.\n Raises:\n ValueError : If invalid ring size or op_str is given as input.\n \"\"\"\n ops = RING_SIZE_TO_OP.get(ring_size, None)\n\n if ops is None:\n raise ValueError(f\"Do not have operations for ring size {ring_size}\")\n\n op = ops.get(op_str, None)\n if op is None:\n raise ValueError(\n f\"Operator {op_str} does not exist for ring size {ring_size}\"\n )\n\n return op\n\n \"\"\" TODO: Remove this -- we would use generate_przs since the scenario we are testing is that\n the secret is remotly\n @staticmethod\n def generate_shares(secret, nr_shares, ring_size=2 ** 64):\n from .fixed_precision_tensor import FixedPrecisionTensor\n\n if not isinstance(secret, (int, FixedPrecisionTensor)):\n secret = FixedPrecisionTensor(value=secret)\n\n shape = secret.shape\n min_value, max_value = ShareTensor.compute_min_max_from_ring(ring_size)\n\n generator_shares = np.random.default_rng()\n\n random_shares = []\n for i in range(nr_shares):\n random_value = generator_shares.integers(\n low=min_value, high=max_value, size=shape\n )\n fpt_value = FixedPrecisionTensor(value=random_value)\n random_shares.append(fpt_value)\n\n shares_fpt = []\n for i in range(nr_shares):\n if i == 0:\n share = value = random_shares[i]\n elif i < nr_shares - 1:\n share = random_shares[i] - random_shares[i - 1]\n else:\n share = secret - random_shares[i - 1]\n\n shares_fpt.append(share)\n\n # Add the ShareTensor class between them\n shares = []\n for rank, share_fpt in enumerate(shares_fpt):\n share_fpt.child = ShareTensor(rank=rank, value=share_fpt.child)\n shares.append(share_fpt)\n\n return shares\n \"\"\"\n\n @staticmethod\n def generate_przs(\n value: Any,\n shape: Tuple[int, ...],\n rank: int,\n parties_info: List[Party],\n ring_size: int = 2 ** 32,\n seed_przs: Optional[int] = None,\n generator_przs: Optional[Any] = None,\n init_clients: bool = True,\n ) -> \"ShareTensor\":\n\n nr_parties = len(parties_info)\n\n # Try:\n # 1. First get numpy type if secret is numpy and obtain ring size from there\n # 2. 
If not get the type from the ring size\n\n numpy_type = None\n ring_size_final = None\n\n ring_size_from_type = utils.TYPE_TO_RING_SIZE.get(\n getattr(value, \"dtype\", None), None\n )\n if ring_size_from_type is None:\n logger.warning(\"Could not get ring size from {value}\")\n else:\n ring_size_final = ring_size_from_type\n numpy_type = value.dtype\n\n if numpy_type is None:\n numpy_type = utils.RING_SIZE_TO_TYPE.get(ring_size, None)\n ring_size_final = ring_size\n\n if numpy_type is None:\n raise ValueError(f\"Ring size {ring_size} not known how to be treated\")\n\n # relative\n from ..tensor import Tensor\n\n if (seed_przs is None) == (generator_przs is None):\n raise ValueError(\"Only seed_przs or generator should be populated\")\n\n if value is None:\n value = Tensor(np.zeros(shape, dtype=numpy_type))\n\n # TODO: Sending the seed and having each party generate the shares is not safe\n # Since the parties would know some of the other parties shares (this might not impose a risk\n # when shares are not sent between parties -- like private addition/subtraction, but it might\n # impose for multiplication\n # The secret holder should generate the shares and send them to the other parties\n if generator_przs:\n generator_shares = generator_przs\n else:\n generator_shares = np.random.default_rng(seed_przs)\n\n if isinstance(value.child, ShareTensor):\n value = value.child\n\n share = ShareTensor(\n value=value.child,\n rank=rank,\n parties_info=parties_info,\n seed_przs=seed_przs, # type: ignore #TODO:Inspect as we could pass none.\n init_clients=init_clients,\n ring_size=ring_size_final, # type: ignore\n )\n\n share.generator_przs = generator_shares\n shares = [\n generator_shares.integers(\n low=share.min_value,\n high=share.max_value,\n size=shape,\n endpoint=True,\n dtype=numpy_type,\n )\n for _ in range(nr_parties)\n ]\n\n op = ShareTensor.get_op(ring_size_final, \"sub\")\n przs_share = op(shares[rank], shares[(rank + 1) % nr_parties])\n share.child = op(share.child, przs_share)\n\n return share\n\n @staticmethod\n def generate_przs_on_dp_tensor(\n value: Optional[Any],\n shape: Tuple[int],\n rank: int,\n parties_info: List[Party],\n seed_przs: int,\n share_wrapper: Any,\n ring_size: int = 2 ** 32,\n ) -> PassthroughTensor:\n\n if value is not None:\n share = ShareTensor.generate_przs(\n value=value.child,\n shape=shape,\n rank=rank,\n parties_info=parties_info,\n seed_przs=seed_przs,\n ring_size=ring_size,\n )\n else:\n share = ShareTensor.generate_przs(\n value=value,\n shape=shape,\n rank=rank,\n parties_info=parties_info,\n seed_przs=seed_przs,\n ring_size=ring_size,\n )\n\n share_wrapper.child.child = share\n\n return share_wrapper\n\n @staticmethod\n def sanity_check(\n share: Union[int, float, torch.Tensor, np.ndarray, \"ShareTensor\"]\n ) -> None:\n \"\"\"Check type for share\n\n Args:\n share (Union[int, float, ShareTensor, np.ndarray, torch.Tensor]): value to check\n\n Raises:\n ValueError: if type is not supported\n \"\"\"\n if isinstance(share, float):\n raise ValueError(\"Type float not supported yet!\")\n\n if isinstance(share, np.ndarray) and (\n not np.issubdtype(share.dtype, np.integer)\n and share.dtype != np.dtype(\"bool\")\n ):\n raise ValueError(\n f\"NPArray should have type int or bool, but found {share.dtype}\"\n )\n\n if isinstance(share, torch.Tensor) and torch.is_floating_point(share):\n raise ValueError(\"Torch tensor should have type int, but found float\")\n\n def apply_function(\n self, y: Union[int, float, torch.Tensor, np.ndarray, \"ShareTensor\"], 
op_str: str\n ) -> \"ShareTensor\":\n \"\"\"Apply a given operation.\n\n Args:\n y (Union[int, float, torch.Tensor, np.ndarray, \"ShareTensor\"]): tensor to apply the operator.\n op_str (str): Operator.\n\n Returns:\n ShareTensor: Result of the operation.\n \"\"\"\n\n op = ShareTensor.get_op(self.ring_size, op_str)\n numpy_type = utils.RING_SIZE_TO_TYPE.get(self.ring_size, None)\n if numpy_type is None:\n raise ValueError(f\"Do not know numpy type for ring size {self.ring_size}\")\n\n print(\"=====================================================\")\n print(\"OP\", op, numpy_type, self.ring_size)\n print(\"====================================================\")\n\n if isinstance(y, ShareTensor):\n utils.get_ring_size(self.ring_size, y.ring_size)\n value = op(self.child, y.child)\n else:\n # TODO: Converting y to numpy because doing \"numpy op torch tensor\" raises exception\n value = op(self.child, np.array(y, numpy_type)) # TODO: change to np.int64\n\n res = self.copy_tensor()\n res.child = value\n return res\n\n def add(\n self, y: Union[int, float, torch.Tensor, np.ndarray, \"ShareTensor\"]\n ) -> \"ShareTensor\":\n \"\"\"Apply the \"add\" operation between \"self\" and \"y\".\n\n Args:\n y (Union[int, float, torch.Tensor, np.ndarray, \"ShareTensor\"]): self + y\n\n Returns:\n ShareTensor. Result of the operation.\n \"\"\"\n\n ShareTensor.sanity_check(y)\n\n new_share = self.apply_function(y, \"add\")\n\n return new_share\n\n def sub(\n self, y: Union[int, float, torch.Tensor, np.ndarray, \"ShareTensor\"]\n ) -> \"ShareTensor\":\n \"\"\"Apply the \"sub\" operation between \"self\" and \"y\".\n\n Args:\n y (Union[int, float, torch.Tensor, np.ndarray, \"ShareTensor\"]): self - y\n\n Returns:\n ShareTensor. Result of the operation.\n \"\"\"\n\n ShareTensor.sanity_check(y)\n new_share = self.apply_function(y, \"sub\")\n return new_share\n\n def rsub(\n self, y: Union[int, float, torch.Tensor, np.ndarray, \"ShareTensor\"]\n ) -> \"ShareTensor\":\n \"\"\"Apply the \"rsub\" operation between \"self\" and \"y\"\n\n Args:\n y (Union[int, float, torch.Tensor, np.ndarray, \"ShareTensor\"]): y - self\n\n Returns:\n ShareTensor. Result of the operation.\n \"\"\"\n\n ShareTensor.sanity_check(y)\n new_self = self.mul(-1)\n new_share = new_self.apply_function(y, \"add\")\n return new_share\n\n def mul(\n self, y: Union[int, float, torch.Tensor, np.ndarray, \"ShareTensor\"]\n ) -> \"ShareTensor\":\n \"\"\"Apply the \"mul\" operation between \"self\" and \"y\".\n\n Args:\n y (Union[int, float, torch.Tensor, np.ndarray, \"ShareTensor\"]): self * y\n\n Returns:\n ShareTensor. Result of the operation.\n \"\"\"\n # if isinstance(y, ShareTensor):\n # raise ValueError(\n # \"We should not reach this point for private multiplication. 
Only public one\"\n # )\n\n ShareTensor.sanity_check(y)\n new_share = self.apply_function(y, \"mul\")\n return new_share\n\n def matmul(\n self, y: Union[int, float, torch.Tensor, np.ndarray, \"ShareTensor\"]\n ) -> \"ShareTensor\":\n \"\"\"Apply the \"matmul\" operation between \"self\" and \"y\".\n\n Args:\n y (Union[int, float, torch.Tensor, np.ndarray, \"ShareTensor\"]): self @ y.\n\n Returns:\n ShareTensor: Result of the operation.\n \"\"\"\n if isinstance(y, ShareTensor):\n raise ValueError(\"Private matmul not supported yet\")\n\n ShareTensor.sanity_check(y)\n new_share = self.apply_function(y, \"matmul\")\n return new_share\n\n def rmatmul(self, y: torch.Tensor) -> \"ShareTensor\":\n \"\"\"Apply the \"rmatmul\" operation between \"y\" and \"self\".\n\n Args:\n y (torch.Tensor): y @ self\n\n Returns:\n ShareTensor. Result of the operation.\n \"\"\"\n if isinstance(y, ShareTensor):\n raise ValueError(\"Private matmul not supported yet\")\n\n ShareTensor.sanity_check(y)\n new_share = y.apply_function(self, \"matmul\")\n return new_share\n\n def lt(self, y: Union[ShareTensor, np.ndarray]) -> \"ShareTensor\":\n \"\"\"Apply the \"lt\" operation between \"y\" and \"self\".\n\n Args:\n y (Union[ShareTensor,np.ndarray]): self < y\n\n Returns:\n ShareTensor. Result of the operation.\n \"\"\"\n # raise ValueError(\n # \"It should not reach this point since we generate SMPCAction for this\"\n # )\n ShareTensor.sanity_check(y)\n new_share = self.apply_function(y, \"lt\")\n return new_share\n\n def gt(self, y: Union[ShareTensor, np.ndarray]) -> \"ShareTensor\":\n \"\"\"Apply the \"gt\" operation between \"y\" and \"self\".\n\n Args:\n y (Union[ShareTensor,np.ndarray]): self > y\n\n Returns:\n ShareTensor. Result of the operation.\n \"\"\"\n # raise ValueError(\n # \"It should not reach this point since we generate SMPCAction for this\"\n # )\n ShareTensor.sanity_check(y)\n new_share = self.apply_function(y, \"gt\")\n return new_share\n\n def ge(self, y: Union[ShareTensor, np.ndarray]) -> \"ShareTensor\":\n \"\"\"Apply the \"ge\" operation between \"y\" and \"self\".\n\n Args:\n y (Union[ShareTensor,np.ndarray]): self >= y\n\n Returns:\n ShareTensor. Result of the operation.\n \"\"\"\n # raise ValueError(\n # \"It should not reach this point since we generate SMPCAction for this\"\n # )\n ShareTensor.sanity_check(y)\n new_share = self.apply_function(y, \"ge\")\n return new_share\n\n def le(self, y: Union[ShareTensor, np.ndarray]) -> \"ShareTensor\":\n \"\"\"Apply the \"le\" operation between \"y\" and \"self\".\n\n Args:\n y (Union[ShareTensor,np.ndarray]): self <= y\n\n Returns:\n ShareTensor. Result of the operation.\n \"\"\"\n # raise ValueError(\n # \"It should not reach this point since we generate SMPCAction for this\"\n # )\n ShareTensor.sanity_check(y)\n new_share = self.apply_function(y, \"le\")\n return new_share\n\n def ne(self, y: Union[ShareTensor, np.ndarray]) -> \"ShareTensor\":\n \"\"\"Apply the \"ne\" operation between \"y\" and \"self\".\n\n Args:\n y (Union[ShareTensor,np.ndarray]): self != y\n\n Returns:\n ShareTensor. Result of the operation.\n \"\"\"\n # raise ValueError(\n # \"It should not reach this point since we generate SMPCAction for this\"\n # )\n ShareTensor.sanity_check(y)\n new_share = self.apply_function(y, \"ne\")\n return new_share\n\n def bit_decomposition(self) -> \"ShareTensor\":\n \"\"\"Apply the \"decomposition\" operation on self\n\n Args:\n None\n\n Returns:\n ShareTensor. 
Result of the operation.\n \"\"\"\n raise ValueError(\n \"It should not reach this point since we generate SMPCAction for this\"\n )\n\n def eq(self, other: Any) -> bool:\n \"\"\"Equal operator.\n Check if \"self\" is equal with another object given a set of\n attributes to compare.\n Args:\n other (Any): Value to compare.\n Returns:\n bool: True if equal False if not.\n \"\"\"\n # TODO: Rasswanth: Fix later after the comparison operation\n # relative\n # from .... import Tensor\n\n # if (\n # isinstance(self.child, Tensor)\n # and isinstance(other.child, Tensor)\n # and (self.child != other.child).child.any() # type: ignore\n # ):\n # return False\n\n # if (\n # isinstance(self.child, np.ndarray)\n # and isinstance(other.child, np.ndarray)\n # and (self.child != other.child).any()\n # ):\n # return False\n\n # if self.rank != other.rank:\n # return False\n\n # if self.ring_size != other.ring_size:\n # return False\n\n # if self.nr_parties != other.nr_parties:\n # return False\n\n # return True\n\n return self.child == other.child\n\n # TRASK: commenting out because ShareTEnsor doesn't appear to have .session_uuid or .config\n # def div(\n # self, y: Union[int, float, torch.Tensor, np.ndarray, \"ShareTensor\"]\n # ) -> \"ShareTensor\":\n # \"\"\"Apply the \"div\" operation between \"self\" and \"y\".\n #\n # Args:\n # y (Union[int, float, torch.Tensor, np.ndarray, \"ShareTensor\"]): Denominator.\n #\n # Returns:\n # ShareTensor: Result of the operation.\n #\n # Raises:\n # ValueError: If y is not an integer or LongTensor.\n # \"\"\"\n # if not isinstance(y, (int, torch.LongTensor)):\n # raise ValueError(\"Div works (for the moment) only with integers!\")\n #\n # res = ShareTensor(session_uuid=self.session_uuid, config=self.config)\n # # res = self.apply_function(y, \"floordiv\")\n # res.tensor = self.tensor // y\n # return res\n\n def bit_extraction(self, pos: int = 0) -> ShareTensor:\n \"\"\"Extracts the bit at the specified position.\n\n Args:\n pos (int): position to extract bit.\n\n Returns:\n ShareTensor : extracted bits at specific position.\n\n Raises:\n ValueError: If invalid position is provided.\n \"\"\"\n ring_bits = utils.get_nr_bits(self.ring_size)\n if pos < 0 or pos > ring_bits - 1:\n raise ValueError(\n f\"Invalid position for bit_extraction: {pos}, must be in range:[0,{ring_bits-1}]\"\n )\n shape = self.shape\n numpy_type = utils.RING_SIZE_TO_TYPE[self.ring_size]\n # logical shift\n bit_mask = np.ones(shape, dtype=numpy_type) << pos\n value = self.child & bit_mask\n value = value.astype(np.bool_)\n share = self.copy_tensor()\n share.child = value\n return share\n\n @staticmethod\n def hook_method(__self: ShareTensor, method_name: str) -> Callable[..., Any]:\n \"\"\"Hook a framework method.\n\n Args:\n method_name (str): method to hook\n\n Returns:\n A hooked method\n \"\"\"\n\n def method_all_shares(\n _self: ShareTensor, *args: List[Any], **kwargs: Dict[Any, Any]\n ) -> Any:\n\n share = _self.child\n if method_name != \"resize\":\n method = getattr(share, method_name)\n else:\n # Should be modified to remove copy\n # https://stackoverflow.com/questions/23253144/numpy-the-array-doesnt-have-its-own-data\n share = share.copy()\n method = getattr(share, method_name)\n\n if method_name not in INPLACE_OPS:\n new_share = method(*args, **kwargs)\n else:\n method(*args, **kwargs)\n new_share = share\n\n res = _self.copy_tensor()\n\n # TODO : Some operations return np.int64 by default, should modify\n # when we have support for np.int64 or do explicit casting.\n if method_name == 
\"trace\":\n new_share = np.array(new_share, dtype=np.int32)\n res.child = new_share\n\n return res\n\n return functools.partial(method_all_shares, __self)\n\n def __getattribute__(self, attr_name: str) -> Any:\n if attr_name in METHODS_FORWARD_ALL_SHARES or attr_name in INPLACE_OPS:\n return ShareTensor.hook_method(self, attr_name)\n\n return object.__getattribute__(self, attr_name)\n\n def _object2proto(self) -> ShareTensor_PB:\n proto_init_kwargs = {\n \"rank\": self.rank,\n \"parties_info\": [serialize(party) for party in self.parties_info],\n \"seed_przs\": self.seed_przs,\n \"ring_size\": sy.serialize(self.ring_size, to_bytes=True),\n }\n if isinstance(self.child, np.ndarray):\n proto_init_kwargs[\"array\"] = serialize(self.child)\n elif isinstance(self.child, torch.Tensor):\n proto_init_kwargs[\"array\"] = serialize(np.array(self.child))\n else:\n proto_init_kwargs[\"tensor\"] = serialize(self.child)\n\n return ShareTensor_PB(**proto_init_kwargs)\n\n @staticmethod\n def _proto2object(proto: ShareTensor_PB) -> \"ShareTensor\":\n init_kwargs = {\n \"rank\": proto.rank,\n \"parties_info\": [deserialize(party) for party in proto.parties_info],\n \"seed_przs\": proto.seed_przs,\n \"ring_size\": int(sy.deserialize(proto.ring_size, from_bytes=True)),\n }\n if proto.HasField(\"tensor\"):\n init_kwargs[\"value\"] = deserialize(proto.tensor)\n else:\n init_kwargs[\"value\"] = deserialize(proto.array)\n\n # init_kwargs[\"init_clients\"] = True\n res = ShareTensor(**init_kwargs)\n generator_przs = np.random.default_rng(proto.seed_przs)\n res.generator_przs = generator_przs\n return res\n\n @staticmethod\n def get_protobuf_schema() -> GeneratedProtocolMessageType:\n return ShareTensor_PB\n\n __add__ = add\n __radd__ = add\n __sub__ = sub\n __rsub__ = rsub\n __mul__ = mul\n __rmul__ = mul\n __matmul__ = matmul\n __rmatmul__ = rmatmul\n __lt__ = lt\n __gt__ = gt\n __ge__ = ge\n __le__ = le\n __eq__ = eq\n __ne__ = ne\n" ]
[ [ "torch.tensor" ], [ "numpy.array", "torch.tensor" ], [ "torch.is_floating_point", "numpy.issubdtype", "numpy.dtype", "numpy.ones", "numpy.array", "numpy.zeros", "numpy.random.default_rng" ] ]
millernj/phys202-project
[ "51c56d4bd849a717081c6d686e5abbba225d4334" ]
[ "core.py" ]
[ "import numpy as np\n\nsigmoid = lambda x: 1/(1 +np.exp(-x))\n\ndef perceptron_sigmoid(weights, inputvect):\n return sigmoid(np.dot(np.append(inputvect,[1]), weights))\n\ndef gen_network(size):\n weights= [np.array([[np.random.randn() for _ in range(size[n-1]+1)]\n for _ in range(size[n])]) for n in range(len(size))[1:]]\n return weights\n\ndef propforward(network, inputvect):\n outputs = []\n for layer in network:\n neural_input = inputvect\n output = [perceptron_sigmoid(weights, neural_input) for weights in layer]\n outputs.append(output)\n inputvect = output\n \n outputs = np.array(outputs)\n return [outputs[:-1], outputs[-1]]\n\ndef target_convert(n):\n result = np.zeros((10,))\n result[n]=1\n return result\n\ndef find_deltas_sigmoid(outputs, targets):\n return [output*(1-output)*(output-target) for output, target in zip(outputs, targets)]\n\ndef edit_weights(layer, input_list, deltas, learning_rate): \n for a, inpt in enumerate(input_list):\n layer-=learning_rate/len(input_list)*np.dot(deltas[a].reshape(len(deltas[a]),1),\n np.append(inpt,[1]).reshape(1,len(inpt)+1))\ndef backprob(network, inputvect, targets):\n \n hidden_outputs, outputs = propforward(network, inputvect)\n \n change_in_outputs = find_deltas_sigmoid(outputs, targets)\n \n list_deltas = [[] for _ in range(len(network))]\n list_deltas[-1] = change_in_outputs\n \n for n in range(len(network))[-1:0:-1]:\n delta = change_in_outputs\n change_in_hidden_outputs= [hidden_output*(1-hidden_output)*\n np.dot(delta, np.array([a[i] for a in network[n]]).transpose())\n for i, hidden_output in enumerate(hidden_outputs[n-1])]\n list_deltas[n-1] = change_in_hidden_outputs\n change_in_outputs = change_in_hidden_outputs\n \n return list_deltas\n\ndef stoc_descent(network, input_list, target_list, learning_rate):\n mega_delta = []\n hidden_output = [propforward(network, inpt)[0] for inpt in input_list]\n for inpt, target in zip(input_list, target_list):\n mega_delta.append(backprob(network, inpt, target))\n \n inputs=[]\n inputs.append(input_list)\n for n in range(len(network)):\n inputs.append(hidden_output[n])\n assert len(inputs) == len(network) + 1\n deltas = []\n \n \n\n for n in range(len(network)):\n deltas.append([np.array(delta[n]) for delta in mega_delta])\n \n assert len(deltas)==len(network)\n for n in range(len(network)):\n edit_weights(network[n], inputs[n], deltas[n], learning_rate)\n\ndef output_reader(output):\n assert len(output)==10\n result=[]\n for i, t in enumerate(output):\n if t == max(output) and abs(t-1)<=0.5:\n result.append(i)\n if len(result)==1:\n return result[0]\n else:\n return 0\n\ndef target_convert(n):\n assert n <= 9 and n >= 0\n n = round(n)\n result = np.zeros((10,))\n result[n]=1\n return result\n\ndef train_network(network, training_inputs, training_targets, training_cycles = 30,\n numbers_per_cycle = 1438,batch_size = 15,learning_rate = 1):\n \n train_data_index = np.linspace(0,numbers_per_cycle, numbers_per_cycle + 1)\n target_list = [target_convert(n) for n in training_targets[0:numbers_per_cycle]]\n np.random.seed(1)\n np.random.shuffle(train_data_index)\n for _ in range(training_cycles):\n for n in train_data_index:\n if n+batch_size <= numbers_per_cycle:\n training_data = training_inputs[int(n):int(n+batch_size)]\n target_data = target_list[int(n):int(n+batch_size)]\n else: \n training_data = training_inputs[int(n-batch_size):numbers_per_cycle]\n assert len(training_data)!=0\n target_data = target_list[int(n-batch_size):numbers_per_cycle]\n stoc_descent(network, training_data, target_data, 
learning_rate)\n \ndef check_net(network, testing_list, target_list, rnge):\n guesses = []\n targets = []\n number_correct = 0\n rnge = range(rnge[0],rnge[1])\n for n in rnge:\n\n guesses.append(output_reader(propforward(network, testing_list[n])[1]))\n targets.append(target_list[n])\n\n for guess, target in zip(guesses, targets):\n if guess == target:\n number_correct+=1\n number_total = len(rnge)\n print(number_correct/number_total*100)\n print(\"%s/%s\" %(str(number_correct), str(number_total)))\n\n" ]
[ [ "numpy.random.seed", "numpy.linspace", "numpy.random.shuffle", "numpy.append", "numpy.random.randn", "numpy.exp", "numpy.array", "numpy.zeros" ] ]
cshreyastech/deep-reinforcement-learning
[ "f2c9a45c76afa65083eed6994785fd1c3e04b1ec", "f2c9a45c76afa65083eed6994785fd1c3e04b1ec" ]
[ "p1_navigation/model.py", "p1_navigation/prioritized_experience_replay_agent.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass QNetwork(nn.Module):\n \"\"\"Actor (Policy) Model.\"\"\"\n\n def __init__(self, state_size, action_size, seed, fc1_units=64, fc2_units=64):\n \"\"\"Initialize parameters and build model.\n Params\n ======\n state_size (int): Dimension of each state\n action_size (int): Dimension of each action\n seed (int): Random seed\n fc1_units (int): Number of nodes in first hidden layer\n fc2_units (int): Number of nodes in second hidden layer\n \"\"\"\n super(QNetwork, self).__init__()\n self.seed = torch.manual_seed(seed)\n self.fc1 = nn.Linear(state_size, fc1_units)\n self.fc2 = nn.Linear(fc1_units, fc2_units)\n self.fc3 = nn.Linear(fc2_units, action_size)\n\n def forward(self, state):\n \"\"\"Build a network that maps state -> action values.\"\"\"\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n return self.fc3(x)", "import numpy as np\nimport random\nfrom collections import namedtuple, deque\n\nfrom model import QNetwork\n\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nBUFFER_SIZE = int(1e5) # replay buffer size\nBATCH_SIZE = 64 # minibatch size\nGAMMA = 0.99 # discount factor\nTAU = 1e-3 # for soft update of target parameters\nLR = 5e-4 # learning rate \nUPDATE_EVERY = 4 # how often to update the network\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nclass Agent():\n \"\"\"Interacts with and learns from the environment.\"\"\"\n\n def __init__(self, state_size, action_size, seed):\n \"\"\"Initialize an Agent object.\n \n Params\n ======\n state_size (int): dimension of each state\n action_size (int): dimension of each action\n seed (int): random seed\n \"\"\"\n self.state_size = state_size\n self.action_size = action_size\n self.seed = random.seed(seed)\n\n # Q-Network\n self.qnetwork_local = QNetwork(state_size, action_size, seed).to(device)\n self.qnetwork_target = QNetwork(state_size, action_size, seed).to(device)\n self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)\n\n # Replay memory\n self.memory = PrioritizedReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)\n # Initialize time step (for updating every UPDATE_EVERY steps)\n self.t_step = 0\n \n def step(self, state, action, reward, next_state, done):\n # Save experience in replay memory\n self.memory.add(state, action, reward, next_state, done)\n # Learn every UPDATE_EVERY time steps.\n self.t_step = (self.t_step + 1) % UPDATE_EVERY\n if self.t_step == 0:\n # If enough samples are available in memory, get random subset and learn\n if len(self.memory) > BATCH_SIZE:\n #experiences = self.memory.sample()\n #self.learn(experiences, GAMMA)\n #self.learn_double_dqn(experiences, GAMMA)\n \n states, actions, rewards, next_states, dones, indices, weights = self.memory.sample()\n self.learn(states, actions, rewards, next_states, dones, indices, weights, GAMMA)\n\n def act(self, state, eps=0.):\n \"\"\"Returns actions for given state as per current policy.\n \n Params\n ======\n state (array_like): current state\n eps (float): epsilon, for epsilon-greedy action selection\n \"\"\"\n state = torch.from_numpy(state).float().unsqueeze(0).to(device)\n self.qnetwork_local.eval()\n with torch.no_grad():\n action_values = self.qnetwork_local(state)\n self.qnetwork_local.train()\n\n # Epsilon-greedy action selection\n if random.random() > eps:\n return np.argmax(action_values.cpu().data.numpy())\n else:\n return random.choice(np.arange(self.action_size))\n\n def learn(self, states, 
actions, rewards, next_states, dones, indices, weights, gamma):\n \"\"\"Update value parameters using given batch of experience tuples.\n Params\n ======\n experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples \n gamma (float): discount factor\n \"\"\"\n #states, actions, rewards, next_states, dones = experiences\n \n # Get max predicted Q values (for next states) from target model\n Q_local_argmax = self.qnetwork_local(next_states).detach().max(1)[1].unsqueeze(1)\n \n \n Q_targets_next = self.qnetwork_target(next_states).gather(1, Q_local_argmax)\n \n \n # Compute Q targets for current states \n Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))\n\n # Get expected Q values from local model\n Q_expected = self.qnetwork_local(states).gather(1, actions)\n\n # Compute loss\n loss = F.mse_loss(Q_expected, Q_targets) * weights\n priorities = loss + 1e-5\n loss = loss.mean()\n \n # Minimize the loss\n self.optimizer.zero_grad()\n loss.backward()\n self.memory.update_priorities(indices, priorities)\n \n self.optimizer.step()\n\n # ------------------- update target network ------------------- #\n self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU) \n\n def soft_update(self, local_model, target_model, tau):\n \"\"\"Soft update model parameters.\n θ_target = τ*θ_local + (1 - τ)*θ_target\n Params\n ======\n local_model (PyTorch model): weights will be copied from\n target_model (PyTorch model): weights will be copied to\n tau (float): interpolation parameter \n \"\"\"\n for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):\n target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)\n\n\nclass PrioritizedReplayBuffer:\n \"\"\"Fixed-size buffer to store experience tuples.\"\"\"\n\n def __init__(self, action_size, buffer_size, batch_size, seed, priorities_alpha=0.6):\n \"\"\"Initialize a ReplayBuffer object.\n Params\n ======\n action_size (int): dimension of each action\n buffer_size (int): maximum size of buffer\n batch_size (int): size of each training batch\n seed (int): random seed\n \"\"\"\n self.priorities_alpha = priorities_alpha\n self.capacity = buffer_size\n self.memory = []\n self.pos = 0\n self.priorities = np.zeros((buffer_size,), dtype=np.float32)\n \n self.action_size = action_size\n self.batch_size = batch_size\n self.experience = namedtuple(\"Experience\", field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n self.seed = random.seed(seed)\n \n def add(self, state, action, reward, next_state, done):\n max_priority = self.priorities.max() if self.memory else 1.0\n\n if len(self.memory) < self.capacity:\n \"\"\"Add a new experience to memory.\"\"\"\n e = self.experience(state, action, reward, next_state, done)\n self.memory.append(e)\n else:\n self.memory[self.pos] = (state, action, reward, next_state, done)\n \n self.priorities[self.pos] = max_priority\n self.pos = (self.pos + 1) % self.capacity\n \n def sample(self, beta=0.4):\n if len(self.memory) == self.capacity:\n priorities = self.priorities\n else:\n priorities = self.priorities[:self.pos]\n \n probabilities = priorities ** self.priorities_alpha\n priorities /= priorities.sum()\n \n indices = np.random.choice(len(self.memory), self.batch_size, p=priorities)\n \n \"\"\"Randomly sample a batch of experiences from memory.\"\"\"\n #experiences = random.sample(self.memory, k=self.batch_size)\n experiences = [self.memory[idx] for idx in indices]\n \n total = len(self.memory)\n weights = (total * probabilities[indices]) 
** (-beta)\n weights /= weights.max()\n weights = np.array(weights, dtype=np.float32)\n \n #for e in experiences:\n # print(len(e.state), len(e.next_state))\n \n states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)\n actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).long().to(device)\n rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)\n next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device)\n dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)\n \n weights = torch.from_numpy(weights).float().to(device)\n \n \n return (states, actions, rewards, next_states, dones, indices, weights)\n \n def update_priorities(self, batch_indices, batch_priorities):\n for index, priority in zip(batch_indices, batch_priorities):\n self.priorities[index] = priority\n\n def __len__(self):\n \"\"\"Return the current size of internal memory.\"\"\"\n return len(self.memory)" ]
[ [ "torch.nn.Linear", "torch.manual_seed" ], [ "numpy.arange", "torch.from_numpy", "torch.nn.functional.mse_loss", "torch.no_grad", "torch.cuda.is_available", "numpy.array", "numpy.zeros", "numpy.vstack" ] ]
mkeshita/grove
[ "dc6bf6ec63e8c435fe52b1e00f707d5ce4cdb9b3" ]
[ "grove/tests/jordan_gradient/test_jordan_gradient.py" ]
[ "import numpy as np\nfrom unittest.mock import patch\nfrom pyquil import Program\nfrom pyquil.gates import H, CPHASE, SWAP, MEASURE\n\nfrom grove.alpha.phaseestimation.phase_estimation import controlled\nfrom grove.alpha.jordan_gradient.jordan_gradient import gradient_program, estimate_gradient\n\n\ndef test_gradient_program():\n f_h = 0.25\n precision = 2\n \n trial_prog = gradient_program(f_h, precision)\n \n result_prog = Program([H(0), H(1)])\n\n phase_factor = np.exp(1.0j * 2 * np.pi * abs(f_h))\n U = np.array([[phase_factor, 0],\n [0, phase_factor]])\n q_out = range(precision, precision+1)\n for i in range(precision):\n if i > 0:\n U = np.dot(U, U)\n cU = controlled(U)\n name = \"CONTROLLED-U{0}\".format(2 ** i)\n result_prog.defgate(name, cU)\n result_prog.inst((name, i) + tuple(q_out))\n\n result_prog.inst([SWAP(0, 1), H(0), CPHASE(-1.5707963267948966, 0, 1),\n H(1), MEASURE(0, 0), MEASURE(1, 1)])\n\n assert(trial_prog == result_prog)\n\n\ndef test_estimate_gradient():\n test_perturbation = .25\n test_precision = 3\n test_measurements = 10\n\n with patch(\"pyquil.api.QuantumComputer\") as qc:\n qc.run.return_value = np.asarray([[0, 1, 0, 0] for i in range(test_measurements)])\n\n gradient_estimate = estimate_gradient(test_perturbation, test_precision,\n n_measurements=test_measurements,\n qc=qc)\n\n assert(np.isclose(gradient_estimate, test_perturbation))\n" ]
[ [ "numpy.dot", "numpy.array", "numpy.isclose" ] ]
bioexcel/biobb_ml
[ "f99346ef7885d3a62de47dab738a01db4b27467a", "f99346ef7885d3a62de47dab738a01db4b27467a", "f99346ef7885d3a62de47dab738a01db4b27467a" ]
[ "biobb_ml/classification/classification_predict.py", "biobb_ml/clustering/k_means_coefficient.py", "biobb_ml/classification/support_vector_machine.py" ]
[ "#!/usr/bin/env python3\n\n\"\"\"Module containing the ClassificationPredict class and the command line interface.\"\"\"\nimport argparse\nimport pandas as pd\nimport joblib\nfrom biobb_common.generic.biobb_object import BiobbObject\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn import linear_model\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn import ensemble\nfrom sklearn import svm\nfrom biobb_common.configuration import settings\nfrom biobb_common.tools import file_utils as fu\nfrom biobb_common.tools.file_utils import launchlogger\nfrom biobb_ml.classification.common import *\n\nclass ClassificationPredict(BiobbObject):\n \"\"\"\n | biobb_ml ClassificationPredict\n | Makes predictions from an input dataset and a given classification model.\n | Makes predictions from an input dataset (provided either as a file or as a dictionary property) and a given classification model trained with `DecisionTreeClassifier <https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html>`_, `KNeighborsClassifier <https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html>`_, `LogisticRegression <https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html>`_, `RandomForestClassifier <https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html>`_, `Support Vector Machine <https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html>`_ methods.\n\n Args:\n input_model_path (str): Path to the input model. File type: input. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/data/classification/model_classification_predict.pkl>`_. Accepted formats: pkl (edam:format_3653).\n input_dataset_path (str) (Optional): Path to the dataset to predict. File type: input. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/data/classification/input_classification_predict.csv>`_. Accepted formats: csv (edam:format_3752).\n output_results_path (str): Path to the output results file. File type: output. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/reference/classification/ref_output_classification_predict.csv>`_. Accepted formats: csv (edam:format_3752).\n properties (dic - Python dictionary object containing the tool parameters, not input/output files):\n * **predictions** (*list*) - (None) List of dictionaries with all values you want to predict targets. It will be taken into account only in case **input_dataset_path** is not provided. 
Format: [{ 'var1': 1.0, 'var2': 2.0 }, { 'var1': 4.0, 'var2': 2.7 }] for datasets with headers and [[ 1.0, 2.0 ], [ 4.0, 2.7 ]] for datasets without headers.\n * **remove_tmp** (*bool*) - (True) [WF property] Remove temporal files.\n * **restart** (*bool*) - (False) [WF property] Do not execute if output files exist.\n\n Examples:\n This is a use example of how to use the building block from Python::\n\n from biobb_ml.classification.classification_predict import classification_predict\n prop = { \n 'predictions': [\n { \n 'var1': 1.0, \n 'var2': 2.0 \n }, \n { \n 'var1': 4.0, \n 'var2': 2.7 \n }\n ] \n }\n classification_predict(input_model_path='/path/to/myModel.pkl', \n output_results_path='/path/to/newPredictedResults.csv',\n input_dataset_path='/path/to/myDataset.csv', \n properties=prop)\n\n Info:\n * wrapped_software:\n * name: scikit-learn\n * version: >=0.24.2\n * license: BSD 3-Clause\n * ontology:\n * name: EDAM\n * schema: http://edamontology.org/EDAM.owl\n\n \"\"\"\n\n def __init__(self, input_model_path, output_results_path, \n input_dataset_path=None, properties=None, **kwargs) -> None:\n properties = properties or {}\n\n # Call parent class constructor\n super().__init__(properties)\n\n # Input/Output files\n self.io_dict = { \n \"in\": { \"input_model_path\": input_model_path, \"input_dataset_path\": input_dataset_path }, \n \"out\": { \"output_results_path\": output_results_path } \n }\n\n # Properties specific for BB\n self.predictions = properties.get('predictions', [])\n self.properties = properties\n\n # Check the properties\n self.check_properties(properties)\n\n def check_data_params(self, out_log, err_log):\n \"\"\" Checks all the input/output paths and parameters \"\"\"\n self.io_dict[\"in\"][\"input_model_path\"] = check_input_path(self.io_dict[\"in\"][\"input_model_path\"], \"input_model_path\", out_log, self.__class__.__name__)\n self.io_dict[\"out\"][\"output_results_path\"] = check_output_path(self.io_dict[\"out\"][\"output_results_path\"],\"output_results_path\", False, out_log, self.__class__.__name__)\n if self.io_dict[\"in\"][\"input_dataset_path\"]:\n self.io_dict[\"in\"][\"input_dataset_path\"] = check_input_path(self.io_dict[\"in\"][\"input_dataset_path\"], \"input_dataset_path\", out_log, self.__class__.__name__)\n\n @launchlogger\n def launch(self) -> int:\n \"\"\"Execute the :class:`ClassificationPredict <classification.classification_predict.ClassificationPredict>` classification.classification_predict.ClassificationPredict object.\"\"\"\n\n # check input/output paths and parameters\n self.check_data_params(self.out_log, self.err_log)\n\n # Setup Biobb\n if self.check_restart(): return 0\n self.stage_files()\n\n fu.log('Getting model from %s' % self.io_dict[\"in\"][\"input_model_path\"], self.out_log, self.global_log)\n\n with open(self.io_dict[\"in\"][\"input_model_path\"], \"rb\") as f:\n while True:\n try:\n m = joblib.load(f)\n if (isinstance(m, linear_model.LogisticRegression)\n or isinstance(m, KNeighborsClassifier)\n or isinstance(m, DecisionTreeClassifier)\n or isinstance(m, ensemble.RandomForestClassifier)\n or isinstance(m, svm.SVC)):\n new_model = m\n if isinstance(m, StandardScaler):\n scaler = m\n if isinstance(m, dict):\n variables = m\n except EOFError:\n break\n\n if self.io_dict[\"in\"][\"input_dataset_path\"]:\n # load dataset from input_dataset_path file\n fu.log('Getting dataset from %s' % self.io_dict[\"in\"][\"input_dataset_path\"], self.out_log, self.global_log)\n if 'columns' in variables['independent_vars']:\n labels = 
getHeader(self.io_dict[\"in\"][\"input_dataset_path\"])\n skiprows = 1\n else:\n labels = None\n skiprows = None\n new_data_table = pd.read_csv(self.io_dict[\"in\"][\"input_dataset_path\"], header = None, sep=\"\\s+|;|:|,|\\t\", engine=\"python\", skiprows=skiprows, names=labels)\n else:\n # load dataset from properties\n if 'columns' in variables['independent_vars']:\n # sorting self.properties in the correct order given by variables['independent_vars']['columns']\n index_map = { v: i for i, v in enumerate(variables['independent_vars']['columns']) }\n predictions = []\n for i, pred in enumerate(self.predictions):\n sorted_pred = sorted(pred.items(), key=lambda pair: index_map[pair[0]])\n predictions.append(dict(sorted_pred))\n new_data_table = pd.DataFrame(data=get_list_of_predictors(predictions),columns=get_keys_of_predictors(predictions))\n else:\n predictions = self.predictions\n new_data_table = pd.DataFrame(data=predictions) \n\n if variables['scale']: \n fu.log('Scaling dataset', self.out_log, self.global_log)\n new_data = scaler.transform(new_data_table)\n else: new_data = new_data_table\n\n p = new_model.predict_proba(new_data)\n\n # if headers, create target column with proper label\n if self.io_dict[\"in\"][\"input_dataset_path\"] or 'columns' in variables['independent_vars']:\n clss = ' (' + ', '.join(str(x) for x in variables['target_values']) + ')'\n new_data_table[variables['target']['column'] + ' ' + clss] = tuple(map(tuple, p))\n else:\n new_data_table[len(new_data_table.columns)] = tuple(map(tuple, p))\n fu.log('Predicting results\\n\\nPREDICTION RESULTS\\n\\n%s\\n' % new_data_table, self.out_log, self.global_log)\n fu.log('Saving results to %s' % self.io_dict[\"out\"][\"output_results_path\"], self.out_log, self.global_log)\n new_data_table.to_csv(self.io_dict[\"out\"][\"output_results_path\"], index = False, header=True, float_format='%.3f')\n\n return 0\n\ndef classification_predict(input_model_path: str, output_results_path: str, input_dataset_path: str = None, properties: dict = None, **kwargs) -> int:\n \"\"\"Execute the :class:`ClassificationPredict <classification.classification_predict.ClassificationPredict>` class and\n execute the :meth:`launch() <classification.classification_predict.ClassificationPredict.launch>` method.\"\"\"\n\n return ClassificationPredict(input_model_path=input_model_path, \n output_results_path=output_results_path, \n input_dataset_path=input_dataset_path,\n properties=properties, **kwargs).launch()\n\ndef main():\n \"\"\"Command line execution of this building block. Please check the command line documentation.\"\"\"\n parser = argparse.ArgumentParser(description=\"Makes predictions from an input dataset and a given classification model.\", formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))\n parser.add_argument('--config', required=False, help='Configuration file')\n\n # Specific args of each building block\n required_args = parser.add_argument_group('required arguments')\n required_args.add_argument('--input_model_path', required=True, help='Path to the input model. Accepted formats: pkl.')\n required_args.add_argument('--output_results_path', required=True, help='Path to the output results file. Accepted formats: csv.')\n parser.add_argument('--input_dataset_path', required=False, help='Path to the dataset to predict. 
Accepted formats: csv.')\n\n args = parser.parse_args()\n args.config = args.config or \"{}\"\n properties = settings.ConfReader(config=args.config).get_prop_dic()\n\n # Specific call of each building block\n classification_predict(input_model_path=args.input_model_path, \n output_results_path=args.output_results_path, \n input_dataset_path=args.input_dataset_path,\n properties=properties)\n\nif __name__ == '__main__':\n main()\n\n", "#!/usr/bin/env python3\n\n\"\"\"Module containing the KMeansCoefficient class and the command line interface.\"\"\"\nimport argparse\nfrom biobb_common.generic.biobb_object import BiobbObject\nfrom sklearn.preprocessing import StandardScaler\nfrom biobb_common.configuration import settings\nfrom biobb_common.tools import file_utils as fu\nfrom biobb_common.tools.file_utils import launchlogger\nfrom biobb_ml.clustering.common import *\n\n\nclass KMeansCoefficient(BiobbObject):\n \"\"\"\n | biobb_ml KMeansCoefficient\n | Wrapper of the scikit-learn KMeans method. \n | Clusters a given dataset and calculates best K coefficient. Visit the `KMeans documentation page <https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html>`_ in the sklearn official website for further information. \n\n Args:\n input_dataset_path (str): Path to the input dataset. File type: input. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/data/clustering/dataset_k_means_coefficient.csv>`_. Accepted formats: csv (edam:format_3752).\n output_results_path (str): Table with WCSS (elbow method), Gap and Silhouette coefficients for each cluster. File type: output. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/reference/clustering/ref_output_results_k_means_coefficient.csv>`_. Accepted formats: csv (edam:format_3752).\n output_plot_path (str) (Optional): Path to the elbow method and gap statistics plot. File type: output. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/reference/clustering/ref_output_plot_k_means_coefficient.png>`_. Accepted formats: png (edam:format_3603).\n properties (dic - Python dictionary object containing the tool parameters, not input/output files):\n * **predictors** (*dict*) - ({}) Features or columns from your dataset you want to use for fitting. You can specify either a list of columns names from your input dataset, a list of columns indexes or a range of columns indexes. Formats: { \"columns\": [\"column1\", \"column2\"] } or { \"indexes\": [0, 2, 3, 10, 11, 17] } or { \"range\": [[0, 20], [50, 102]] }. 
In case of mulitple formats, the first one will be picked.\n * **max_clusters** (*int*) - (6) [1~100|1] Maximum number of clusters to use by default for kmeans queries.\n * **random_state_method** (*int*) - (5) [1~1000|1] Determines random number generation for centroid initialization.\n * **scale** (*bool*) - (False) Whether or not to scale the input dataset.\n * **remove_tmp** (*bool*) - (True) [WF property] Remove temporal files.\n * **restart** (*bool*) - (False) [WF property] Do not execute if output files exist.\n\n Examples:\n This is a use example of how to use the building block from Python::\n\n from biobb_ml.clustering.k_means_coefficient import k_means_coefficient\n prop = { \n 'predictors': { \n 'columns': [ 'column1', 'column2', 'column3' ] \n }, \n 'max_clusters': 3 \n }\n k_means_coefficient(input_dataset_path='/path/to/myDataset.csv', \n output_results_path='/path/to/newTable.csv', \n output_plot_path='/path/to/newPlot.png', \n properties=prop)\n \n Info:\n * wrapped_software:\n * name: scikit-learn KMeans\n * version: >=0.24.2\n * license: BSD 3-Clause\n * ontology:\n * name: EDAM\n * schema: http://edamontology.org/EDAM.owl\n\n \"\"\"\n\n def __init__(self, input_dataset_path, output_results_path, \n output_plot_path=None, properties=None, **kwargs) -> None:\n properties = properties or {}\n\n # Call parent class constructor\n super().__init__(properties)\n\n # Input/Output files\n self.io_dict = { \n \"in\": { \"input_dataset_path\": input_dataset_path }, \n \"out\": { \"output_results_path\": output_results_path, \"output_plot_path\": output_plot_path } \n }\n\n # Properties specific for BB\n self.predictors = properties.get('predictors', {})\n self.max_clusters = properties.get('max_clusters', 6)\n self.random_state_method = properties.get('random_state_method', 5)\n self.scale = properties.get('scale', False)\n self.properties = properties\n\n # Check the properties\n self.check_properties(properties)\n\n def check_data_params(self, out_log, err_log):\n \"\"\" Checks all the input/output paths and parameters \"\"\"\n self.io_dict[\"in\"][\"input_dataset_path\"] = check_input_path(self.io_dict[\"in\"][\"input_dataset_path\"], \"input_dataset_path\", out_log, self.__class__.__name__)\n self.io_dict[\"out\"][\"output_results_path\"] = check_output_path(self.io_dict[\"out\"][\"output_results_path\"],\"output_results_path\", False, out_log, self.__class__.__name__)\n if self.io_dict[\"out\"][\"output_plot_path\"]:\n self.io_dict[\"out\"][\"output_plot_path\"] = check_output_path(self.io_dict[\"out\"][\"output_plot_path\"],\"output_plot_path\", True, out_log, self.__class__.__name__)\n\n @launchlogger\n def launch(self) -> int:\n \"\"\"Execute the :class:`KMeansCoefficient <clustering.k_means_coefficient.KMeansCoefficient>` clustering.k_means_coefficient.KMeansCoefficient object.\"\"\"\n\n # check input/output paths and parameters\n self.check_data_params(self.out_log, self.err_log)\n\n # Setup Biobb\n if self.check_restart(): return 0\n self.stage_files()\n \n # load dataset\n fu.log('Getting dataset from %s' % self.io_dict[\"in\"][\"input_dataset_path\"], self.out_log, self.global_log)\n if 'columns' in self.predictors:\n labels = getHeader(self.io_dict[\"in\"][\"input_dataset_path\"])\n skiprows = 1\n else:\n labels = None\n skiprows = None\n data = pd.read_csv(self.io_dict[\"in\"][\"input_dataset_path\"], header = None, sep=\"\\s+|;|:|,|\\t\", engine=\"python\", skiprows=skiprows, names=labels)\n\n # the features are the predictors\n predictors = 
getIndependentVars(self.predictors, data, self.out_log, self.__class__.__name__)\n fu.log('Predictors: [%s]' % (getIndependentVarsList(self.predictors)), self.out_log, self.global_log)\n\n # Hopkins test\n H = hopkins(predictors)\n fu.log('Performing Hopkins test over dataset. H = %f' % H, self.out_log, self.global_log)\n\n # scale dataset\n if self.scale: \n fu.log('Scaling dataset', self.out_log, self.global_log)\n scaler = StandardScaler()\n predictors = scaler.fit_transform(predictors)\n\n # calculate wcss for each cluster\n fu.log('Calculating Within-Clusters Sum of Squares (WCSS) for each %d clusters' % self.max_clusters, self.out_log, self.global_log)\n wcss = getWCSS('kmeans', self.max_clusters, predictors)\n \n # wcss table\n wcss_table = pd.DataFrame(data={'cluster': np.arange(1, self.max_clusters + 1), 'WCSS': wcss})\n fu.log('Calculating WCSS for each cluster\\n\\nWCSS TABLE\\n\\n%s\\n' % wcss_table.to_string(index=False), self.out_log, self.global_log)\n\n # get best cluster elbow method\n best_k, elbow_index = get_best_K(wcss)\n fu.log('Optimal number of clusters according to the Elbow Method is %d' % best_k, self.out_log, self.global_log)\n\n # calculate gap\n best_g, gap = getGap('kmeans', predictors, nrefs=5, maxClusters=(self.max_clusters + 1))\n\n # gap table\n gap_table = pd.DataFrame(data={'cluster': np.arange(1, self.max_clusters + 1), 'GAP': gap['gap']})\n fu.log('Calculating Gap for each cluster\\n\\nGAP TABLE\\n\\n%s\\n' % gap_table.to_string(index=False), self.out_log, self.global_log)\n\n # log best cluster gap method\n fu.log('Optimal number of clusters according to the Gap Statistics Method is %d' % best_g, self.out_log, self.global_log)\n\n # calculate silhouette\n silhouette_list, s_list = getSilhouetthe(method = 'kmeans', X = predictors, max_clusters = self.max_clusters, random_state = self.random_state_method)\n\n # silhouette table\n silhouette_table = pd.DataFrame(data={'cluster': np.arange(1, self.max_clusters + 1), 'SILHOUETTE': silhouette_list})\n fu.log('Calculating Silhouette for each cluster\\n\\nSILHOUETTE TABLE\\n\\n%s\\n' % silhouette_table.to_string(index=False), self.out_log, self.global_log)\n\n # get best cluster silhouette method\n key = silhouette_list.index(max(silhouette_list))\n best_s = s_list.__getitem__(key)\n fu.log('Optimal number of clusters according to the Silhouette Method is %d' % best_s, self.out_log, self.global_log)\n\n # save results table\n results_table = pd.DataFrame(data={'method': ['elbow', 'gap', 'silhouette'], 'coefficient': [wcss[elbow_index], max(gap['gap']) ,max(silhouette_list)], 'clusters': [best_k, best_g ,best_s]})\n fu.log('Gathering results\\n\\nRESULTS TABLE\\n\\n%s\\n' % results_table.to_string(index=False), self.out_log, self.global_log)\n fu.log('Saving results to %s' % self.io_dict[\"out\"][\"output_results_path\"], self.out_log, self.global_log)\n results_table.to_csv(self.io_dict[\"out\"][\"output_results_path\"], index = False, header=True, float_format='%.3f')\n\n # wcss plot\n if self.io_dict[\"out\"][\"output_plot_path\"]: \n fu.log('Saving methods plot to %s' % self.io_dict[\"out\"][\"output_plot_path\"], self.out_log, self.global_log)\n plot = plotKmeansTrain(self.max_clusters, wcss, gap['gap'], silhouette_list, best_k, best_g, best_s)\n plot.savefig(self.io_dict[\"out\"][\"output_plot_path\"], dpi=150)\n\n return 0\n\ndef k_means_coefficient(input_dataset_path: str, output_results_path: str, output_plot_path: str = None, properties: dict = None, **kwargs) -> int:\n \"\"\"Execute the 
:class:`KMeansCoefficient <clustering.k_means_coefficient.KMeansCoefficient>` class and\n execute the :meth:`launch() <clustering.k_means_coefficient.KMeansCoefficient.launch>` method.\"\"\"\n\n return KMeansCoefficient(input_dataset_path=input_dataset_path, \n output_results_path=output_results_path, \n output_plot_path=output_plot_path,\n properties=properties, **kwargs).launch()\n\ndef main():\n \"\"\"Command line execution of this building block. Please check the command line documentation.\"\"\"\n parser = argparse.ArgumentParser(description=\"Wrapper of the scikit-learn KMeans method.\", formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))\n parser.add_argument('--config', required=False, help='Configuration file')\n\n # Specific args of each building block\n required_args = parser.add_argument_group('required arguments')\n required_args.add_argument('--input_dataset_path', required=True, help='Path to the input dataset. Accepted formats: csv.')\n required_args.add_argument('--output_results_path', required=True, help='Table with WCSS (elbow method), Gap and Silhouette coefficients for each cluster. Accepted formats: csv.')\n parser.add_argument('--output_plot_path', required=False, help='Path to the elbow and gap methods plot. Accepted formats: png.')\n\n args = parser.parse_args()\n args.config = args.config or \"{}\"\n properties = settings.ConfReader(config=args.config).get_prop_dic()\n\n # Specific call of each building block\n k_means_coefficient(input_dataset_path=args.input_dataset_path,\n output_results_path=args.output_results_path, \n output_plot_path=args.output_plot_path, \n properties=properties)\n\nif __name__ == '__main__':\n main()\n", "#!/usr/bin/env python3\n\n\"\"\"Module containing the SupportVectorMachine class and the command line interface.\"\"\"\nimport argparse\nimport joblib\nfrom biobb_common.generic.biobb_object import BiobbObject\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix, classification_report, log_loss\nfrom sklearn import svm\nfrom biobb_common.configuration import settings\nfrom biobb_common.tools import file_utils as fu\nfrom biobb_common.tools.file_utils import launchlogger\nfrom biobb_ml.classification.common import *\n\n\nclass SupportVectorMachine(BiobbObject):\n \"\"\"\n | biobb_ml SupportVectorMachine\n | Wrapper of the scikit-learn SupportVectorMachine method.\n | Trains and tests a given dataset and saves the model and scaler. Visit the `SupportVectorMachine documentation page <https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html>`_ in the sklearn official website for further information. \n\n Args:\n input_dataset_path (str): Path to the input dataset. File type: input. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/data/classification/dataset_support_vector_machine.csv>`_. Accepted formats: csv (edam:format_3752).\n output_model_path (str): Path to the output model file. File type: output. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/reference/classification/ref_output_model_support_vector_machine.pkl>`_. Accepted formats: pkl (edam:format_3653).\n output_test_table_path (str) (Optional): Path to the test table file. File type: output. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/reference/classification/ref_output_test_support_vector_machine.csv>`_. 
Accepted formats: csv (edam:format_3752).\n output_plot_path (str) (Optional): Path to the statistics plot. If target is binary it shows confusion matrix, distributions of the predicted probabilities of both classes and ROC curve. If target is non-binary it shows confusion matrix. File type: output. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/reference/classification/ref_output_plot_support_vector_machine.png>`_. Accepted formats: png (edam:format_3603).\n properties (dic - Python dictionary object containing the tool parameters, not input/output files):\n * **independent_vars** (*dict*) - ({}) Independent variables you want to train from your dataset. You can specify either a list of columns names from your input dataset, a list of columns indexes or a range of columns indexes. Formats: { \"columns\": [\"column1\", \"column2\"] } or { \"indexes\": [0, 2, 3, 10, 11, 17] } or { \"range\": [[0, 20], [50, 102]] }. In case of mulitple formats, the first one will be picked.\n * **target** (*dict*) - ({}) Dependent variable you want to predict from your dataset. You can specify either a column name or a column index. Formats: { \"column\": \"column3\" } or { \"index\": 21 }. In case of mulitple formats, the first one will be picked.\n * **weight** (*dict*) - ({}) Weight variable from your dataset. You can specify either a column name or a column index. Formats: { \"column\": \"column3\" } or { \"index\": 21 }. In case of mulitple formats, the first one will be picked.\n * **kernel** (*string*) - (\"rbf\") Specifies the kernel type to be used in the algorithm. Values: linear (It's used when the data is Linearly separable; that is; it can be separated using a single Line), poly (Represents the similarity of vectors -training samples- in a feature space over polynomials of the original variables; allowing learning of non-linear models), rbf (It's a function whose value depends on the distance from the origin or from some point), sigmoid (In Neural Networks field the bipolar sigmoid function is often used as an activation function for artificial neurons), precomputed (Precomputed kernel).\n * **normalize_cm** (*bool*) - (False) Whether or not to normalize the confusion matrix.\n * **random_state_method** (*int*) - (5) [1~1000|1] Controls the randomness of the estimator.\n * **random_state_train_test** (*int*) - (5) [1~1000|1] Controls the shuffling applied to the data before applying the split.\n * **test_size** (*float*) - (0.2) [0~1|0.05] Represents the proportion of the dataset to include in the test split. 
It should be between 0.0 and 1.0.\n * **scale** (*bool*) - (False) Whether or not to scale the input dataset.\n * **remove_tmp** (*bool*) - (True) [WF property] Remove temporal files.\n * **restart** (*bool*) - (False) [WF property] Do not execute if output files exist.\n\n Examples:\n This is a use example of how to use the building block from Python::\n\n from biobb_ml.classification.support_vector_machine import support_vector_machine\n prop = { \n 'independent_vars': { \n 'columns': [ 'column1', 'column2', 'column3' ] \n }, \n 'target': { \n 'column': 'target' \n }, \n 'kernel': 'rbf', \n 'test_size': 0.2 \n }\n support_vector_machine(input_dataset_path='/path/to/myDataset.csv', \n output_model_path='/path/to/newModel.pkl', \n output_test_table_path='/path/to/newTable.csv', \n output_plot_path='/path/to/newPlot.png', \n properties=prop)\n\n\n Info:\n * wrapped_software:\n * name: scikit-learn SupportVectorMachine\n * version: >=0.24.2\n * license: BSD 3-Clause\n * ontology:\n * name: EDAM\n * schema: http://edamontology.org/EDAM.owl\n\n \"\"\"\n\n def __init__(self, input_dataset_path, output_model_path, \n output_test_table_path=None, output_plot_path=None, properties=None, **kwargs) -> None:\n properties = properties or {}\n\n # Call parent class constructor\n super().__init__(properties)\n\n # Input/Output files\n self.io_dict = { \n \"in\": { \"input_dataset_path\": input_dataset_path }, \n \"out\": { \"output_model_path\": output_model_path, \"output_test_table_path\": output_test_table_path, \"output_plot_path\": output_plot_path } \n }\n\n # Properties specific for BB\n self.independent_vars = properties.get('independent_vars', {})\n self.target = properties.get('target', {})\n self.weight = properties.get('weight', {})\n self.kernel = properties.get('kernel', 'rbf')\n self.normalize_cm = properties.get('normalize_cm', False)\n self.random_state_method = properties.get('random_state_method', 5)\n self.random_state_train_test = properties.get('random_state_train_test', 5)\n self.test_size = properties.get('test_size', 0.2)\n self.scale = properties.get('scale', False)\n self.properties = properties\n\n # Check the properties\n self.check_properties(properties)\n\n def check_data_params(self, out_log, err_log):\n \"\"\" Checks all the input/output paths and parameters \"\"\"\n self.io_dict[\"in\"][\"input_dataset_path\"] = check_input_path(self.io_dict[\"in\"][\"input_dataset_path\"], \"input_dataset_path\", out_log, self.__class__.__name__)\n self.io_dict[\"out\"][\"output_model_path\"] = check_output_path(self.io_dict[\"out\"][\"output_model_path\"],\"output_model_path\", False, out_log, self.__class__.__name__)\n if self.io_dict[\"out\"][\"output_test_table_path\"]:\n self.io_dict[\"out\"][\"output_test_table_path\"] = check_output_path(self.io_dict[\"out\"][\"output_test_table_path\"],\"output_test_table_path\", True, out_log, self.__class__.__name__)\n if self.io_dict[\"out\"][\"output_plot_path\"]:\n self.io_dict[\"out\"][\"output_plot_path\"] = check_output_path(self.io_dict[\"out\"][\"output_plot_path\"],\"output_plot_path\", True, out_log, self.__class__.__name__)\n\n @launchlogger\n def launch(self) -> int:\n \"\"\"Execute the :class:`SupportVectorMachine <classification.support_vector_machine.SupportVectorMachine>` classification.support_vector_machine.SupportVectorMachine object.\"\"\"\n\n # check input/output paths and parameters\n self.check_data_params(self.out_log, self.err_log)\n\n # Setup Biobb\n if self.check_restart(): return 0\n self.stage_files()\n\n # load 
dataset\n fu.log('Getting dataset from %s' % self.io_dict[\"in\"][\"input_dataset_path\"], self.out_log, self.global_log)\n if 'columns' in self.independent_vars:\n labels = getHeader(self.io_dict[\"in\"][\"input_dataset_path\"])\n skiprows = 1\n else:\n labels = None\n skiprows = None\n data = pd.read_csv(self.io_dict[\"in\"][\"input_dataset_path\"], header = None, sep=\"\\s+|;|:|,|\\t\", engine=\"python\", skiprows=skiprows, names=labels)\n\n # declare inputs, targets and weights\n # the inputs are all the independent variables\n X = getIndependentVars(self.independent_vars, data, self.out_log, self.__class__.__name__)\n fu.log('Independent variables: [%s]' % (getIndependentVarsList(self.independent_vars)), self.out_log, self.global_log)\n # target\n y = getTarget(self.target, data, self.out_log, self.__class__.__name__)\n fu.log('Target: %s' % (getTargetValue(self.target)), self.out_log, self.global_log)\n # weights\n if self.weight:\n w = getWeight(self.weight, data, self.out_log, self.__class__.__name__)\n fu.log('Weight column provided', self.out_log, self.global_log)\n\n # train / test split\n fu.log('Creating train and test sets', self.out_log, self.global_log)\n arrays_sets = (X, y)\n # if user provide weights\n if self.weight:\n arrays_sets = arrays_sets + (w,)\n X_train, X_test, y_train, y_test, w_train, w_test = train_test_split(*arrays_sets, test_size=self.test_size, random_state = self.random_state_train_test)\n else:\n X_train, X_test, y_train, y_test = train_test_split(*arrays_sets, test_size=self.test_size, random_state = self.random_state_train_test)\n\n # scale dataset\n if self.scale: \n fu.log('Scaling dataset', self.out_log, self.global_log)\n scaler = StandardScaler()\n X_train = scaler.fit_transform(X_train)\n\n # classification\n fu.log('Training dataset applying support vector machine', self.out_log, self.global_log)\n model = svm.SVC(kernel = self.kernel, probability = True, random_state = self.random_state_method)\n arrays_fit = (X_train, y_train)\n # if user provide weights\n if self.weight:\n arrays_fit = arrays_fit + (w_train,)\n\n model.fit(*arrays_fit)\n\n y_hat_train = model.predict(X_train)\n # classification report\n cr_train = classification_report(y_train, y_hat_train)\n # log loss\n yhat_prob_train = model.predict_proba(X_train)\n l_loss_train = log_loss(y_train, yhat_prob_train)\n fu.log('Calculating scores and report for training dataset\\n\\nCLASSIFICATION REPORT\\n\\n%s\\nLog loss: %.3f\\n' % (cr_train, l_loss_train), self.out_log, self.global_log)\n\n # compute confusion matrix\n cnf_matrix_train = confusion_matrix(y_train, y_hat_train)\n np.set_printoptions(precision=2)\n if self.normalize_cm:\n cnf_matrix_train = cnf_matrix_train.astype('float') / cnf_matrix_train.sum(axis=1)[:, np.newaxis]\n cm_type = 'NORMALIZED CONFUSION MATRIX'\n else:\n cm_type = 'CONFUSION MATRIX, WITHOUT NORMALIZATION'\n\n fu.log('Calculating confusion matrix for training dataset\\n\\n%s\\n\\n%s\\n' % (cm_type, cnf_matrix_train), self.out_log, self.global_log)\n\n if self.scale:\n X_test = scaler.transform(X_test)\n y_hat_test = model.predict(X_test)\n test_table = pd.DataFrame()\n y_hat_prob = model.predict_proba(X_test)\n y_hat_prob = np.around(y_hat_prob, decimals=2)\n y_hat_prob = tuple(map(tuple, y_hat_prob))\n test_table['P' + np.array2string(np.unique(y_test))] = y_hat_prob\n y_test = y_test.reset_index(drop=True)\n test_table['target'] = y_test\n fu.log('Testing\\n\\nTEST DATA\\n\\n%s\\n' % test_table, self.out_log, self.global_log)\n\n # classification report\n 
cr = classification_report(y_test, y_hat_test)\n # log loss\n yhat_prob = model.predict_proba(X_test)\n l_loss = log_loss(y_test, yhat_prob)\n fu.log('Calculating scores and report for testing dataset\\n\\nCLASSIFICATION REPORT\\n\\n%s\\nLog loss: %.3f\\n' % (cr, l_loss), self.out_log, self.global_log)\n\n # compute confusion matrix\n cnf_matrix = confusion_matrix(y_test, y_hat_test)\n np.set_printoptions(precision=2)\n if self.normalize_cm:\n cnf_matrix = cnf_matrix.astype('float') / cnf_matrix.sum(axis=1)[:, np.newaxis]\n cm_type = 'NORMALIZED CONFUSION MATRIX'\n else:\n cm_type = 'CONFUSION MATRIX, WITHOUT NORMALIZATION'\n\n fu.log('Calculating confusion matrix for testing dataset\\n\\n%s\\n\\n%s\\n' % (cm_type, cnf_matrix), self.out_log, self.global_log)\n\n if(self.io_dict[\"out\"][\"output_test_table_path\"]): \n fu.log('Saving testing data to %s' % self.io_dict[\"out\"][\"output_test_table_path\"], self.out_log, self.global_log)\n test_table.to_csv(self.io_dict[\"out\"][\"output_test_table_path\"], index = False, header=True)\n\n # plot \n if self.io_dict[\"out\"][\"output_plot_path\"]: \n vs = y.unique().tolist()\n vs.sort()\n if len(vs) > 2:\n plot = plotMultipleCM(cnf_matrix_train, cnf_matrix, self.normalize_cm, vs)\n fu.log('Saving confusion matrix plot to %s' % self.io_dict[\"out\"][\"output_plot_path\"], self.out_log, self.global_log)\n else:\n plot = plotBinaryClassifier(model, yhat_prob_train, yhat_prob, cnf_matrix_train, cnf_matrix, y_train, y_test, normalize=self.normalize_cm)\n fu.log('Saving binary classifier evaluator plot to %s' % self.io_dict[\"out\"][\"output_plot_path\"], self.out_log, self.global_log)\n plot.savefig(self.io_dict[\"out\"][\"output_plot_path\"], dpi=150)\n\n # save model, scaler and parameters\n tv = y.unique().tolist()\n tv.sort()\n variables = {\n 'target': self.target,\n 'independent_vars': self.independent_vars,\n 'scale': self.scale,\n 'target_values': tv\n }\n fu.log('Saving model to %s' % self.io_dict[\"out\"][\"output_model_path\"], self.out_log, self.global_log)\n with open(self.io_dict[\"out\"][\"output_model_path\"], \"wb\") as f:\n joblib.dump(model, f)\n if self.scale: joblib.dump(scaler, f)\n joblib.dump(variables, f)\n\n return 0\n\ndef support_vector_machine(input_dataset_path: str, output_model_path: str, output_test_table_path: str = None, output_plot_path: str = None, properties: dict = None, **kwargs) -> int:\n \"\"\"Execute the :class:`SupportVectorMachine <classification.support_vector_machine.SupportVectorMachine>` class and\n execute the :meth:`launch() <classification.support_vector_machine.SupportVectorMachine.launch>` method.\"\"\"\n\n return SupportVectorMachine(input_dataset_path=input_dataset_path, \n output_model_path=output_model_path, \n output_test_table_path=output_test_table_path, \n output_plot_path=output_plot_path,\n properties=properties, **kwargs).launch()\n\ndef main():\n \"\"\"Command line execution of this building block. Please check the command line documentation.\"\"\"\n parser = argparse.ArgumentParser(description=\"Wrapper of the scikit-learn SupportVectorMachine method.\", formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))\n parser.add_argument('--config', required=False, help='Configuration file')\n\n # Specific args of each building block\n required_args = parser.add_argument_group('required arguments')\n required_args.add_argument('--input_dataset_path', required=True, help='Path to the input dataset. 
Accepted formats: csv.')\n required_args.add_argument('--output_model_path', required=True, help='Path to the output model file. Accepted formats: pkl.')\n parser.add_argument('--output_test_table_path', required=False, help='Path to the test table file. Accepted formats: csv.')\n parser.add_argument('--output_plot_path', required=False, help='Path to the statistics plot. If target is binary it shows confusion matrix, distributions of the predicted probabilities of both classes and ROC curve. If target is non-binary it shows confusion matrix. Accepted formats: png.')\n\n args = parser.parse_args()\n args.config = args.config or \"{}\"\n properties = settings.ConfReader(config=args.config).get_prop_dic()\n\n # Specific call of each building block\n support_vector_machine(input_dataset_path=args.input_dataset_path,\n output_model_path=args.output_model_path, \n output_test_table_path=args.output_test_table_path, \n output_plot_path=args.output_plot_path, \n properties=properties)\n\nif __name__ == '__main__':\n main()\n\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ], [ "sklearn.preprocessing.StandardScaler" ], [ "sklearn.model_selection.train_test_split", "sklearn.metrics.confusion_matrix", "sklearn.metrics.log_loss", "sklearn.svm.SVC", "sklearn.preprocessing.StandardScaler", "sklearn.metrics.classification_report" ] ]
YuXie96/time
[ "8539d55d2449c712f54331b06720ab7faf3593df" ]
[ "evaluate.py" ]
[ "import torch\nfrom utils.data_util import char_list\nfrom utils.train_util import data_init, model_init\n\n\ndef eval_total_acc(config):\n # initialize data loaders\n test_loader = data_init(mode='test', use_velocity=config.use_velocity,\n t_scale=config.t_scale, batch_s=config.batch_s,\n context=config.context, context_w=config.context_w)\n # initialize model\n inp_size = 3\n if config.context is not None:\n inp_size += config.context_w\n model = model_init(mode='test', model_type=config.rnn_type,\n input_size=inp_size, hidden_size=config.hidden_size,\n save_path=config.save_path)\n\n correct = 0\n total = 0\n with torch.no_grad():\n for test_data, test_label in test_loader:\n hid = model.init_hidden(config.batch_s)\n for t_ in range(test_data.shape[0]):\n output, hid = model(test_data[t_], hid)\n if t_ >= test_data.shape[0] - config.readout_steps:\n _, predicted = torch.max(output.detach(), 1)\n total += test_label.size(0)\n correct += (predicted == test_label).sum().item()\n # Accuracy and loss of the network on the test set:\n test_acc = 100 * correct / total\n print(\"Test Accuracy is: {:.1f} %\".format(test_acc))\n return test_acc\n\n\ndef eval_class_acc(config):\n # initialize data loaders\n test_loader = data_init(mode='test', use_velocity=config.use_velocity,\n t_scale=config.t_scale, batch_s=config.batch_s,\n context=config.context, context_w=config.context_w)\n\n # initialize model\n inp_size = 3\n if config.context is not None:\n inp_size += config.context_w\n model = model_init(mode='test', model_type=config.rnn_type,\n input_size=inp_size, hidden_size=config.hidden_size,\n save_path=config.save_path)\n\n # prepare to count predictions for each class\n classes = char_list\n correct_pred = {classname: 0 for classname in classes}\n total_pred = {classname: 0 for classname in classes}\n\n # again no gradients needed\n with torch.no_grad():\n for test_data, test_label in test_loader:\n hid = model.init_hidden(config.batch_s)\n for t_ in range(test_data.shape[0]):\n output, hid = model(test_data[t_], hid)\n if t_ >= test_data.shape[0] - config.readout_steps:\n _, predictions = torch.max(output.detach(), 1)\n # collect the correct predictions for each class\n for lab, prediction in zip(test_label, predictions):\n if lab == prediction:\n correct_pred[classes[lab]] += 1\n total_pred[classes[lab]] += 1\n\n # print accuracy for each class\n for classname, correct_count in correct_pred.items():\n accuracy = 100 * float(correct_count) / total_pred[classname]\n print(\"Accuracy for class {} is: {:.1f} %\".format(classname, accuracy))\n" ]
[ [ "torch.no_grad" ] ]
nuannuanhcc/mmdetection
[ "26162d7fd49d2b87ead2bf5d9d8fbabd2b8933bb" ]
[ "mmdet/apis/runner/base_runner.py" ]
[ "# Copyright (c) Open-MMLab. All rights reserved.\nimport logging\nimport os.path as osp\nimport warnings\nfrom abc import ABCMeta, abstractmethod\n\nimport torch\nfrom torch.optim import Optimizer\n\nimport mmcv\nfrom mmcv.parallel import is_module_wrapper\nfrom .checkpoint import load_checkpoint\nfrom .dist_utils import get_dist_info\nfrom .hooks import HOOKS, Hook, IterTimerHook\nfrom .log_buffer import LogBuffer\nfrom .priority import get_priority\nfrom .utils import get_time_str\n\n\nclass BaseRunner(metaclass=ABCMeta):\n \"\"\"The base class of Runner, a training helper for PyTorch.\n\n All subclasses should implement the following APIs:\n\n - ``run()``\n - ``train()``\n - ``val()``\n - ``save_checkpoint()``\n\n Args:\n model (:obj:`torch.nn.Module`): The model to be run.\n batch_processor (callable): A callable method that process a data\n batch. The interface of this method should be\n `batch_processor(model, data, train_mode) -> dict`\n optimizer (dict or :obj:`torch.optim.Optimizer`): It can be either an\n optimizer (in most cases) or a dict of optimizers (in models that\n requires more than one optimizer, e.g., GAN).\n work_dir (str, optional): The working directory to save checkpoints\n and logs. Defaults to None.\n logger (:obj:`logging.Logger`): Logger used during training.\n Defaults to None. (The default value is just for backward\n compatibility)\n meta (dict | None): A dict records some import information such as\n environment info and seed, which will be logged in logger hook.\n Defaults to None.\n max_epochs (int, optional): Total training epochs.\n max_iters (int, optional): Total training iterations.\n \"\"\"\n\n def __init__(self,\n model,\n batch_processor=None,\n optimizer=None,\n work_dir=None,\n logger=None,\n meta=None,\n max_iters=None,\n max_epochs=None):\n if batch_processor is not None:\n if not callable(batch_processor):\n raise TypeError('batch_processor must be callable, '\n f'but got {type(batch_processor)}')\n warnings.warn('batch_processor is deprecated, please implement '\n 'train_step() and val_step() in the model instead.')\n # raise an error is `batch_processor` is not None and\n # `model.train_step()` exists.\n if is_module_wrapper(model):\n _model = model.module\n else:\n _model = model\n if hasattr(_model, 'train_step') or hasattr(_model, 'val_step'):\n raise RuntimeError(\n 'batch_processor and model.train_step()/model.val_step() '\n 'cannot be both available.')\n else:\n assert hasattr(model, 'train_step')\n\n # check the type of `optimizer`\n if isinstance(optimizer, dict):\n for name, optim in optimizer.items():\n if not isinstance(optim, Optimizer):\n raise TypeError(\n f'optimizer must be a dict of torch.optim.Optimizers, '\n f'but optimizer[\"{name}\"] is a {type(optim)}')\n elif not isinstance(optimizer, Optimizer) and optimizer is not None:\n raise TypeError(\n f'optimizer must be a torch.optim.Optimizer object '\n f'or dict or None, but got {type(optimizer)}')\n\n # check the type of `logger`\n if not isinstance(logger, logging.Logger):\n raise TypeError(f'logger must be a logging.Logger object, '\n f'but got {type(logger)}')\n\n # check the type of `meta`\n if meta is not None and not isinstance(meta, dict):\n raise TypeError(\n f'meta must be a dict or None, but got {type(meta)}')\n\n self.model = model\n self.batch_processor = batch_processor\n self.optimizer = optimizer\n self.logger = logger\n self.meta = meta\n\n # create work_dir\n if mmcv.is_str(work_dir):\n self.work_dir = osp.abspath(work_dir)\n 
mmcv.mkdir_or_exist(self.work_dir)\n elif work_dir is None:\n self.work_dir = None\n else:\n raise TypeError('\"work_dir\" must be a str or None')\n\n # get model name from the model class\n if hasattr(self.model, 'module'):\n self._model_name = self.model.module.__class__.__name__\n else:\n self._model_name = self.model.__class__.__name__\n\n self._rank, self._world_size = get_dist_info()\n self.timestamp = get_time_str()\n self.mode = None\n self._hooks = []\n self._epoch = 0\n self._iter = 0\n self._inner_iter = 0\n\n if max_epochs is not None and max_iters is not None:\n raise ValueError(\n 'Only one of `max_epochs` or `max_iters` can be set.')\n\n self._max_epochs = max_epochs\n self._max_iters = max_iters\n # TODO: Redesign LogBuffer, it is not flexible and elegant enough\n self.log_buffer = LogBuffer()\n\n @property\n def model_name(self):\n \"\"\"str: Name of the model, usually the module class name.\"\"\"\n return self._model_name\n\n @property\n def rank(self):\n \"\"\"int: Rank of current process. (distributed training)\"\"\"\n return self._rank\n\n @property\n def world_size(self):\n \"\"\"int: Number of processes participating in the job.\n (distributed training)\"\"\"\n return self._world_size\n\n @property\n def hooks(self):\n \"\"\"list[:obj:`Hook`]: A list of registered hooks.\"\"\"\n return self._hooks\n\n @property\n def epoch(self):\n \"\"\"int: Current epoch.\"\"\"\n return self._epoch\n\n @property\n def iter(self):\n \"\"\"int: Current iteration.\"\"\"\n return self._iter\n\n @property\n def inner_iter(self):\n \"\"\"int: Iteration in an epoch.\"\"\"\n return self._inner_iter\n\n @property\n def max_epochs(self):\n \"\"\"int: Maximum training epochs.\"\"\"\n return self._max_epochs\n\n @property\n def max_iters(self):\n \"\"\"int: Maximum training iterations.\"\"\"\n return self._max_iters\n\n @abstractmethod\n def train(self):\n pass\n\n @abstractmethod\n def val(self):\n pass\n\n @abstractmethod\n def run(self, data_loaders, workflow, **kwargs):\n pass\n\n @abstractmethod\n def save_checkpoint(self,\n out_dir,\n filename_tmpl,\n save_optimizer=True,\n meta=None,\n create_symlink=True):\n pass\n\n def current_lr(self):\n \"\"\"Get current learning rates.\n\n Returns:\n list[float] | dict[str, list[float]]: Current learning rates of all\n param groups. If the runner has a dict of optimizers, this\n method will return a dict.\n \"\"\"\n if isinstance(self.optimizer, torch.optim.Optimizer):\n lr = [group['lr'] for group in self.optimizer.param_groups]\n elif isinstance(self.optimizer, dict):\n lr = dict()\n for name, optim in self.optimizer.items():\n lr[name] = [group['lr'] for group in optim.param_groups]\n else:\n raise RuntimeError(\n 'lr is not applicable because optimizer does not exist.')\n return lr\n\n def current_momentum(self):\n \"\"\"Get current momentums.\n\n Returns:\n list[float] | dict[str, list[float]]: Current momentums of all\n param groups. 
If the runner has a dict of optimizers, this\n method will return a dict.\n \"\"\"\n\n def _get_momentum(optimizer):\n momentums = []\n for group in optimizer.param_groups:\n if 'momentum' in group.keys():\n momentums.append(group['momentum'])\n elif 'betas' in group.keys():\n momentums.append(group['betas'][0])\n else:\n momentums.append(0)\n return momentums\n\n if self.optimizer is None:\n raise RuntimeError(\n 'momentum is not applicable because optimizer does not exist.')\n elif isinstance(self.optimizer, torch.optim.Optimizer):\n momentums = _get_momentum(self.optimizer)\n elif isinstance(self.optimizer, dict):\n momentums = dict()\n for name, optim in self.optimizer.items():\n momentums[name] = _get_momentum(optim)\n return momentums\n\n def register_hook(self, hook, priority='NORMAL'):\n \"\"\"Register a hook into the hook list.\n\n The hook will be inserted into a priority queue, with the specified\n priority (See :class:`Priority` for details of priorities).\n For hooks with the same priority, they will be triggered in the same\n order as they are registered.\n\n Args:\n hook (:obj:`Hook`): The hook to be registered.\n priority (int or str or :obj:`Priority`): Hook priority.\n Lower value means higher priority.\n \"\"\"\n assert isinstance(hook, Hook)\n if hasattr(hook, 'priority'):\n raise ValueError('\"priority\" is a reserved attribute for hooks')\n priority = get_priority(priority)\n hook.priority = priority\n # insert the hook to a sorted list\n inserted = False\n for i in range(len(self._hooks) - 1, -1, -1):\n if priority >= self._hooks[i].priority:\n self._hooks.insert(i + 1, hook)\n inserted = True\n break\n if not inserted:\n self._hooks.insert(0, hook)\n\n def register_hook_from_cfg(self, hook_cfg):\n \"\"\"Register a hook from its cfg.\n\n Args:\n hook_cfg (dict): Hook config. 
It should have at least keys 'type'\n and 'priority' indicating its type and priority.\n\n Notes:\n The specific hook class to register should not use 'type' and\n 'priority' arguments during initialization.\n \"\"\"\n hook_cfg = hook_cfg.copy()\n priority = hook_cfg.pop('priority', 'NORMAL')\n hook = mmcv.build_from_cfg(hook_cfg, HOOKS)\n self.register_hook(hook, priority=priority)\n\n def call_hook(self, fn_name):\n \"\"\"Call all hooks.\n\n Args:\n fn_name (str): The function name in each hook to be called, such as\n \"before_train_epoch\".\n \"\"\"\n for hook in self._hooks:\n getattr(hook, fn_name)(self)\n\n def load_checkpoint(self, filename, map_location='cpu', strict=False):\n self.logger.info('load checkpoint from %s', filename)\n return load_checkpoint(self.model, filename, map_location, strict,\n self.logger)\n\n def resume(self,\n checkpoint,\n resume_optimizer=True,\n map_location='default'):\n if map_location == 'default':\n if torch.cuda.is_available():\n device_id = torch.cuda.current_device()\n checkpoint = self.load_checkpoint(\n checkpoint,\n map_location=lambda storage, loc: storage.cuda(device_id))\n else:\n checkpoint = self.load_checkpoint(checkpoint)\n else:\n checkpoint = self.load_checkpoint(\n checkpoint, map_location=map_location)\n\n self._epoch = checkpoint['meta']['epoch']\n self._iter = checkpoint['meta']['iter']\n if 'optimizer' in checkpoint and resume_optimizer:\n if isinstance(self.optimizer, Optimizer):\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n elif isinstance(self.optimizer, dict):\n for k in self.optimizer.keys():\n self.optimizer[k].load_state_dict(\n checkpoint['optimizer'][k])\n else:\n raise TypeError(\n 'Optimizer should be dict or torch.optim.Optimizer '\n f'but got {type(self.optimizer)}')\n\n self.logger.info('resumed epoch %d, iter %d', self.epoch, self.iter)\n\n def register_lr_hook(self, lr_config):\n if isinstance(lr_config, dict):\n assert 'policy' in lr_config\n policy_type = lr_config.pop('policy')\n # If the type of policy is all in lower case, e.g., 'cyclic',\n # then its first letter will be capitalized, e.g., to be 'Cyclic'.\n # This is for the convenient usage of Lr updater.\n # Since this is not applicable for `\n # CosineAnnealingLrUpdater`,\n # the string will not be changed if it contains capital letters.\n if policy_type == policy_type.lower():\n policy_type = policy_type.title()\n hook_type = policy_type + 'LrUpdaterHook'\n lr_config['type'] = hook_type\n hook = mmcv.build_from_cfg(lr_config, HOOKS)\n else:\n hook = lr_config\n self.register_hook(hook)\n\n def register_momentum_hook(self, momentum_config):\n if momentum_config is None:\n return\n if isinstance(momentum_config, dict):\n assert 'policy' in momentum_config\n policy_type = momentum_config.pop('policy')\n # If the type of policy is all in lower case, e.g., 'cyclic',\n # then its first letter will be capitalized, e.g., to be 'Cyclic'.\n # This is for the convenient usage of momentum updater.\n # Since this is not applicable for\n # `CosineAnnealingMomentumUpdater`,\n # the string will not be changed if it contains capital letters.\n if policy_type == policy_type.lower():\n policy_type = policy_type.title()\n hook_type = policy_type + 'MomentumUpdaterHook'\n momentum_config['type'] = hook_type\n hook = mmcv.build_from_cfg(momentum_config, HOOKS)\n else:\n hook = momentum_config\n self.register_hook(hook)\n\n def register_optimizer_hook(self, optimizer_config):\n if optimizer_config is None:\n return\n if isinstance(optimizer_config, dict):\n 
optimizer_config.setdefault('type', 'OptimizerHook')\n hook = mmcv.build_from_cfg(optimizer_config, HOOKS)\n else:\n hook = optimizer_config\n self.register_hook(hook)\n\n def register_checkpoint_hook(self, checkpoint_config):\n if checkpoint_config is None:\n return\n if isinstance(checkpoint_config, dict):\n checkpoint_config.setdefault('type', 'CheckpointHook')\n hook = mmcv.build_from_cfg(checkpoint_config, HOOKS)\n else:\n hook = checkpoint_config\n self.register_hook(hook)\n\n def register_logger_hooks(self, log_config):\n if log_config is None:\n return\n log_interval = log_config['interval']\n for info in log_config['hooks']:\n logger_hook = mmcv.build_from_cfg(\n info, HOOKS, default_args=dict(interval=log_interval))\n self.register_hook(logger_hook, priority='VERY_LOW')\n\n def register_training_hooks(self,\n lr_config,\n optimizer_config=None,\n checkpoint_config=None,\n log_config=None,\n momentum_config=None):\n \"\"\"Register default hooks for training.\n\n Default hooks include:\n\n - LrUpdaterHook\n - MomentumUpdaterHook\n - OptimizerStepperHook\n - CheckpointSaverHook\n - IterTimerHook\n - LoggerHook(s)\n \"\"\"\n self.register_lr_hook(lr_config)\n self.register_momentum_hook(momentum_config)\n self.register_optimizer_hook(optimizer_config)\n self.register_checkpoint_hook(checkpoint_config)\n self.register_hook(IterTimerHook())\n self.register_logger_hooks(log_config)\n" ]
[ [ "torch.cuda.is_available", "torch.cuda.current_device" ] ]
ZJULearning/SRDet
[ "12d9302fad742f64ca3c8e05cd601d7dca1bf81e", "12d9302fad742f64ca3c8e05cd601d7dca1bf81e" ]
[ "mmdet3d/ops/furthest_point_sample/points_sampler.py", "mmdet3d/models/model_utils/utils.py" ]
[ "import torch\nfrom mmcv.runner import force_fp32\nfrom torch import nn as nn\nfrom typing import List\n\nfrom .furthest_point_sample import (furthest_point_sample,\n furthest_point_sample_with_dist)\nfrom .utils import calc_square_dist\n\n\ndef get_sampler_type(sampler_type):\n \"\"\"Get the type and mode of points sampler.\n\n Args:\n sampler_type (str): The type of points sampler.\n The valid value are \"D-FPS\", \"F-FPS\", or \"FS\".\n\n Returns:\n class: Points sampler type.\n \"\"\"\n if sampler_type == 'D-FPS':\n sampler = DFPS_Sampler\n elif sampler_type == 'F-FPS':\n sampler = FFPS_Sampler\n elif sampler_type == 'FS':\n sampler = FS_Sampler\n elif sampler_type == 'RS':\n sampler = RS_Sampler\n else:\n raise ValueError('Only \"sampler_type\" of \"D-FPS\", \"F-FPS\", or \"FS\"'\n f' are supported, got {sampler_type}')\n\n return sampler\n\n\nclass Points_Sampler(nn.Module):\n \"\"\"Points sampling.\n\n Args:\n num_point (list[int]): Number of sample points.\n fps_mod_list (list[str]: Type of FPS method, valid mod\n ['F-FPS', 'D-FPS', 'FS'], Default: ['D-FPS'].\n F-FPS: using feature distances for FPS.\n D-FPS: using Euclidean distances of points for FPS.\n FS: using F-FPS and D-FPS simultaneously.\n fps_sample_range_list (list[int]): Range of points to apply FPS.\n Default: [-1].\n \"\"\"\n\n def __init__(self,\n num_point: List[int],\n fps_mod_list: List[str] = ['D-FPS'],\n fps_sample_range_list: List[int] = [-1]):\n super(Points_Sampler, self).__init__()\n # FPS would be applied to different fps_mod in the list,\n # so the length of the num_point should be equal to\n # fps_mod_list and fps_sample_range_list.\n assert len(num_point) == len(fps_mod_list) == len(\n fps_sample_range_list)\n self.num_point = num_point\n self.fps_sample_range_list = fps_sample_range_list\n self.samplers = nn.ModuleList()\n for fps_mod in fps_mod_list:\n self.samplers.append(get_sampler_type(fps_mod)())\n self.fp16_enabled = False\n\n @force_fp32()\n def forward(self, points_xyz, features):\n \"\"\"forward.\n\n Args:\n points_xyz (Tensor): (B, N, 3) xyz coordinates of the features.\n features (Tensor): (B, C, N) Descriptors of the features.\n\n Return:\n Tensor: (B, npoint, sample_num) Indices of sampled points.\n \"\"\"\n indices = []\n last_fps_end_index = 0\n\n for fps_sample_range, sampler, npoint in zip(\n self.fps_sample_range_list, self.samplers, self.num_point):\n assert fps_sample_range < points_xyz.shape[1]\n\n if fps_sample_range == -1:\n sample_points_xyz = points_xyz[:, last_fps_end_index:]\n sample_features = features[:, :, last_fps_end_index:] if \\\n features is not None else None\n else:\n sample_points_xyz = \\\n points_xyz[:, last_fps_end_index:fps_sample_range]\n sample_features = \\\n features[:, :, last_fps_end_index:fps_sample_range] if \\\n features is not None else None\n\n fps_idx = sampler(sample_points_xyz.contiguous(), sample_features,\n npoint)\n\n indices.append(fps_idx + last_fps_end_index)\n last_fps_end_index += fps_sample_range\n indices = torch.cat(indices, dim=1)\n\n return indices\n\n\nclass DFPS_Sampler(nn.Module):\n \"\"\"DFPS_Sampling.\n\n Using Euclidean distances of points for FPS.\n \"\"\"\n\n def __init__(self):\n super(DFPS_Sampler, self).__init__()\n\n def forward(self, points, features, npoint):\n \"\"\"Sampling points with D-FPS.\"\"\"\n fps_idx = furthest_point_sample(points.contiguous(), npoint)\n return fps_idx\n\n\nclass FFPS_Sampler(nn.Module):\n \"\"\"FFPS_Sampler.\n\n Using feature distances for FPS.\n \"\"\"\n\n def __init__(self):\n 
super(FFPS_Sampler, self).__init__()\n\n def forward(self, points, features, npoint):\n \"\"\"Sampling points with F-FPS.\"\"\"\n assert features is not None, \\\n 'feature input to FFPS_Sampler should not be None'\n features_for_fps = torch.cat([points, features.transpose(1, 2)], dim=2)\n features_dist = calc_square_dist(\n features_for_fps, features_for_fps, norm=False)\n fps_idx = furthest_point_sample_with_dist(features_dist, npoint)\n return fps_idx\n\n\nclass FS_Sampler(nn.Module):\n \"\"\"FS_Sampling.\n\n Using F-FPS and D-FPS simultaneously.\n \"\"\"\n\n def __init__(self):\n super(FS_Sampler, self).__init__()\n\n def forward(self, points, features, npoint):\n \"\"\"Sampling points with FS_Sampling.\"\"\"\n assert features is not None, \\\n 'feature input to FS_Sampler should not be None'\n features_for_fps = torch.cat([points, features.transpose(1, 2)], dim=2)\n features_dist = calc_square_dist(\n features_for_fps, features_for_fps, norm=False)\n fps_idx_ffps = furthest_point_sample_with_dist(features_dist, npoint)\n fps_idx_dfps = furthest_point_sample(points, npoint)\n fps_idx = torch.cat([fps_idx_ffps, fps_idx_dfps], dim=1)\n return fps_idx\n\n\nclass RS_Sampler(nn.Module):\n\n def __init__(self):\n super(RS_Sampler, self).__init__()\n\n def forward(self, points, features, npoint):\n fps_idx = []\n for _ in range(points.shape[0]):\n fps_idx.append(torch.randperm(points.shape[1], dtype=torch.int32)[:npoint])\n fps_idx = torch.stack(fps_idx, dim=0).to(points.device)\n return fps_idx\n\n", "import copy\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom scipy.optimize import linear_sum_assignment\nfrom mmcv.cnn import ConvModule\n\nfrom mmdet3d.ops import build_sa_module, spconv as spconv\nfrom mmdet3d.ops.pointnet_modules.point_fp_module import PointFPModule\nfrom mmdet3d.ops.roiaware_pool3d import RoIAwarePool3d\nfrom mmdet3d.ops import QueryAndGroup\nfrom mmdet3d.core.bbox import bbox_overlaps_3d, DepthInstance3DBoxes\n\n\ndef sigmoid_focal_loss(\n inputs: torch.Tensor,\n targets: torch.Tensor,\n alpha: float = -1,\n gamma: float = 2,\n reduction: str = \"none\",\n) -> torch.Tensor:\n \"\"\"\n Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.\n Args:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n targets: A float tensor with the same shape as inputs. Stores the binary\n classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n alpha: (optional) Weighting factor in range (0,1) to balance\n positive vs negative examples. 
Default = -1 (no weighting).\n gamma: Exponent of the modulating factor (1 - p_t) to\n balance easy vs hard examples.\n reduction: 'none' | 'mean' | 'sum'\n 'none': No reduction will be applied to the output.\n 'mean': The output will be averaged.\n 'sum': The output will be summed.\n Returns:\n Loss tensor with the reduction option applied.\n \"\"\"\n p = torch.sigmoid(inputs)\n ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction=\"none\")\n p_t = p * targets + (1 - p) * (1 - targets)\n loss = ce_loss * ((1 - p_t) ** gamma)\n\n if alpha >= 0:\n alpha_t = alpha * targets + (1 - alpha) * (1 - targets)\n loss = alpha_t * loss\n\n if reduction == \"mean\":\n loss = loss.mean()\n elif reduction == \"sum\":\n loss = loss.sum()\n\n return loss\n\n\ndef is_dist_avail_and_initialized():\n if not dist.is_available():\n return False\n if not dist.is_initialized():\n return False\n return True\n\n\ndef get_world_size():\n if not is_dist_avail_and_initialized():\n return 1\n return dist.get_world_size()\n\n\ndef pool_roi(points_feat, points_xyz, rois, roi_pooler, rcnn_sp_conv=None):\n roi_feats = []\n for batch_i in range(rois.shape[0]):\n roi_feat = roi_pooler( # [roi-num, out_x, out_y, out_z, C]\n rois[batch_i], points_xyz[batch_i],\n points_feat[batch_i].transpose(0, 1).contiguous())\n roi_feats.append(roi_feat)\n roi_feats = torch.cat(roi_feats, dim=0) # [batch * roi-num, out_x, out_y, out_z, C]\n\n if rcnn_sp_conv is None:\n proposal_feats = roi_feats.permute(0, 4, 1, 2, 3)\n else:\n extend_batch_size = roi_feats.shape[0]\n sparse_shape = roi_feats.shape[1:4]\n sparse_idx = roi_feats.sum(dim=-1).nonzero(as_tuple=False)\n roi_features = roi_feats[sparse_idx[:, 0], sparse_idx[:, 1],\n sparse_idx[:, 2], sparse_idx[:, 3]]\n coords = sparse_idx.int()\n roi_features = spconv.SparseConvTensor(roi_features, coords, sparse_shape,\n extend_batch_size)\n proposal_feats = rcnn_sp_conv(roi_features).dense()\n return proposal_feats # (batch * roi-num, C, out_x, out_y, out_z)\n\n\ndef roi_head_match(pred_logits, pred_centers, pred_size_res_norm, targets,\n class_weight, bbox_weight, center_weight,\n use_focal=False, use_iou=False, iou_weight=1.0, mean_size=None):\n with torch.no_grad():\n if use_focal:\n out_prob = pred_logits.flatten(0, 1).sigmoid()\n else:\n out_prob = pred_logits.flatten(0, 1).softmax(-1)\n out_center = pred_centers.flatten(0, 1) # [batch_size * num_queries, 3]\n out_size_res_norm = pred_size_res_norm.flatten(0, 1)\n\n # Also concat the target labels and boxes\n tgt_ids = torch.cat([v[\"labels\"] for v in targets])\n tgt_cts = torch.cat([v[\"centers\"] for v in targets])\n tgt_sizes_res_norm = torch.cat([v[\"sizes_res_norm\"] for v in targets])\n\n # Compute the classification cost. 
Contrary to the loss, we don't use the NLL,\n # but approximate it in 1 - proba[target class].\n # The 1 is a constant that doesn't change the matching, it can be ommitted.\n if use_focal:\n # Compute the classification cost.\n alpha = 0.25\n gamma = 2.0\n neg_cost_class = (1 - alpha) * (out_prob ** gamma) * (-(1 - out_prob + 1e-8).log())\n pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())\n cost_class = pos_cost_class[:, tgt_ids] - neg_cost_class[:, tgt_ids]\n else:\n cost_class = -out_prob[:, tgt_ids]\n\n # Compute the L1 cost between boxes\n cost_bbox_xyz = torch.cdist(out_center, tgt_cts, p=1)\n # both out_size_res and tgt_sizes are normed!\n cost_bbox_size = torch.cdist(out_size_res_norm, tgt_sizes_res_norm, p=1)\n cost_bbox = cost_bbox_xyz * center_weight + cost_bbox_size\n\n if use_iou:\n out_dir_res = torch.zeros([out_center.shape[0], 1]).to(out_center.device)\n out_size = out_size_res_norm * mean_size + mean_size\n pred_bbox = torch.cat([out_center, torch.clamp(out_size, 0), out_dir_res], dim=-1)\n tgt_sizes = tgt_sizes_res_norm * mean_size + mean_size\n tgt_dir_res = torch.zeros([tgt_cts.shape[0], 1]).to(out_center.device)\n target_bbox = torch.cat([tgt_cts, tgt_sizes, tgt_dir_res], dim=-1)\n iou_3d = bbox_overlaps_3d(pred_bbox, target_bbox, coordinate='lidar')\n cost_bbox = cost_bbox + iou_3d * iou_weight\n\n # Final cost matrix\n C = bbox_weight * cost_bbox + class_weight * cost_class\n\n bs, num_queries = pred_logits.shape[:2]\n C = C.view(bs, num_queries, -1).cpu()\n\n sizes = [len(v[\"labels\"]) for v in targets]\n indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]\n return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64))\n for i, j in indices]\n\n\ndef roi_head_matchv2(pred_logits, pred_centers, pred_size_res_norm, pred_angles, targets,\n class_weight, bbox_weight, center_weight, use_focal=False,\n use_iou=False, iou_weight=1.0, mean_size=None, with_roi=False,\n feat_points=None, point_weight=1.):\n with torch.no_grad():\n if use_focal:\n out_prob = pred_logits.flatten(0, 1).sigmoid()\n else:\n out_prob = pred_logits.flatten(0, 1).softmax(-1)\n out_center = pred_centers.flatten(0, 1) # [batch_size * num_queries, 3]\n out_size_res_norm = pred_size_res_norm.flatten(0, 1)\n out_angle = pred_angles.flatten(0, 1)\n\n # Also concat the target labels and boxes\n tgt_ids = torch.cat([v[\"labels\"] for v in targets])\n tgt_cts = torch.cat([v[\"centers\"] for v in targets])\n tgt_sizes_res_norm = torch.cat([v[\"sizes_res_norm\"] for v in targets])\n tgt_angles = torch.cat([v[\"angles\"] for v in targets])\n\n # Compute the classification cost. 
Contrary to the loss, we don't use the NLL,\n # but approximate it in 1 - proba[target class].\n # The 1 is a constant that doesn't change the matching, it can be ommitted.\n # Compute the classification cost.\n if use_focal:\n alpha = 0.25\n gamma = 2.0\n neg_cost_class = (1 - alpha) * (out_prob ** gamma) * (-(1 - out_prob + 1e-8).log())\n pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())\n cost_class = pos_cost_class[:, tgt_ids] - neg_cost_class[:, tgt_ids]\n else:\n cost_class = -out_prob[:, tgt_ids]\n\n # Compute the L1 cost between boxes\n cost_bbox_xyz = torch.cdist(out_center, tgt_cts, p=1)\n # both out_size_res and tgt_sizes are normed!\n cost_bbox_size = torch.cdist(out_size_res_norm, tgt_sizes_res_norm, p=1)\n cost_bbox = cost_bbox_xyz * center_weight + cost_bbox_size\n\n if use_iou:\n out_size = out_size_res_norm * mean_size + mean_size\n if not with_roi:\n out_angle = torch.zeros([out_center.shape[0], 1]).to(out_center.device)\n pred_bbox = torch.cat([out_center, torch.clamp(out_size, 0), out_angle], dim=-1)\n tgt_sizes = tgt_sizes_res_norm * mean_size + mean_size\n target_bbox = torch.cat([tgt_cts, tgt_sizes, tgt_angles.unsqueeze(-1)], dim=-1)\n iou_3d = bbox_overlaps_3d(pred_bbox, target_bbox, coordinate='lidar')\n cost_iou = -iou_3d\n cost_bbox = cost_bbox + cost_iou * iou_weight\n\n bs, num_queries = pred_logits.shape[:2]\n sizes = [len(v[\"labels\"]) for v in targets]\n if feat_points is not None:\n assert use_iou\n pred_bbox = pred_bbox.view(bs, -1, 7)\n target_bbox = target_bbox.split(sizes, 0)\n cost_points = torch.zeros_like(cost_bbox)\n px, py = 0, 0\n for i in range(bs):\n pred_bbox3d = DepthInstance3DBoxes(pred_bbox[i], origin=(0.5, 0.5, 0.5))\n target_bbox3d = DepthInstance3DBoxes(target_bbox[i], origin=(0.5, 0.5, 0.5))\n p = feat_points[i]\n cost_point = -torch.mm(pred_bbox3d.points_in_boxes(p).transpose(0, 1).float(),\n target_bbox3d.points_in_boxes(p).float())\n dx, dy = cost_point.shape\n cost_points[px:px + dx, py:py + dy] = cost_point\n px += dx\n py += dy\n cost_bbox += cost_points * point_weight\n\n # Final cost matrix\n C = bbox_weight * cost_bbox + class_weight * cost_class\n C = C.view(bs, num_queries, -1).cpu()\n\n indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]\n return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64))\n for i, j in indices]\n\n\ndef get_src_permutation_idx(indices):\n # permute predictions following indices\n batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])\n src_idx = torch.cat([src for (src, _) in indices])\n return batch_idx, src_idx\n\n\nclass DynamicConv(nn.Module):\n\n def __init__(self,\n hidden_dim=256,\n dim_dynamic=64,\n num_dynamic=2,\n pooler_resolution=7):\n super().__init__()\n\n self.hidden_dim = hidden_dim\n self.dim_dynamic = dim_dynamic\n self.num_dynamic = num_dynamic\n self.num_params = self.hidden_dim * self.dim_dynamic\n self.dynamic_layer = nn.Linear(self.hidden_dim, self.num_dynamic * self.num_params)\n\n self.norm1 = nn.LayerNorm(self.dim_dynamic)\n self.norm2 = nn.LayerNorm(self.hidden_dim)\n\n self.activation = nn.ReLU(inplace=True)\n\n if isinstance(pooler_resolution, (list, tuple)):\n pooler_grid_num = sum(p ** 3 for p in pooler_resolution)\n else:\n pooler_grid_num = pooler_resolution ** 3\n\n num_output = self.hidden_dim * pooler_grid_num\n self.out_layer = nn.Linear(num_output, self.hidden_dim)\n self.norm3 = nn.LayerNorm(self.hidden_dim)\n\n def forward(self, pro_features, 
roi_features, **kwargs):\n '''\n pro_features: (1, N * nr_boxes, self.d_model)\n roi_features: (7*7*7, N * nr_boxes, self.d_model)\n '''\n features = roi_features.permute(1, 0, 2) # (N*roi-num, 7*7*7, C)\n # (N*roi-num, 1, 2*hidden-dim*dim-dynamic)\n parameters = self.dynamic_layer(pro_features).permute(1, 0, 2)\n\n param1 = parameters[:, :, :self.num_params].view(-1, self.hidden_dim, self.dim_dynamic)\n param2 = parameters[:, :, self.num_params:].view(-1, self.dim_dynamic, self.hidden_dim)\n\n features = torch.bmm(features, param1)\n features = self.norm1(features)\n features = self.activation(features)\n\n features = torch.bmm(features, param2)\n features = self.norm2(features)\n features = self.activation(features)\n\n features = features.flatten(1)\n features = self.out_layer(features)\n features = self.norm3(features)\n features = self.activation(features)\n\n return features, None\n\n\nclass DynamicConvOneSimple(nn.Module):\n\n def __init__(self,\n hidden_dim=256,\n pooler_resolution=7,\n pooler_grid_num=None,\n proposal_feat_dim=None,\n use_bmm=False,\n remove_inter=False):\n super().__init__()\n\n self.hidden_dim = hidden_dim\n self.proposal_feat_dim = proposal_feat_dim\n self.use_bmm = use_bmm\n self.remove_inter = remove_inter\n\n if self.proposal_feat_dim is None:\n self.proposal_feat_dim = self.hidden_dim\n\n if not self.remove_inter:\n dynamic_out = self.hidden_dim * self.hidden_dim if self.use_bmm else self.hidden_dim\n self.dynamic_layer = nn.Linear(self.proposal_feat_dim, dynamic_out)\n\n self.norm1 = nn.LayerNorm(self.hidden_dim)\n self.activation = nn.ReLU(inplace=True)\n\n if pooler_grid_num is None:\n if isinstance(pooler_resolution, (list, tuple)):\n pooler_grid_num = sum(p ** 3 for p in pooler_resolution)\n else:\n pooler_grid_num = pooler_resolution ** 3\n\n num_output = self.hidden_dim * pooler_grid_num\n self.out_layer = nn.Linear(num_output, self.proposal_feat_dim)\n self.norm2 = nn.LayerNorm(self.proposal_feat_dim)\n\n def forward(self, pro_features, roi_features, **kwargs):\n '''\n pro_features: (1, N * nr_boxes, proposal-feat-dim)\n roi_features: (7*7*7, N * nr_boxes, C)\n '''\n features = roi_features.permute(1, 0, 2) # (N*roi-num, 7*7*7, C)\n if not self.remove_inter:\n # (N*roi-num, 1, 2*hidden-dim*dim-dynamic)\n pro_features = self.dynamic_layer(pro_features)\n if self.use_bmm:\n pro_features = pro_features.view(-1, self.hidden_dim, self.hidden_dim)\n features = torch.bmm(features, pro_features)\n else:\n pro_features = pro_features.view(-1, 1, self.hidden_dim)\n features = features * pro_features\n\n mid_features = features\n features = self.norm1(features)\n features = self.activation(features) # (N*roi-num, 7*7*7, C)\n\n features = features.flatten(1)\n features = self.out_layer(features) # (N*roi-num, proposal-feat-dim)\n\n features = self.norm2(features)\n features = self.activation(features)\n\n return features, mid_features\n\n\nclass DynamicMultiConvOneSimple(nn.Module):\n\n def __init__(self,\n hidden_dim=256,\n dim_dynamic=64,\n num_dynamic=2,\n pooler_resolution=7,\n roi_out=[3, 5],\n proposal_feat_dim=None,\n use_interact=True,\n use_bmm=False,\n share_norm=False,\n share_with_act=False):\n super().__init__()\n\n self.hidden_dim = hidden_dim\n self.roi_out = roi_out\n self.proposal_feat_dim = proposal_feat_dim\n self.use_interact = use_interact\n self.use_bmm = use_bmm\n self.share_norm = share_norm\n self.share_with_act = share_with_act\n\n if self.proposal_feat_dim is None:\n self.proposal_feat_dim = self.hidden_dim\n\n if self.use_interact:\n 
dynamic_out = self.hidden_dim * self.hidden_dim if self.use_bmm else self.hidden_dim\n self.dynamic_layer = nn.Linear(self.proposal_feat_dim, dynamic_out)\n\n self.activation = nn.ReLU(inplace=True)\n\n self.norm1 = nn.ModuleList()\n self.norm2 = nn.ModuleList()\n self.out_layer = nn.ModuleList()\n for roi_size in self.roi_out:\n self.norm1.append(nn.LayerNorm(self.hidden_dim))\n num_output = self.hidden_dim * roi_size ** 3\n self.out_layer.append(nn.Linear(num_output, self.proposal_feat_dim))\n self.norm2.append(nn.LayerNorm(self.proposal_feat_dim))\n\n if self.share_norm:\n self.norm2 = nn.LayerNorm(self.proposal_feat_dim)\n\n def forward(self, pro_features, roi_features, **kwargs):\n '''\n pro_features: (1, N * nr_boxes, proposal-feat-dim)\n roi_features: (5*5*5 + 7*7*7, N * nr_boxes, C)\n '''\n roi_feats = []\n roi_grid_num_last = 0\n roi_grid_num = 0\n for roi_size in self.roi_out:\n roi_grid_num += roi_size ** 3\n roi_feats.append(roi_features[roi_grid_num_last:roi_grid_num])\n roi_grid_num_last = roi_grid_num\n\n # (N*roi-num, 1, 2*hidden-dim*dim-dynamic)\n if self.use_interact:\n pro_features = self.dynamic_layer(pro_features)\n feats = []\n for i, roi_feat in enumerate(roi_feats):\n feat = roi_feat.permute(1, 0, 2) # (N*roi-num, 5*5*5, C)\n if self.use_interact:\n if self.use_bmm:\n pro_features = pro_features.view(-1, self.hidden_dim, self.hidden_dim)\n m_feat = torch.bmm(feat, pro_features)\n else:\n pro_features = pro_features.view(-1, 1, self.hidden_dim)\n m_feat = feat * pro_features\n else:\n m_feat = feat\n\n features = self.norm1[i](m_feat)\n features = self.activation(features) # (N*roi-num, 7*7*7, C)\n\n features = features.flatten(1)\n features = self.out_layer[i](features) # (N*roi-num, proposal-feat-dim)\n\n if not self.share_norm:\n features = self.norm2[i](features)\n features = self.activation(features)\n feats.append(features)\n\n features = sum(feats)\n if self.share_norm:\n features = self.norm2(features)\n if self.share_with_act:\n features = self.activation(features)\n\n return features, None\n\n\nclass DynamicConvOne(nn.Module):\n\n def __init__(self,\n hidden_dim=256,\n pooler_resolution=7,\n simple_dy=False,\n dy_add=False,\n simple_out=False,\n no_norm1=False,\n no_norm2=False,\n cls_mult=False,\n cls_add=False,\n cls_multd5=False,\n use_dy_layer=True,\n dy_use_norm=False,\n dy_use_norm_act_lin=False,\n proposal_feat_dim=None):\n super().__init__()\n\n self.hidden_dim = hidden_dim\n self.simple_dy = simple_dy\n self.dy_add = dy_add\n if self.dy_add:\n assert self.simple_dy\n self.simple_out = simple_out\n self.no_norm1 = no_norm1\n self.no_norm2 = no_norm2\n self.cls_mult = cls_mult\n self.cls_add = cls_add\n self.cls_multd5 = cls_multd5\n self.use_dy_layer = use_dy_layer\n self.dy_use_norm = dy_use_norm\n self.dy_use_norm_act_lin = dy_use_norm_act_lin\n self.proposal_feat_dim = proposal_feat_dim\n\n if self.proposal_feat_dim:\n self.reduction = nn.Linear(self.proposal_feat_dim, self.hidden_dim)\n else:\n self.proposal_feat_dim = self.hidden_dim\n\n if not self.use_dy_layer or self.dy_use_norm or self.dy_use_norm_act_lin:\n assert self.simple_dy\n\n if not self.simple_dy:\n self.dynamic_layer = nn.Linear(self.hidden_dim, self.hidden_dim * self.hidden_dim)\n elif self.use_dy_layer:\n if self.dy_use_norm:\n self.dynamic_layer = nn.Sequential(\n nn.Linear(self.hidden_dim, self.hidden_dim),\n nn.LayerNorm(self.hidden_dim))\n elif self.dy_use_norm_act_lin:\n self.dynamic_layer = nn.Sequential(\n nn.Linear(self.hidden_dim, self.hidden_dim),\n 
nn.LayerNorm(self.hidden_dim),\n nn.ReLU(inplace=True),\n nn.Linear(self.hidden_dim, self.hidden_dim))\n else:\n self.dynamic_layer = nn.Linear(self.hidden_dim, self.hidden_dim)\n\n if not self.no_norm1:\n self.norm1 = nn.LayerNorm(self.hidden_dim)\n self.activation = nn.ReLU(inplace=True)\n\n if isinstance(pooler_resolution, (list, tuple)):\n pooler_grid_num = sum(p ** 3 for p in pooler_resolution)\n else:\n pooler_grid_num = pooler_resolution ** 3\n\n if self.simple_out:\n num_output = pooler_grid_num\n self.out_layer = nn.Linear(num_output, 1)\n else:\n num_output = self.hidden_dim * pooler_grid_num\n self.out_layer = nn.Linear(num_output, self.hidden_dim)\n if not self.no_norm2:\n self.norm2 = nn.LayerNorm(self.hidden_dim)\n\n def forward(self, pro_features, roi_features, **kwargs):\n '''\n pro_features: (1, N * nr_boxes, self.d_model)\n roi_features: (7*7*7, N * nr_boxes, self.d_model)\n '''\n features = roi_features.permute(1, 0, 2) # (N*roi-num, 7*7*7, C)\n # (N*roi-num, 1, 2*hidden-dim*dim-dynamic)\n if self.simple_dy:\n if self.use_dy_layer:\n pro_features = self.dynamic_layer(pro_features).view(-1, 1, self.hidden_dim)\n else:\n pro_features = pro_features.view(-1, 1, self.hidden_dim)\n if self.dy_add:\n features = features + pro_features\n else:\n features = features * pro_features\n if self.cls_mult or self.cls_add or self.cls_multd5:\n seeds_cls_feat = kwargs.get('seeds_cls_feat', None)\n assert seeds_cls_feat is not None\n seeds_cls_feat = seeds_cls_feat.transpose(0, 1) # (N*roi-num, 7*7*7, 1)\n if self.cls_mult:\n features = features * seeds_cls_feat\n elif self.cls_add:\n features = features + seeds_cls_feat\n elif self.cls_multd5:\n features = features * (seeds_cls_feat + 0.5)\n\n else:\n parameters = self.dynamic_layer(pro_features).permute(1, 0, 2).view(\n -1, self.hidden_dim, self.hidden_dim) # (N*nr_boxes, C, C)\n features = torch.bmm(features, parameters)\n mid_features = features\n if not self.no_norm1:\n features = self.norm1(features)\n features = self.activation(features) # (N*roi-num, 7*7*7, C)\n\n if self.simple_out:\n features = features.transpose(1, 2)\n features = self.out_layer(features).squeeze(-1)\n else:\n features = features.flatten(1)\n features = self.out_layer(features)\n\n if not self.no_norm2:\n features = self.norm2(features)\n features = self.activation(features)\n\n return features, mid_features\n\n\nclass DynamicConvOneAware(nn.Module):\n\n def __init__(self,\n hidden_dim=256,\n pooler_resolution=5,\n proposal_feat_dim=None):\n super().__init__()\n\n self.hidden_dim = hidden_dim\n self.pooler_resolution = pooler_resolution\n self.proposal_feat_dim = proposal_feat_dim\n\n if self.proposal_feat_dim is None:\n self.proposal_feat_dim = self.hidden_dim\n\n if isinstance(pooler_resolution, (list, tuple)):\n pooler_grid_num = sum(p ** 3 for p in pooler_resolution)\n else:\n pooler_grid_num = pooler_resolution ** 3\n self.pooler_grid_num = pooler_grid_num\n\n num_output = self.hidden_dim * pooler_grid_num\n self.dynamic_layer = nn.Linear(self.proposal_feat_dim, num_output)\n\n self.norm1 = nn.LayerNorm(self.hidden_dim)\n self.activation = nn.ReLU(inplace=True)\n\n self.out_layer = nn.Linear(num_output, self.proposal_feat_dim)\n self.norm2 = nn.LayerNorm(self.proposal_feat_dim)\n\n def forward(self, pro_features, roi_features, **kwargs):\n '''\n pro_features: (1, N * nr_boxes, proposal-feat-dim)\n roi_features: (5*5*5, N * nr_boxes, C)\n '''\n pro_features = self.dynamic_layer(pro_features)\n pro_features = pro_features.view(-1, self.hidden_dim, # (5*5*5, 
N * nr_boxes, C)\n self.pooler_grid_num).permute(2, 0, 1)\n\n features = roi_features * pro_features\n mid_features = features\n\n features = self.norm1(features)\n features = self.activation(features) # (5*5*5, N*nr_boxes, C)\n\n features = self.out_layer(features.transpose(0, 1).flatten(1))\n\n features = self.norm2(features) # (N*roi-num, proposal-feat-dim)\n features = self.activation(features)\n\n return features, mid_features\n\n\nclass DynamicConvMHA(nn.Module):\n\n def __init__(self,\n hidden_dim=256,\n nhead=4,\n dropout=0.,\n use_normact=True,\n dropoutnorm=False,\n plus_pro_feat=False,\n use_roi_att=False,\n roi_dropoutnorm=False,\n use_ldn=False):\n super().__init__()\n self.hidden_dim = hidden_dim\n self.use_normact = use_normact\n self.dropoutnorm = dropoutnorm\n self.plus_pro_feat = plus_pro_feat\n self.use_roi_att = use_roi_att\n self.roi_dropoutnorm = roi_dropoutnorm\n self.use_ldn = use_ldn\n\n if self.use_roi_att:\n self.roi_att = nn.MultiheadAttention(self.hidden_dim, nhead, dropout=dropout)\n if self.roi_dropoutnorm:\n self.roi_dropout = nn.Dropout(dropout)\n self.roi_norm = nn.LayerNorm(self.hidden_dim)\n\n self.dynamic_layer = nn.MultiheadAttention(self.hidden_dim, nhead, dropout=dropout)\n if self.use_normact:\n self.norm = nn.LayerNorm(self.hidden_dim)\n self.activation = nn.ReLU(inplace=True)\n elif self.dropoutnorm:\n self.dropout = nn.Dropout(dropout)\n self.norm = nn.LayerNorm(self.hidden_dim)\n\n if self.use_ldn:\n self.linear1 = nn.Linear(self.hidden_dim, self.hidden_dim)\n self.dropout1 = nn.Dropout(dropout)\n self.norm1 = nn.LayerNorm(self.hidden_dim)\n\n def forward(self, pro_features, roi_features, **kwargs):\n '''\n pro_features: (1, N * nr_boxes, self.d_model)\n roi_features: (7*7*7, N * nr_boxes, self.d_model)\n '''\n if self.use_roi_att:\n roi_features2 = self.roi_att(roi_features, roi_features, roi_features)[0]\n if self.roi_dropoutnorm:\n roi_features2 = roi_features + self.roi_dropout(roi_features2)\n roi_features2 = self.roi_norm(roi_features2)\n roi_features = roi_features2\n features = self.dynamic_layer(pro_features, roi_features, roi_features)[0]\n if self.use_normact:\n features = self.norm(features)\n features = self.activation(features)\n elif self.dropoutnorm:\n features = self.dropout(features)\n features = self.norm(features)\n if self.plus_pro_feat:\n features = features + pro_features\n\n if self.use_ldn:\n features2 = self.linear1(features)\n features2 = features + self.dropout1(features2)\n features = self.norm1(features2)\n\n return features.squeeze(0), None\n\n\nclass DynamicConvCLSMHA(nn.Module):\n\n def __init__(self,\n hidden_dim=256,\n nhead=4,\n dropout=0.,\n dropoutnorm=False):\n super().__init__()\n self.hidden_dim = hidden_dim\n self.dropoutnorm = dropoutnorm\n\n self.dynamic_layer = nn.MultiheadAttention(self.hidden_dim, nhead, dropout=dropout)\n if self.dropoutnorm:\n self.dropout = nn.Dropout(dropout)\n self.norm = nn.LayerNorm(self.hidden_dim)\n\n def forward(self, pro_features, roi_features, **kwargs):\n '''\n pro_features: (1, N * nr_boxes, self.d_model)\n roi_features: (7*7*7, N * nr_boxes, self.d_model)\n '''\n ex_features = torch.cat([pro_features, roi_features], dim=0)\n features = self.dynamic_layer(ex_features, ex_features, ex_features)[0][[0]]\n if self.dropoutnorm:\n features = self.dropout(features)\n features = self.norm(features)\n\n return features.squeeze(0), None\n\n\nclass MultiGroupMLPMerge(nn.Module):\n\n def __init__(self,\n proposal_dim=256,\n int_mlp_group=1,\n roi_out=[3, 5]):\n 
super().__init__()\n self.proposal_dim = proposal_dim\n self.int_mlp_group = int_mlp_group\n self.roi_out = roi_out\n\n assert self.proposal_dim % self.int_mlp_group == 0\n dim_per_group = self.proposal_dim // self.int_mlp_group\n self.roi_feat_mlp = nn.ModuleList()\n for roi_size in self.roi_out:\n inc = roi_size ** 3 * dim_per_group\n outc = dim_per_group\n self.roi_feat_mlp.append(nn.Linear(inc, outc))\n\n def forward(self, pro_features, roi_features, **kwargs):\n '''\n pro_features: (1, N * nr_boxes, self.d_model)\n roi_features: (5*5*5 + 7*7*7, N * nr_boxes, self.d_model)\n '''\n roi_feats = []\n roi_grid_num_last = 0\n roi_grid_num = 0\n for roi_size in self.roi_out:\n roi_grid_num += roi_size ** 3\n roi_feats.append(roi_features[roi_grid_num_last:roi_grid_num])\n roi_grid_num_last = roi_grid_num\n\n roi_feats_merge = []\n n = roi_features.shape[1]\n for roi_feat, mlp in zip(roi_feats, self.roi_feat_mlp):\n rf = roi_feat.permute(1, 2, 0).contiguous().view(n, self.int_mlp_group, -1)\n roi_feats_merge.append(mlp(rf).view(1, n, -1))\n\n pro_features = sum(roi_feats_merge) + pro_features\n return pro_features, None\n\n\nclass MultiConv3dMerge(nn.Module):\n\n def __init__(self,\n proposal_dim=256,\n roi_out=[3, 5],\n use_ln=False):\n super().__init__()\n # assert list(roi_out) == [3] or list(roi_out) == [3, 5] or list(roi_out) == [1, 3, 5]\n self.proposal_dim = proposal_dim\n self.roi_out = roi_out\n self.use_ln = use_ln\n\n self.roi_feat_conv = nn.ModuleList()\n self.roi_feat_norm = nn.ModuleList()\n for roi_size in self.roi_out:\n if roi_size == 1:\n self.roi_feat_conv.append(nn.Conv3d(proposal_dim, proposal_dim, 1, 1, 0))\n else:\n self.roi_feat_conv.append(nn.Conv3d(proposal_dim, proposal_dim, 3, 1, 0))\n self.roi_feat_norm.append(nn.LayerNorm(proposal_dim))\n\n def forward(self, pro_features, roi_features, **kwargs):\n '''\n pro_features: (1, N * nr_boxes, self.d_model)\n roi_features: (5*5*5 + 7*7*7, N * nr_boxes, self.d_model)\n '''\n C = roi_features.shape[-1]\n roi_feats = []\n roi_grid_num_last = 0\n roi_grid_num = 0\n for roi_size in self.roi_out:\n roi_grid_num += roi_size ** 3\n roi_feats.append(\n roi_features[roi_grid_num_last:roi_grid_num].permute(1, 2, 0).contiguous().view(\n -1, C, roi_size, roi_size, roi_size)) # (N*nr_boxes, C, 5, 5, 5)\n roi_grid_num_last = roi_grid_num\n\n pre_feat = None\n for i, (roi_feat, conv, roi_size) in enumerate(zip(\n roi_feats[::-1], self.roi_feat_conv[::-1], self.roi_out[::-1])):\n # 5x5x5, 3x3x3 or 5x5x5, 3x3x3, 1x1x1\n if pre_feat is not None:\n feat = roi_feat + pre_feat\n else:\n feat = roi_feat\n feat = conv(feat)\n if self.use_ln:\n feat = feat.permute(0, 2, 3, 4, 1)\n feat = self.roi_feat_norm[i](feat).permute(0, 4, 1, 2, 3)\n pre_feat = feat\n\n feat = feat.flatten(2).permute(2, 0, 1)\n pro_features = feat + pro_features\n return pro_features, None\n\n\nclass ROIConv(nn.Module):\n\n def __init__(self,\n in_channels=256,\n conv_channels=(128, 128),\n connect_from=None,\n conv_cfg=dict(type='Conv1d'),\n norm_cfg=dict(type='BN1d'),\n act_cfg=dict(type='ReLU')):\n super(ROIConv, self).__init__()\n if connect_from is not None:\n ex_conv_channels = [in_channels] + list(conv_channels)\n assert ex_conv_channels[connect_from] == ex_conv_channels[-1]\n\n self.in_channels = in_channels\n self.conv_channels = conv_channels\n self.connect_from = connect_from\n\n self.roi_conv_list = nn.ModuleList()\n for k in range(len(self.conv_channels)):\n self.roi_conv_list.append(\n ConvModulev2(\n in_channels,\n self.conv_channels[k],\n 1,\n padding=0,\n 
conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg,\n bias=True,\n inplace=True))\n in_channels = self.conv_channels[k]\n\n def forward(self, x):\n feats = [x]\n for conv in self.roi_conv_list:\n in_feat = feats[-1]\n feats.append(conv(in_feat))\n\n if self.connect_from is None:\n return feats[-1]\n return feats[-1] + feats[self.connect_from]\n\n\nclass RoIAggPool3d(nn.Module):\n\n def __init__(self,\n use_agg=True,\n use_pre_conv=True,\n radii=0.3,\n sample_num=16,\n mlp_channels=[256, 256],\n normalize_xyz=True,\n norm_cfg=dict(type='BN2d'),\n bias='auto',\n pre_roi_conv_cfg=dict(\n in_channels=256,\n conv_channels=(128, 128),\n connect_from=None,\n conv_cfg=dict(type='Conv1d'),\n norm_cfg=dict(type='BN1d'),\n act_cfg=dict(type='ReLU')),\n roi_pooler='RoIPool3d',\n roi_pool_cfg=dict(\n out_size=5,\n max_pts_per_voxel=64,\n mode='max')):\n super(RoIAggPool3d, self).__init__()\n self.use_agg = use_agg\n self.use_pre_conv = use_pre_conv\n\n if self.use_agg:\n self.grouper = QueryAndGroup(\n radii,\n sample_num,\n min_radius=0,\n use_xyz=True,\n normalize_xyz=normalize_xyz)\n\n mlp_spec = copy.deepcopy(mlp_channels)\n mlp_spec[0] += 3\n\n self.mlp = nn.Sequential()\n for i in range(len(mlp_spec) - 1):\n self.mlp.add_module(\n f'layer{i}',\n ConvModulev2(\n mlp_spec[i],\n mlp_spec[i + 1],\n kernel_size=(1, 1),\n stride=(1, 1),\n conv_cfg=dict(type='Conv2d'),\n norm_cfg=norm_cfg,\n bias=bias))\n if self.use_pre_conv:\n self.pre_roi_conv = ROIConv(**pre_roi_conv_cfg)\n self.roi_pooler = eval(roi_pooler)(**roi_pool_cfg)\n\n def forward(self, points_xyz, features, rois):\n if self.use_agg:\n # (B, C, num_point, nsample)\n new_features = self.grouper(points_xyz, points_xyz, features)\n # (B, mlp[-1], num_point, nsample)\n new_features = self.mlp(new_features)\n # (B, mlp[-1], num_point, 1)\n new_features = F.max_pool2d(new_features, kernel_size=[1, new_features.size(3)])\n features = new_features.squeeze(-1) # (B, C, num_point)\n\n if self.use_pre_conv:\n features = self.pre_roi_conv(features)\n # [batch * roi-num, C, out_x*out_y*out_z]\n roi_feats = self.roi_pooler(points_xyz, features, rois)\n return features, roi_feats\n\n\nclass RoIPool3d(nn.Module):\n\n def __init__(self,\n out_size=5,\n max_pts_per_voxel=64,\n mode='max'):\n super(RoIPool3d, self).__init__()\n self.roi_pooler = RoIAwarePool3d(out_size, max_pts_per_voxel, mode)\n\n def forward(self, points_xyz, features, rois):\n roi_feats = []\n for batch_i in range(rois.shape[0]):\n roi_feat = self.roi_pooler( # [roi-num, out_x, out_y, out_z, C]\n rois[batch_i], points_xyz[batch_i],\n features[batch_i].transpose(0, 1).contiguous())\n roi_feats.append(roi_feat)\n roi_feats = torch.cat(roi_feats, dim=0) # [batch * roi-num, out_x, out_y, out_z, C]\n # [batch * roi-num, C, out_x*out_y*out_z]\n roi_feats = roi_feats.permute(0, 4, 1, 2, 3).flatten(2)\n return roi_feats\n\n\nclass MultiRoIPool3d(nn.Module):\n\n def __init__(self,\n out_size=(3, 5),\n max_pts_per_voxel=(96, 96),\n mode='max'):\n super(MultiRoIPool3d, self).__init__()\n assert len(out_size) == len(max_pts_per_voxel)\n self.roi_poolers = nn.ModuleList()\n for o, pts in zip(out_size, max_pts_per_voxel):\n self.roi_poolers.append(RoIAwarePool3d(o, pts, mode))\n\n def forward(self, points_xyz, features, rois):\n roi_feats = []\n for batch_i in range(rois.shape[0]):\n roi_feat = []\n for pooler in self.roi_poolers:\n roi_f = pooler( # [roi-num, out_x, out_y, out_z, C]\n rois[batch_i], points_xyz[batch_i],\n features[batch_i].transpose(0, 1).contiguous())\n roi_f = roi_f.permute(0, 
4, 1, 2, 3).flatten(2) # [roi-num, C, out_x*out_y*out_z]\n roi_feat.append(roi_f)\n roi_feat = torch.cat(roi_feat, dim=2) # [roi-num, C, M]\n roi_feats.append(roi_feat)\n roi_feats = torch.cat(roi_feats, dim=0) # [batch * roi-num, C, M]\n return roi_feats\n\n\nclass RoIFPPool3d(nn.Module):\n\n def __init__(self,\n out_size=5,\n fp_module_cfg=dict(\n mlp_channels=(128, 128),\n norm_cfg=dict(type='BN2d')),\n use_ray=False):\n super(RoIFPPool3d, self).__init__()\n self.out_size = out_size\n # assert self.out_size % 2 == 1\n self.fp_module = PointFPModule(**fp_module_cfg)\n if use_ray:\n self.base_grid = get_ray_grid(self.out_size)\n else:\n self.base_grid = get_grid(self.out_size)\n\n def forward(self, pts, pts_feature, rois):\n \"\"\"RoIAwarePool3d module forward.\n\n Args:\n pts (torch.Tensor): [B, npoints, 3]\n pts_feature (torch.Tensor): [B, C, npoints]\n rois (torch.Tensor): [B, N, 7],in LiDAR coordinate,\n (x, y, z) is the bottom center of rois\n \"\"\"\n B, roi_num = rois.shape[:2]\n C = pts_feature.shape[1]\n rois[..., 2] = rois[..., 2] + rois[..., 5] / 2.\n base_grid = self.base_grid.to(rois.device) # (out_x*out_y*out_z, 3)\n grid_rois = []\n flatten_rois = rois.view(-1, 7)\n for roi in flatten_rois:\n grid_roi = base_grid * roi[3:6] + roi[0:3] # (out_x*out_y*out_z, 3)\n grid_rois.append(grid_roi)\n grid_rois = torch.stack(grid_rois, dim=0) # (B*roi-num, out_x*out_y*out_z, 3)\n grid_rois = grid_rois.view(B, -1, 3)\n # (B, C, roi-num*out_x*out_y*out_z)\n rois_feats = self.fp_module(grid_rois, pts, None, pts_feature)\n rois_feats = rois_feats.transpose(1, 2).contiguous().view(\n B * roi_num, -1, C).transpose(1, 2)\n return rois_feats\n\n\nclass RoISAPool3d(nn.Module):\n\n def __init__(self,\n out_size=5,\n sa_module_cfg=dict(\n type='PointSAModule',\n num_point=None,\n radius=0.3,\n num_sample=16,\n mlp_channels=[128, 128, 128, 128],\n use_xyz=True,\n normalize_xyz=True),\n use_ray=False):\n super(RoISAPool3d, self).__init__()\n self.out_size = out_size\n self.use_ray = use_ray\n self.sa_module = build_sa_module(sa_module_cfg)\n if self.use_ray:\n self.base_grid = get_ray_grid(self.out_size)\n else:\n self.base_grid = get_grid(self.out_size)\n\n def forward(self, pts, pts_feature, rois):\n \"\"\"RoIAwarePool3d module forward.\n\n Args:\n pts (torch.Tensor): [B, npoints, 3]\n pts_feature (torch.Tensor): [B, C, npoints]\n rois (torch.Tensor): [B, N, 7],in LiDAR coordinate,\n (x, y, z) is the bottom center of rois\n \"\"\"\n B, roi_num = rois.shape[:2]\n C = pts_feature.shape[1]\n rois[..., 2] = rois[..., 2] + rois[..., 5] / 2.\n base_grid = self.base_grid.to(rois.device) # (out_x*out_y*out_z, 3)\n grid_rois = []\n flatten_rois = rois.view(-1, 7)\n for roi in flatten_rois:\n grid_roi = base_grid * roi[3:6] + roi[0:3] # (out_x*out_y*out_z, 3)\n grid_rois.append(grid_roi)\n grid_rois = torch.stack(grid_rois, dim=0) # (B*roi-num, out_x*out_y*out_z, 3)\n grid_rois = grid_rois.view(B, -1, 3) # (B, roi-num*out_x*out_y*out_z, 3)\n # (B, roi-num*out_x*out_y*out_z, C)\n rois_feats = self.sa_module(pts, features=pts_feature, target_xyz=grid_rois)[1]\n rois_feats = rois_feats.view(B * roi_num, -1, C).transpose(1, 2)\n return rois_feats\n\n\ndef get_grid(out_size):\n base_grid_x = torch.arange(0, 1, 1 / out_size) - (out_size - 1) / (2 * out_size)\n base_grid_y = torch.arange(0, 1, 1 / out_size) - (out_size - 1) / (2 * out_size)\n base_grid_z = torch.arange(0, 1, 1 / out_size) - (out_size - 1) / (2 * out_size)\n base_grid_x = base_grid_x.view(-1, 1, 1).repeat(1, out_size, out_size)\n base_grid_y = 
base_grid_y.view(1, -1, 1).repeat(out_size, 1, out_size)\n base_grid_z = base_grid_z.view(1, 1, -1).repeat(out_size, out_size, 1)\n base_grid = torch.stack([base_grid_x, base_grid_y, base_grid_z], dim=-1).view(-1, 3)\n return base_grid\n\n\ndef get_ray_grid(out_size):\n assert out_size == 13\n face_center = torch.tensor([[0.5, 0, 0], [-0.5, 0, 0], [0, 0.5, 0],\n [0, -0.5, 0], [0, 0, 0.5], [0, 0, -0.5]])\n base_grid = torch.cat([face_center, face_center / 2., torch.zeros(1, 3, dtype=torch.float)],\n dim=0)\n return base_grid\n\n\ndef get_clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for i in range(N)])\n\n\ndef get_activation_fn(activation):\n \"\"\"Return an activation function given a string\"\"\"\n if activation == \"relu\":\n return F.relu\n if activation == \"gelu\":\n return F.gelu\n if activation == \"glu\":\n return F.glu\n raise RuntimeError(F\"activation should be relu/gelu, not {activation}.\")\n\n\nclass ConvModulev2(ConvModule):\n\n def forward(self, x, activate=True, norm=True):\n for layer in self.order:\n if layer == 'conv':\n if self.with_explicit_padding:\n x = self.padding_layer(x)\n x = self.conv(x)\n elif layer == 'norm' and norm and self.with_norm:\n if isinstance(self.norm, nn.LayerNorm):\n if x.dim() == 3:\n x = self.norm(x.transpose(1, 2)).transpose(1, 2)\n elif x.dim() == 4:\n x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)\n else:\n raise NotImplementedError\n else:\n x = self.norm(x)\n elif layer == 'act' and activate and self.with_activation:\n x = self.activate(x)\n return x\n\n\nclass TransPool3d(nn.Module):\n\n def __init__(self,\n use_agg=True,\n use_pre_conv=True,\n radii=0.3,\n sample_num=16,\n mlp_channels=[256, 256],\n normalize_xyz=True,\n norm_cfg=dict(type='BN2d'),\n bias='auto',\n pre_roi_conv_cfg=dict(\n in_channels=256,\n conv_channels=(128, 128),\n connect_from=None,\n conv_cfg=dict(type='Conv1d'),\n norm_cfg=dict(type='BN1d'),\n act_cfg=dict(type='ReLU')),\n roi_pool_cfg=dict(\n out_size=5,\n max_pts_per_voxel=64,\n mode='max')):\n super(RoIAggPool3d, self).__init__()\n self.use_agg = use_agg\n self.use_pre_conv = use_pre_conv\n\n if self.use_agg:\n self.grouper = QueryAndGroup(\n radii,\n sample_num,\n min_radius=0,\n use_xyz=True,\n normalize_xyz=normalize_xyz)\n\n mlp_spec = copy.deepcopy(mlp_channels)\n mlp_spec[0] += 3\n\n self.mlp = nn.Sequential()\n for i in range(len(mlp_spec) - 1):\n self.mlp.add_module(\n f'layer{i}',\n ConvModulev2(\n mlp_spec[i],\n mlp_spec[i + 1],\n kernel_size=(1, 1),\n stride=(1, 1),\n conv_cfg=dict(type='Conv2d'),\n norm_cfg=norm_cfg,\n bias=bias))\n if self.use_pre_conv:\n self.pre_roi_conv = ROIConv(**pre_roi_conv_cfg)\n self.roi_pooler = RoIPool3d(**roi_pool_cfg)\n\n def forward(self, points_xyz, features, rois):\n if self.use_agg:\n # (B, C, num_point, nsample)\n new_features = self.grouper(points_xyz, points_xyz, features)\n # (B, mlp[-1], num_point, nsample)\n new_features = self.mlp(new_features)\n # (B, mlp[-1], num_point, 1)\n new_features = F.max_pool2d(new_features, kernel_size=[1, new_features.size(3)])\n features = new_features.squeeze(-1) # (B, C, num_point)\n\n if self.use_pre_conv:\n features = self.pre_roi_conv(features)\n roi_feats = self.roi_pooler(points_xyz, features, rois)\n return features, roi_feats\n" ]
[ [ "torch.stack", "torch.nn.ModuleList", "torch.randperm", "torch.cat" ], [ "torch.cat", "torch.zeros", "torch.cdist", "torch.no_grad", "torch.full_like", "scipy.optimize.linear_sum_assignment", "torch.nn.Dropout", "torch.clamp", "torch.nn.MultiheadAttention", "torch.tensor", "torch.bmm", "torch.arange", "torch.nn.Sequential", "torch.sigmoid", "torch.nn.functional.binary_cross_entropy_with_logits", "torch.nn.ModuleList", "torch.distributed.is_initialized", "torch.zeros_like", "torch.nn.Linear", "torch.nn.Conv3d", "torch.distributed.is_available", "torch.stack", "torch.distributed.get_world_size", "torch.as_tensor", "torch.nn.LayerNorm", "torch.nn.ReLU" ] ]
mdebony/gammapy
[ "015206d2418b1d254f1c9d3ea819ab0c5ece99e9", "29541fbfd90b0895ccc04fd3b9814a6f95511e14" ]
[ "gammapy/datasets/io.py", "gammapy/irf/psf/gauss.py" ]
[ "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport abc\nfrom pathlib import Path\nimport numpy as np\nfrom astropy import units as u\nfrom astropy.io import fits\nfrom astropy.table import Table\nfrom gammapy.data import GTI\nfrom gammapy.utils.scripts import make_path\nfrom gammapy.maps import RegionNDMap\nfrom gammapy.irf import EDispKernelMap, EDispKernel\nfrom .spectrum import SpectrumDatasetOnOff\n\n\nclass DatasetReader(abc.ABC):\n \"\"\"Dataset reader base class\"\"\"\n\n @property\n @abc.abstractmethod\n def tag(self):\n pass\n\n @abc.abstractmethod\n def read(self):\n pass\n\n\nclass DatasetWriter(abc.ABC):\n \"\"\"Dataset writer base class\"\"\"\n\n @property\n @abc.abstractmethod\n def tag(self):\n pass\n\n @abc.abstractmethod\n def write(self, dataset):\n pass\n\n\nclass OGIPDatasetWriter(DatasetWriter):\n \"\"\"Write OGIP files.\n\n If you want to use the written files with Sherpa you have to use the\n ``ogip-sherpa`` format. Then all files will be written in units of 'keV' and\n 'cm2'.\n\n The naming scheme is fixed as following:\n\n * PHA file is named filename.fits\n * BKG file is named filename_bkg.fits\n * ARF file is named filename_arf.fits\n * RMF file is named filename_rmf.fits\n\n Parameters\n ----------\n filename : `pathlib.Path` or str\n Filename.\n format : {\"ogip\", \"ogip-sherpa\"}\n Which format to use.\n overwrite : bool\n Overwrite existing files?\n \"\"\"\n tag = [\"ogip\", \"ogip-sherpa\"]\n\n def __init__(self, filename, format=\"ogip\", overwrite=False):\n filename = make_path(filename)\n filename.parent.mkdir(exist_ok=True, parents=True)\n\n self.filename = filename\n self.format = format\n self.overwrite = overwrite\n\n @staticmethod\n def get_filenames(filename):\n \"\"\"Get filenames\n\n Parameters\n ----------\n filename : `~pathlib.Path`\n Filename\n\n Returns\n -------\n filenames : dict\n Dict of filenames.\n \"\"\"\n suffix = \"\".join(filename.suffixes)\n name = filename.name.replace(suffix, \"\")\n name = f\"{name}{{}}{suffix}\"\n return {\n \"respfile\": name.format(\"_rmf\"),\n \"backfile\": name.format(\"_bkg\"),\n \"ancrfile\": name.format(\"_arf\")\n }\n\n def get_ogip_meta(self, dataset, is_bkg=False):\n \"\"\"Meta info for the OGIP data format\"\"\"\n try:\n livetime = dataset.exposure.meta[\"livetime\"]\n except KeyError:\n raise ValueError(\n \"Storing in ogip format require the livetime \"\n \"to be defined in the exposure meta data\"\n )\n\n hdu_class = \"BKG\" if is_bkg else \"TOTAL\"\n\n meta = {\n \"HDUCLAS2\": hdu_class,\n \"HDUCLAS3\": \"COUNT\",\n \"HDUCLAS4\": \"TYPE:1\",\n \"EXPOSURE\": livetime.to_value(\"s\"),\n \"OBS_ID\": dataset.name,\n }\n\n filenames = OGIPDatasetWriter.get_filenames(self.filename)\n meta[\"ANCRFILE\"] = filenames[\"ancrfile\"]\n\n if dataset.edisp:\n meta[\"BACKFILE\"] = filenames[\"backfile\"]\n\n if dataset.counts_off:\n meta[\"RESPFILE\"] = filenames[\"respfile\"]\n\n return meta\n\n def write(self, dataset):\n \"\"\"Write dataset to files\n\n Parameters\n ----------\n dataset : `SpectrumDatasetOnOff`\n Dataset to write\n \"\"\"\n filenames = self.get_filenames(self.filename)\n\n self.write_pha(dataset, filename=self.filename)\n\n path = self.filename.parent\n self.write_arf(dataset, filename=path / filenames[\"ancrfile\"])\n\n if dataset.counts_off:\n self.write_bkg(dataset, filename=path / filenames[\"backfile\"])\n\n if dataset.edisp:\n self.write_rmf(dataset, filename=path / filenames[\"respfile\"])\n\n def write_rmf(self, dataset, filename):\n \"\"\"Write energy 
dispersion.\n\n Parameters\n ----------\n dataset : `SpectrumDatasetOnOff`\n Dataset to write\n filename : str or `Path`\n Filename to use.\n \"\"\"\n kernel = dataset.edisp.get_edisp_kernel()\n kernel.write(\n filename=filename,\n overwrite=self.overwrite,\n format=self.format\n )\n\n def write_arf(self, dataset, filename):\n \"\"\"Write effective area\n\n Parameters\n ----------\n dataset : `SpectrumDatasetOnOff`\n Dataset to write\n filename : str or `Path`\n Filename to use.\n\n \"\"\"\n aeff = dataset.exposure / dataset.exposure.meta[\"livetime\"]\n aeff.write(\n filename=filename,\n overwrite=self.overwrite,\n format=self.format.replace(\"ogip\", \"ogip-arf\"),\n )\n\n def to_counts_hdulist(self, dataset, is_bkg=False):\n \"\"\"Convert counts region map to hdulist\n\n Parameters\n ----------\n dataset : `SpectrumDatasetOnOff`\n Dataset to write\n is_bkg : bool\n Whether to use counts off.\n \"\"\"\n counts = dataset.counts_off if is_bkg else dataset.counts\n acceptance = dataset.acceptance_off if is_bkg else dataset.acceptance\n\n hdulist = counts.to_hdulist()\n\n table = Table.read(hdulist[\"SPECTRUM\"])\n meta = self.get_ogip_meta(dataset, is_bkg=is_bkg)\n\n if dataset.mask_safe is not None:\n mask_array = dataset.mask_safe.data[:, 0, 0]\n else:\n mask_array = np.ones(acceptance.data.size)\n\n table[\"QUALITY\"] = np.logical_not(mask_array)\n del table.meta[\"QUALITY\"]\n\n table[\"BACKSCAL\"] = acceptance.data[:, 0, 0]\n del table.meta[\"BACKSCAL\"]\n\n # adapt meta data\n table.meta.update(meta)\n hdulist[\"SPECTRUM\"] = fits.BinTableHDU(table)\n return hdulist\n\n def write_pha(self, dataset, filename):\n \"\"\"Write counts file\n\n Parameters\n ----------\n dataset : `SpectrumDatasetOnOff`\n Dataset to write\n filename : str or `Path`\n Filename to use.\n\n \"\"\"\n hdulist = self.to_counts_hdulist(dataset)\n\n if dataset.gti:\n hdu = fits.BinTableHDU(dataset.gti.table, name=\"GTI\")\n hdulist.append(hdu)\n\n hdulist.writeto(filename, overwrite=self.overwrite)\n\n def write_bkg(self, dataset, filename):\n \"\"\"Write off counts file\n\n Parameters\n ----------\n dataset : `SpectrumDatasetOnOff`\n Dataset to write\n filename : str or `Path`\n Filename to use.\n \"\"\"\n hdulist = self.to_counts_hdulist(dataset, is_bkg=True)\n hdulist.writeto(filename, overwrite=self.overwrite)\n\n\nclass OGIPDatasetReader(DatasetReader):\n \"\"\"Read `~gammapy.datasets.SpectrumDatasetOnOff` from OGIP files.\n\n BKG file, ARF, and RMF must be set in the PHA header and be present in\n the same folder.\n\n The naming scheme is fixed to the following scheme:\n\n * PHA file is named ``pha_obs{name}.fits``\n * BKG file is named ``bkg_obs{name}.fits``\n * ARF file is named ``arf_obs{name}.fits``\n * RMF file is named ``rmf_obs{name}.fits``\n with ``{name}`` the dataset name.\n\n Parameters\n ----------\n filename : str or `~pathlib.Path`\n OGIP PHA file to read\n \"\"\"\n tag = \"ogip\"\n\n def __init__(self, filename):\n self.filename = make_path(filename)\n\n def get_valid_path(self, filename):\n \"\"\"Get absolute or relative path\n\n The relative path is with respect to the name of the reference file.\n\n Parameters\n ----------\n filename : str or `Path`\n Filename\n\n Returns\n -------\n filename : `Path`\n Valid path\n \"\"\"\n filename = make_path(filename)\n\n if not filename.exists():\n return self.filename.parent / filename\n else:\n return filename\n\n def get_filenames(self, pha_meta):\n \"\"\"Get filenames\n\n Parameters\n ----------\n pha_meta : dict\n Meta data from the PHA file\n\n 
Returns\n -------\n filenames : dict\n Dict with filenames of \"arffile\", \"rmffile\" (optional)\n and \"bkgfile\" (optional)\n \"\"\"\n filenames = {\n \"arffile\": self.get_valid_path(pha_meta[\"ANCRFILE\"])\n }\n\n if \"BACKFILE\" in pha_meta:\n filenames[\"bkgfile\"] = self.get_valid_path(pha_meta[\"BACKFILE\"])\n\n if \"RESPFILE\" in pha_meta:\n filenames[\"rmffile\"] = self.get_valid_path(pha_meta[\"RESPFILE\"])\n\n return filenames\n\n @staticmethod\n def read_pha(filename):\n \"\"\"Read PHA file\n\n Parameters\n ----------\n filename : str or `Path`\n PHA file name\n\n Returns\n -------\n data : dict\n Dict with counts, acceptance and mask_safe\n \"\"\"\n data = {}\n\n with fits.open(filename, memmap=False) as hdulist:\n data[\"counts\"] = RegionNDMap.from_hdulist(hdulist, format=\"ogip\")\n data[\"acceptance\"] = RegionNDMap.from_hdulist(\n hdulist, format=\"ogip\", ogip_column=\"BACKSCAL\"\n )\n\n if \"GTI\" in hdulist:\n data[\"gti\"] = GTI(Table.read(hdulist[\"GTI\"]))\n\n data[\"mask_safe\"] = RegionNDMap.from_hdulist(\n hdulist, format=\"ogip\", ogip_column=\"QUALITY\"\n )\n\n return data\n\n @staticmethod\n def read_bkg(filename):\n \"\"\"Read PHA background file\n\n Parameters\n ----------\n filename : str or `Path`\n PHA file name\n\n Returns\n -------\n data : dict\n Dict with counts_off and acceptance_off\n \"\"\"\n with fits.open(filename, memmap=False) as hdulist:\n counts_off = RegionNDMap.from_hdulist(hdulist, format=\"ogip\")\n acceptance_off = RegionNDMap.from_hdulist(\n hdulist, ogip_column=\"BACKSCAL\"\n )\n return {\"counts_off\": counts_off, \"acceptance_off\": acceptance_off}\n\n @staticmethod\n def read_rmf(filename, exposure):\n \"\"\"Read RMF file\n\n Parameters\n ----------\n filename : str or `Path`\n PHA file name\n exposure : `RegionNDMap`\n Exposure map\n\n Returns\n -------\n data : `EDispKernelMap`\n Dict with edisp\n \"\"\"\n kernel = EDispKernel.read(filename)\n edisp = EDispKernelMap.from_edisp_kernel(kernel, geom=exposure.geom)\n\n # TODO: resolve this separate handling of exposure for edisp\n edisp.exposure_map.data = exposure.data[:, :, np.newaxis, :]\n return edisp\n\n @staticmethod\n def read_arf(filename, livetime):\n \"\"\"Read ARF file\n\n Parameters\n ----------\n filename : str or `Path`\n PHA file name\n livetime : `Quantity`\n Livetime\n\n Returns\n -------\n data : `RegionNDMap`\n Exposure map\n \"\"\"\n aeff = RegionNDMap.read(filename, format=\"ogip-arf\")\n exposure = aeff * livetime\n exposure.meta[\"livetime\"] = livetime\n return exposure\n\n def read(self):\n \"\"\"Read dataset\n\n Returns\n -------\n dataset : SpectrumDatasetOnOff\n Spectrum dataset\n \"\"\"\n kwargs = self.read_pha(self.filename)\n pha_meta = kwargs[\"counts\"].meta\n\n name = str(pha_meta[\"OBS_ID\"])\n livetime = pha_meta[\"EXPOSURE\"] * u.s\n\n filenames = self.get_filenames(pha_meta=pha_meta)\n exposure = self.read_arf(filenames[\"arffile\"], livetime=livetime)\n\n if \"bkgfile\" in filenames:\n bkg = self.read_bkg(filenames[\"bkgfile\"])\n kwargs.update(bkg)\n\n if \"rmffile\" in filenames:\n kwargs[\"edisp\"] = self.read_rmf(filenames[\"rmffile\"], exposure=exposure)\n\n return SpectrumDatasetOnOff(name=name, exposure=exposure, **kwargs)\n", "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport logging\nimport numpy as np\nfrom astropy import units as u\nfrom astropy.coordinates import Angle\nfrom astropy.io import fits\nfrom astropy.table import Table\nfrom gammapy.maps import MapAxes, MapAxis\nfrom gammapy.utils.array import 
array_stats_str\nfrom gammapy.utils.gauss import MultiGauss2D\nfrom gammapy.utils.interpolation import ScaledRegularGridInterpolator\nfrom gammapy.utils.scripts import make_path\nfrom .table import PSF3D, EnergyDependentTablePSF\n\n__all__ = [\"EnergyDependentMultiGaussPSF\"]\n\nlog = logging.getLogger(__name__)\n\n\nclass EnergyDependentMultiGaussPSF:\n \"\"\"Triple Gauss analytical PSF depending on energy and theta.\n\n To evaluate the PSF call the ``to_energy_dependent_table_psf`` or ``psf_at_energy_and_theta`` methods.\n\n Parameters\n ----------\n energy_axis_true : `MapAxis`\n True energy axis\n offset_axis : `MapAxis`\n Offset axis.\n sigmas : list of 'numpy.ndarray'\n Triple Gauss sigma parameters, where every entry is\n a two dimensional 'numpy.ndarray' containing the sigma\n value for every given energy and theta.\n norms : list of 'numpy.ndarray'\n Triple Gauss norm parameters, where every entry is\n a two dimensional 'numpy.ndarray' containing the norm\n value for every given energy and theta. Norm corresponds\n to the value of the Gaussian at theta = 0.\n meta : dict\n Meta data\n\n Examples\n --------\n Plot R68 of the PSF vs. theta and energy:\n\n .. plot::\n :include-source:\n\n import matplotlib.pyplot as plt\n from gammapy.irf import EnergyDependentMultiGaussPSF\n filename = '$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits'\n psf = EnergyDependentMultiGaussPSF.read(filename, hdu='POINT SPREAD FUNCTION')\n psf.plot_containment(0.68)\n plt.show()\n \"\"\"\n\n tag = \"psf_3gauss\"\n\n def __init__(\n self,\n energy_axis_true,\n offset_axis,\n sigmas,\n norms,\n meta,\n ):\n energy_axis_true.assert_name(\"energy_true\")\n offset_axis.assert_name(\"offset\")\n\n self._energy_axis_true = energy_axis_true\n self._offset_axis = offset_axis\n\n sigmas[0][sigmas[0] == 0] = 1\n sigmas[1][sigmas[1] == 0] = 1\n sigmas[2][sigmas[2] == 0] = 1\n self.sigmas = sigmas\n\n self.norms = norms\n self.meta = meta or {}\n self._interp_norms = self._setup_interpolators(self.norms)\n self._interp_sigmas = self._setup_interpolators(self.sigmas)\n\n @property\n def energy_thresh_lo(self):\n \"\"\"Low energy threshold\"\"\"\n return self.meta[\"LO_THRES\"] * u.TeV\n\n @property\n def energy_thresh_hi(self):\n \"\"\"High energy threshold\"\"\"\n return self.meta[\"HI_THRES\"] * u.TeV\n\n @property\n def energy_axis_true(self):\n return self._energy_axis_true\n\n @property\n def offset_axis(self):\n return self._offset_axis\n\n def _setup_interpolators(self, values_list):\n interps = []\n for values in values_list:\n interp = ScaledRegularGridInterpolator(\n points=(self.offset_axis.center, self.energy_axis_true.center),\n values=values,\n )\n interps.append(interp)\n return interps\n\n @classmethod\n def read(cls, filename, hdu=\"PSF_2D_GAUSS\"):\n \"\"\"Create `EnergyDependentMultiGaussPSF` from FITS file.\n\n Parameters\n ----------\n filename : str\n File name\n \"\"\"\n with fits.open(str(make_path(filename)), memmap=False) as hdulist:\n return cls.from_table_hdu(hdulist[hdu])\n\n @classmethod\n def from_table_hdu(cls, hdu):\n \"\"\"Create `EnergyDependentMultiGaussPSF` from HDU list.\n\n Parameters\n ----------\n hdu : `~astropy.io.fits.BinTableHDU`\n HDU\n \"\"\"\n table = Table.read(hdu)\n\n energy_axis_true = MapAxis.from_table(\n table, column_prefix=\"ENERG\", format=\"gadf-dl3\"\n )\n offset_axis = MapAxis.from_table(\n table, column_prefix=\"THETA\", format=\"gadf-dl3\"\n )\n\n # Get sigmas\n shape = (offset_axis.nbin, energy_axis_true.nbin)\n sigmas = []\n for 
key in [\"SIGMA_1\", \"SIGMA_2\", \"SIGMA_3\"]:\n sigma = hdu.data[key].reshape(shape).copy()\n sigmas.append(sigma)\n\n # Get amplitudes\n norms = []\n for key in [\"SCALE\", \"AMPL_2\", \"AMPL_3\"]:\n norm = hdu.data[key].reshape(shape).copy()\n norms.append(norm)\n\n return cls(\n energy_axis_true=energy_axis_true,\n offset_axis=offset_axis,\n sigmas=sigmas,\n norms=norms,\n meta=dict(hdu.header)\n )\n\n def to_hdulist(self):\n \"\"\"\n Convert psf table data to FITS hdu list.\n\n Returns\n -------\n hdu_list : `~astropy.io.fits.HDUList`\n PSF in HDU list format.\n \"\"\"\n # Set up data\n names = [\n \"SCALE\",\n \"SIGMA_1\",\n \"AMPL_2\",\n \"SIGMA_2\",\n \"AMPL_3\",\n \"SIGMA_3\",\n ]\n units = [\"\", \"deg\", \"\", \"deg\", \"\", \"deg\"]\n\n data = [\n self.norms[0],\n self.sigmas[0],\n self.norms[1],\n self.sigmas[1],\n self.norms[2],\n self.sigmas[2],\n ]\n\n axes = MapAxes([self.energy_axis_true, self.offset_axis])\n table = axes.to_table(format=\"gadf-dl3\")\n\n for name_, data_, unit_ in zip(names, data, units):\n table[name_] = [data_]\n table[name_].unit = unit_\n\n # Create hdu and hdu list\n hdu = fits.BinTableHDU(table)\n hdu.header.update(self.meta)\n return fits.HDUList([fits.PrimaryHDU(), hdu])\n\n def write(self, filename, *args, **kwargs):\n \"\"\"Write PSF to FITS file.\n\n Calls `~astropy.io.fits.HDUList.writeto`, forwarding all arguments.\n \"\"\"\n self.to_hdulist().writeto(str(make_path(filename)), *args, **kwargs)\n\n def psf_at_energy_and_theta(self, energy, theta):\n \"\"\"\n Get `~gammapy.modeling.models.MultiGauss2D` model for given energy and theta.\n\n No interpolation is used.\n\n Parameters\n ----------\n energy : `~astropy.units.u.Quantity`\n Energy at which a PSF is requested.\n theta : `~astropy.coordinates.Angle`\n Offset angle at which a PSF is requested.\n\n Returns\n -------\n psf : `~gammapy.utils.gauss.MultiGauss2D`\n Multigauss PSF object.\n \"\"\"\n energy = u.Quantity(energy)\n theta = u.Quantity(theta)\n\n sigmas, norms = [], []\n\n pars = {\"A_1\": 1}\n\n for interp_sigma in self._interp_sigmas:\n sigma = interp_sigma((theta, energy))\n sigmas.append(sigma)\n\n for name, interp_norm in zip([\"scale\", \"A_2\", \"A_3\"], self._interp_norms):\n pars[name] = interp_norm((theta, energy))\n\n for idx, sigma in enumerate(sigmas):\n a = pars[f\"A_{idx + 1}\"]\n norm = pars[\"scale\"] * 2 * a * sigma ** 2\n norms.append(norm)\n\n m = MultiGauss2D(sigmas, norms)\n m.normalize()\n return m\n\n def containment_radius(self, energy, theta, fraction=0.68):\n \"\"\"Compute containment for all energy and theta values\"\"\"\n # This is a false positive from pylint\n # See https://github.com/PyCQA/pylint/issues/2435\n energies = u.Quantity(\n energy\n ).flatten() # pylint:disable=assignment-from-no-return\n thetas = Angle(theta).flatten()\n radius = np.empty((theta.size, energy.size))\n\n for idx, energy in enumerate(energies):\n for jdx, theta in enumerate(thetas):\n try:\n psf = self.psf_at_energy_and_theta(energy, theta)\n radius[jdx, idx] = psf.containment_radius(fraction)\n except ValueError:\n log.debug(\n f\"Computing containment failed for energy = {energy:.2f}\"\n f\" and theta={theta:.2f}\"\n )\n log.debug(f\"Sigmas: {psf.sigmas} Norms: {psf.norms}\")\n radius[jdx, idx] = np.nan\n return Angle(radius, \"deg\")\n\n def plot_containment(self, fraction=0.68, ax=None, add_cbar=True, **kwargs):\n \"\"\"\n Plot containment image with energy and theta axes.\n\n Parameters\n ----------\n fraction : float\n Containment fraction between 0 and 1.\n add_cbar 
: bool\n Add a colorbar\n \"\"\"\n import matplotlib.pyplot as plt\n\n ax = plt.gca() if ax is None else ax\n\n energy = self.energy_axis_true.center\n offset = self.offset_axis.center\n\n # Set up and compute data\n containment = self.containment_radius(energy, offset, fraction)\n\n # plotting defaults\n kwargs.setdefault(\"cmap\", \"GnBu\")\n kwargs.setdefault(\"vmin\", np.nanmin(containment.value))\n kwargs.setdefault(\"vmax\", np.nanmax(containment.value))\n\n # Plotting\n x = energy.value\n y = offset.value\n caxes = ax.pcolormesh(x, y, containment.value, **kwargs)\n\n # Axes labels and ticks, colobar\n ax.semilogx()\n ax.set_ylabel(f\"Offset ({offset.unit})\")\n ax.set_xlabel(f\"Energy ({energy.unit})\")\n ax.set_xlim(x.min(), x.max())\n ax.set_ylim(y.min(), y.max())\n\n try:\n self._plot_safe_energy_range(ax)\n except KeyError:\n pass\n\n if add_cbar:\n label = f\"Containment radius R{100 * fraction:.0f} ({containment.unit})\"\n ax.figure.colorbar(caxes, ax=ax, label=label)\n\n return ax\n\n def _plot_safe_energy_range(self, ax):\n \"\"\"add safe energy range lines to the plot\"\"\"\n esafe = self.energy_thresh_lo\n omin = self.offset_axis.center.min()\n omax = self.offset_axis.center.max()\n ax.vlines(x=esafe.value, ymin=omin.value, ymax=omax.value)\n label = f\"Safe energy threshold: {esafe:3.2f}\"\n ax.text(x=1.1 * esafe.value, y=0.3, s=label, va=\"top\")\n\n def plot_containment_vs_energy(\n self, fractions=[0.68, 0.95], thetas=Angle([0, 1], \"deg\"), ax=None, **kwargs\n ):\n \"\"\"Plot containment fraction as a function of energy.\n \"\"\"\n import matplotlib.pyplot as plt\n\n ax = plt.gca() if ax is None else ax\n\n energy = self.energy_axis_true.center\n\n for theta in thetas:\n for fraction in fractions:\n radius = self.containment_radius(energy, theta, fraction).squeeze()\n kwargs.setdefault(\"label\", f\"{theta.deg} deg, {100 * fraction:.1f}%\")\n ax.plot(energy.value, radius.value, **kwargs)\n\n ax.semilogx()\n ax.legend(loc=\"best\")\n ax.set_xlabel(\"Energy (TeV)\")\n ax.set_ylabel(\"Containment radius (deg)\")\n\n def peek(self, figsize=(15, 5)):\n \"\"\"Quick-look summary plots.\"\"\"\n import matplotlib.pyplot as plt\n\n fig, axes = plt.subplots(nrows=1, ncols=3, figsize=figsize)\n\n self.plot_containment(fraction=0.68, ax=axes[0])\n self.plot_containment(fraction=0.95, ax=axes[1])\n self.plot_containment_vs_energy(ax=axes[2])\n\n # TODO: implement this plot\n # psf = self.psf_at_energy_and_theta(energy='1 TeV', theta='1 deg')\n # psf.plot_components(ax=axes[2])\n\n plt.tight_layout()\n\n def info(\n self,\n fractions=[0.68, 0.95],\n energies=u.Quantity([1.0, 10.0], \"TeV\"),\n thetas=u.Quantity([0.0], \"deg\"),\n ):\n \"\"\"\n Print PSF summary info.\n\n The containment radius for given fraction, energies and thetas is\n computed and printed on the command line.\n\n Parameters\n ----------\n fractions : list\n Containment fraction to compute containment radius for.\n energies : `~astropy.units.u.Quantity`\n Energies to compute containment radius for.\n thetas : `~astropy.units.u.Quantity`\n Thetas to compute containment radius for.\n\n Returns\n -------\n ss : string\n Formatted string containing the summary info.\n \"\"\"\n ss = \"\\nSummary PSF info\\n\"\n ss += \"----------------\\n\"\n ss += array_stats_str(self.offset_axis.center.to(\"deg\"), \"Theta\")\n ss += array_stats_str(self.energy_axis_true.edges[1:], \"Energy hi\")\n ss += array_stats_str(self.energy_axis_true.edges[:-1], \"Energy lo\")\n ss += f\"Safe energy threshold lo: 
{self.energy_thresh_lo:6.3f}\\n\"\n ss += f\"Safe energy threshold hi: {self.energy_thresh_hi:6.3f}\\n\"\n\n for fraction in fractions:\n containment = self.containment_radius(energies, thetas, fraction)\n for i, energy in enumerate(energies):\n for j, theta in enumerate(thetas):\n radius = containment[j, i]\n ss += (\n \"{:2.0f}% containment radius at theta = {} and \"\n \"E = {:4.1f}: {:5.8f}\\n\"\n \"\".format(100 * fraction, theta, energy, radius)\n )\n return ss\n\n def to_energy_dependent_table_psf(self, theta=None, rad=None, exposure=None):\n \"\"\"Convert triple Gaussian PSF ot table PSF.\n\n Parameters\n ----------\n theta : `~astropy.coordinates.Angle`\n Offset in the field of view. Default theta = 0 deg\n rad : `~astropy.coordinates.Angle`\n Offset from PSF center used for evaluating the PSF on a grid.\n Default offset = [0, 0.005, ..., 1.495, 1.5] deg.\n exposure : `~astropy.units.u.Quantity`\n Energy dependent exposure. Should be in units equivalent to 'cm^2 s'.\n Default exposure = 1.\n\n Returns\n -------\n tabe_psf : `~gammapy.irf.EnergyDependentTablePSF`\n Instance of `EnergyDependentTablePSF`.\n \"\"\"\n # Convert energies to log center\n energies = self.energy_axis_true.center\n # Defaults and input handling\n if theta is None:\n theta = Angle(0, \"deg\")\n else:\n theta = Angle(theta)\n\n if rad is None:\n rad = Angle(np.arange(0, 1.5, 0.005), \"deg\")\n\n rad_axis = MapAxis.from_nodes(rad, name=\"rad\")\n\n psf_value = u.Quantity(np.zeros((energies.size, rad.size)), \"deg^-2\")\n\n for idx, energy in enumerate(energies):\n psf_gauss = self.psf_at_energy_and_theta(energy, theta)\n psf_value[idx] = u.Quantity(psf_gauss(rad), \"deg^-2\")\n\n return EnergyDependentTablePSF(\n energy_axis_true=self.energy_axis_true,\n rad_axis=rad_axis,\n exposure=exposure,\n data=psf_value,\n )\n\n def to_psf3d(self, rad=None):\n \"\"\"Create a PSF3D from an analytical PSF.\n\n Parameters\n ----------\n rad : `~astropy.units.u.Quantity` or `~astropy.coordinates.Angle`\n the array of position errors (rad) on which the PSF3D will be defined\n\n Returns\n -------\n psf3d : `~gammapy.irf.PSF3D`\n the PSF3D. It will be defined on the same energy and offset values than the input psf.\n \"\"\"\n offsets = self.offset_axis.center\n energy = self.energy_axis_true.center\n\n if rad is None:\n rad = np.linspace(0, 0.66, 67) * u.deg\n\n rad_axis = MapAxis.from_edges(rad, name=\"rad\")\n\n shape = (self.energy_axis_true.nbin, self.offset_axis.nbin, rad_axis.nbin)\n psf_value = np.zeros(shape) * u.Unit(\"sr-1\")\n\n for idx, offset in enumerate(offsets):\n table_psf = self.to_energy_dependent_table_psf(offset)\n psf_value[:, idx, :] = table_psf.evaluate(energy, rad_axis.center)\n\n return PSF3D(\n energy_axis_true=self.energy_axis_true,\n rad_axis=rad_axis,\n offset_axis=self.offset_axis,\n data=psf_value,\n meta=self.meta.copy()\n )\n" ]
[ [ "numpy.logical_not", "numpy.ones" ], [ "numpy.nanmax", "matplotlib.pyplot.gca", "matplotlib.pyplot.tight_layout", "numpy.linspace", "numpy.arange", "numpy.nanmin", "matplotlib.pyplot.subplots", "numpy.zeros", "numpy.empty" ] ]
QUANHAO-NCU/pytorch-visual-block
[ "f024541add5581026343aaaaeaf27d8415f3d4fe", "f024541add5581026343aaaaeaf27d8415f3d4fe" ]
[ "Working/oc-cnn-master-Q/src/main/getAUC.py", "Working/myMethods/Train.py" ]
[ "import numpy as np\nimport h5py\n\nfrom sklearn import metrics\nfrom sklearn.metrics import accuracy_score\nfrom sklearn import svm\n\n# path variables\nscore_path = '../../temp_files/scores.mat'\nlabel_path = '../../temp_files/labels.mat'\n\nwith h5py.File(score_path, 'r') as f:\n test_features = f['scores'][()]\nwith h5py.File(label_path, 'r') as f:\n test_label = f['test_label'][()]\n\nfpr, tpr, thresholds = metrics.roc_curve(np.transpose(test_label), np.transpose(test_features))\nprint(metrics.auc(fpr, tpr))\n", "import torch\nimport visdom\nfrom Modules import *\nfrom Dataset import *\nfrom torch import nn, optim\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\n\ndef train_test(cfg):\n \"\"\"\n ALOCC 复现\n \n \"\"\"\"\"\n print(\"Prepare...\")\n device = torch.device(cfg['device'])\n weight_decay = cfg['weight_decay']\n encoder = cfg['encoder'].to(device)\n decoder = cfg['decoder'].to(device)\n discriminator = cfg['discriminator'].to(device)\n lr = cfg['init_lr']\n # start_calculate = cfg['calculate_c']\n opt_encoder = optim.Adam([{'params': encoder.parameters(), 'initial_lr': lr}], lr=lr, weight_decay=weight_decay)\n opt_decoder = optim.Adam(decoder.parameters(), lr=lr, weight_decay=weight_decay)\n opt_discriminator = optim.Adam([{'params': discriminator.parameters(), 'initial_lr': lr}], lr=lr,\n weight_decay=weight_decay)\n scheduler_encoder = optim.lr_scheduler.MultiStepLR(opt_encoder, milestones=[50, 60, 70, 80], gamma=0.8)\n scheduler_decoder = optim.lr_scheduler.MultiStepLR(opt_decoder, milestones=[50, 60, 70, 80], gamma=0.8)\n scheduler_discriminator = optim.lr_scheduler.MultiStepLR(opt_discriminator, milestones=[50, 60, 70, 80], gamma=0.8)\n viz = visdom.Visdom()\n epochs = cfg['epochs']\n batch_size = cfg['batch_size']\n positive = cfg['positive']\n dataset.setMode('train', positive_class=positive)\n datasetLoader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=4, drop_last=True)\n\n # # 模型参数:超球中心点c\n # c = hyperSphereCenter = None\n # 统计训练信息\n globalTrainStep = 0\n total_loss_dis_true = 0\n total_loss_dis_fake = 0\n total_loss_dis = 0\n total_loss_gen_fake = 0\n total_loss_gen_mse = 0\n # total_loss_gen_dist = 0\n total_loss_gen = 0\n total_ssim = 0\n best_precision = 0\n best_recall = 0\n best_accuracy = 0\n best_f1 = 0\n best_precision_epoch = 0\n best_recall_epoch = 0\n best_accuracy_epoch = 0\n best_f1_epoch = 0\n\n for epoch in range(epochs):\n\n # # 第50轮时,假定编码器能够正确提取到正样本数据特征\n # # 计算超球中心点c\n # if epoch == start_calculate:\n # print('Calculate HyperSphere Center...')\n # c = torch.zeros(cfg['feature_dim'], device=device)\n # n_samples = 0\n # eps = 0.1\n # with torch.no_grad():\n # dataset.setMode('train', positive_class=positive)\n # encoder.eval()\n # decoder.eval()\n # for sourceImage, label in datasetLoader:\n # sourceImage = sourceImage.to(device)\n # feature_vectors = encoder(sourceImage)\n # n_samples += feature_vectors.shape[0]\n # c += torch.sum(feature_vectors, dim=0)\n # c /= n_samples\n # # 确保超球中心不要太接近于特征空间的原点\n # c[(abs(c) < eps) & (c < 0)] = -eps\n # c[(abs(c) < eps) & (c > 0)] = eps\n # 训练\n print(f'Train{epoch}...')\n dataset.setMode('train', positive_class=positive)\n encoder.train()\n decoder.train()\n discriminator.train()\n for sourceImage, label in datasetLoader:\n viz.images(sourceImage, nrow=16, win='Train-sourceImage', opts=dict(title='Train-sourceImage'))\n sourceImage = Variable(sourceImage.to(device))\n # 原图片添加噪声\n _, noisyImage = addNoise(sourceImage, 0.1, device)\n # 用编码器和解码器重建图像\n 
feature_vectors = encoder(noisyImage)\n reconstruction = decoder(feature_vectors)\n # 判别器分别对原图像和重建的图像进行打分\n # 对原图打分应趋近于1,对重建图像打分应趋近于0,用交叉熵分别计算损失,整个判别器的损失为原图损失和重建图像损失的和\n scores_true = discriminator(sourceImage)\n ones = torch.ones(len(scores_true), dtype=torch.long, device=device)\n loss_true = F.cross_entropy(scores_true, ones)\n scores_fake = discriminator(reconstruction)\n zeros = torch.zeros(len(scores_fake), dtype=torch.long, device=device)\n loss_fake = F.cross_entropy(scores_fake, zeros)\n\n loss_dis = torch.add(loss_true, loss_fake)\n\n opt_decoder.zero_grad()\n opt_encoder.zero_grad()\n opt_discriminator.zero_grad()\n loss_dis.backward(retain_graph=True)\n opt_discriminator.step()\n scheduler_discriminator.step()\n\n # 训练生成器\n # 生成器生成的图像应该尽量和原图相似\n # 用均方误差MSE评价重建图像与原图的误差,这个参数用于反向传播\n # 用更新过的判别器评判重建图像分数,令其趋于1-->生成器的工作是要欺骗判别器,用交叉熵计算这个损失\n # 用结构相似性SSIM评价重建图像与原图的相似性,这个参数不参与反向传播\n # 生成器的损失为欺骗损失和结构损失之和\n scores_fake = discriminator(reconstruction)\n ones2 = torch.ones(len(scores_fake), dtype=torch.long, device=device)\n loss_gen_mse = F.cross_entropy(scores_fake, ones2)\n\n loss_gen_l2 = F.mse_loss(sourceImage, reconstruction)\n loss_gen = torch.add(loss_gen_mse, torch.mul(0.2, loss_gen_l2))\n\n # # 第50轮后,计算出超球中心,约束,使提取到的特征向量到超球中心距离尽量小\n # if epoch >= start_calculate:\n # loss_dist = torch.sum((feature_vectors - c) ** 2, dim=1)\n # loss_dist = torch.mean(loss_dist)\n # loss_dist = torch.mul(loss_dist, 0.2)\n # loss_gen = torch.add(loss_gen, loss_dist)\n\n loss_ssim = SSIM(reconstruction, sourceImage)\n\n opt_decoder.zero_grad()\n opt_encoder.zero_grad()\n opt_discriminator.zero_grad()\n loss_gen.backward()\n opt_decoder.step()\n opt_encoder.step()\n scheduler_encoder.step()\n scheduler_decoder.step()\n\n total_loss_dis_true += loss_true.item()\n total_loss_dis_fake += loss_fake.item()\n total_loss_dis += loss_dis.item()\n total_loss_gen_fake += loss_gen_l2.item()\n total_loss_gen_mse += loss_gen_mse.item()\n # if epoch >= start_calculate:\n # total_loss_gen_dist += loss_dist.item()\n total_loss_gen += loss_gen.item()\n total_ssim += loss_ssim\n globalTrainStep += 1\n\n viz.line([total_loss_dis_true / globalTrainStep], [globalTrainStep], win='Train-Discriminator True Loss',\n update='append', opts=dict(title='Train-Discriminator True Loss'))\n viz.line([total_loss_dis_fake / globalTrainStep], [globalTrainStep], win='Train-Discriminator Fake Loss',\n update='append', opts=dict(title='Train-Discriminator Fake Loss'))\n viz.line([total_loss_dis / globalTrainStep], [globalTrainStep], win='Train-Discriminator Loss',\n update='append', opts=dict(title='Train-Discriminator Loss'))\n viz.line([total_loss_gen_fake / globalTrainStep], [globalTrainStep], win='Train-Generate Fake Loss',\n update='append', opts=dict(title='Train-Generate Fake Loss'))\n viz.line([total_loss_gen_mse / globalTrainStep], [globalTrainStep], win='Train-Generate MSE Loss',\n update='append', opts=dict(title='Train-Generate MSE Loss'))\n viz.line([total_loss_gen / globalTrainStep], [globalTrainStep], win='Train-Generate Loss',\n update='append', opts=dict(title='Train-Generate Loss'))\n viz.line([total_ssim / globalTrainStep], [globalTrainStep], win='Train-SSIM', update='append',\n opts=dict(title='Train-SSIM'))\n # if epoch >= start_calculate:\n # viz.line([total_loss_gen_dist / globalTrainStep], [globalTrainStep], win='Train-Generate Dist Loss',\n # update='append', opts=dict(title='Train-Generate Dist Loss'))\n viz.images(reconstruction, nrow=16, win='Reconstruction',\n opts=dict(title='Reconstruction'))\n log.write(\n 
f'Epoch-{epoch}:[[DiscriminatorTrueLoss,{total_loss_dis_true / globalTrainStep}],'\n f'[DiscriminatorFakeLoss,{total_loss_dis_fake / globalTrainStep}],'\n f'[DiscriminatorLoss,{total_loss_dis_fake / globalTrainStep}],'\n f'[Generate Fake Loss,{total_loss_gen_fake / globalTrainStep}],'\n f'[Generate MSE Loss,{total_loss_gen_mse / globalTrainStep}],'\n f'[Generate Loss,{total_loss_gen / globalTrainStep}],'\n f'[SSIM,{total_ssim / globalTrainStep}]]\\n')\n log.flush()\n # 测试验证\n with torch.no_grad():\n print('Test...')\n dataset.setMode('test', positive_class=positive, np_proportion=3)\n encoder.eval()\n decoder.eval()\n discriminator.eval()\n total_recall = 0\n total_precision = 0\n total_accuracy = 0\n total_f1 = 0\n validate_step = 0\n\n for sourceImage, labels in datasetLoader:\n viz.images(sourceImage, nrow=16, win='Validate-sourceImage', opts=dict(title='Validate-sourceImage'))\n sourceImage = Variable(sourceImage.to(device))\n labels = Variable(labels).float().to(device)\n # 顺便验证一下生成器的重建效果\n feature_vectors = encoder(sourceImage)\n reconstruction = decoder(feature_vectors)\n # 判别器直接对测试用图片打分\n scores = discriminator(sourceImage)\n # 判别器应该将正类的打分更高\n predict = torch.argmax(scores, dim=1)\n predict = predict.cpu().detach().numpy()\n labels = labels.cpu().detach().numpy()\n # 计算准确率Accuracy,查准率Precision,查全率Recall,F1 参数\n accuracy = metrics.accuracy_score(labels, predict)\n precision, recall, f, _ = metrics.precision_recall_fscore_support(labels, predict, zero_division=0)\n validate_step += 1\n total_recall += np.max(recall)\n total_precision += np.max(precision)\n total_accuracy += accuracy\n total_f1 += np.max(f)\n viz.images(reconstruction, nrow=16, win='Validate-reconstruction',\n opts=dict(title='Validate-reconstruction'))\n\n recall = total_recall / validate_step\n precision = total_precision / validate_step\n accuracy = total_accuracy / validate_step\n f1 = total_f1 / validate_step\n viz.line([recall], [epoch + 1], win='Test-Recall', update='append',\n opts=dict(title='Train-Recall'))\n viz.line([precision], [epoch + 1], win='Test-Precision', update='append',\n opts=dict(title='Train-Precision'))\n viz.line([accuracy], [epoch + 1], win='Test-Accuracy', update='append',\n opts=dict(title='Train-Accuracy'))\n viz.line([f1], [epoch + 1], win='Test-F1 Score', update='append',\n opts=dict(title='Train-F1 Score'))\n print('F1 Score:', f1)\n print('Recall:', recall)\n print('Precision:', precision)\n print('Accuracy:', accuracy)\n log.write(\n f'Epoch-{epoch}:[[Accuracy,{accuracy}],[Precision,{precision}],[Recall,{recall}],[F1,{f1}]]\\n')\n log.flush()\n if recall > best_recall:\n best_recall = recall\n best_recall_epoch = epoch\n torch.save(encoder.state_dict(), 'best_recall_encoder.pth')\n torch.save(decoder.state_dict(), 'best_recall_decoder.pth')\n torch.save(discriminator.state_dict(), 'best_recall_discriminator.pth')\n if precision > best_precision:\n best_precision = precision\n best_precision_epoch = epoch\n torch.save(encoder.state_dict(), 'best_precision_encoder.pth')\n torch.save(decoder.state_dict(), 'best_precision_decoder.pth')\n torch.save(discriminator.state_dict(), 'best_precision_discriminator.pth')\n if accuracy > best_accuracy:\n best_accuracy = accuracy\n best_accuracy_epoch = epoch\n torch.save(encoder.state_dict(), 'best_accuracy_encoder.pth')\n torch.save(decoder.state_dict(), 'best_accuracy_decoder.pth')\n torch.save(discriminator.state_dict(), 'best_accuracy_discriminator.pth')\n if f1 > best_f1:\n best_f1 = f1\n best_f1_epoch = epoch\n 
torch.save(encoder.state_dict(), 'best_f1_encoder.pth')\n torch.save(decoder.state_dict(), 'best_f1_decoder.pth')\n torch.save(discriminator.state_dict(), 'best_f1_discriminator.pth')\n # 记录最好情况\n log.write(f'best_recall_epoch:{best_recall_epoch},best_recall:{best_recall},'\n f'best_precision_epoch:{best_precision_epoch},best_precision:{best_precision},'\n f'best_accuracy_epoch:{best_accuracy_epoch},best_accuracy:{best_accuracy},'\n f'best_f1_epoch:{best_f1_epoch},best_f1:{best_f1},')\n log.flush()\n print(f'best_recall_epoch:{best_recall_epoch},best_recall:{best_recall},'\n f'best_precision_epoch:{best_precision_epoch},best_precision:{best_precision},'\n f'best_accuracy_epoch:{best_accuracy_epoch},best_accuracy:{best_accuracy},'\n f'best_f1_epoch:{best_f1_epoch},best_f1:{best_f1},')\n\n\nif __name__ == '__main__':\n dataset = myDataset('MNIST', '.')\n feature_dim = 128\n encoder = MNISTEncoder(feature_dim)\n decoder = MNISTDecoder(feature_dim)\n discriminator = Discriminator()\n log = open('MNISTlog.txt', mode='w+')\n\n cfg = {\n 'dataset': dataset,\n 'device': 'cuda:0',\n 'encoder': encoder,\n 'decoder': decoder,\n 'discriminator': discriminator,\n 'init_lr': 1e-3,\n 'weight_decay': 5e-5,\n 'epochs': 100,\n 'batch_size': 64,\n 'positive': 0,\n 'log': log,\n 'feature_dim': 128,\n }\n train_test(cfg)\n log.close()\n" ]
[ [ "sklearn.metrics.auc", "numpy.transpose" ], [ "torch.optim.lr_scheduler.MultiStepLR", "torch.add", "torch.nn.functional.cross_entropy", "torch.autograd.Variable", "torch.nn.functional.mse_loss", "torch.mul", "torch.no_grad", "torch.device", "torch.argmax" ] ]
SaitejaUtpala/geomstats
[ "5d4e16b3f30a86aab4725142f2263d8f10a30508" ]
[ "geomstats/geometry/hypersphere.py" ]
[ "\"\"\"The n-dimensional hypersphere.\n\nThe n-dimensional hypersphere embedded in (n+1)-dimensional\nEuclidean space.\n\"\"\"\n\nimport logging\nimport math\nfrom itertools import product\n\nfrom scipy.stats import beta\n\nimport geomstats.algebra_utils as utils\nimport geomstats.backend as gs\nfrom geomstats.geometry.base import EmbeddedManifold\nfrom geomstats.geometry.euclidean import Euclidean, EuclideanMetric\nfrom geomstats.geometry.riemannian_metric import RiemannianMetric\n\n\nclass _Hypersphere(EmbeddedManifold):\n \"\"\"Private class for the n-dimensional hypersphere.\n\n Class for the n-dimensional hypersphere embedded in the\n (n+1)-dimensional Euclidean space.\n\n By default, points are parameterized by their extrinsic\n (n+1)-coordinates.\n\n Parameters\n ----------\n dim : int\n Dimension of the hypersphere.\n \"\"\"\n\n def __init__(self, dim):\n super(_Hypersphere, self).__init__(\n dim=dim, embedding_space=Euclidean(dim + 1),\n submersion=lambda x: gs.sum(x ** 2, axis=-1), value=1.,\n tangent_submersion=lambda v, x: 2 * gs.sum(x * v, axis=-1))\n\n def projection(self, point):\n \"\"\"Project a point on the hypersphere.\n\n Parameters\n ----------\n point : array-like, shape=[..., dim + 1]\n Point in embedding Euclidean space.\n\n Returns\n -------\n projected_point : array-like, shape=[..., dim + 1]\n Point projected on the hypersphere.\n \"\"\"\n norm = gs.linalg.norm(point, axis=-1)\n projected_point = gs.einsum('...,...i->...i', 1. / norm, point)\n\n return projected_point\n\n def to_tangent(self, vector, base_point):\n \"\"\"Project a vector to the tangent space.\n\n Project a vector in Euclidean space\n on the tangent space of the hypersphere at a base point.\n\n Parameters\n ----------\n vector : array-like, shape=[..., dim + 1]\n Vector in Euclidean space.\n base_point : array-like, shape=[..., dim + 1]\n Point on the hypersphere defining the tangent space,\n where the vector will be projected.\n\n Returns\n -------\n tangent_vec : array-like, shape=[..., dim + 1]\n Tangent vector in the tangent space of the hypersphere\n at the base point.\n \"\"\"\n sq_norm = gs.sum(base_point ** 2, axis=-1)\n inner_prod = self.embedding_metric.inner_product(base_point, vector)\n coef = inner_prod / sq_norm\n tangent_vec = vector - gs.einsum('...,...j->...j', coef, base_point)\n\n return tangent_vec\n\n def spherical_to_extrinsic(self, point_spherical):\n \"\"\"Convert point from spherical to extrinsic coordinates.\n\n Convert from the spherical coordinates in the hypersphere\n to the extrinsic coordinates in Euclidean space.\n Only implemented in dimension 2.\n\n Parameters\n ----------\n point_spherical : array-like, shape=[..., dim]\n Point on the sphere, in spherical coordinates.\n\n Returns\n -------\n point_extrinsic : array_like, shape=[..., dim + 1]\n Point on the sphere, in extrinsic coordinates in Euclidean space.\n \"\"\"\n if self.dim != 2:\n raise NotImplementedError(\n 'The conversion from spherical coordinates'\n ' to extrinsic coordinates is implemented'\n ' only in dimension 2.')\n\n theta = point_spherical[..., 0]\n phi = point_spherical[..., 1]\n\n point_extrinsic = gs.stack(\n [gs.sin(theta) * gs.cos(phi),\n gs.sin(theta) * gs.sin(phi),\n gs.cos(theta)],\n axis=-1)\n\n if not gs.all(self.belongs(point_extrinsic)):\n raise ValueError('Points do not belong to the manifold.')\n\n return point_extrinsic\n\n def tangent_spherical_to_extrinsic(self, tangent_vec_spherical,\n base_point_spherical):\n \"\"\"Convert tangent vector from spherical to extrinsic 
coordinates.\n\n Convert from the spherical coordinates in the hypersphere\n to the extrinsic coordinates in Euclidean space for a tangent\n vector. Only implemented in dimension 2.\n\n Parameters\n ----------\n tangent_vec_spherical : array-like, shape=[..., dim]\n Tangent vector to the sphere, in spherical coordinates.\n base_point_spherical : array-like, shape=[..., dim]\n Point on the sphere, in spherical coordinates.\n\n Returns\n -------\n tangent_vec_extrinsic : array-like, shape=[..., dim + 1]\n Tangent vector to the sphere, at base point,\n in extrinsic coordinates in Euclidean space.\n \"\"\"\n if self.dim != 2:\n raise NotImplementedError(\n 'The conversion from spherical coordinates'\n ' to extrinsic coordinates is implemented'\n ' only in dimension 2.')\n\n axes = (2, 0, 1) if base_point_spherical.ndim == 2 else (0, 1)\n theta = base_point_spherical[..., 0]\n phi = base_point_spherical[..., 1]\n\n zeros = gs.zeros_like(theta)\n\n jac = gs.array([\n [gs.cos(theta) * gs.cos(phi), - gs.sin(theta) * gs.sin(phi)],\n [gs.cos(theta) * gs.sin(phi), gs.sin(theta) * gs.cos(phi)],\n [- gs.sin(theta), zeros]])\n jac = gs.transpose(jac, axes)\n\n tangent_vec_extrinsic = gs.einsum(\n '...ij,...j->...i', jac, tangent_vec_spherical)\n\n return tangent_vec_extrinsic\n\n def intrinsic_to_extrinsic_coords(self, point_intrinsic):\n \"\"\"Convert point from intrinsic to extrinsic coordinates.\n\n Convert from the intrinsic coordinates in the hypersphere,\n to the extrinsic coordinates in Euclidean space.\n\n Parameters\n ----------\n point_intrinsic : array-like, shape=[..., dim]\n Point on the hypersphere, in intrinsic coordinates.\n\n Returns\n -------\n point_extrinsic : array-like, shape=[..., dim + 1]\n Point on the hypersphere, in extrinsic coordinates in\n Euclidean space.\n \"\"\"\n sq_coord_0 = 1. 
- gs.sum(point_intrinsic ** 2, axis=-1)\n if gs.any(gs.less(sq_coord_0, 0.)):\n raise ValueError('Square-root of a negative number.')\n coord_0 = gs.sqrt(sq_coord_0)\n\n point_extrinsic = gs.concatenate([\n coord_0[..., None], point_intrinsic], axis=-1)\n\n return point_extrinsic\n\n def extrinsic_to_intrinsic_coords(self, point_extrinsic):\n \"\"\"Convert point from extrinsic to intrinsic coordinates.\n\n Convert from the extrinsic coordinates in Euclidean space,\n to some intrinsic coordinates in the hypersphere.\n\n Parameters\n ----------\n point_extrinsic : array-like, shape=[..., dim + 1]\n Point on the hypersphere, in extrinsic coordinates in\n Euclidean space.\n\n Returns\n -------\n point_intrinsic : array-like, shape=[..., dim]\n Point on the hypersphere, in intrinsic coordinates.\n \"\"\"\n point_intrinsic = point_extrinsic[..., 1:]\n\n return point_intrinsic\n\n def _replace_values(self, samples, new_samples, indcs):\n replaced_indices = [\n i for i, is_replaced in enumerate(indcs) if is_replaced]\n value_indices = list(product(replaced_indices, range(self.dim + 1)))\n return gs.assignment(samples, gs.flatten(new_samples), value_indices)\n\n def random_point(self, n_samples=1, bound=1.):\n \"\"\"Sample in the hypersphere from the uniform distribution.\n\n Parameters\n ----------\n n_samples : int\n Number of samples.\n Optional, default: 1.\n bound : unused\n\n Returns\n -------\n samples : array-like, shape=[..., dim + 1]\n Points sampled on the hypersphere.\n \"\"\"\n return self.random_uniform(n_samples)\n\n def random_uniform(self, n_samples=1):\n \"\"\"Sample in the hypersphere from the uniform distribution.\n\n Parameters\n ----------\n n_samples : int\n Number of samples.\n Optional, default: 1.\n\n Returns\n -------\n samples : array-like, shape=[..., dim + 1]\n Points sampled on the hypersphere.\n \"\"\"\n size = (n_samples, self.dim + 1)\n\n samples = gs.random.normal(size=size)\n while True:\n norms = gs.linalg.norm(samples, axis=1)\n indcs = gs.isclose(norms, 0.0, atol=gs.atol)\n num_bad_samples = gs.sum(indcs)\n if num_bad_samples == 0:\n break\n new_samples = gs.random.normal(\n size=(num_bad_samples, self.dim + 1))\n samples = self._replace_values(samples, new_samples, indcs)\n\n samples = gs.einsum('..., ...i->...i', 1 / norms, samples)\n if n_samples == 1:\n samples = gs.squeeze(samples, axis=0)\n return samples\n\n def random_von_mises_fisher(\n self, mu=None, kappa=10, n_samples=1, max_iter=100):\n \"\"\"Sample with the von Mises-Fisher distribution.\n\n This distribution corresponds to the maximum entropy distribution\n given a mean. In dimension 2, a closed form expression is available.\n In larger dimension, rejection sampling is used according to [Wood94]_\n\n References\n ----------\n https://en.wikipedia.org/wiki/Von_Mises-Fisher_distribution\n\n .. [Wood94] Wood, Andrew T. A. “Simulation of the von Mises Fisher\n Distribution.” Communications in Statistics - Simulation\n and Computation, June 27, 2007.\n https://doi.org/10.1080/03610919408813161.\n\n Parameters\n ----------\n mu : array-like, shape=[dim]\n Mean parameter of the distribution.\n kappa : float\n Kappa parameter of the von Mises distribution.\n Optional, default: 10.\n n_samples : int\n Number of samples.\n Optional, default: 1.\n max_iter : int\n Maximum number of trials in the rejection algorithm. 
In case it\n is reached, the current number of samples < n_samples is returned.\n Optional, default: 100.\n\n Returns\n -------\n point : array-like, shape=[n_samples, dim + 1]\n Points sampled on the sphere in extrinsic coordinates\n in Euclidean space of dimension dim + 1.\n \"\"\"\n dim = self.dim\n\n if dim == 2:\n angle = 2. * gs.pi * gs.random.rand(n_samples)\n angle = gs.to_ndarray(angle, to_ndim=2, axis=1)\n unit_vector = gs.hstack((gs.cos(angle), gs.sin(angle)))\n scalar = gs.random.rand(n_samples)\n\n coord_x = 1. + 1. / kappa * gs.log(\n scalar + (1. - scalar) * gs.exp(gs.array(-2. * kappa)))\n coord_x = gs.to_ndarray(coord_x, to_ndim=2, axis=1)\n coord_yz = gs.sqrt(1. - coord_x ** 2) * unit_vector\n sample = gs.hstack((coord_x, coord_yz))\n\n else:\n # rejection sampling in the general case\n sqrt = gs.sqrt(4 * kappa ** 2. + dim ** 2)\n envelop_param = (-2 * kappa + sqrt) / dim\n node = (1. - envelop_param) / (1. + envelop_param)\n correction = kappa * node + dim * gs.log(1. - node ** 2)\n\n n_accepted, n_iter = 0, 0\n result = []\n while (n_accepted < n_samples) and (n_iter < max_iter):\n sym_beta = beta.rvs(\n dim / 2, dim / 2, size=n_samples - n_accepted)\n sym_beta = gs.cast(sym_beta, node.dtype)\n coord_x = (1 - (1 + envelop_param) * sym_beta) / (\n 1 - (1 - envelop_param) * sym_beta)\n accept_tol = gs.random.rand(n_samples - n_accepted)\n criterion = (\n kappa * coord_x\n + dim * gs.log(1 - node * coord_x)\n - correction) > gs.log(accept_tol)\n result.append(coord_x[criterion])\n n_accepted += gs.sum(criterion)\n n_iter += 1\n if n_accepted < n_samples:\n logging.warning(\n 'Maximum number of iteration reached in rejection '\n 'sampling before n_samples were accepted.')\n coord_x = gs.concatenate(result)\n coord_rest = _Hypersphere(dim - 1).random_uniform(n_accepted)\n coord_rest = gs.einsum(\n '...,...i->...i', gs.sqrt(1 - coord_x ** 2), coord_rest)\n sample = gs.concatenate([coord_x[..., None], coord_rest], axis=1)\n\n if mu is not None:\n sample = utils.rotate_points(sample, mu)\n\n return sample if (n_samples > 1) else sample[0]\n\n def random_riemannian_normal(\n self, mean=None, precision=None, n_samples=1, max_iter=100):\n r\"\"\"Sample from the Riemannian normal distribution.\n\n The Riemannian normal distribution, or spherical normal in this case,\n is defined by the probability density function (with respect to the\n Riemannian volume measure) proportional to:\n .. math::\n \\exp \\Big \\left(- \\frac{\\lambda}{2} \\mathtm{arccos}^2(x^T\\mu)\n \\Big \\right)\n\n where :math: `\\mu` is the mean and :math: `\\lambda` is the isotropic\n precision. For the anisotropic case,\n :math: `\\log_{\\mu}(x)^T \\Lambda \\log_{\\mu}(x)` is used instead.\n\n A rejection algorithm is used to sample from this distribution [Hau18]_\n\n Parameters\n ----------\n mean : array-like, shape=[dim]\n Mean parameter of the distribution.\n Optional, default: (0,...,0,1) (the north pole).\n precision : float or array-like, shape=[dim, dim]\n Inverse of the covariance parameter of the normal distribution.\n If a float is passed, the covariance matrix is precision times\n identity.\n Optional, default: identity.\n n_samples : int\n Number of samples.\n Optional, default: 1.\n max_iter : int\n Maximum number of trials in the rejection algorithm. In case it\n is reached, the current number of samples < n_samples is returned.\n Optional, default: 100.\n\n Returns\n -------\n point : array-like, shape=[n_samples, dim + 1]\n Points sampled on the sphere.\n\n References\n ----------\n .. 
[Hau18] Hauberg, Soren. “Directional Statistics with the\n Spherical Normal Distribution.”\n In 2018 21st International Conference on Information\n Fusion (FUSION), 704–11, 2018.\n https://doi.org/10.23919/ICIF.2018.8455242.\n \"\"\"\n dim = self.dim\n n_accepted, n_iter = 0, 0\n result = []\n if precision is None:\n precision_ = gs.eye(self.dim)\n elif isinstance(precision, (float, int)):\n precision_ = precision * gs.eye(self.dim)\n else:\n precision_ = precision\n precision_2 = precision_ + (dim - 1) / gs.pi * gs.eye(dim)\n tangent_cov = gs.linalg.inv(precision_2)\n\n def threshold(random_v):\n \"\"\"Compute the acceptance threshold.\"\"\"\n squared_norm = gs.sum(random_v ** 2, axis=-1)\n sinc = utils.taylor_exp_even_func(\n squared_norm, utils.sinc_close_0) ** (dim - 1)\n threshold_val = sinc * gs.exp(squared_norm * (dim - 1) / 2 / gs.pi)\n return threshold_val, squared_norm ** .5\n\n while (n_accepted < n_samples) and (n_iter < max_iter):\n envelope = gs.random.multivariate_normal(\n gs.zeros(dim), tangent_cov, size=(n_samples - n_accepted,))\n thresh, norm = threshold(envelope)\n proposal = gs.random.rand(n_samples - n_accepted)\n criterion = gs.logical_and(norm <= gs.pi, proposal <= thresh)\n result.append(envelope[criterion])\n n_accepted += gs.sum(criterion)\n n_iter += 1\n if n_accepted < n_samples:\n logging.warning(\n 'Maximum number of iteration reached in rejection '\n 'sampling before n_samples were accepted.')\n tangent_sample_intr = gs.concatenate(result)\n tangent_sample = gs.concatenate(\n [tangent_sample_intr, gs.zeros(n_accepted)[:, None]], axis=1)\n\n metric = HypersphereMetric(dim)\n north_pole = gs.array([0.] * dim + [1.])\n if mean is not None:\n mean_from_north = metric.log(mean, north_pole)\n tangent_sample_at_pt = metric.parallel_transport(\n tangent_sample, mean_from_north, north_pole)\n else:\n tangent_sample_at_pt = tangent_sample\n mean = north_pole\n sample = metric.exp(tangent_sample_at_pt, mean)\n return sample[0] if (n_samples == 1) else sample\n\n\nclass HypersphereMetric(RiemannianMetric):\n \"\"\"Class for the Hypersphere Metric.\n\n Parameters\n ----------\n dim : int\n Dimension of the hypersphere.\n \"\"\"\n\n def __init__(self, dim):\n super(HypersphereMetric, self).__init__(\n dim=dim, signature=(dim, 0))\n self.embedding_metric = EuclideanMetric(dim + 1)\n self._space = _Hypersphere(dim=dim)\n\n def metric_matrix(self, base_point=None):\n \"\"\"Metric matrix at the tangent space at a base point.\n\n Parameters\n ----------\n base_point : array-like, shape=[..., dim + 1]\n Base point.\n Optional, default: None.\n\n Returns\n -------\n mat : array-like, shape=[..., dim + 1, dim + 1]\n Inner-product matrix.\n \"\"\"\n return gs.eye(self.dim + 1)\n\n def inner_product(self, tangent_vec_a, tangent_vec_b, base_point=None):\n \"\"\"Compute the inner-product of two tangent vectors at a base point.\n\n Parameters\n ----------\n tangent_vec_a : array-like, shape=[..., dim + 1]\n First tangent vector at base point.\n tangent_vec_b : array-like, shape=[..., dim + 1]\n Second tangent vector at base point.\n base_point : array-like, shape=[..., dim + 1], optional\n Point on the hypersphere.\n\n Returns\n -------\n inner_prod : array-like, shape=[...,]\n Inner-product of the two tangent vectors.\n \"\"\"\n inner_prod = self.embedding_metric.inner_product(\n tangent_vec_a, tangent_vec_b, base_point)\n\n return inner_prod\n\n def squared_norm(self, vector, base_point=None):\n \"\"\"Compute the squared norm of a vector.\n\n Squared norm of a vector associated 
with the inner-product\n at the tangent space at a base point.\n\n Parameters\n ----------\n vector : array-like, shape=[..., dim + 1]\n Vector on the tangent space of the hypersphere at base point.\n base_point : array-like, shape=[..., dim + 1], optional\n Point on the hypersphere.\n\n Returns\n -------\n sq_norm : array-like, shape=[..., 1]\n Squared norm of the vector.\n \"\"\"\n sq_norm = self.embedding_metric.squared_norm(vector)\n return sq_norm\n\n def exp(self, tangent_vec, base_point):\n \"\"\"Compute the Riemannian exponential of a tangent vector.\n\n Parameters\n ----------\n tangent_vec : array-like, shape=[..., dim + 1]\n Tangent vector at a base point.\n base_point : array-like, shape=[..., dim + 1]\n Point on the hypersphere.\n\n Returns\n -------\n exp : array-like, shape=[..., dim + 1]\n Point on the hypersphere equal to the Riemannian exponential\n of tangent_vec at the base point.\n \"\"\"\n hypersphere = Hypersphere(dim=self.dim)\n proj_tangent_vec = hypersphere.to_tangent(tangent_vec, base_point)\n norm2 = self.embedding_metric.squared_norm(proj_tangent_vec)\n\n coef_1 = utils.taylor_exp_even_func(\n norm2, utils.cos_close_0, order=4)\n coef_2 = utils.taylor_exp_even_func(\n norm2, utils.sinc_close_0, order=4)\n exp = (gs.einsum('...,...j->...j', coef_1, base_point)\n + gs.einsum('...,...j->...j', coef_2, proj_tangent_vec))\n\n return exp\n\n def log(self, point, base_point, **kwargs):\n \"\"\"Compute the Riemannian logarithm of a point.\n\n Parameters\n ----------\n point : array-like, shape=[..., dim + 1]\n Point on the hypersphere.\n base_point : array-like, shape=[..., dim + 1]\n Point on the hypersphere.\n\n Returns\n -------\n log : array-like, shape=[..., dim + 1]\n Tangent vector at the base point equal to the Riemannian logarithm\n of point at the base point.\n \"\"\"\n inner_prod = self.embedding_metric.inner_product(base_point, point)\n cos_angle = gs.clip(inner_prod, -1., 1.)\n squared_angle = gs.arccos(cos_angle) ** 2\n coef_1_ = utils.taylor_exp_even_func(\n squared_angle, utils.inv_sinc_close_0, order=5)\n coef_2_ = utils.taylor_exp_even_func(\n squared_angle, utils.inv_tanc_close_0, order=5)\n log = (gs.einsum('...,...j->...j', coef_1_, point)\n - gs.einsum('...,...j->...j', coef_2_, base_point))\n\n return log\n\n def dist(self, point_a, point_b):\n \"\"\"Compute the geodesic distance between two points.\n\n Parameters\n ----------\n point_a : array-like, shape=[..., dim + 1]\n First point on the hypersphere.\n point_b : array-like, shape=[..., dim + 1]\n Second point on the hypersphere.\n\n Returns\n -------\n dist : array-like, shape=[..., 1]\n Geodesic distance between the two points.\n \"\"\"\n norm_a = self.embedding_metric.norm(point_a)\n norm_b = self.embedding_metric.norm(point_b)\n inner_prod = self.embedding_metric.inner_product(point_a, point_b)\n\n cos_angle = inner_prod / (norm_a * norm_b)\n cos_angle = gs.clip(cos_angle, -1, 1)\n\n dist = gs.arccos(cos_angle)\n\n return dist\n\n def squared_dist(self, point_a, point_b):\n \"\"\"Squared geodesic distance between two points.\n\n Parameters\n ----------\n point_a : array-like, shape=[..., dim]\n Point on the hypersphere.\n point_b : array-like, shape=[..., dim]\n Point on the hypersphere.\n\n Returns\n -------\n sq_dist : array-like, shape=[...,]\n \"\"\"\n return self.dist(point_a, point_b) ** 2\n\n @staticmethod\n def parallel_transport(tangent_vec_a, tangent_vec_b, base_point):\n r\"\"\"Compute the parallel transport of a tangent vector.\n\n Closed-form solution for the parallel 
transport of a tangent vector a\n along the geodesic defined by :math: `t \\mapsto exp_(base_point)(t*\n tangent_vec_b)`.\n\n Parameters\n ----------\n tangent_vec_a : array-like, shape=[..., dim + 1]\n Tangent vector at base point to be transported.\n tangent_vec_b : array-like, shape=[..., dim + 1]\n Tangent vector at base point, along which the parallel transport\n is computed.\n base_point : array-like, shape=[..., dim + 1]\n Point on the hypersphere.\n\n Returns\n -------\n transported_tangent_vec: array-like, shape=[..., dim + 1]\n Transported tangent vector at `exp_(base_point)(tangent_vec_b)`.\n \"\"\"\n theta = gs.linalg.norm(tangent_vec_b, axis=-1)\n eps = gs.where(theta == 0., 1., theta)\n normalized_b = gs.einsum('...,...i->...i', 1 / eps, tangent_vec_b)\n pb = gs.einsum('...i,...i->...', tangent_vec_a, normalized_b)\n p_orth = tangent_vec_a - gs.einsum('...,...i->...i', pb, normalized_b)\n transported = \\\n - gs.einsum('...,...i->...i', gs.sin(theta) * pb, base_point)\\\n + gs.einsum('...,...i->...i', gs.cos(theta) * pb, normalized_b)\\\n + p_orth\n return transported\n\n def christoffels(self, point, point_type='spherical'):\n \"\"\"Compute the Christoffel symbols at a point.\n\n Only implemented in dimension 2 and for spherical coordinates.\n\n Parameters\n ----------\n point : array-like, shape=[..., dim]\n Point on hypersphere where the Christoffel symbols are computed.\n\n point_type: str, {'spherical', 'intrinsic', 'extrinsic'}\n Coordinates in which to express the Christoffel symbols.\n Optional, default: 'spherical'.\n\n Returns\n -------\n christoffel : array-like, shape=[..., contravariant index, 1st\n covariant index, 2nd covariant index]\n Christoffel symbols at point.\n \"\"\"\n if self.dim != 2 or point_type != 'spherical':\n raise NotImplementedError(\n 'The Christoffel symbols are only implemented'\n ' for spherical coordinates in the 2-sphere')\n\n point = gs.to_ndarray(point, to_ndim=2)\n christoffel = []\n for sample in point:\n gamma_0 = gs.array(\n [[0, 0], [0, - gs.sin(sample[0]) * gs.cos(sample[0])]])\n gamma_1 = gs.array([[0, gs.cos(sample[0]) / gs.sin(sample[0])],\n [gs.cos(sample[0]) / gs.sin(sample[0]), 0]])\n christoffel.append(gs.stack([gamma_0, gamma_1]))\n\n christoffel = gs.stack(christoffel)\n if gs.ndim(christoffel) == 4 and gs.shape(christoffel)[0] == 1:\n christoffel = gs.squeeze(christoffel, axis=0)\n return christoffel\n\n def curvature(\n self, tangent_vec_a, tangent_vec_b, tangent_vec_c,\n base_point):\n r\"\"\"Compute the curvature.\n\n For three tangent vectors at a base point :math: `x,y,z`,\n the curvature is defined by\n :math: `R(x, y)z = \\nabla_{[x,y]}z\n - \\nabla_z\\nabla_y z + \\nabla_y\\nabla_x z`, where :math: `\\nabla`\n is the Levi-Civita connection. In the case of the hypersphere,\n we have the closed formula\n :math: `R(x,y)z = \\langle x, z \\rangle y - \\langle y,z \\rangle x`.\n\n Parameters\n ----------\n tangent_vec_a : array-like, shape=[..., dim]\n Tangent vector at `base_point`.\n tangent_vec_b : array-like, shape=[..., dim]\n Tangent vector at `base_point`.\n tangent_vec_c : array-like, shape=[..., dim]\n Tangent vector at `base_point`.\n base_point : array-like, shape=[..., dim]\n Point on the group. 
Optional, default is the identity.\n\n Returns\n -------\n curvature : array-like, shape=[..., dim]\n Tangent vector at `base_point`.\n \"\"\"\n inner_ac = self.inner_product(tangent_vec_a, tangent_vec_c)\n inner_bc = self.inner_product(tangent_vec_b, tangent_vec_c)\n first_term = gs.einsum('...,...i->...i', inner_bc, tangent_vec_a)\n second_term = gs.einsum('...,...i->...i', inner_ac, tangent_vec_b)\n return - first_term + second_term\n\n def _normalization_factor_odd_dim(self, variances):\n \"\"\"Compute the normalization factor - odd dimension.\"\"\"\n dim = self.dim\n half_dim = int((dim + 1) / 2)\n area = 2 * gs.pi ** half_dim / math.factorial(half_dim - 1)\n comb = gs.comb(dim - 1, half_dim - 1)\n\n erf_arg = gs.sqrt(variances / 2) * gs.pi\n first_term = area / (2 ** dim - 1) * comb * gs.sqrt(\n gs.pi / (2 * variances)) * gs.erf(erf_arg)\n\n def summand(k):\n exp_arg = - (dim - 1 - 2 * k) ** 2 / 2 / variances\n erf_arg_2 = (gs.pi * variances - (dim - 1 - 2 * k) * 1j) / gs.sqrt(\n 2 * variances)\n sign = (- 1.) ** k\n comb_2 = gs.comb(k, dim - 1)\n return sign * comb_2 * gs.exp(exp_arg) * gs.real(gs.erf(erf_arg_2))\n\n if half_dim > 2:\n sum_term = gs.sum(\n gs.stack([summand(k)] for k in range(half_dim - 2)))\n else:\n sum_term = summand(0)\n coef = area / 2 / erf_arg * gs.pi ** .5 * (- 1.) ** (half_dim - 1)\n\n return first_term + coef / 2 ** (dim - 2) * sum_term\n\n def _normalization_factor_even_dim(self, variances):\n \"\"\"Compute the normalization factor - even dimension.\"\"\"\n dim = self.dim\n half_dim = (dim + 1) / 2\n area = 2 * gs.pi ** half_dim / math.gamma(half_dim)\n\n def summand(k):\n exp_arg = - (dim - 1 - 2 * k) ** 2 / 2 / variances\n erf_arg_1 = (dim - 1 - 2 * k) * 1j / gs.sqrt(2 * variances)\n erf_arg_2 = (gs.pi * variances - (dim - 1 - 2 * k) * 1j) / gs.sqrt(\n 2 * variances)\n sign = (- 1.) ** k\n comb = gs.comb(dim - 1, k)\n erf_terms = gs.imag(gs.erf(erf_arg_2) + gs.erf(erf_arg_1))\n return sign * comb * gs.exp(exp_arg) * erf_terms\n\n half_dim_2 = int((dim - 2) / 2)\n if half_dim_2 > 0:\n sum_term = gs.sum(\n gs.stack([summand(k)] for k in range(half_dim_2)))\n else:\n sum_term = summand(0)\n coef = area * (- 1.) 
** half_dim_2 / 2 ** (dim - 2) * gs.sqrt(\n gs.pi / 2 / variances)\n\n return coef * sum_term\n\n def normalization_factor(self, variances):\n \"\"\"Return normalization factor of the Gaussian distribution.\n\n Parameters\n ----------\n variances : array-like, shape=[n,]\n Variance of the distribution.\n\n Returns\n -------\n norm_func : array-like, shape=[n,]\n Normalisation factor for all given variances.\n \"\"\"\n if self.dim % 2 == 0:\n return self._normalization_factor_even_dim(variances)\n return self._normalization_factor_odd_dim(variances)\n\n def norm_factor_gradient(self, variances):\n \"\"\"Compute the gradient of the normalization factor.\n\n Parameters\n ----------\n variances : array-like, shape=[n,]\n Variance of the distribution.\n\n Returns\n -------\n norm_func : array-like, shape=[n,]\n Normalisation factor for all given variances.\n \"\"\"\n\n def func(var):\n return gs.sum(self.normalization_factor(var))\n\n _, grad = gs.autograd.value_and_grad(func)(variances)\n return _, grad\n\n\nclass Hypersphere(_Hypersphere):\n \"\"\"Class for the n-dimensional hypersphere.\n\n Class for the n-dimensional hypersphere embedded in the\n (n+1)-dimensional Euclidean space.\n\n By default, points are parameterized by their extrinsic\n (n+1)-coordinates.\n\n Parameters\n ----------\n dim : int\n Dimension of the hypersphere.\n \"\"\"\n\n def __init__(self, dim):\n super(Hypersphere, self).__init__(dim)\n self.metric = HypersphereMetric(dim)\n" ]
[ [ "scipy.stats.beta.rvs" ] ]
ankitshah009/Object_Detection_Tracking
[ "90b0d5a04f87155c2a84b0d51ecb009f757ebf85" ]
[ "obj_detect_tracking.py" ]
[ "# coding=utf-8\n# run script\n\nimport sys, os, argparse\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # so here won't have poll allocator info\n\n# remove all the annoying warnings from tf v1.10 to v1.13\nimport logging\nlogging.getLogger('tensorflow').disabled = True\n\nfrom tqdm import tqdm\nimport numpy as np\nimport tensorflow as tf\n\nimport cv2\n\nfrom models import get_model, resizeImage\nfrom nn import fill_full_mask\n\nimport math, time, json, random, operator\nimport pickle\nimport pycocotools.mask as cocomask\nfrom deep_sort import nn_matching\nfrom deep_sort.detection import Detection\nfrom deep_sort.tracker import Tracker\nfrom application_util import preprocessing\nfrom deep_sort.utils import create_obj_infos,linear_inter_bbox,filter_short_objs\nfrom utils import Dataset, Summary, get_op_tensor_name\n\n# for mask\nimport pycocotools.mask as cocomask\n\nfrom class_ids import targetClass2id_new_nopo\n\ntargetClass2id = targetClass2id_new_nopo\n\ntargetid2class = {targetClass2id[one]: one for one in targetClass2id}\nfrom class_ids import coco_obj_class_to_id, coco_obj_id_to_class, coco_obj_to_actev_obj\n\ndef get_args():\n global targetClass2id, targetid2class\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--video_dir\", default=None)\n parser.add_argument(\"--video_lst_file\", default=None, help=\"video_file_path = os.path.join(video_dir, $line)\")\n\n parser.add_argument(\"--out_dir\", default=None, help=\"out_dir/$basename/%%d.json, start from 0 index. This is the object box output. Leave this blank when use tracking to avoid saving the obj class output to save IO time.\")\n\n parser.add_argument(\"--frame_gap\", default=8, type=int)\n\n parser.add_argument(\"--threshold_conf\", default=0.0001, type=float)\n\n parser.add_argument(\"--is_load_from_pb\", action=\"store_true\", help=\"load from a frozen graph\")\n\n # ------ for box feature extraction\n parser.add_argument(\"--get_box_feat\", action=\"store_true\",\n help=\"this will generate (num_box, 256, 7, 7) tensor for each frame\")\n parser.add_argument(\"--box_feat_path\", default=None,\n help=\"output will be out_dir/$basename/%%d.npy, start from 0 index\")\n\n parser.add_argument(\"--version\", type=int, default=4, help=\"model version\")\n parser.add_argument(\"--is_coco_model\", action=\"store_true\",\n help=\"is coco model, will output coco classes instead\")\n\n # ---- gpu params\n parser.add_argument(\"--gpu\", default=1, type=int, help=\"number of gpu\")\n parser.add_argument(\"--gpuid_start\", default=0, type=int, help=\"start of gpu id\")\n parser.add_argument('--im_batch_size', type=int, default=1)\n parser.add_argument(\"--use_all_mem\", action=\"store_true\")\n\n # --- for internal visualization\n parser.add_argument(\"--visualize\", action=\"store_true\")\n parser.add_argument(\"--vis_path\", default=None)\n parser.add_argument(\"--vis_thres\", default=0.7, type=float)\n\n # ----------- model params\n parser.add_argument(\"--num_class\", type=int, default=15, help=\"num catagory + 1 background\")\n\n parser.add_argument(\"--model_path\", default=\"/app/object_detection_model\")\n\n parser.add_argument(\"--rpn_batch_size\", type=int, default=256, help=\"num roi per image for RPN training\")\n parser.add_argument(\"--frcnn_batch_size\", type=int, default=512, help=\"num roi per image for fastRCNN training\")\n\n parser.add_argument(\"--rpn_test_post_nms_topk\", type=int, default=1000, help=\"test post nms, input to fast rcnn\")\n\n parser.add_argument(\"--max_size\", type=int, default=1920, 
help=\"num roi per image for RPN and fastRCNN training\")\n parser.add_argument(\"--short_edge_size\", type=int, default=1080,\n help=\"num roi per image for RPN and fastRCNN training\")\n\n # ----------- tracking params\n parser.add_argument(\"--get_tracking\", action=\"store_true\",\n help=\"this will generate tracking results for each frame\")\n parser.add_argument(\"--tracking_dir\", default=\"/tmp\",\n help=\"output will be out_dir/$videoname.txt, start from 0 index\")\n parser.add_argument(\"--tracking_objs\", default=\"Person,Vehicle\",\n help=\"Objects to be tracked, default are Person and Vehicle\")\n parser.add_argument(\"--min_confidence\", default=0.85, type=float,\n help=\"Detection confidence threshold. Disregard all detections \"\n \"that have a confidence lower than this value.\")\n parser.add_argument(\"--min_detection_height\", default=0, type=int,\n help=\"Threshold on the detection bounding box height. Detections \"\n \"with height smaller than this value are disregarded\")\n parser.add_argument(\"--nms_max_overlap\", default=0.85, type=float,\n help=\"Non-maxima suppression threshold: Maximum detection overlap.\")\n parser.add_argument(\"--max_cosine_distance\", type=float, default=0.5,\n help=\"Gating threshold for cosine distance metric (object appearance).\")\n parser.add_argument(\"--nn_budget\", type=int, default=5,\n help=\"Maximum size of the appearance descriptors gallery. If None, no budget is enforced.\")\n\n parser.add_argument(\"--bupt_exp\", action=\"store_true\", help=\"activity box experiemnt\")\n # ---- tempory: for activity detection model\n parser.add_argument(\"--actasobj\", action=\"store_true\")\n parser.add_argument(\"--actmodel_path\", default=\"/app/activity_detection_model\")\n\n parser.add_argument(\"--resnet152\", action=\"store_true\", help=\"\")\n parser.add_argument(\"--resnet50\", action=\"store_true\", help=\"\")\n parser.add_argument(\"--resnet34\", action=\"store_true\", help=\"\")\n parser.add_argument(\"--resnet18\", action=\"store_true\", help=\"\")\n parser.add_argument(\"--use_se\", action=\"store_true\", help=\"use squeeze and excitation in backbone\")\n parser.add_argument(\"--use_frcnn_class_agnostic\", action=\"store_true\", help=\"use class agnostic fc head\")\n parser.add_argument(\"--use_resnext\", action=\"store_true\", help=\"\")\n parser.add_argument(\"--use_att_frcnn_head\", action=\"store_true\",\n help=\"use attention to sum [K, 7, 7, C] feature into [K, C]\")\n\n # ---- COCO model\n parser.add_argument(\"--add_mask\", action=\"store_true\")\n\n # --------------- exp junk\n parser.add_argument(\"--use_dilations\", action=\"store_true\", help=\"use dilations=2 in res5\")\n parser.add_argument(\"--use_deformable\", action=\"store_true\", help=\"use deformable conv\")\n parser.add_argument(\"--add_act\", action=\"store_true\", help=\"add activitiy model\")\n parser.add_argument(\"--finer_resolution\", action=\"store_true\", help=\"fpn use finer resolution conv\")\n parser.add_argument(\"--fix_fpn_model\", action=\"store_true\",\n help=\"for finetuneing a fpn model, whether to fix the lateral and poshoc weights\")\n parser.add_argument(\"--is_cascade_rcnn\", action=\"store_true\", help=\"cascade rcnn on top of fpn\")\n parser.add_argument(\"--add_relation_nn\", action=\"store_true\", help=\"add relation network feature\")\n\n parser.add_argument(\"--test_frame_extraction\", action=\"store_true\")\n parser.add_argument(\"--use_my_naming\", action=\"store_true\")\n\n # for efficient use of COCO model classes\n 
parser.add_argument(\"--use_partial_classes\", action=\"store_true\")\n\n args = parser.parse_args()\n\n if args.use_partial_classes:\n assert args.is_coco_model\n args.partial_classes = [classname for classname in coco_obj_to_actev_obj]\n\n assert args.gpu == args.im_batch_size # one gpu one image\n assert args.gpu == 1, \"Currently only support single-gpu inference\"\n\n if args.is_load_from_pb:\n args.load_from = args.model_path\n\n args.controller = \"/cpu:0\" # parameter server\n\n targetid2class = targetid2class\n targetClass2id = targetClass2id\n\n if args.actasobj:\n from class_ids import targetAct2id\n targetClass2id = targetAct2id\n targetid2class = {targetAct2id[one]: one for one in targetAct2id}\n if args.bupt_exp:\n from class_ids import targetAct2id_bupt\n targetClass2id =targetAct2id_bupt\n targetid2class = {targetAct2id_bupt[one]: one for one in targetAct2id_bupt}\n\n assert len(targetClass2id) == args.num_class, (len(targetClass2id), args.num_class)\n\n\n assert args.version in [2, 3, 4, 5, 6], \"Currently we only have version 2-6 model\"\n\n if args.version == 2:\n pass\n elif args.version == 3:\n args.use_dilations = True\n elif args.version == 4:\n args.use_frcnn_class_agnostic = True\n args.use_dilations = True\n elif args.version == 5:\n args.use_frcnn_class_agnostic = True\n args.use_dilations = True\n elif args.version == 6:\n args.use_frcnn_class_agnostic = True\n args.use_se = True\n\n if args.is_coco_model:\n assert args.version == 2\n targetClass2id = coco_obj_class_to_id\n targetid2class = coco_obj_id_to_class\n args.num_class = 81\n if args.use_partial_classes:\n partial_classes = [\"BG\"] + args.partial_classes\n targetClass2id = {classname: i\n for i, classname in enumerate(partial_classes)}\n targetid2class = {targetClass2id[o]: o for o in targetClass2id}\n\n # ---------------more defautls\n args.is_pack_model = False\n args.diva_class3 = True\n args.diva_class = False\n args.diva_class2 = False\n args.use_small_object_head = False\n args.use_so_score_thres = False\n args.use_so_association = False\n args.use_gn = False\n args.so_person_topk = 10\n args.use_conv_frcnn_head = False\n args.use_cpu_nms = False\n args.use_bg_score = False\n args.freeze_rpn = True\n args.freeze_fastrcnn = True\n args.freeze = 2\n args.small_objects = [\"Prop\", \"Push_Pulled_Object\", \"Prop_plus_Push_Pulled_Object\", \"Bike\"]\n args.no_obj_detect = False\n #args.add_mask = False\n args.is_fpn = True\n # args.new_tensorpack_model = True\n args.mrcnn_head_dim = 256\n args.is_train = False\n\n args.rpn_min_size = 0\n args.rpn_proposal_nms_thres = 0.7\n args.anchor_strides = (4, 8, 16, 32, 64)\n\n args.fpn_resolution_requirement = float(args.anchor_strides[3]) # [3] is 32, since we build FPN with r2,3,4,5?\n\n args.max_size = np.ceil(args.max_size / args.fpn_resolution_requirement) * args.fpn_resolution_requirement\n\n args.fpn_num_channel = 256\n\n args.fpn_frcnn_fc_head_dim = 1024\n\n # ---- all the mask rcnn config\n\n args.resnet_num_block = [3, 4, 23, 3] # resnet 101\n args.use_basic_block = False # for resnet-34 and resnet-18\n if args.resnet152:\n args.resnet_num_block = [3, 8, 36, 3]\n if args.resnet50:\n args.resnet_num_block = [3, 4, 6, 3]\n if args.resnet34:\n args.resnet_num_block = [3, 4, 6, 3]\n args.use_basic_block = True\n if args.resnet18:\n args.resnet_num_block = [2, 2, 2, 2]\n args.use_basic_block = True\n\n args.anchor_stride = 16 # has to be 16 to match the image feature total stride\n args.anchor_sizes = (32, 64, 128, 256, 512)\n\n args.anchor_ratios = (0.5, 
1, 2)\n\n args.num_anchors = len(args.anchor_sizes) * len(args.anchor_ratios)\n # iou thres to determine anchor label\n # args.positive_anchor_thres = 0.7\n # args.negative_anchor_thres = 0.3\n\n # when getting region proposal, avoid getting too large boxes\n args.bbox_decode_clip = np.log(args.max_size / 16.0)\n\n # fastrcnn\n args.fastrcnn_batch_per_im = args.frcnn_batch_size\n args.fastrcnn_bbox_reg_weights = np.array([10, 10, 5, 5], dtype='float32')\n\n args.fastrcnn_fg_thres = 0.5 # iou thres\n # args.fastrcnn_fg_ratio = 0.25 # 1:3 -> pos:neg\n\n # testing\n args.rpn_test_pre_nms_topk = 6000\n\n args.fastrcnn_nms_iou_thres = 0.5\n\n args.result_score_thres = args.threshold_conf\n args.result_per_im = 100\n\n return args\n\n\ndef initialize(config, sess):\n tf.global_variables_initializer().run()\n allvars = tf.global_variables()\n allvars = [var for var in allvars if \"global_step\" not in var.name]\n restore_vars = allvars\n opts = [\"Adam\",\"beta1_power\",\"beta2_power\",\"Adam_1\",\"Adadelta_1\",\"Adadelta\",\"Momentum\"]\n restore_vars = [var for var in restore_vars if var.name.split(\":\")[0].split(\"/\")[-1] not in opts]\n\n saver = tf.train.Saver(restore_vars, max_to_keep=5)\n\n load_from = config.model_path\n ckpt = tf.train.get_checkpoint_state(load_from)\n if ckpt and ckpt.model_checkpoint_path:\n loadpath = ckpt.model_checkpoint_path\n saver.restore(sess, loadpath)\n else:\n if os.path.exists(load_from):\n if load_from.endswith(\".ckpt\"):\n # load_from should be a single .ckpt file\n saver.restore(sess, load_from)\n elif load_from.endswith(\".npz\"):\n # load from dict\n weights = np.load(load_from)\n params = {get_op_tensor_name(n)[1]:v\n for n, v in dict(weights).iteritems()}\n param_names = set(params.iterkeys())\n\n variables = restore_vars\n\n variable_names = set([k.name for k in variables])\n\n intersect = variable_names & param_names\n\n restore_vars = [v for v in variables if v.name in intersect]\n\n with sess.as_default():\n for v in restore_vars:\n vname = v.name\n v.load(params[vname])\n\n not_used = [(one, weights[one].shape)\n for one in weights.keys()\n if get_op_tensor_name(one)[1] not in intersect]\n if not not_used:\n print(\"warning, %s/%s in npz not restored:%s\" %(len(weights.keys()) - len(intersect), len(weights.keys()), not_used))\n\n else:\n raise Exception(\"Not recognized model type:%s\" % load_from)\n else:\n raise Exception(\"Model not exists\")\n\n\n\n# check argument\ndef check_args(args):\n assert args.video_dir is not None\n assert args.video_lst_file is not None\n assert args.frame_gap >= 1\n if args.get_box_feat:\n assert args.box_feat_path is not None\n if not os.path.exists(args.box_feat_path):\n os.makedirs(args.box_feat_path)\n #print(\"cv2 version %s\" % (cv2.__version__)\n\n\nif __name__ == \"__main__\":\n args = get_args()\n\n check_args(args)\n\n videolst = [os.path.join(args.video_dir, one.strip()) for one in open(args.video_lst_file).readlines()]\n\n if args.out_dir is not None:\n if not os.path.exists(args.out_dir):\n os.makedirs(args.out_dir)\n\n if args.visualize:\n from viz import draw_boxes\n\n vis_path = args.vis_path\n if not os.path.exists(vis_path):\n os.makedirs(vis_path)\n\n # 1. 
load the object detection model\n model = get_model(args, args.gpuid_start, controller=args.controller)\n\n tfconfig = tf.ConfigProto(allow_soft_placement=True)\n if not args.use_all_mem:\n tfconfig.gpu_options.allow_growth = True\n tfconfig.gpu_options.visible_device_list = \"%s\" % (\n \",\".join([\"%s\" % i for i in range(args.gpuid_start, args.gpuid_start + args.gpu)]))\n\n with tf.Session(config=tfconfig) as sess:\n\n if not args.is_load_from_pb:\n initialize(config=args, sess=sess)\n\n for videofile in tqdm(videolst, ascii=True):\n # 2. read the video file\n try:\n vcap = cv2.VideoCapture(videofile)\n if not vcap.isOpened():\n raise Exception(\"cannot open %s\" % videofile)\n except Exception as e:\n raise e\n\n # initialize tracking module\n if args.get_tracking:\n tracking_objs = args.tracking_objs.split(',')\n tracker_dict = {}\n tracking_results_dict = {}\n tmp_tracking_results_dict = {}\n for tracking_obj in tracking_objs:\n metric = metric = nn_matching.NearestNeighborDistanceMetric(\n \"cosine\", args.max_cosine_distance, args.nn_budget)\n tracker_dict[tracking_obj] = Tracker(metric)\n tracking_results_dict[tracking_obj] = []\n tmp_tracking_results_dict[tracking_obj] = {}\n\n # videoname = os.path.splitext(os.path.basename(videofile))[0]\n videoname = os.path.basename(videofile)\n if args.out_dir is not None: # not saving box json to save time\n video_out_path = os.path.join(args.out_dir, videoname)\n if not os.path.exists(video_out_path):\n os.makedirs(video_out_path)\n\n # for box feature\n if args.get_box_feat:\n feat_out_path = os.path.join(args.box_feat_path, videoname)\n if not os.path.exists(feat_out_path):\n os.makedirs(feat_out_path)\n\n # opencv 2\n if cv2.__version__.split(\".\")[0] == \"2\":\n frame_count = vcap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)\n else:\n # opencv 3/4\n frame_count = vcap.get(cv2.CAP_PROP_FRAME_COUNT)\n\n # 3. read frame one by one\n cur_frame = 0\n vis_count = 0\n frame_stack = []\n while cur_frame < frame_count:\n suc, frame = vcap.read()\n if not suc:\n cur_frame += 1\n tqdm.write(\"warning, %s frame of %s failed\" % (cur_frame, videoname))\n continue\n\n # skip some frame if frame_gap >1\n if cur_frame % args.frame_gap != 0:\n cur_frame += 1\n continue\n\n # 4. 
                scale = (resized_image.shape[0] * 1.0 / im.shape[0] +
                         resized_image.shape[1] * 1.0 / im.shape[1]) / 2.0

                feed_dict = model.get_feed_dict_forward(resized_image)

                if args.get_box_feat:
                    sess_input = [model.final_boxes, model.final_labels,
                                  model.final_probs, model.fpn_box_feat]

                    final_boxes, final_labels, final_probs, box_feats = sess.run(
                        sess_input, feed_dict=feed_dict)
                    assert len(box_feats) == len(final_boxes)

                    # save the box features first
                    featfile = os.path.join(feat_out_path, "%d.npy" % (cur_frame))
                    np.save(featfile, box_feats)
                elif args.get_tracking:

                    if args.add_mask:
                        sess_input = [model.final_boxes, model.final_labels, model.final_probs,
                                      model.fpn_box_feat, model.final_masks]
                        final_boxes, final_labels, final_probs, box_feats, final_masks = sess.run(
                            sess_input, feed_dict=feed_dict)
                    else:
                        sess_input = [model.final_boxes, model.final_labels,
                                      model.final_probs, model.fpn_box_feat]
                        final_boxes, final_labels, final_probs, box_feats = sess.run(
                            sess_input, feed_dict=feed_dict)

                    assert len(box_feats) == len(final_boxes)

                    for tracking_obj in tracking_objs:
                        target_tracking_obs = [tracking_obj]
                        detections = create_obj_infos(
                            cur_frame, final_boxes, final_probs, final_labels, box_feats,
                            targetid2class, target_tracking_obs, args.min_confidence,
                            args.min_detection_height, scale, is_coco_model=args.is_coco_model,
                            coco_to_actev_mapping=coco_obj_to_actev_obj)
                        # Run non-maxima suppression.
                        boxes = np.array([d.tlwh for d in detections])
                        scores = np.array([d.confidence for d in detections])
                        indices = preprocessing.non_max_suppression(
                            boxes, args.nms_max_overlap, scores)
                        detections = [detections[i] for i in indices]

                        # tracking
                        tracker_dict[tracking_obj].predict()
                        tracker_dict[tracking_obj].update(detections)
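                        # Confirmed tracks that were matched recently
                        # (time_since_update <= 1) are written straight to the
                        # results; tentative tracks are buffered per track id
                        # in tmp_tracking_results_dict and flushed into
                        # tracking_results_dict once the track is confirmed,
                        # so detections that never form a stable track are
                        # dropped.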
                        # Store results
                        for track in tracker_dict[tracking_obj].tracks:
                            if not track.is_confirmed() or track.time_since_update > 1:
                                if (not track.is_confirmed()) and track.time_since_update == 0:
                                    bbox = track.to_tlwh()
                                    if track.track_id not in tmp_tracking_results_dict[tracking_obj]:
                                        tmp_tracking_results_dict[tracking_obj][track.track_id] = [
                                            [cur_frame, track.track_id, bbox[0], bbox[1], bbox[2], bbox[3]]]
                                    else:
                                        tmp_tracking_results_dict[tracking_obj][track.track_id].append(
                                            [cur_frame, track.track_id, bbox[0], bbox[1], bbox[2], bbox[3]])
                                continue
                            bbox = track.to_tlwh()
                            if track.track_id in tmp_tracking_results_dict[tracking_obj]:
                                pred_list = tmp_tracking_results_dict[tracking_obj][track.track_id]
                                for pred_data in pred_list:
                                    tracking_results_dict[tracking_obj].append(pred_data)
                                tmp_tracking_results_dict[tracking_obj].pop(track.track_id, None)
                            tracking_results_dict[tracking_obj].append([
                                cur_frame, track.track_id, bbox[0], bbox[1], bbox[2], bbox[3]])

                else:
                    if args.add_mask:
                        sess_input = [model.final_boxes, model.final_labels,
                                      model.final_probs, model.final_masks]
                        final_boxes, final_labels, final_probs, final_masks = sess.run(
                            sess_input, feed_dict=feed_dict)
                    else:
                        sess_input = [model.final_boxes, model.final_labels, model.final_probs]
                        final_boxes, final_labels, final_probs = sess.run(sess_input, feed_dict=feed_dict)

                if args.out_dir is None:  # not saving the box json, to save time
                    cur_frame += 1
                    continue

                # scale the boxes back to the original image size
                final_boxes = final_boxes / scale

                if args.add_mask:
                    final_masks = [fill_full_mask(box, mask, im.shape[:2])
                                   for box, mask in zip(final_boxes, final_masks)]

                # save as json
                pred = []

                for j, (box, prob, label) in enumerate(zip(final_boxes, final_probs, final_labels)):
                    box[2] -= box[0]
                    box[3] -= box[1]  # produce x, y, w, h output

                    cat_id = label
                    cat_name = targetid2class[cat_id]

                    # encode the mask
                    rle = None
                    if args.add_mask:
                        final_mask = final_masks[j]
                        rle = cocomask.encode(np.array(final_mask[:, :, None], order="F"))[0]
                        rle['counts'] = rle['counts'].decode("ascii")

                    res = {
                        "category_id": cat_id,
                        "cat_name": cat_name,  # [0-80]
                        "score": float(round(prob, 7)),
                        "bbox": list(map(lambda x: float(round(x, 2)), box)),
                        "segmentation": rle,
                    }

                    pred.append(res)

                # predfile = os.path.join(args.out_dir, "%s_F_%08d.json" % (videoname, cur_frame))
                if args.use_my_naming:
                    predfile = os.path.join(
                        video_out_path,
                        "%s_F_%08d.json" % (os.path.splitext(videoname)[0], cur_frame))
                else:
                    predfile = os.path.join(video_out_path, "%d.json" % (cur_frame))

                with open(predfile, "w") as f:
                    json.dump(pred, f)

                # for visualization
                if args.visualize:
                    good_ids = [i for i in range(len(final_boxes)) if final_probs[i] >= args.vis_thres]
                    final_boxes, final_labels, final_probs = \
                        final_boxes[good_ids], final_labels[good_ids], final_probs[good_ids]
                    vis_boxes = np.asarray([[box[0], box[1], box[2] + box[0], box[3] + box[1]]
                                            for box in final_boxes])
                    vis_labels = ["%s_%.2f" % (targetid2class[cat_id], prob)
                                  for cat_id, prob in zip(final_labels, final_probs)]
                    newim = draw_boxes(im, vis_boxes, vis_labels, color=np.array([255, 0, 0]),
                                       font_scale=0.5, thickness=2)

                    vis_file = os.path.join(vis_path, "%s_F_%08d.jpg" % (videoname, vis_count))
                    cv2.imwrite(vis_file, newim)
                    vis_count += 1

                cur_frame += 1

            if args.get_tracking:
                for tracking_obj in tracking_objs:
                    output_dir = os.path.join(args.tracking_dir, videoname, tracking_obj)
                    if not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    output_file = os.path.join(output_dir, "%s.txt" % os.path.splitext(videoname)[0])

                    tracking_results = sorted(tracking_results_dict[tracking_obj], key=lambda x: (x[0], x[1]))
                    # print(len(tracking_results))
                    tracking_data = np.asarray(tracking_results)
                    # print(tracking_data.shape)
                    tracking_data = linear_inter_bbox(tracking_data, args.frame_gap)
                    tracking_data = filter_short_objs(tracking_data)
                    tracking_results = tracking_data.tolist()
                    # one MOT-style line per box: frame, track_id, x, y, w, h, conf, -1, -1, -1
                    with open(output_file, 'w') as fw:
                        for row in tracking_results:
                            line = '%d,%d,%.2f,%.2f,%.2f,%.2f,1,-1,-1,-1' % (
                                row[0], row[1], row[2], row[3], row[4], row[5])
                            fw.write(line + '\n')

            if args.test_frame_extraction:
                tqdm.write(
                    "video %s got %s frames, opencv reported a frame count of %s" % (
                        videoname, cur_frame, frame_count))
[ [ "tensorflow.train.get_checkpoint_state", "numpy.log", "numpy.asarray", "tensorflow.global_variables", "numpy.save", "tensorflow.ConfigProto", "numpy.ceil", "tensorflow.global_variables_initializer", "tensorflow.Session", "tensorflow.train.Saver", "numpy.load", "numpy.array" ] ]