Columns: repo_name (string, 8–130 chars), hexsha (sequence), file_path (sequence), code (sequence), apis (sequence)
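Each record below pairs one repo_name with parallel hexsha, file_path, code, and apis lists, so code[i] and apis[i] describe the file at file_path[i]. A minimal sketch of iterating records with this schema, assuming a JSON Lines serialization (the file name records.jsonl is a placeholder, not something this dump specifies):

import json

# Walk every (file_path, code, apis) triple of every record.
with open("records.jsonl") as fid:  # hypothetical serialization of the rows below
    for line in fid:
        record = json.loads(line)
        assert len(record["file_path"]) == len(record["code"]) == len(record["apis"])
        for path, code, apis in zip(record["file_path"], record["code"], record["apis"]):
            print(record["repo_name"], path, len(code), len(apis))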
microsoft/DualOctreeGNN
[ "29eed84653d4f0c1681c8227714cf84e76c31abe" ]
[ "tools/shapenet.py" ]
[ "# --------------------------------------------------------\n# Dual Octree Graph Networks\n# Copyright (c) 2022 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Peng-Shuai Wang\n# --------------------------------------------------------\n\nimport os\nimport time\nimport wget\nimport shutil\nimport torch\nimport ocnn\nimport trimesh\nimport logging\nimport mesh2sdf\nimport zipfile\nimport argparse\nimport numpy as np\nfrom tqdm import tqdm\nfrom plyfile import PlyData, PlyElement\n\nlogger = logging.getLogger(\"trimesh\")\nlogger.setLevel(logging.ERROR)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--run', type=str, required=True)\nparser.add_argument('--start', type=int, default=0)\nparser.add_argument('--end', type=int, default=45572)\nargs = parser.parse_args()\n\nsize = 128 # resolution of SDF\nlevel = 0.015 # 2/128 = 0.015625\nshape_scale = 0.5 # rescale the shape into [-0.5, 0.5]\nproject_folder = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\nroot_folder = os.path.join(project_folder, 'data/ShapeNet')\n\n\ndef create_flag_file(filename):\n r''' Creates a flag file to indicate whether some time-consuming works\n have been done.\n '''\n\n folder = os.path.dirname(filename)\n if not os.path.exists(folder):\n os.makedirs(folder)\n with open(filename, 'w') as fid:\n fid.write('succ @ ' + time.ctime())\n\n\ndef check_folder(filenames: list):\n r''' Checks whether the folder contains the filename exists.\n '''\n\n for filename in filenames:\n folder = os.path.dirname(filename)\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n\ndef get_filenames(filelist):\n r''' Gets filenames from a filelist.\n '''\n\n filelist = os.path.join(root_folder, 'filelist', filelist)\n with open(filelist, 'r') as fid:\n lines = fid.readlines()\n filenames = [line.split()[0] for line in lines]\n return filenames\n\n\ndef unzip_shapenet():\n r''' Unzip the ShapeNetCore.v1\n '''\n\n filename = os.path.join(root_folder, 'ShapeNetCore.v1.zip')\n flag_file = os.path.join(root_folder, 'flags/unzip_shapenet_succ')\n if not os.path.exists(flag_file):\n print('-> Unzip ShapeNetCore.v1.zip.')\n with zipfile.ZipFile(filename, 'r') as zip_ref:\n zip_ref.extractall(root_folder)\n create_flag_file(flag_file)\n\n folder = os.path.join(root_folder, 'ShapeNetCore.v1')\n flag_file = os.path.join(root_folder, 'flags/unzip_shapenet_all_succ')\n if not os.path.exists(flag_file):\n print('-> Unzip all zip files in ShapeNetCore.v1.')\n filenames = os.listdir(folder)\n for filename in filenames:\n if filename.endswith('.zip'):\n print('- Unzip %s' % filename)\n zipname = os.path.join(folder, filename)\n with zipfile.ZipFile(zipname, 'r') as zip_ref:\n zip_ref.extractall(folder)\n os.remove(zipname)\n create_flag_file(flag_file)\n\n\ndef download_filelist():\n r''' Downloads the filelists used for learning.\n '''\n\n flag_file = os.path.join(root_folder, 'flags/download_filelist_succ')\n if not os.path.exists(flag_file):\n print('-> Download the filelist.')\n url = 'https://www.dropbox.com/s/4jvam486l8961t7/shapenet.filelist.zip?dl=1'\n filename = os.path.join(root_folder, 'filelist.zip')\n wget.download(url, filename, bar=None)\n\n folder = os.path.join(root_folder, 'filelist')\n with zipfile.ZipFile(filename, 'r') as zip_ref:\n zip_ref.extractall(path=folder)\n os.remove(filename)\n create_flag_file(flag_file)\n\n\ndef run_mesh2sdf():\n r''' Converts the meshes from ShapeNet to SDFs and manifold meshes.\n '''\n\n print('-> Run mesh2sdf.')\n mesh_scale = 0.8\n 
filenames = get_filenames('all.txt')\n for i in tqdm(range(args.start, args.end), ncols=80):\n filename = filenames[i]\n filename_raw = os.path.join(\n root_folder, 'ShapeNetCore.v1', filename, 'model.obj')\n filename_obj = os.path.join(root_folder, 'mesh', filename + '.obj')\n filename_box = os.path.join(root_folder, 'bbox', filename + '.npz')\n filename_npy = os.path.join(root_folder, 'sdf', filename + '.npy')\n check_folder([filename_obj, filename_box, filename_npy])\n if os.path.exists(filename_obj): continue\n\n # load the raw mesh\n mesh = trimesh.load(filename_raw, force='mesh')\n\n # rescale mesh to [-1, 1] for mesh2sdf, note the factor **mesh_scale**\n vertices = mesh.vertices\n bbmin, bbmax = vertices.min(0), vertices.max(0)\n center = (bbmin + bbmax) * 0.5\n scale = 2.0 * mesh_scale / (bbmax - bbmin).max()\n vertices = (vertices - center) * scale\n\n # run mesh2sdf\n sdf, mesh_new = mesh2sdf.compute(vertices, mesh.faces, size, fix=True,\n level=level, return_mesh=True)\n mesh_new.vertices = mesh_new.vertices * shape_scale\n\n # save\n np.savez(filename_box, bbmax=bbmax, bbmin=bbmin, mul=mesh_scale)\n np.save(filename_npy, sdf)\n mesh_new.export(filename_obj)\n\n\ndef sample_pts_from_mesh():\n r''' Samples 40k points with normals from the ground-truth meshes.\n '''\n\n print('-> Run sample_pts_from_mesh.')\n num_samples = 40000\n mesh_folder = os.path.join(root_folder, 'mesh')\n output_folder = os.path.join(root_folder, 'dataset')\n filenames = get_filenames('all.txt')\n for i in tqdm(range(args.start, args.end), ncols=80):\n filename = filenames[i]\n filename_obj = os.path.join(mesh_folder, filename + '.obj')\n filename_pts = os.path.join(output_folder, filename, 'pointcloud.npz')\n check_folder([filename_pts])\n if os.path.exists(filename_pts): continue\n\n # sample points\n mesh = trimesh.load(filename_obj, force='mesh')\n points, idx = trimesh.sample.sample_surface(mesh, num_samples)\n normals = mesh.face_normals[idx]\n\n # save points\n np.savez(filename_pts, points=points.astype(np.float16),\n normals=normals.astype(np.float16))\n\n\ndef sample_sdf():\n r''' Samples ground-truth SDF values for training.\n '''\n\n # constants\n depth, full_depth = 6, 4\n sample_num = 4 # number of samples in each octree node\n grid = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1],\n [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]])\n\n print('-> Sample SDFs from the ground truth.')\n filenames = get_filenames('all.txt')\n for i in tqdm(range(args.start, args.end), ncols=80):\n filename = filenames[i]\n dataset_folder = os.path.join(root_folder, 'dataset')\n filename_sdf = os.path.join(root_folder, 'sdf', filename + '.npy')\n filename_pts = os.path.join(dataset_folder, filename, 'pointcloud.npz')\n filename_out = os.path.join(dataset_folder, filename, 'sdf.npz')\n if os.path.exists(filename_out): continue\n\n # load data\n pts = np.load(filename_pts)\n sdf = np.load(filename_sdf)\n sdf = torch.from_numpy(sdf)\n points = pts['points'].astype(np.float32)\n normals = pts['normals'].astype(np.float32)\n points = points / shape_scale # rescale points to [-1, 1]\n\n # build octree\n points = ocnn.points_new(\n torch.from_numpy(points), torch.from_numpy(normals),\n torch.Tensor(), torch.Tensor())\n points2octree = ocnn.Points2Octree(depth=depth, full_depth=full_depth)\n octree = points2octree(points)\n\n # sample points and grads according to the xyz\n xyzs, grads, sdfs = [], [], []\n for d in range(full_depth, depth + 1):\n xyz = ocnn.octree_property(octree, 'xyz', d)\n xyz = 
ocnn.octree_decode_key(xyz)\n\n # sample k points in each octree node\n xyz = xyz[:, :3].float() # + 0.5 -> octree node center\n xyz = xyz.unsqueeze(1) + torch.rand(xyz.shape[0], sample_num, 3)\n xyz = xyz.view(-1, 3) # (N, 3)\n xyz = xyz * (size / 2 ** d) # normalize to [0, 2^sdf_depth]\n xyz = xyz[(xyz < 127).all(dim=1)] # remove out-of-bound points\n xyzs.append(xyz)\n\n # interpolate the sdf values\n xyzi = torch.floor(xyz) # the integer part (N, 3)\n corners = xyzi.unsqueeze(1) + grid # (N, 8, 3)\n coordsf = xyz.unsqueeze(1) - corners # (N, 8, 3), in [-1.0, 1.0]\n weights = (1 - coordsf.abs()).prod(dim=-1) # (N, 8)\n corners = corners.long().view(-1, 3)\n x, y, z = corners[:, 0], corners[:, 1], corners[:, 2]\n s = sdf[x, y, z].view(-1, 8)\n sw = torch.sum(s * weights, dim=1)\n sdfs.append(sw)\n\n # calc the gradient\n gx = s[:, 4] - s[:, 0] + s[:, 5] - s[:, 1] + \\\n s[:, 6] - s[:, 2] + s[:, 7] - s[:, 3] # noqa\n gy = s[:, 2] - s[:, 0] + s[:, 3] - s[:, 1] + \\\n s[:, 6] - s[:, 4] + s[:, 7] - s[:, 5] # noqa\n gz = s[:, 1] - s[:, 0] + s[:, 3] - s[:, 2] + \\\n s[:, 5] - s[:, 4] + s[:, 7] - s[:, 6] # noqa\n grad = torch.stack([gx, gy, gz], dim=-1)\n norm = torch.sqrt(torch.sum(grad ** 2, dim=-1, keepdims=True))\n grad = grad / (norm + 1.0e-8)\n grads.append(grad)\n\n # concat the results\n xyzs = torch.cat(xyzs, dim=0).numpy()\n points = (xyzs / 64 - 1).astype(np.float16) * shape_scale # !shape_scale\n grads = torch.cat(grads, dim=0).numpy().astype(np.float16)\n sdfs = torch.cat(sdfs, dim=0).numpy().astype(np.float16)\n\n # save results\n # points = (points * args.scale).astype(np.float16) # in [-scale, scale]\n np.savez(filename_out, points=points, grad=grads, sdf=sdfs)\n\n\ndef sample_occu():\n r''' Samples occupancy values for evaluating the IoU following ConvONet.\n '''\n\n num_samples = 100000\n grid = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1],\n [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]])\n\n # filenames = get_filenames('all.txt')\n filenames = get_filenames('test.txt') + get_filenames('test_unseen5.txt')\n for filename in tqdm(filenames, ncols=80):\n filename_sdf = os.path.join(root_folder, 'sdf', filename + '.npy')\n filename_occu = os.path.join(root_folder, 'dataset', filename, 'points.npz') # np.savez writes 'points.npz', so check the real filename\n if os.path.exists(filename_occu) or (not os.path.exists(filename_sdf)):\n continue\n\n sdf = np.load(filename_sdf)\n factor = 127.0 / 128.0 # make sure the interpolation is well defined\n points_uniform = np.random.rand(num_samples, 3) * factor # in [0, 1)\n points = (points_uniform - 0.5) * (2 * shape_scale) # !!! 
rescale\n points = points.astype(np.float16)\n\n # interpolate the sdf values\n xyz = points_uniform * 128 # in [0, 127)\n xyzi = np.floor(xyz) # the integer part (N, 3)\n corners = np.expand_dims(xyzi, 1) + grid # (N, 8, 3)\n coordsf = np.expand_dims(xyz, 1) - corners # (N, 8, 3), in [-1.0, 1.0]\n weights = np.prod(1 - np.abs(coordsf), axis=-1) # (N, 8)\n\n corners = np.reshape(corners.astype(np.int64), (-1, 3))\n x, y, z = corners[:, 0], corners[:, 1], corners[:, 2]\n values = np.reshape(sdf[x, y, z], (-1, 8))\n value = np.sum(values * weights, axis=1)\n occu = value < 0\n occu = np.packbits(occu)\n\n # save\n np.savez(filename_occu, points=points, occupancies=occu)\n\n\ndef generate_test_points():\n r''' Generates points in `ply` format for testing.\n '''\n\n noise_std = 0.005\n point_sample_num = 3000\n # filenames = get_filenames('all.txt')\n filenames = get_filenames('test.txt') + get_filenames('test_unseen5.txt')\n for filename in tqdm(filenames, ncols=80):\n filename_pts = os.path.join(\n root_folder, 'dataset', filename, 'pointcloud.npz')\n filename_ply = os.path.join(\n root_folder, 'test.input', filename + '.ply')\n if not os.path.exists(filename_pts): continue\n check_folder([filename_ply])\n\n # sample points\n pts = np.load(filename_pts)\n points = pts['points'].astype(np.float32)\n noise = noise_std * np.random.randn(point_sample_num, 3)\n rand_idx = np.random.choice(points.shape[0], size=point_sample_num)\n points_noise = points[rand_idx] + noise\n\n # save ply\n vertices = []\n py_types = (float, float, float)\n npy_types = [('x', 'f4'), ('y', 'f4'), ('z', 'f4')]\n for idx in range(points_noise.shape[0]):\n vertices.append(\n tuple(dtype(d) for dtype, d in zip(py_types, points_noise[idx])))\n structured_array = np.array(vertices, dtype=npy_types)\n el = PlyElement.describe(structured_array, 'vertex')\n PlyData([el]).write(filename_ply)\n\n\ndef download_dataset():\n r''' Directly downloads the dataset.\n '''\n\n flag_file = os.path.join(root_folder, 'flags/download_dataset_succ')\n if not os.path.exists(flag_file):\n print('-> Download the dataset.')\n url = 'https://www.dropbox.com/s/mc3lrwqpmnfq3j8/shapenet.dataset.zip?dl=1'\n filename = os.path.join(root_folder, 'shapenet.dataset.zip')\n wget.download(url, filename)\n\n with zipfile.ZipFile(filename, 'r') as zip_ref:\n zip_ref.extractall(path=root_folder)\n # os.remove(filename)\n create_flag_file(flag_file)\n\n\ndef generate_dataset_unseen5():\n r'''Creates the unseen5 dataset\n '''\n\n dataset_folder = os.path.join(root_folder, 'dataset')\n unseen5_folder = os.path.join(root_folder, 'dataset.unseen5')\n if not os.path.exists(unseen5_folder):\n os.makedirs(unseen5_folder)\n for folder in ['02808440', '02773838', '02818832', '02876657', '03938244']:\n curr_folder = os.path.join(dataset_folder, folder)\n if os.path.exists(curr_folder):\n shutil.move(os.path.join(dataset_folder, folder), unseen5_folder)\n\n\ndef copy_convonet_filelists():\n r''' Copies the filelist of ConvONet to the datasets, which are needed when\n calculating the evaluation metrics.\n '''\n\n with open(os.path.join(root_folder, 'filelist/lists.txt'), 'r') as fid:\n lines = fid.readlines()\n filenames = [line.split()[0] for line in lines]\n filelist_folder = os.path.join(root_folder, 'filelist')\n for filename in filenames:\n src_name = os.path.join(filelist_folder, filename)\n des_name = src_name.replace('filelist/convonet.filelist', 'dataset') \\\n .replace('filelist/unseen5.filelist', 'dataset.unseen5')\n if not os.path.exists(des_name):\n 
shutil.copy(src_name, des_name)\n\n\ndef convert_mesh_to_sdf():\n unzip_shapenet()\n download_filelist()\n run_mesh2sdf()\n\n\ndef generate_dataset():\n sample_pts_from_mesh()\n sample_sdf()\n sample_occu()\n generate_test_points()\n generate_dataset_unseen5()\n copy_convonet_filelists()\n\n\nif __name__ == '__main__':\n eval('%s()' % args.run)\n" ]
[ [ "numpy.save", "numpy.sum", "torch.stack", "torch.rand", "torch.cat", "torch.floor", "numpy.savez", "numpy.reshape", "numpy.abs", "numpy.random.choice", "torch.from_numpy", "numpy.expand_dims", "numpy.random.rand", "torch.Tensor", "numpy.load", "torch.sum", "numpy.packbits", "numpy.floor", "numpy.random.randn", "numpy.array" ] ]
ahmedtaiye/tfeatslekan
[ "fc6bbfe9f1cfdb56b002c03f611725120be0d9c4" ]
[ "L1.py" ]
[ "\r\nfrom __future__ import print_function\r\nfrom sklearn.metrics.pairwise import cosine_similarity\r\nfrom sklearn.decomposition import TruncatedSVD\r\nfrom sklearn.pipeline import make_pipeline\r\nfrom sklearn.preprocessing import Normalizer\r\nfrom sklearn import metrics\r\nfrom sklearn.decomposition import TruncatedSVD\r\nfrom sklearn.decomposition import NMF\r\nfrom sklearn.decomposition import LatentDirichletAllocation\r\nfrom sklearn.decomposition import ProjectedGradientNMF\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.metrics.pairwise import euclidean_distances\r\nfrom sklearn.metrics import jaccard_similarity_score\r\nfrom sklearn.metrics.pairwise import paired_distances\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.cluster import KMeans, MiniBatchKMeans\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom nltk.corpus import stopwords\r\n#import codecs, difflib, Levenshtein, distance\r\nimport logging\r\nfrom optparse import OptionParser\r\nimport sys\r\nfrom time import time\r\nimport numpy as np\r\n# Display progress logs on stdout\r\nlogging.basicConfig(level=logging.INFO,\r\n format='%(asctime)s %(levelname)s %(message)s')\r\n\r\n# parse commandline arguments\r\nop = OptionParser()\r\nop.add_option(\"--lsa\",\r\n dest=\"n_components\", type=\"int\",\r\n help=\"Preprocess documents with latent semantic analysis.\")\r\nop.add_option(\"--no-minibatch\",\r\n action=\"store_false\", dest=\"minibatch\", default=True,\r\n help=\"Use ordinary k-means algorithm (in batch mode).\")\r\nop.add_option(\"--no-idf\",\r\n action=\"store_false\", dest=\"use_idf\", default=True,\r\n help=\"Disable Inverse Document Frequency feature weighting.\")\r\nop.add_option(\"--use-hashing\",\r\n action=\"store_true\", default=False,\r\n help=\"Use a hashing feature vectorizer\")\r\nop.add_option(\"--n-features\", type=int, default=10000,\r\n help=\"Maximum number of features (dimensions)\"\r\n \" to extract from text.\")\r\nop.add_option(\"--verbose\",\r\n action=\"store_true\", dest=\"verbose\", default=False,\r\n help=\"Print progress reports inside k-means algorithm.\")\r\n\r\nprint(__doc__)\r\nop.print_help()\r\ndef is_interactive():\r\n return not hasattr(sys.modules['__main__'], '__file__')\r\n\r\n# work-around for Jupyter notebook and IPython console\r\nargv = [] if is_interactive() else sys.argv[1:]\r\n(opts, args) = op.parse_args(argv)\r\nif len(args) > 0:\r\n op.error(\"this script takes no arguments.\")\r\n sys.exit(1)\r\n\r\ndef is_interactive():\r\n return not hasattr(sys.modules['__main__'], '__file__')\r\n# Bring in standard stopwords\r\nwith np.errstate(divide='ignore'):\r\n np.float64(1.0) / 0.0\r\nfrom nltk.corpus import stopwords\r\n\r\n# Bring in the default English NLTK stop words\r\nstoplist = stopwords.words('english')\r\n\r\n# Define additional stopwords in a string\r\nadditional_stopwords = \"\"\"To [ ] I you am As it can't <<...>> sincerely, . 
> - < Kenneth Lay/Corp/Enron@Enron Best regards Sincerely From Sent Original Message Q <-> * | /\\ 100% 12345678910 () \"\"\"\r\n\r\n# Split the the additional stopwords string on each word and then add\r\n# those words to the NLTK stopwords list\r\nstoplist += additional_stopwords.split()\r\n\r\nstopWords = stopwords.words('english')\r\n\r\nprint (\"\\nCalculating document similarity scores...\")\r\n\r\n# Open and read a bunch of files\r\nf = open('ken-lay_body.txt')\r\ndoc1 = str(f.read())\r\nf = open('jeff-skilling_body.txt')\r\ndoc2 = str(f.read())\r\nf = open('Richard-shapiro_body.txt')\r\ndoc3 = str(f.read())\r\nf = open('kay-mann_body.txt')\r\ndoc4 = str(f.read())\r\nf = open('Jeff-dasovich_body.txt',)\r\ndoc5 = str(f.read())\r\nf = open('tana jones_body.txt')\r\ndoc6 = str(f.read())\r\nf = open('steven kean_body.txt')\r\ndoc7 = str(f.read())\r\nf = open('shackleton sara_body.txt')\r\ndoc8 = str(f.read())\r\nf = open('james steffes_body.txt')\r\ndoc9 = str(f.read())\r\nf = open('Mark taylor_body.txt')\r\ndoc10 = str(f.read())\r\nf = open('davis pete_body.txt')\r\ndoc11 = str(f.read())\r\nf = open('Chris g_body.txt')\r\ndoc12 = str(f.read())\r\nf = open('kate symes_body.txt')\r\ndoc13 = str(f.read())\r\nf = open('Mcconnell.body.txt')\r\ndoc14 = str(f.read())\r\nf = open('kaminski_body.txt')\r\ndoc15 = str(f.read())\r\n#train_string = 'By these proceedings for judicial review the Claimant seeks to challenge the decision of the Defendant dated the 23rd of May 2014 refusing the Claimant’s application of the 3rd of January 2012 for naturalisation as a British citizen'\r\n# Construct the training set as a list\r\ntrain_set = [ doc1, doc2, doc3, doc4, doc5, doc6,doc7, doc8, doc9, doc10, doc11, doc12, doc13, doc14, doc15]\r\n\r\n# Set up the vectoriser, passing in the stop words\r\ntfidf_vectorizer = TfidfVectorizer(stop_words=stopWords)\r\nvectorizer = TfidfVectorizer(min_df = 1, stop_words = 'english')\r\n# Apply the vectoriser to the training set\r\ntfidf_matrix_train = tfidf_vectorizer.fit_transform(train_set)\r\nC = cosine_similarity(tfidf_matrix_train)\r\n#print (\"\\nSimilarity Score [*] \",cosine_similarity(tfidf_matrix_train[0:1], tfidf_matrix_train))\r\n\r\ntfidf_matrix_train.shape\r\n#print(tfidf_matrix_train.toarray())\r\n#print(vector.toarray())\r\n\r\n\r\n\r\n\r\nprint(\"Top terms per cluster:\")\r\n\r\nif opts.n_components:\r\n print(\"Performing dimensionality reduction using LSA\")\r\n t0 = time()\r\n # Vectorizer results are normalized, which makes KMeans behave as\r\n # spherical k-means for better results. 
Since LSA/SVD results are\r\n # not normalized, we have to redo the normalization.\r\n svd = TruncatedSVD(opts.n_components)\r\n normalizer = Normalizer(copy=False)\r\n lsa = make_pipeline(svd, normalizer)\r\n tfidf_matrix_train = lsa.fit_transform(tfidf_matrix_train)\r\n print(\"done in %fs\" % (time() - t0))\r\n\r\n explained_variance = svd.explained_variance_ratio_.sum()\r\n print(\"Explained variance of the SVD step: {}%\".format(\r\n int(explained_variance * 100)))\r\n\r\n print()\r\n\r\n\r\n# #############################################################################\r\n# Do the actual clustering\r\ntrue_k = 5\r\nkm = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1)\r\n\r\nkm.fit(tfidf_matrix_train)\r\n# km.fit() returns the fitted estimator, not transformed data;\r\n# keep the document-term matrix itself for the topic models below\r\ndata_vectorized = tfidf_matrix_train\r\nlabels = km.labels_\r\n\r\nif opts.minibatch:\r\n km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,\r\n init_size=1000, batch_size=1000, verbose=opts.verbose)\r\nelse:\r\n km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,\r\n verbose=opts.verbose)\r\n\r\nNUM_TOPICS = true_k # the topic models need an integer topic count, not the label array\r\nprint(\"Clustering sparse data with %s\" % km)\r\nt0 = time()\r\nkm.fit(tfidf_matrix_train)\r\nprint(\"done in %0.3fs\" % (time() - t0))\r\nprint()\r\n\r\nprint(\"Homogeneity: %0.3f\" % metrics.homogeneity_score(labels, km.labels_))\r\nprint(\"Completeness: %0.3f\" % metrics.completeness_score(labels, km.labels_))\r\nprint(\"V-measure: %0.3f\" % metrics.v_measure_score(labels, km.labels_))\r\nprint(\"Adjusted Rand-Index: %.3f\"\r\n % metrics.adjusted_rand_score(labels, km.labels_))\r\nprint(\"Silhouette Coefficient: %0.3f\"\r\n % metrics.silhouette_score(tfidf_matrix_train, km.labels_, sample_size=1000))\r\n\r\nprint()\r\n\r\n\r\n\r\n# Build a Latent Dirichlet Allocation Model\r\nlda_model = LatentDirichletAllocation(n_topics=NUM_TOPICS, max_iter=10, learning_method='online')\r\nlda_Z = lda_model.fit_transform(data_vectorized)\r\nprint(lda_Z.shape) # (NO_DOCUMENTS, NO_TOPICS)\r\n\r\n\r\n\r\npgnmf_model = ProjectedGradientNMF(n_components=NUM_TOPICS)\r\npgnmf_z = pgnmf_model.fit_transform(data_vectorized)\r\nprint(pgnmf_z.shape) # (NO_DOCUMENTS, NO_TOPICS)\r\n\r\n\r\n# Build a Non-Negative Matrix Factorization Model\r\nnmf_model = NMF(n_components=NUM_TOPICS)\r\nnmf_Z = nmf_model.fit_transform(data_vectorized)\r\nprint(nmf_Z.shape) # (NO_DOCUMENTS, NO_TOPICS)\r\n\r\n# Build a Latent Semantic Indexing Model\r\nlsi_model = TruncatedSVD(n_components=NUM_TOPICS)\r\nlsi_Z = lsi_model.fit_transform(data_vectorized)\r\nprint(lsi_Z.shape) # (NO_DOCUMENTS, NO_TOPICS)\r\n\r\n\r\n# Let's see how the first document in the corpus looks in different topic spaces\r\nprint(lda_Z[0])\r\nprint(nmf_Z[0])\r\nprint(lsi_Z[0])\r\nprint(pgnmf_z[0])\r\n\r\n# use the fitted vectoriser so that get_feature_names() works inside print_topics\r\nvectorizer = tfidf_vectorizer\r\ndef print_topics(model, vectorizer, top_n=10):\r\n for idx, topic in enumerate(model.components_):\r\n print(\"Concepts %d:\" % (idx))\r\n print([(vectorizer.get_feature_names()[i], topic[i])\r\n for i in topic.argsort()[:-top_n - 1:-1]])\r\n\r\nprint(\"LDA Model:\")\r\nprint_topics(lda_model, vectorizer)\r\nprint(\"=\" * 20)\r\n\r\n# #############################################################################\r\n# Do the actual clustering\r\n\r\nif opts.minibatch:\r\n km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,\r\n init_size=10000, batch_size=1000, verbose=opts.verbose)\r\nelse:\r\n km = KMeans(n_clusters=true_k, 
init='k-means++', max_iter=100, n_init=1,\r\n verbose=opts.verbose)\r\n\r\nprint(\"Clustering sparse data with %s\" % km)\r\nt0 = time()\r\nkm.fit(tfidf_matrix_train)\r\nprint(\"done in %0.3fs\" % (time() - t0))\r\nprint()\r\n\r\nprint(\"Homogeneity: %0.3f\" % metrics.homogeneity_score(labels, km.labels_))\r\nprint(\"Completeness: %0.3f\" % metrics.completeness_score(labels, km.labels_))\r\nprint(\"V-measure: %0.3f\" % metrics.v_measure_score(labels, km.labels_))\r\nprint(\"Adjusted Rand-Index: %.3f\"\r\n % metrics.adjusted_rand_score(labels, km.labels_))\r\nprint(\"Silhouette Coefficient: %0.3f\"\r\n % metrics.silhouette_score(tfidf_matrix_train, km.labels_, sample_size=1000))\r\n\r\nprint()\r\n\r\nprint(\"NMF Model:\")\r\nprint_topics(nmf_model, vectorizer)\r\nprint(\"=\" * 20)\r\n\r\n# #############################################################################\r\n# Do the actual clustering\r\n\r\nif opts.minibatch:\r\n km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,\r\n init_size=10000, batch_size=1000, verbose=opts.verbose)\r\nelse:\r\n km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,\r\n verbose=opts.verbose)\r\n\r\nprint(\"Clustering sparse data with %s\" % km)\r\nt0 = time()\r\nkm.fit(tfidf_matrix_train)\r\nprint(\"done in %0.3fs\" % (time() - t0))\r\nprint()\r\n\r\nprint(\"Homogeneity: %0.3f\" % metrics.homogeneity_score(labels, km.labels_))\r\nprint(\"Completeness: %0.3f\" % metrics.completeness_score(labels, km.labels_))\r\nprint(\"V-measure: %0.3f\" % metrics.v_measure_score(labels, km.labels_))\r\nprint(\"Adjusted Rand-Index: %.3f\"\r\n % metrics.adjusted_rand_score(labels, km.labels_))\r\nprint(\"Silhouette Coefficient: %0.3f\"\r\n % metrics.silhouette_score(tfidf_matrix_train, km.labels_, sample_size=1000))\r\n\r\nprint()\r\n\r\n\r\nprint(\"PGNMF Model:\")\r\nprint_topics(pgnmf_model, vectorizer)\r\nprint(\"=\" * 20)\r\n\r\n# #############################################################################\r\n# Do the actual clustering\r\n\r\nif opts.minibatch:\r\n km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,\r\n init_size=10000, batch_size=1000, verbose=opts.verbose)\r\nelse:\r\n km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,\r\n verbose=opts.verbose)\r\n\r\nprint(\"Clustering sparse data with %s\" % km)\r\nt0 = time()\r\nkm.fit(tfidf_matrix_train)\r\nprint(\"done in %0.3fs\" % (time() - t0))\r\nprint()\r\n\r\nprint(\"Homogeneity: %0.3f\" % metrics.homogeneity_score(labels, km.labels_))\r\nprint(\"Completeness: %0.3f\" % metrics.completeness_score(labels, km.labels_))\r\nprint(\"V-measure: %0.3f\" % metrics.v_measure_score(labels, km.labels_))\r\nprint(\"Adjusted Rand-Index: %.3f\"\r\n % metrics.adjusted_rand_score(labels, km.labels_))\r\nprint(\"Silhouette Coefficient: %0.3f\"\r\n % metrics.silhouette_score(tfidf_matrix_train, km.labels_, sample_size=1000))\r\n\r\nprint()\r\n\r\n\r\nif not opts.use_hashing:\r\n print(\"Top terms per cluster:\")\r\n\r\n if opts.n_components:\r\n original_space_centroids = svd.inverse_transform(km.cluster_centers_)\r\n order_centroids = original_space_centroids.argsort()[:, ::-1]\r\n else:\r\n order_centroids = km.cluster_centers_.argsort()[:, ::-1]\r\n\r\n terms = tfidf_vectorizer .get_feature_names()\r\n for i in range(true_k):\r\n print(\"Cluster %d:\" % i, end='')\r\n for ind in order_centroids[i, :10]:\r\n print(' %s' % terms[ind], end='')\r\n print()\r\n\r\n" ]
[ [ "sklearn.preprocessing.Normalizer", "sklearn.metrics.v_measure_score", "sklearn.decomposition.ProjectedGradientNMF", "sklearn.metrics.adjusted_rand_score", "sklearn.feature_extraction.text.TfidfVectorizer", "sklearn.decomposition.TruncatedSVD", "sklearn.decomposition.LatentDirichletAllocation", "sklearn.cluster.KMeans", "numpy.errstate", "sklearn.pipeline.make_pipeline", "sklearn.metrics.homogeneity_score", "sklearn.metrics.completeness_score", "sklearn.metrics.silhouette_score", "sklearn.metrics.pairwise.cosine_similarity", "sklearn.decomposition.NMF", "numpy.float64", "sklearn.cluster.MiniBatchKMeans" ] ]
hashstat/cvxpy
[ "20d667ebe8614821fa38e41b1e333257512d9594" ]
[ "examples/extensions/feature_selection.py" ]
[ "\"\"\"\nCopyright 2013 Steven Diamond\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom cvxpy import Card, norm, Minimize, Parameter, pos, Problem, Variable\nfrom mixed_integer import *\nimport numpy as np\n\n# Feature selection on a linear kernel SVM classifier.\n# Uses the Alternating Direction Method of Multipliers\n# with a (non-convex) cardinality constraint.\n\n# Generate data.\nnp.random.seed(1)\nN = 50\nM = 40\nn = 10\ndata = []\nfor i in range(N):\n data += [(1, np.random.normal(1.0, 2.0, (n, 1)))]\nfor i in range(M):\n data += [(-1, np.random.normal(-1.0, 2.0, (n, 1)))]\n\n# Construct problem.\ngamma = Parameter(nonneg=True)\ngamma.value = 0.1\n# 'a' is a variable constrained to have at most 6 non-zero entries.\na = Card(n, k=6)\nb = Variable()\n\nslack = [pos(1 - label*(sample.T*a - b)) for (label, sample) in data]\nobjective = Minimize(norm(a, 2) + gamma*sum(slack))\np = Problem(objective)\n# Extensions can attach new solve methods to the CVXPY Problem class.\np.solve(method=\"admm\")\n\n# Count misclassifications.\nerror = 0\nfor label, sample in data:\n if not label*(a.value.T*sample - b.value)[0] >= 0:\n error += 1\n\nprint(\"%s misclassifications\" % error)\nprint(a.value)\nprint(b.value)\n" ]
[ [ "numpy.random.normal", "numpy.random.seed" ] ]
NuTufts/chroma_lartpc
[ "ea6d1a62d22eeeaac069efdef1068a56be683fcc" ]
[ "chroma/uboone/materials.py" ]
[ "import os,sys\nimport numpy as np\n\n# This module has functions and defintions to load the optical \n# properties required by the MicroBooNE detector\n\nmaterialnames = [\"LAr\", # liquid argon [ may have its own module one day ]\n \"ArGas\", # gaseous argon\n \"Titanium\", # for the wires (fancy)\n \"STEEL_STAINLESS_Fe7Cr2Ni\", # cryostat walls\n \"Acrylic\", # wavelength shifting plates\n \"Glass\", # pmt window\n \"bialkali\", # pmt photocathode\n \"Vacuum\",\n \"PU_foam_light\", # mastic insulation. Irrelevant.\n \"PU_foam_dense\", # mastic insulation. Irrelevant.\n \"Air\", # lab air, Irrelevant\n \"G10\", # fiberglass\n \"Concrete\",] # Irrelevant\n# --------------------------------------------------------------------------------\n# what needs to be specified.\n# Materials need:\n# - refractive_index (can be function of wavelength)\n# - absorption_length (function of wavelength)\n# - scattering_length (function of wavelength)\n# See chroma.geometry: class Material for more information\n# --------------------------------------------------------------------------------\n# LAr: Liquid Argon\n# * Refractice index from\n# Sinnock, A. C. Refractive indices of the condensed rare gases, argon, krypton and xenon. \n# Journal of Physics C: Solid State Physics 13, 2375 (1980).\n# Measured at 83 K at 546.1 nm\n# Values at 260 and 400 are dummpy values\n# * Scattering Length from\n# Ishida et al. NIMA 384 (1997) 380-386: 66+/-3 cm\n# [USED] Seidel et al. NIMA 489 (2002) 189–194: 90 cm (calculated)\n# * Absorption Length\n# Going to be a function of puity and other inputs. \n# 80.9 cm from (from C. Rubbia)\n# 2000.0 cm from LArSoft\n# refractive from LArSoft\n#lar_refractive_index = np.array( [ (260.0, 1.2316),\n# (400.0, 1,2316),\n# (546.1, 1.2316) ] )\n# below in mm\nlar_refractive_index = np.array( [ (114.1, 1.60),\n (117.4, 1.56),\n (122.5, 1.45),\n (125.2, 1.39),\n (135.3, 1.35),\n (160.2, 1.29),\n (200.3, 1.26),\n (278.7, 1.24),\n (401.3, 1.23),\n (681.3, 1.23) ] )\nlar_scattering_length = np.array( [ (117.3, 100.0),\n (124.6, 380.0),\n (128.2, 900.0),\n (145.9, 1920.0),\n (164.7, 4100.0),\n (190.5, 9300.0),\n (217.9, 18500.0),\n (250.5, 37900.0) ] )\n\ndef load_lar_material_info( matclass ):\n matclass.set( 'refractive_index', lar_refractive_index[:,1], lar_refractive_index[:,0] )\n matclass.set( 'scattering_length', lar_scattering_length[:,1], lar_scattering_length[:,0] )\n matclass.set( 'absorption_length', 20000.0 ) # mm\n\n# --------------------------------------------------------------------------------\n# Gaseous Argon\ndef load_argas_material_info( matclass ):\n matclass.set('refractive_index', 1.0)\n matclass.absorption_length = np.array( 1.0e6 )\n matclass.set('scattering_length', 1000.0 )\n\n# --------------------------------------------------------------------------------\n# Acrylic\n# This can vary based on mnufacturer, even batch to batch...especially bellow 440 nm\n# We use data from RPT #1 from MiniClean report in \n# Bodmer et al., http://arxiv.org/pdf/1310.6454v2.pdf\n\ndef load_acrylic_material_info( matclass ):\n matclass.set('refractive_index', 1.49)\n matclass.absorption_length = np.array( [(375.0,29.0), (405.0, 155.0), (440.0, 261.0), (543, 3360.0), (632.0, 1650.0), (800, 1650.0)] )\n matclass.set('scattering_length', 1000.0 )\n\n# --------------------------------------------------------------------------------\n# Matclass\n\ndef load_glass_material_info( matclass ):\n # Taken from chroma.demo.optics as a starting point\n matclass.set('refractive_index', 1.49)\n 
matclass.absorption_length = \\\n np.array([(200, 0.1e-6), (300, 0.1e-6), (330, 1000.0), (500, 2000.0), (600, 1000.0), (770, 500.0), (800, 0.1e-6)])\n matclass.set('scattering_length', 1e6)\n\n# --------------------------------------------------------------------------------\n# Vacuum\n\ndef load_vacuum_material_info( matclass ):\n # Taken from chroma.demo.optics as a starting point\n matclass.set('refractive_index', 1.0)\n matclass.set('absorption_length', 1.0e6)\n matclass.set('scattering_length', 1.0e6)\n\n# --------------------------------------------------------------------------------\n# Dummy values for non-transmissive materials\n\ndef load_stainless_material_info( matclass ):\n # using dummy values, as we never expect photons to be propagating here\n matclass.set( 'refractive_index', 1.0 )\n matclass.set( 'scattering_length', 1.0 )\n matclass.set( 'absorption_length', 1.0 )\n\ndef load_titanium_material_info( matclass ):\n # using dummy values, as we never expect photons to be propagating here\n matclass.set( 'refractive_index', 1.0 )\n matclass.set( 'scattering_length', 1.0 )\n matclass.set( 'absorption_length', 1.0 )\n\ndef load_bialkali_material_info( matclass ):\n # using dummy values, as we never expect photons to be propagating here\n matclass.set( 'refractive_index', 1.0 )\n matclass.set( 'scattering_length', 1.0 )\n matclass.set( 'absorption_length', 1.0 )\n\ndef load_concrete_material_info( matclass ):\n # using dummy values, as we never expect photons to be propagating here\n matclass.set( 'refractive_index', 1.0 )\n matclass.set( 'scattering_length', 1.0 )\n matclass.set( 'absorption_length', 1.0 )\n\ndef load_air_material_info( matclass ):\n # using dummy values, as we never expect photons to be propagating here\n matclass.set( 'refractive_index', 1.0 )\n matclass.set( 'scattering_length', 1.0 )\n matclass.set( 'absorption_length', 1.0 )\n\ndef load_pufoam_material_info( matclass ):\n # using dummy values, as we never expect photons to be propagating here\n matclass.set( 'refractive_index', 1.0 )\n matclass.set( 'scattering_length', 1.0 )\n matclass.set( 'absorption_length', 1.0 )\n\ndef load_G10_material_info( matclass ):\n # using dummy values, as we never expect photons to be propagating here\n matclass.set( 'refractive_index', 1.0 )\n matclass.set( 'scattering_length', 1.0 )\n matclass.set( 'absorption_length', 1.0 )\n\ndef load_dummy_material_info( matclass ):\n # using dummy values, as we never expect photons to be propagating here\n matclass.set( 'refractive_index', 1.0 )\n matclass.set( 'scattering_length', 1.0 )\n matclass.set( 'absorption_length', 1.0 )\n\n\n# --------------------------------------------------------------------------------\ndef load_uboone_materials( c2cclass ):\n \"\"\"\n c2cclass: collada_to_chroma class instance\n \"\"\"\n if not isinstance(c2cclass, ColladaToChroma):\n raise TypeError('input to function should be instance of ColladaToChroma')\n loaders = { \"LAr\":load_lar_material_info,\n \"ArGas\":load_argas_material_info,\n \"Titanium\":load_titanium_material_info,\n \"Acrylic\":load_acrylic_material_info,\n \"Glass\":load_glass_material_info,\n \"bialkali\":load_bialkali_material_info,\n \"Vacuum\":load_vacuum_material_info,\n \"STEEL_STAINLESS_Fe7Cr2Ni\":load_stainless_material_info,\n \"PU_foam_light\":load_pufoam_material_info,\n \"PU_foam_dense\":load_pufoam_material_info,\n \"Air\":load_air_material_info,\n \"Concrete\":load_concrete_material_info,\n \"G10\":load_G10_material_info }\n \ndef clean_material_name( matname ):\n # pointer addresses attached to names\n return matname.split(\"0x\")[0]\n" ]
[ [ "numpy.array" ] ]
Mu-L/TheAlgorithmsOfPython
[ "2d3d660155241113b23e4ed810e05479b2fc4bba" ]
[ "machine_learning/polymonial_regression.py" ]
[ "import matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.linear_model import LinearRegression\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\n\n# Fitting Polynomial Regression to the dataset\nfrom sklearn.preprocessing import PolynomialFeatures\n\n# Importing the dataset\ndataset = pd.read_csv(\n \"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/\"\n \"position_salaries.csv\"\n)\nX = dataset.iloc[:, 1:2].values\ny = dataset.iloc[:, 2].values\n\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)\n\n\npoly_reg = PolynomialFeatures(degree=4)\nX_poly = poly_reg.fit_transform(X)\npol_reg = LinearRegression()\npol_reg.fit(X_poly, y)\n\n\n# Visualizing the Polymonial Regression results\ndef viz_polymonial():\n plt.scatter(X, y, color=\"red\")\n plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color=\"blue\")\n plt.title(\"Truth or Bluff (Linear Regression)\")\n plt.xlabel(\"Position level\")\n plt.ylabel(\"Salary\")\n plt.show()\n return\n\n\nif __name__ == \"__main__\":\n viz_polymonial()\n\n # Predicting a new result with Polymonial Regression\n pol_reg.predict(poly_reg.fit_transform([[5.5]]))\n # output should be 132148.43750003\n" ]
[ [ "sklearn.preprocessing.PolynomialFeatures", "pandas.read_csv", "sklearn.linear_model.LinearRegression", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.scatter" ] ]
AleksaC/gym-snake
[ "216a1af7cc1edd3d95be8a5ae2effc5f420452b0" ]
[ "gym-snake/gym_snake/envs/snake_env.py" ]
[ "from collections import deque\nimport time\n\nimport gym\nimport numpy as np\n\nfrom gym import spaces, logger\nfrom gym.utils import seeding\nfrom gym.envs.classic_control import rendering\n\n\nclass SnakeEnv(gym.Env):\n metadata = {\n \"render.modes\": [\"human\", \"rgb_array\"],\n \"video.frames_per_second\": \"35\"\n }\n\n def __init__(self, height=20, width=20, scaling_factor=6,\n starting_position=(7, 5), snake_size=3, direction=(0, 1),\n time_penalty=-0.01, food_reward=1, loss_penalty=-1, win_reward=10):\n self.action_space = spaces.Discrete(3)\n self.ACTIONS = [\"STRAIGHT\", \"LEFT\", \"RIGHT\"]\n self.observation_space = spaces.Box(0, 2, (height + 2, width + 2), dtype=\"uint8\")\n self.viewer = None\n self.seed()\n\n # rewards and penalties\n self.time_penalty = time_penalty\n self.food_reward = food_reward\n self.loss_penalty = loss_penalty\n self.win_reward = win_reward\n if loss_penalty > 0 or time_penalty > 0:\n logger.warn(\"Values of penalties should not be positive.\")\n\n # initialize size and position properties\n self.height = height\n self.width = width\n if height + 1 > starting_position[0] > 0 and width + 1 > starting_position[1] > snake_size:\n self.starting_position = starting_position\n else:\n raise ValueError(\"starting_position of snake should be in range (0 - height + 1, snake_size - width + 1)\")\n self.scaling_factor = scaling_factor\n self.initial_size = snake_size\n self.snake_size = snake_size\n self.max_size = height * width\n self.state = np.zeros((height + 2, width + 2), dtype=\"uint8\")\n self.game_over = False\n\n # set bounds of the environment\n self.state[:, 0] = self.state[:, -1] = 1\n self.state[0, :] = self.state[-1, :] = 1\n\n # initialize snake properties\n self.initial_direction = direction\n self.direction = direction\n self.snake = deque()\n\n # initialize position of the snake\n self._init_field(starting_position, snake_size)\n\n # place food on the field\n self.food = self._generate_food()\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def _init_field(self, starting_position, snake_size):\n y, x = starting_position\n for i in range(snake_size):\n self.state[y][x] = 1\n self.snake.appendleft((y, x))\n x -= 1\n\n def _generate_food(self):\n y, x = self.np_random.randint(self.height), self.np_random.randint(self.width)\n while self.state[y][x]:\n y, x = self.np_random.randint(self.height), self.np_random.randint(self.width)\n self.state[y][x] = 2\n\n return y, x\n\n def _check_for_collision(self, y, x):\n done = False\n pop = True\n reward = self.time_penalty\n\n if self.state[y][x]:\n if self.state[y][x] == 2:\n pop = False\n reward += self.food_reward\n self.snake_size += 1\n if self.snake_size == self.max_size:\n reward += self.win_reward\n self.game_over = done = True\n self.food = self._generate_food()\n else:\n reward += self.loss_penalty\n self.game_over = done = True\n pop = False\n\n self.state[y][x] = 1\n\n return reward, done, pop\n\n def step(self, action):\n y, x = self.snake[-1]\n if action == 0:\n y += self.direction[0]\n x += self.direction[1]\n elif action == 1:\n if self.direction[0] == 0:\n self.direction = (-self.direction[1], 0)\n y += self.direction[0]\n else:\n self.direction = (0, self.direction[0])\n x += self.direction[1]\n elif action == 2:\n if self.direction[0] == 0:\n self.direction = (self.direction[1], 0)\n y += self.direction[0]\n else:\n self.direction = (0, -self.direction[0])\n x += self.direction[1]\n else:\n raise ValueError(\"Action can only be 0, 
1 or 2\")\n\n if self.game_over:\n raise RuntimeError(\"You're calling step() even though the environment has returned done = True.\"\n \"You should restart the environment after receiving done = True\")\n\n reward, done, pop = self._check_for_collision(y, x)\n\n if not done:\n self.snake.append((y, x))\n\n if pop:\n y, x = self.snake.popleft()\n self.state[y][x] = 0\n\n observation = self.state\n\n info = {\n \"snake\": self.snake,\n \"snake_size\": self.snake_size,\n \"direction\": self.direction,\n \"food\": self.food\n }\n\n return observation, reward, done, info\n\n def reset(self):\n self.game_over = False\n self.direction = self.initial_direction\n\n while self.snake:\n y, x = self.snake.pop()\n self.state[y][x] = 0\n\n self.state[self.food[0]][self.food[1]] = 0\n\n self._init_field(self.starting_position, self.initial_size)\n self.food = self._generate_food()\n self.snake_size = self.initial_size\n\n return self.state\n\n def _to_rgb(self, scaling_factor):\n scaled_grid = np.zeros(((self.height + 2) * scaling_factor, (self.width + 2) * scaling_factor), dtype=\"uint8\")\n scaled_grid[:, :scaling_factor] = scaled_grid[:, -scaling_factor:] = 255\n scaled_grid[:scaling_factor, :] = scaled_grid[-scaling_factor:, :] = 255\n\n y, x = self.food\n scaled_y, scaled_x = y * scaling_factor, x * scaling_factor\n scaled_grid[scaled_y : scaled_y + scaling_factor, scaled_x : scaled_x + scaling_factor] = 255\n\n for (y, x) in self.snake:\n scaled_y, scaled_x = y * scaling_factor, x * scaling_factor\n scaled_grid[scaled_y : scaled_y + scaling_factor, scaled_x : scaled_x + scaling_factor] = 255\n\n img = np.empty(((self.height + 2) * scaling_factor, (self.width + 2) * scaling_factor, 3), dtype=\"uint8\")\n img[:, :, 0] = img[:, :, 1] = img[:, :, 2] = scaled_grid\n\n return img\n\n def render(self, mode=\"human\", close=False):\n img = self._to_rgb(self.scaling_factor)\n if mode == \"rgb_array\":\n return img\n elif mode == \"human\":\n if self.viewer is None:\n self.viewer = rendering.SimpleImageViewer()\n self.viewer.imshow(img)\n time.sleep(0.027)\n\n return self.viewer.isopen\n\n def close(self):\n if self.viewer:\n self.viewer.close()\n self.viewer = None\n" ]
[ [ "numpy.empty", "numpy.zeros" ] ]
moghadas76/test_bigcity
[ "607b9602c5b1113b23e1830455e174b0901d7558", "607b9602c5b1113b23e1830455e174b0901d7558" ]
[ "libcity/model/traffic_speed_prediction/STAGGCN.py", "test/test_gwnet.py" ]
[ "import math\nfrom logging import getLogger\nfrom typing import Optional\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.utils import weight_norm\n\nfrom libcity.model import loss\nfrom libcity.model.abstract_traffic_state_model import AbstractTrafficStateModel\n\n\ndef remove_self_loops(edge_index: torch.Tensor):\n return edge_index[:, edge_index[0] != edge_index[1]]\n\n\ndef maybe_num_nodes(edge_index: torch.Tensor, num_nodes: Optional[int] = None):\n if num_nodes is not None:\n return num_nodes\n else:\n return int(edge_index.max()) + 1\n\n\ndef add_self_loops(edge_index: torch.Tensor, num_nodes: Optional[int] = None):\n return torch.cat((edge_index,\n torch.arange(maybe_num_nodes(edge_index, num_nodes))\n .repeat(2, 1)\n .to(edge_index.device)), dim=1)\n\n\ndef softmax(x: torch.Tensor, index: torch.Tensor, num_nodes: Optional[int] = None, dim: int = 0):\n N = maybe_num_nodes(index, num_nodes)\n x_max = scatter(x, index, dim, dim_size=N, reduce='max').index_select(dim, index)\n out = (x - x_max).exp()\n out_sum = scatter(out, index, dim, dim_size=N, reduce='sum').index_select(dim, index)\n return out / out_sum\n\n\nclass STAGGCN(AbstractTrafficStateModel):\n def __init__(self, config, data_feature):\n super().__init__(config, data_feature)\n self._scaler = self.data_feature.get('scaler')\n self.adj_mx = self.data_feature.get('adj_mx', 1)\n self.num_nodes = self.data_feature.get('num_nodes', 1)\n self.input_dim = self.data_feature.get('feature_dim', 1)\n self.output_dim = self.data_feature.get('output_dim', 1)\n self.ext_dim = self.data_feature.get('ext_dim', 1)\n\n # 以下两项是STAG-GCN对数据集额外进行预处理得到的边关系数据\n # 对数据集预处理得到的空间邻接边集\n self.edge_index = self.data_feature.get('edge_index', torch.tensor([[], []], dtype=torch.long)) # 空间邻接边\n # 对数据集预处理得到的语义邻接边集\n self.dtw_edge_index = self.data_feature.get('dtw_edge_index', torch.tensor([[], []], dtype=torch.long)) # 语义邻接边\n\n self._logger = getLogger()\n self.device = config.get('device', torch.device('cpu'))\n self.input_window = config.get('input_window', 1)\n self.output_window = config.get('output_window', 1)\n self.graph_dim = config.get('graph_dim', 32)\n self.tcn_dim = config.get('tcn_dim', [10])\n self.attn_head = config.get('atten_head', 3)\n self.choice = config.get('choice', [1, 1, 1])\n self.batch_size = config.get('batch_size', 64)\n\n self.edge_index = self.edge_index.to(self.device)\n self.dtw_edge_index = self.dtw_edge_index.to(self.device)\n\n self.model = STAGGCNModel(input_dim=self.input_dim,\n output_dim=self.output_dim,\n node_num=self.num_nodes,\n seq_len=self.input_window,\n pred_len=self.output_window,\n graph_dim=self.graph_dim,\n tcn_dim=self.tcn_dim,\n attn_head=self.attn_head,\n choice=self.choice).to(self.device)\n\n def forward(self, batch):\n x = batch['X'] # shape = (batch_size, input_length, num_nodes, input_dim)\n\n # [batch_size, pred_len, num_nodes, output_dim]\n return self.model(x, self.edge_index, self.dtw_edge_index)\n\n def calculate_loss(self, batch):\n y_true = batch['y']\n y_predicted = self.predict(batch)\n y_true = self._scaler.inverse_transform(y_true[..., :self.output_dim])\n y_predicted = self._scaler.inverse_transform(y_predicted[..., :self.output_dim])\n return loss.masked_mae_torch(y_predicted, y_true)\n\n def predict(self, batch):\n # one-inference multi-step prediction\n return self.forward(batch)\n\n\nclass STAGGCNModel(nn.Module):\n def __init__(self, input_dim=1, output_dim=1,\n node_num=325, seq_len=12, pred_len=6, graph_dim=32,\n 
tcn_dim=[10], attn_head=4, choice=[1, 1, 1]):\n super(STAGGCNModel, self).__init__()\n self.node_num = node_num\n self.seq_len = seq_len\n self.pred_len = pred_len\n self.graph_dim = graph_dim\n # self.output_dim = seq_len + np.sum(choice) * graph_dim\n self.pred_len_raw = np.sum(choice) * graph_dim\n\n self.STCell = STCell(node_num, seq_len, graph_dim, tcn_dim,\n choice=choice, attn_head=attn_head,\n input_dim=input_dim, output_dim=output_dim)\n self.output_linear = nn.Linear(in_features=self.pred_len_raw, out_features=self.pred_len)\n # self.output_linear_0 = nn.Linear(in_features=self.graph_dim, out_features=256)\n # self.output_linear_1 = nn.Linear(in_features=256, out_features=self.pred_len)\n\n def forward(self, x, edge_index, dtw_edge_index):\n # x: [batch_size, seq_len, num_nodes, input_dim]\n # st_output: [batch_size, num_nodes, output_dim, sum(choice)*graph_dim ==\n # [batch_size, num_nodes, output_dim, pred_len_raw]]\n st_output = self.STCell(x, edge_index, dtw_edge_index)\n output = st_output\n\n # [batch_size, num_nodes, output_dim, pred_len]\n output = self.output_linear(output)\n # output = F.relu(self.output_linear_0(output))\n # output = self.output_linear_1(output)\n # output = torch.reshape(output, (-1, self.node_num, self.pred_len))\n\n # [batch_size, pred_len, num_nodes, output_dim]\n return output.permute(0, 3, 1, 2).contiguous()\n\n\nclass Chomp1d(nn.Module):\n def __init__(self, chomp_size):\n super(Chomp1d, self).__init__()\n self.chomp_size = chomp_size\n\n def forward(self, x):\n return x[:, :, :-self.chomp_size].contiguous()\n\n\nclass TemporalBlock(nn.Module):\n def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding, dropout=0.2):\n super(TemporalBlock, self).__init__()\n self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size,\n stride=stride, padding=padding, dilation=dilation))\n self.chomp1 = Chomp1d(padding)\n self.relu1 = nn.ReLU()\n self.dropout1 = nn.Dropout(dropout)\n\n self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs, kernel_size,\n stride=stride, padding=padding, dilation=dilation))\n self.chomp2 = Chomp1d(padding)\n self.relu2 = nn.ReLU()\n self.dropout2 = nn.Dropout(dropout)\n\n self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1, self.dropout1,\n self.conv2, self.chomp2, self.relu2, self.dropout2)\n self.downsample = nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None\n self.relu = nn.ReLU()\n self.init_weights()\n\n def init_weights(self):\n self.conv1.weight.data.normal_(0, 0.01)\n self.conv2.weight.data.normal_(0, 0.01)\n if self.downsample is not None:\n self.downsample.weight.data.normal_(0, 0.01)\n\n def forward(self, x):\n # x: [batch_size*input_dim*num_nodes, n_inputs, seq_len]\n # self.conv1(x): [batch_size*input_dim*num_nodes, n_outputs, ...]\n # self.chomp1(self.conv2(x)): [batch_size*input_dim*num_nodes, n_outputs, seq_len]\n # return: [batch_size*input_dim*num_nodes, n_outputs, seq_len]\n out = self.net(x)\n res = x if self.downsample is None else self.downsample(x)\n return self.relu(out + res)\n\n\nclass TemporalConvNet(nn.Module):\n def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.2):\n super(TemporalConvNet, self).__init__()\n layers = []\n num_levels = len(num_channels)\n for i in range(num_levels):\n dilation_size = 2 ** i\n in_channels = num_inputs if i == 0 else num_channels[i - 1]\n out_channels = num_channels[i]\n layers += [TemporalBlock(in_channels, out_channels, kernel_size, stride=1, dilation=dilation_size,\n padding=(kernel_size - 
1) * dilation_size, dropout=dropout)]\n\n self.network = nn.Sequential(*layers)\n\n def forward(self, x):\n # x: [batch_size*num_nodes, input_dim, seq_len]\n # return: [batch_size*num_nodes, output_dim*num_channels[-1], seq_len]\n return self.network(x)\n\n\nclass LearnedGCN(nn.Module):\n def __init__(self, node_num, in_feature, out_feature):\n super(LearnedGCN, self).__init__()\n self.node_num = node_num\n self.in_feature = in_feature\n self.out_feature = out_feature\n\n self.source_embed = nn.Parameter(torch.Tensor(self.node_num, 10))\n self.target_embed = nn.Parameter(torch.Tensor(10, self.node_num))\n self.linear = nn.Linear(self.in_feature, self.out_feature)\n self.reset_parameters()\n\n def reset_parameters(self):\n stdv = 1. / math.sqrt(self.source_embed.size(0))\n self.source_embed.data.uniform_(-stdv, stdv)\n self.target_embed.data.uniform_(-stdv, stdv)\n\n def forward(self, input):\n learned_matrix = F.softmax(F.relu(torch.mm(self.source_embed, self.target_embed)), dim=1)\n output = learned_matrix.matmul(input)\n output = self.linear(output)\n return output\n\n\nclass GATConv(nn.Module):\n def __init__(self,\n in_channels: int, out_channels: int,\n heads: int = 1, concat: bool = True,\n negative_slope: float = 0.2, dropout: float = 0.0,\n add_self_loops: bool = True, bias: bool = True):\n super(GATConv, self).__init__()\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.attn_heads = heads\n\n self.negative_slope = negative_slope\n self.dropout = dropout\n\n self.bias = bias\n self.concat = concat\n self.add_self_loops = add_self_loops\n\n self.linear = nn.Linear(self.in_channels, self.attn_heads * self.out_channels, bias=False)\n self.attn_j = nn.Parameter(torch.Tensor(1, self.attn_heads, self.out_channels))\n self.attn_i = nn.Parameter(torch.Tensor(1, self.attn_heads, self.out_channels))\n\n if bias and concat:\n self.bias = nn.Parameter(torch.Tensor(self.attn_heads * self.out_channels))\n elif bias and not concat:\n self.bias = nn.Parameter(torch.Tensor(self.out_channels))\n else:\n self.register_parameter('bias', None)\n\n self._alpha = None\n\n self.init_weights()\n\n def init_weights(self):\n self._glorot(self.linear.weight)\n self._glorot(self.attn_j)\n self._glorot(self.attn_i)\n self._zeros(self.bias)\n\n @staticmethod\n def _glorot(t: torch.Tensor):\n if t is None:\n return\n stdv = math.sqrt(6. 
/ (t.size(-2) * t.size(-1)))\n t.data.uniform_(-stdv, stdv)\n\n @staticmethod\n def _zeros(t: torch.Tensor):\n if t is None:\n return\n t.data.fill_(0.)\n\n def forward(self, x: torch.Tensor, edge_index: torch.Tensor):\n num_nodes = x.size(0)\n\n edge_index = remove_self_loops(edge_index)\n edge_index = add_self_loops(edge_index, num_nodes=num_nodes)\n\n edge_index_j, edge_index_i = edge_index\n\n # x: [num_nodes, num_features]\n # [num_edges, attn_heads, out_channels]\n x_j = self.linear(x).view(-1, self.attn_heads, self.out_channels)[edge_index_j]\n x_i = self.linear(x).view(-1, self.attn_heads, self.out_channels)[edge_index_i]\n\n # [num_edges, attn_heads]\n alpha_j = (x_j * self.attn_j).sum(dim=-1)[edge_index_j]\n alpha_i = (x_i * self.attn_i).sum(dim=-1)[edge_index_i]\n\n # message passing\n # [num_edges, attn_heads]\n alpha = alpha_j + alpha_i\n alpha = F.leaky_relu(alpha, self.negative_slope)\n alpha = softmax(alpha, edge_index_i, x_i.size(0))\n alpha = F.dropout(alpha, p=self.dropout, training=self.training)\n # [num_edges, attn_heads, out_channels]\n message = x_j * alpha.unsqueeze(-1)\n\n out = scatter(message, edge_index_i, dim=0, reduce='add')\n\n if self.concat:\n out = out.view(-1, self.attn_heads * self.out_channels)\n else:\n out = out.mean(dim=1)\n if self.bias is not None:\n out += self.bias\n\n return out\n\n\nclass STCell(nn.Module):\n def __init__(self, node_num=524, seq_len=12, graph_dim=16, tcn_dim=[10],\n choice=[1, 1, 1], attn_head=2, input_dim=1, output_dim=1):\n super(STCell, self).__init__()\n self.node_num = node_num\n self.seq_len = seq_len\n self.graph_dim = graph_dim\n self.tcn_dim = tcn_dim\n self.pred_len_raw = np.sum(choice) * graph_dim\n self.choice = choice\n # self.jklayer = JumpingKnowledge(\"max\")\n # self.jklayer = JumpingKnowledge(\"lstm\", self.graph_dim, 1)\n\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.in_features = seq_len * input_dim\n\n self.seq_linear = nn.Linear(in_features=self.input_dim * seq_len, out_features=self.input_dim * seq_len)\n\n if choice[0] == 1:\n print(\"[TCN]\")\n print(\"node_num:\", node_num, \"\\tattn_head:\", attn_head)\n # one node of one input feature per embedding element\n self.self_attn = nn.MultiheadAttention(embed_dim=node_num * input_dim, num_heads=attn_head)\n # expand convolution output_dimension by output_dim\n self.tcn = TemporalConvNet(num_inputs=self.input_dim,\n num_channels=[x * self.output_dim for x in self.tcn_dim])\n self.tlinear = nn.Linear(in_features=self.output_dim * self.tcn_dim[-1] * self.seq_len,\n out_features=self.output_dim * self.graph_dim)\n\n if choice[1] == 1:\n print(\"[SP]\")\n self.sp_origin = nn.Linear(in_features=self.input_dim * seq_len, out_features=self.output_dim * graph_dim)\n self.sp_gconv1 = GATConv(self.input_dim * seq_len, self.output_dim * graph_dim, heads=3, concat=False)\n self.sp_gconv2 = GATConv(self.output_dim * graph_dim, self.output_dim * graph_dim, heads=3, concat=False)\n self.sp_gconv3 = GATConv(self.output_dim * graph_dim, self.output_dim * graph_dim, heads=3, concat=False)\n self.sp_gconv4 = GATConv(self.output_dim * graph_dim, self.output_dim * graph_dim, heads=1, concat=False)\n # self.sp_gconv5 = GATConv(graph_dim, graph_dim, heads = 1, concat = False)\n self.sp_source_embed = nn.Parameter(torch.Tensor(self.node_num, 12))\n self.sp_target_embed = nn.Parameter(torch.Tensor(12, self.node_num))\n self.sp_linear_1 = nn.Linear(self.input_dim * seq_len, self.output_dim * self.graph_dim)\n self.sp_linear_2 = nn.Linear(self.output_dim * 
self.graph_dim, self.output_dim * self.graph_dim)\n self.sp_linear_3 = nn.Linear(self.output_dim * self.graph_dim, self.output_dim * self.graph_dim)\n self.sp_linear_4 = nn.Linear(self.output_dim * self.graph_dim, self.output_dim * self.graph_dim)\n # self.sp_linear_5 = nn.Linear(self.graph_dim, self.graph_dim)\n # self.sp_jklayer = JumpingKnowledge(\"max\")\n\n nn.init.xavier_uniform_(self.sp_source_embed)\n nn.init.xavier_uniform_(self.sp_target_embed)\n\n if choice[2] == 1:\n print(\"[DTW]\")\n self.dtw_origin = nn.Linear(in_features=self.input_dim * seq_len, out_features=self.output_dim * graph_dim)\n self.dtw_gconv1 = GATConv(self.input_dim * seq_len, self.output_dim * graph_dim, heads=3, concat=False)\n self.dtw_gconv2 = GATConv(self.output_dim * graph_dim, self.output_dim * graph_dim, heads=3, concat=False)\n self.dtw_gconv3 = GATConv(self.output_dim * graph_dim, self.output_dim * graph_dim, heads=3, concat=False)\n self.dtw_gconv4 = GATConv(self.output_dim * graph_dim, self.output_dim * graph_dim, heads=3, concat=False)\n # self.dtw_gconv5 = GATConv(graph_dim, graph_dim, heads = 1, concat = False)\n self.dtw_source_embed = nn.Parameter(torch.Tensor(self.node_num, 12))\n self.dtw_target_embed = nn.Parameter(torch.Tensor(12, self.node_num))\n self.dtw_linear_1 = nn.Linear(self.input_dim * self.seq_len, self.output_dim * self.graph_dim)\n self.dtw_linear_2 = nn.Linear(self.output_dim * self.graph_dim, self.output_dim * self.graph_dim)\n self.dtw_linear_3 = nn.Linear(self.output_dim * self.graph_dim, self.output_dim * self.graph_dim)\n self.dtw_linear_4 = nn.Linear(self.output_dim * self.graph_dim, self.output_dim * self.graph_dim)\n # self.dtw_linear_5 = nn.Linear(self.graph_dim, self.graph_dim)\n # self.dtw_jklayer = JumpingKnowledge(\"max\")\n\n nn.init.xavier_uniform_(self.dtw_source_embed)\n nn.init.xavier_uniform_(self.dtw_target_embed)\n\n def forward(self, x, edge_index, dtw_edge_index):\n # x: [batch_size, seq_len, num_nodes, input_dim]\n output_list = [0, 0, 0]\n batch_size = x.shape[0]\n\n if self.choice[0] == 1:\n # [seq_len, batch_size, input_dim*num_nodes]\n attn_input = x.permute(1, 0, 3, 2).reshape(self.seq_len, batch_size, -1).contiguous()\n # [seq_len, batch_size, input_dim*num_nodes]\n # input_dim*num_nodes is the embedding dimension\n attn_output, _ = self.self_attn(attn_input, attn_input, attn_input)\n # [seq_len, batch_size, input_dim*num_nodes]\n attn_output = torch.tanh(attn_output + attn_input)\n # [batch_size*num_nodes, input_dim, seq_len]\n attn_output = attn_output.reshape(self.seq_len, batch_size, self.input_dim, self.node_num) \\\n .permute(1, 3, 2, 0) \\\n .reshape(-1, self.input_dim, self.seq_len)\n\n # [batch_size*num_nodes, input_dim, seq_len]\n tcn_input = attn_output\n # [batch_size*num_nodes, output_dim*self.tcn_dim[-1], seq_len]\n tcn_output = self.tcn(tcn_input)\n # [batch_size*num_nodes, output_dim*self.tcn_dim[-1]*seq_len]\n tcn_output = torch.reshape(tcn_output,\n (-1, self.output_dim * self.tcn_dim[-1] * self.seq_len))\n # [batch_size*num_nodes, output_dim*self.graph_dim]\n tcn_output = self.tlinear(tcn_output)\n # [batch_size, num_nodes, output_dim, self.graph_dim]\n tcn_output = torch.reshape(tcn_output, (batch_size, self.node_num, self.output_dim, self.graph_dim))\n\n output_list[0] = tcn_output\n\n if self.choice[1] == 1 or self.choice[2] == 1:\n # [batch_size*num_nodes, input_dim*seq_len]\n sp_gout_0 = x.permute(0, 2, 3, 1).reshape(-1, self.input_dim * self.seq_len).contiguous()\n dtw_gout_0 = sp_gout_0.detach().clone()\n\n if 
self.choice[1] == 1:\n # [batch_size*num_nodes, input_dim*seq_len]\n sp_gout_0 = self.seq_linear(sp_gout_0) + sp_gout_0\n\n # [num_nodes, num_nodes]\n sp_learned_matrix = F.softmax(F.relu(torch.mm(self.sp_source_embed, self.sp_target_embed)), dim=1)\n\n # GATConv: [input_dim*seq_len, output_dim*graph_dim]\n # [batch_size*num_nodes, output_dim*graph_dim]\n sp_gout_1 = self.sp_gconv1(sp_gout_0, edge_index)\n # [batch_size, num_nodes, input_dim*seq_len]\n adp_input_1 = torch.reshape(sp_gout_0, (-1, self.node_num, self.input_dim * self.seq_len))\n # [batch_size, num_nodes, output_dim*graph_dim]; pass training=self.training so\n # dropout is disabled at eval time (F.dropout defaults to training=True)\n sp_adp_1 = self.sp_linear_1(sp_learned_matrix.matmul(F.dropout(adp_input_1, p=0.1, training=self.training)))\n # [batch_size*num_nodes, output_dim*graph_dim]\n sp_adp_1 = torch.reshape(sp_adp_1, (-1, self.output_dim * self.graph_dim))\n # [batch_size*num_nodes, output_dim*graph_dim]\n sp_origin = self.sp_origin(sp_gout_0)\n # [batch_size*num_nodes, output_dim*graph_dim]\n sp_output_1 = torch.tanh(sp_gout_1) * torch.sigmoid(sp_adp_1) + sp_origin * (1 - torch.sigmoid(sp_adp_1))\n\n # [batch_size*num_nodes, output_dim*graph_dim]\n sp_gout_2 = self.sp_gconv2(torch.tanh(sp_output_1), edge_index)\n # [batch_size, num_nodes, output_dim*graph_dim]\n adp_input_2 = torch.reshape(torch.tanh(sp_output_1), (-1, self.node_num, self.output_dim * self.graph_dim))\n # [batch_size, num_nodes, output_dim*graph_dim]\n sp_adp_2 = self.sp_linear_2(sp_learned_matrix.matmul(F.dropout(adp_input_2, p=0.1, training=self.training)))\n # [batch_size*num_nodes, output_dim*graph_dim]\n sp_adp_2 = torch.reshape(sp_adp_2, (-1, self.output_dim * self.graph_dim))\n # [batch_size*num_nodes, output_dim*graph_dim]\n sp_output_2 = F.leaky_relu(sp_gout_2) * torch.sigmoid(sp_adp_2) + \\\n sp_output_1 * (1 - torch.sigmoid(sp_adp_2))\n\n # [batch_size*num_nodes, output_dim*graph_dim]\n sp_gout_3 = self.sp_gconv3(F.relu(sp_output_2), edge_index)\n # [batch_size, num_nodes, output_dim*graph_dim]\n adp_input_3 = torch.reshape(F.relu(sp_output_2), (-1, self.node_num, self.output_dim * self.graph_dim))\n # [batch_size, num_nodes, output_dim*graph_dim]\n sp_adp_3 = self.sp_linear_3(sp_learned_matrix.matmul(F.dropout(adp_input_3, p=0.1, training=self.training)))\n # [batch_size*num_nodes, output_dim*graph_dim]\n sp_adp_3 = torch.reshape(sp_adp_3, (-1, self.output_dim * self.graph_dim))\n # [batch_size*num_nodes, output_dim*graph_dim]\n sp_output_3 = F.relu(sp_gout_3) * torch.sigmoid(sp_adp_3) + sp_output_2 * (1 - torch.sigmoid(sp_adp_3))\n\n sp_gout_4 = self.sp_gconv4(F.relu(sp_output_3), edge_index)\n adp_input_4 = torch.reshape(F.relu(sp_output_3), (-1, self.node_num, self.output_dim * self.graph_dim))\n sp_adp_4 = self.sp_linear_4(sp_learned_matrix.matmul(F.dropout(adp_input_4, p=0.1, training=self.training)))\n sp_adp_4 = torch.reshape(sp_adp_4, (-1, self.output_dim * self.graph_dim))\n # [batch_size*num_nodes, output_dim*graph_dim]\n sp_output_4 = F.relu(sp_gout_4) * torch.sigmoid(sp_adp_4) + sp_output_3 * (1 - torch.sigmoid(sp_adp_4))\n\n # sp_gout_5 = self.sp_gconv5(F.relu(sp_output_4), edge_index)\n # adp_input_5 = torch.reshape(F.relu(sp_output_4), (-1, self.node_num, self.graph_dim))\n # sp_adp_5 = self.sp_linear_5(sp_learned_matrix.matmul(F.dropout(adp_input_5,p=0.1)))\n # sp_adp_5 = torch.reshape(sp_adp_5, (-1, self.graph_dim))\n # sp_output_5 = F.relu(sp_gout_5) * torch.sigmoid(sp_adp_5) + sp_output_4 * (1 - torch.sigmoid(sp_adp_5))\n\n # [batch_size, num_nodes, output_dim, graph_dim]\n sp_output = torch.reshape(sp_output_4, (batch_size, self.node_num, self.output_dim, self.graph_dim))\n # sp_output = sp_output_4\n output_list[1] = 
sp_output\n\n if self.choice[2] == 1:\n dtw_gout_0 = self.seq_linear(dtw_gout_0) + dtw_gout_0\n\n dtw_learned_matrix = F.softmax(F.relu(torch.mm(self.dtw_source_embed, self.dtw_target_embed)), dim=1)\n\n # as in the SP branch, pass training=self.training so dropout is off at eval time\n dtw_gout_1 = self.dtw_gconv1(dtw_gout_0, dtw_edge_index)\n adp_input_1 = torch.reshape(dtw_gout_0, (-1, self.node_num, self.input_dim * self.seq_len))\n dtw_adp_1 = self.dtw_linear_1(dtw_learned_matrix.matmul(F.dropout(adp_input_1, p=0.1, training=self.training)))\n dtw_adp_1 = torch.reshape(dtw_adp_1, (-1, self.output_dim * self.graph_dim))\n dtw_origin = self.dtw_origin(dtw_gout_0)\n dtw_output_1 = torch.tanh(dtw_gout_1) * torch.sigmoid(dtw_adp_1) + \\\n dtw_origin * (1 - torch.sigmoid(dtw_adp_1))\n\n dtw_gout_2 = self.dtw_gconv2(torch.tanh(dtw_output_1), dtw_edge_index)\n adp_input_2 = torch.reshape(torch.tanh(dtw_output_1), (-1, self.node_num, self.output_dim * self.graph_dim))\n dtw_adp_2 = self.dtw_linear_2(dtw_learned_matrix.matmul(F.dropout(adp_input_2, p=0.1, training=self.training)))\n dtw_adp_2 = torch.reshape(dtw_adp_2, (-1, self.output_dim * self.graph_dim))\n dtw_output_2 = F.leaky_relu(dtw_gout_2) * torch.sigmoid(dtw_adp_2) + \\\n dtw_output_1 * (1 - torch.sigmoid(dtw_adp_2))\n\n dtw_gout_3 = self.dtw_gconv3(F.relu(dtw_output_2), dtw_edge_index)\n adp_input_3 = torch.reshape(F.relu(dtw_output_2), (-1, self.node_num, self.output_dim * self.graph_dim))\n dtw_adp_3 = self.dtw_linear_3(dtw_learned_matrix.matmul(F.dropout(adp_input_3, p=0.1, training=self.training)))\n dtw_adp_3 = torch.reshape(dtw_adp_3, (-1, self.output_dim * self.graph_dim))\n dtw_output_3 = F.relu(dtw_gout_3) * torch.sigmoid(dtw_adp_3) + dtw_output_2 * (1 - torch.sigmoid(dtw_adp_3))\n\n dtw_gout_4 = self.dtw_gconv4(F.relu(dtw_output_3), dtw_edge_index)\n adp_input_4 = torch.reshape(F.relu(dtw_output_3), (-1, self.node_num, self.output_dim * self.graph_dim))\n dtw_adp_4 = self.dtw_linear_4(dtw_learned_matrix.matmul(F.dropout(adp_input_4, p=0.1, training=self.training)))\n dtw_adp_4 = torch.reshape(dtw_adp_4, (-1, self.output_dim * self.graph_dim))\n # [batch_size*num_nodes, output_dim*graph_dim]\n dtw_output_4 = F.relu(dtw_gout_4) * torch.sigmoid(dtw_adp_4) + dtw_output_3 * (1 - torch.sigmoid(dtw_adp_4))\n\n # dtw_gout_5 = self.dtw_gconv5(F.relu(dtw_output_4), dtw_edge_index)\n # adp_input_5 = torch.reshape(F.relu(dtw_output_4), (-1, self.node_num, self.graph_dim))\n # dtw_adp_5 = self.dtw_linear_5(dtw_learned_matrix.matmul(F.dropout(adp_input_5,p=0.1)))\n # dtw_adp_5 = torch.reshape(dtw_adp_5, (-1, self.graph_dim))\n # dtw_output_5 = \\\n # F.relu(dtw_gout_5) * torch.sigmoid(dtw_adp_5) + dtw_output_4 * (1 - torch.sigmoid(dtw_adp_5))\n\n # [batch_size, num_nodes, output_dim, graph_dim]\n dtw_output = torch.reshape(dtw_output_4, (batch_size, self.node_num, self.output_dim, self.graph_dim))\n # dtw_output = dtw_output_4\n output_list[2] = dtw_output\n\n # output_list[*]: [batch_size, num_nodes, output_dim, graph_dim]\n # cell_output: [batch_size, num_nodes, output_dim, sum(choice)*graph_dim]\n step = 0\n for i in range(len(self.choice)):\n if self.choice[i] == 1 and step == 0:\n cell_output = output_list[i]\n step += 1\n elif self.choice[i] == 1:\n cell_output = torch.cat((cell_output, output_list[i]), dim=3)\n\n # cell_output = self.jklayer([output_list[0], output_list[1], output_list[2]])\n # cell_output = self.out(cell_output)\n\n # cell_output = torch.reshape(cell_output, (-1, self.pred_len_raw))\n\n return cell_output\n", "from libcity.data import get_dataset\nfrom libcity.utils import get_logger, get_executor, get_model\n\nif __name__ == '__main__':\n config = {\n 'log_level': 'INFO',\n 
'input_window': 12,\n 'output_window': 12,\n 'train_rate': 0.7,\n 'eval_rate': 0.1,\n 'cache_dataset': True,\n 'batch_size': 64,\n 'num_workers': 1,\n\n 'evaluator': 'TrafficStateEvaluator',\n 'dataset_class': 'TrafficStatePointDataset',\n 'executor': 'TrafficStateExecutor',\n 'model': 'GWNET',\n\n 'learning_rate': 0.001,\n 'learner': 'adam',\n 'lr_decay': False,\n 'weight_decay': 0.0001,\n 'dropout': 0.3,\n 'max_epoch': 100,\n 'epoch': 0,\n 'max_grad_norm': 5,\n 'clip_grad_norm': True,\n\n 'metrics': ['MAE', 'MSE', 'RMSE', 'MAPE', 'masked_MAE', 'masked_MSE', 'masked_RMSE', 'masked_MAPE', 'R2', 'EVAR'],\n 'gpu': True,\n 'gpu_id': '1',\n 'dataset': 'METR_LA',\n 'weight_col': 'cost',\n 'data_col': ['traffic_speed'],\n 'calculate_weight': True,\n 'add_time_in_day': False,\n 'add_day_in_week': False,\n 'scaler': \"standard\",\n 'use_early_stop': False,\n }\n import os\n # set CUDA_VISIBLE_DEVICES before torch is imported\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = config['gpu_id']\n import torch\n config['device'] = torch.device(\"cuda\" if torch.cuda.is_available() and config['gpu'] else \"cpu\")\n\n logger = get_logger(config)\n dataset = get_dataset(config)\n train_data, valid_data, test_data = dataset.get_data()\n print(len(train_data.dataset), train_data.dataset[0][0].shape, train_data.dataset[0][1].shape,\n train_data.batch_size)\n print(len(valid_data.dataset), valid_data.dataset[0][0].shape, valid_data.dataset[0][1].shape,\n valid_data.batch_size)\n print(len(test_data.dataset), test_data.dataset[0][0].shape, test_data.dataset[0][1].shape, test_data.batch_size)\n\n data_feature = dataset.get_data_feature()\n print(data_feature['adj_mx'].shape)\n print(data_feature['adj_mx'].sum())\n model = get_model(config, data_feature)\n executor = get_executor(config, model)\n executor.train(train_data, valid_data)\n model_cache_file = './libcity/cache/model_cache/' + config['model'] + '_' + config['dataset'] + '.m'\n executor.save_model(model_cache_file)\n executor.load_model(model_cache_file)\n # Evaluate; the results will be written under cache/evaluate_cache\n executor.evaluate(test_data)\n" ]
[ [ "numpy.sum", "torch.nn.init.xavier_uniform_", "torch.mm", "torch.cat", "torch.nn.Dropout", "torch.nn.functional.dropout", "torch.tanh", "torch.nn.functional.leaky_relu", "torch.sigmoid", "torch.device", "torch.Tensor", "torch.tensor", "torch.nn.Conv1d", "torch.reshape", "torch.nn.MultiheadAttention", "torch.nn.Linear", "torch.nn.functional.relu", "torch.nn.Sequential", "torch.nn.ReLU" ], [ "torch.cuda.is_available" ] ]
Sunnyfred/Atlantic_Hurricane_Simulations
[ "ee5d6d0f975876a01c4a21bebd3089bf3bbb843a" ]
[ "section3_change_pars_for_weak_hurricanes/Source_code_for_extracting_data/source_code_change_Clz/1_Calculate_wind_track.py" ]
[ "import numpy as np\nfrom netCDF4 import Dataset\nimport matplotlib.pyplot as plt\nfrom plotly.graph_objs import Scattergeo, Layout\nfrom plotly import offline\nfrom cartopy import config\nimport matplotlib as matplot\nfrom matplotlib.image import imread\nimport cartopy.crs as crs\nimport os\nimport shapely.geometry as sgeom\nfrom cartopy.feature import NaturalEarthFeature\nimport csv\nfrom wrf import (to_np, getvar, smooth2d, get_cartopy, cartopy_xlim,\n cartopy_ylim, latlon_coords)\nimport json\nfrom math import sin, cos, sqrt, atan2, radians\nimport pickle\n\n\n\n\n\n\n\nmainpath = '/project/momen/meng/COAWST/results/WRF_VS_WRFSWAN_2/'\nHurricaneall = ['Gert','Nicole','Joaquin','Cristobal','Ike']\nReal_Hurricane_Data = ['Gert_Real_Track_Time_NOAA.csv',\n 'Nicole_Real_Track_Time_NOAA.csv',\n 'Joaquin_Real_Track_Time_NOAA.csv',\n 'Cristobal_Real_Track_Time_NOAA.csv',\n 'Ike_Real_Track_Time_NOAA.csv']\ndays = [15, 14, 4, 26, 10] # start day\nhours = [-6, -6, -6, -6, -6] # start hour\noutput_interval=6\ngridsize = ['8km','16km']\nswansize = ['swgr8p0', 'swgr16p0']\nprefix = 'WRFSWAN_NoTurb_swdt10_cpdt7200_'\nDirall = ['_swh8_swt14_Clz0p0001',\n '_swh8_swt14_Clz0p01',\n '_swh8_swt14_A1200B4p5C0P11',\n '_swh8_swt14_Clz100p00']\noutputpath = '/project/momen/meng/COAWST/results/WRF_VS_WRFSWAN_2/postprocessing_WRFONLY/0_Paper_figures/section3_change_pars_for_weak_winds/source_code_outputs_change_Clz/'\n\n\n# This function returns a list of all wrf files in the directory.\ndef list_files(Dir, ncfiles):\n \tfor f in os.listdir(Dir):\n \t \tif f.startswith('wrfout'):\n \t \t \tncfiles.append(f)\n \treturn (ncfiles)\n\n\n\n\nfor gk in range(len(gridsize)):\n count1=0\n\n for Hurricane in Hurricaneall:\n \n \n\n \n # Initiate the lists that will contain all the necessary data to plot the hurricane's truck.\n Real_Times = []\n Real_Lat = []\n Real_Long =[]\n Real_hour=[]\n Real_day=[]\n real_dic={}\n with open(outputpath+Real_Hurricane_Data[count1]) as f:\n \t reader = csv.reader(f)\n \t # Move to the row containing the row headers. 
\n \t next(reader)\n \t row_header = next(reader)\n \t # Extract the data necessary to plot the real track.\n \t for row in reader:\n \t \t Real_Lat.append(float(row[row_header.index('Lat')]))\n \t \t Real_Long.append(float(row[row_header.index('Lon')]))\n \t \t Real_hour.append(int(row[row_header.index('Time - hour')]))\t\n \t \t Real_day.append(int(row[row_header.index('Time - day')]))\t\t\n\n for i in range(len(Real_day)):\n real_dic[Real_day[i]]=[]\n\n for i in range(len(Real_day)):\n real_dic[Real_day[i]].append([Real_hour[i],Real_Lat[i],Real_Long[i]])\n print(real_dic)\n # with open(outputpath+Hurricane+'_track.txt', 'w') as outfile:\n # json.dump(real_dic, outfile)\n \n \n \n \n\n\n \n\n results=[] \n for Dir in Dirall:\n \n \n \n\n print('Current folder is: ')\n Dir_local = mainpath+Hurricane+ '/' +gridsize[gk]+ '/' +prefix+swansize[gk]+Dir\n print(Dir_local)\n #row.append(Hurricane+Dir)\n \n \n simu_dic = {}\n for i in range(len(Real_day)):\n simu_dic[Real_day[i]]=[]\n \n \n day=days[count1]\n hour=hours[count1]\n day_count=0\n # Set the working directory.\n os.chdir(Dir_local)\n # initiate the list that will contain all wrf files in Dir directory.\n ncfiles = []\n # Use the list_files function to list all the wrf files in the directory.\n ncfiles = list_files(Dir_local, ncfiles)\n # Sort the ncfiles \n ncfiles = sorted(ncfiles)\n #print (ncfiles)\n # initiate the list that will contain the hurricane-track data\n min_slp = []\n min_lat = []\n min_long = []\n\n for ncfile in ncfiles: \n \n \t #print (ncfile)\n \t ncfile = Dataset(ncfile)\n \t # Get the latitude and longitude data.\n \t LAT = np.array(getvar(ncfile, \"XLAT\"))\n \t latitudes = (LAT[:,0])\n \t LONG = np.array(getvar(ncfile, \"XLONG\")) \n \t longitudes = (LONG[0,:])\n \t # Get the sea level pressure for each wrf output file.\n \t slp2D = getvar(ncfile, \"slp\")\n \t slp = np.array(slp2D)\n \t # Get the index of the minimum value of pressure.\n \t idx = np.where(slp == np.amin(slp))\n \t #print (idx)\n \t # List the data of the minimum SLP\n \t min_slp.append(np.amin(slp)) \n \t min_lat.append(latitudes[idx[0]])\n \t min_long.append(longitudes[idx[1]])\n \t if day_count > 3:\n \t \t if day==31:\n \t \t \t day=1\n \t \t else:\n \t \t \t day+=1\n \t \t day_count=0\n \t day_count += 1 \n \n \t hour += output_interval\n \t if hour == 24:\n \t \t hour=0 \n \t print(day, hour)\n \t simu_dic[day].append([hour,latitudes[idx[0]].tolist()[0],longitudes[idx[1]].tolist()[0]])\n results.append(simu_dic)\n print(results)\n\n\n with open(outputpath+Hurricane+'_track_'+gridsize[gk]+'.txt', 'w') as outfile: \n json.dump(real_dic, outfile) \n for i in range(len(results)): \n json.dump(results[i], outfile) \n \n pickle.dump( slp2D, open( outputpath+Hurricane+'_'+gridsize[gk]+'.p', \"wb\" ) )\n \n \n count1=count1+1 " ]
[ [ "numpy.array", "numpy.amin" ] ]
ZurMaD/DeblurGANv2
[ "bf8ab7d178ecf32db7eba588ede3f3f121d17470" ]
[ "predict.py" ]
[ "import os\nfrom glob import glob\nfrom typing import Optional\n\nimport cv2\nimport numpy as np\nimport torch\nimport yaml\nfrom fire import Fire\nfrom tqdm import tqdm\n\nfrom aug import get_normalize\nfrom models.networks import get_generator\n\n\nclass Predictor:\n def __init__(self, weights_path: str, model_name: str = ''):\n with open('/content/DeblurGANv2/config/config.yaml') as cfg:\n config = yaml.load(cfg)\n model = get_generator(model_name or config['model'])\n v1=torch.load(weights_path)\n print(v1)\n v2=torch.load(weights_path)['model']\n print(v2)\n model.load_state_dict(torch.load(weights_path)['model'])\n self.model = model.cuda()\n self.model.train(True)\n # GAN inference should be in train mode to use actual stats in norm layers,\n # it's not a bug\n self.normalize_fn = get_normalize()\n\n @staticmethod\n def _array_to_batch(x):\n x = np.transpose(x, (2, 0, 1))\n x = np.expand_dims(x, 0)\n return torch.from_numpy(x)\n\n def _preprocess(self, x: np.ndarray, mask: Optional[np.ndarray]):\n x, _ = self.normalize_fn(x, x)\n if mask is None:\n mask = np.ones_like(x, dtype=np.float32)\n else:\n mask = np.round(mask.astype('float32') / 255)\n\n h, w, _ = x.shape\n block_size = 32\n min_height = (h // block_size + 1) * block_size\n min_width = (w // block_size + 1) * block_size\n\n pad_params = {'mode': 'constant',\n 'constant_values': 0,\n 'pad_width': ((0, min_height - h), (0, min_width - w), (0, 0))\n }\n x = np.pad(x, **pad_params)\n mask = np.pad(mask, **pad_params)\n\n return map(self._array_to_batch, (x, mask)), h, w\n\n @staticmethod\n def _postprocess(x: torch.Tensor) -> np.ndarray:\n x, = x\n x = x.detach().cpu().float().numpy()\n x = (np.transpose(x, (1, 2, 0)) + 1) / 2.0 * 255.0\n return x.astype('uint8')\n\n def __call__(self, img: np.ndarray, mask: Optional[np.ndarray], ignore_mask=True) -> np.ndarray:\n (img, mask), h, w = self._preprocess(img, mask)\n with torch.no_grad():\n inputs = [img.cuda()]\n if not ignore_mask:\n inputs += [mask]\n pred = self.model(*inputs)\n return self._postprocess(pred)[:h, :w, :]\n\ndef process_video(pairs, predictor, output_dir):\n for video_filepath, mask in tqdm(pairs):\n video_filename = os.path.basename(video_filepath)\n output_filepath = os.path.join(output_dir, os.path.splitext(video_filename)[0]+'_deblur.mp4')\n video_in = cv2.VideoCapture(video_filepath)\n fps = video_in.get(cv2.CAP_PROP_FPS)\n width = int(video_in.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(video_in.get(cv2.CAP_PROP_FRAME_HEIGHT))\n total_frame_num = int(video_in.get(cv2.CAP_PROP_FRAME_COUNT))\n video_out = cv2.VideoWriter(output_filepath, cv2.VideoWriter_fourcc(*'MP4V'), fps, (width, height))\n tqdm.write(f'process {video_filepath} to {output_filepath}, {fps}fps, resolution: {width}x{height}')\n for frame_num in tqdm(range(total_frame_num), desc=video_filename):\n res, img = video_in.read()\n if not res:\n break\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n pred = predictor(img, mask)\n pred = cv2.cvtColor(pred, cv2.COLOR_RGB2BGR)\n video_out.write(pred)\n\ndef main(img_pattern: str,\n mask_pattern: Optional[str] = None,\n weights_path='/content/best_fpn.h5',\n out_dir='/content/submit/',\n side_by_side: bool = False,\n video: bool = False):\n def sorted_glob(pattern):\n return sorted(glob(pattern))\n\n imgs = sorted_glob(img_pattern)\n masks = sorted_glob(mask_pattern) if mask_pattern is not None else [None for _ in imgs]\n pairs = zip(imgs, masks)\n names = sorted([os.path.basename(x) for x in glob(img_pattern)])\n print(weights_path)\n predictor = 
Predictor(weights_path=weights_path)\n\n os.makedirs(out_dir, exist_ok=True)\n if not video:\n for name, pair in tqdm(zip(names, pairs), total=len(names)):\n f_img, f_mask = pair\n img, mask = map(cv2.imread, (f_img, f_mask))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n pred = predictor(img, mask)\n if side_by_side:\n pred = np.hstack((img, pred))\n pred = cv2.cvtColor(pred, cv2.COLOR_RGB2BGR)\n cv2.imwrite(os.path.join(out_dir, name),\n pred)\n else:\n process_video(pairs, predictor, out_dir)\n\n\nif __name__ == '__main__':\n Fire(main)\n" ]
[ [ "numpy.transpose", "torch.load", "torch.no_grad", "numpy.ones_like", "numpy.hstack", "torch.from_numpy", "numpy.expand_dims", "numpy.pad" ] ]
NCcoco/kaggle-project
[ "bff565bcfa8395c87920068557678566631b8d99" ]
[ "Bird-Species/transformer/vision-transformer3.py" ]
[ "import tensorflow as tf\nimport tensorflow_hub as hub\nimport tensorflow.keras as keras\nimport tensorflow.keras.layers as layers\n\nfrom PIL import Image\nfrom io import BytesIO\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport requests\nimport os\nimport platform\nimport pathlib\nimport random\nimport math\n\n\nbase_path = os.path.abspath(\".\")\ndir_separator = \"/\"\nif platform.system().lower() == 'windows':\n dir_separator = \"\\\\\"\n base_path = base_path[:(base_path.index('Bird-Species'))]\n\n\n# 超参数设置\nnum_classes = 325\nimage_size = 224\npatch_size = 32\nepochs = 30\nbatch_size = 128\nlearning_rate = keras.optimizers.schedules.InverseTimeDecay(\n initial_learning_rate=0.02,\n decay_steps=100,\n decay_rate=0.7\n)\nlearning_rate = 0.002\n\n\n# 准备数据集\ndef load_dataset(batch_size=128):\n train_path = ['Bird-Species', 'datasets', 'train']\n # 获取所有图片地址\n train_dir = base_path + dir_separator.join(train_path)\n # 下面的方式获得一个Path类型的训练图片根路径\n train_root = pathlib.Path(train_dir)\n # # Path类型提供一个glob方法将保存的根路径下所有的文件地址分割为list\n # all_image_paths = list(train_root.glob(\"*/*\"))\n # all_image_paths = [str(path) for path in all_image_paths]\n #\n # random.shuffle(all_image_paths)\n\n train_ds = keras.utils.image_dataset_from_directory(\n train_root,\n image_size=(image_size, image_size),\n batch_size=batch_size\n )\n return train_ds\n\n\n# 加载验证集\ndef load_valid_dataset():\n valid_dir = ['Bird-Species', 'datasets', 'valid']\n valid_dir = base_path + dir_separator.join(valid_dir)\n return __load_dataset(valid_dir)\n\n\ndef __load_dataset(dir, batch_size=64, image_size=(224, 224)):\n data_root = pathlib.Path(dir)\n # 获取所有的图片路径\n all_image_paths = list(data_root.glob('*/*'))\n all_image_paths = [str(path) for path in all_image_paths]\n # 打乱路径list\n random.shuffle(all_image_paths)\n image_count = len(all_image_paths)\n # print(all_image_paths[:10])\n\n # c = np.array(imageio.imread(all_image_paths[0]))\n # plt.imshow(c)\n # plt.show()\n\n train_ds = tf.keras.utils.image_dataset_from_directory(\n data_root,\n image_size=image_size,\n batch_size=batch_size)\n # print(train_ds)\n class_names = train_ds.class_names\n # print(class_names)\n # plt.figure(figsize=(10, 10))\n # for images, labels in train_ds.take(1):\n # for i in range(9):\n # ax = plt.subplot(3, 3, i + 1)\n # plt.imshow(images[i].numpy().astype(\"uint8\"))\n # plt.title(class_names[labels[i]])\n # plt.axis(\"off\")\n # plt.show()\n\n normalization_layer = tf.keras.layers.Rescaling(1. 
/ 255)\n normalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))\n\n # train_ds = normalized_ds.cache().prefetch(buffer_size=AUTOTUNE)\n return normalized_ds\n\n\ndef norm_img(image, label):\n image = tf.image.resize(image, size=(224, 224))\n return tf.cast(image, tf.float32) / 255., label\n\n\nAUTOTUNE = tf.data.experimental.AUTOTUNE\ntrain_dataset = load_dataset(batch_size)\ntrain_dataset = train_dataset.map(norm_img, num_parallel_calls=AUTOTUNE)\ntrain_dataset = train_dataset.cache()\ntrain_dataset = train_dataset.prefetch(AUTOTUNE)\n\nvalid_dataset = load_valid_dataset()\n\nloss_object = tf.keras.losses.SparseCategoricalCrossentropy()\n\n\n\n\nmodel = tf.keras.Sequential([\n # layers.InputLayer((image_size, image_size, 3)),\n hub.KerasLayer(r\"models\", trainable=False),\n keras.layers.Dense(num_classes, activation=\"softmax\")\n])\n\nmodel.build(input_shape=(None, 224, 224, 3))\nprint(model.summary())\n# model.compile(optimizer='adam',\n# loss=keras.losses.SparseCategoricalCrossentropy(),\n# metrics=['accuracy'])\n\n# model.fit(ds_train, batch_size, epochs)\n\n\ntrain_loss = tf.keras.metrics.Mean(name='train_loss')\ntrain_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')\n\noptimizer = keras.optimizers.Adam(learning_rate=learning_rate)\n\nvalid_loss = tf.keras.metrics.Mean(name='valid_loss')\nvalid_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='valid_accuracy')\n\n\n# tf.config.experimental_run_functions_eagerly(True)\[email protected]\ndef train_step(images, labels, optimizer):\n with tf.GradientTape() as tape:\n predictions = model(images, training=True)\n # the original 0.5/0.5 sum added the same loss to itself; compute it once\n loss = loss_object(y_true=labels, y_pred=predictions)\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(grads_and_vars=zip(gradients, model.trainable_variables))\n\n train_loss(loss)\n train_accuracy(labels, predictions)\n\n\[email protected]\ndef valid_step(images, labels):\n predictions = model(images, training=False)\n v_loss = loss_object(labels, predictions)\n\n valid_loss(v_loss)\n valid_accuracy(labels, predictions)\n\n\n# start training\nfor epoch in range(epochs):\n train_loss.reset_states()\n train_accuracy.reset_states()\n valid_loss.reset_states()\n valid_accuracy.reset_states()\n step = 0\n for images, labels in train_dataset:\n step += 1\n\n train_step(images, labels, optimizer)\n print(f\"Epoch: {epoch + 1}/{epochs}, \"\n f\"step: {step}/{math.ceil(47332 / batch_size)},\"\n f\"learning_rate: {optimizer.lr.numpy():.7f}\"\n f\" loss: {train_loss.result():.5f},\"\n f\" accuracy: { train_accuracy.result():.5f}\")\n\n for valid_images, valid_labels in valid_dataset:\n valid_step(valid_images, valid_labels)\n\n print(f\"Epoch: {epoch + 1}/{epochs}, \"\n f\"valid loss: {valid_loss.result():.5f}, \"\n f\"valid accuracy: {valid_accuracy.result():.5f}, \")\n\n # Reduce the learning rate by 80% after every epoch\n learning_rate = learning_rate * 0.2\n optimizer.lr = learning_rate\n\n\n# def preprocess_image(image):\n# image = np.array(image)\n# image_resized = tf.image.resize(image, (224, 224))\n# image_resized = tf.cast(image_resized, tf.float32)\n# image_resized = (image_resized - 127.5) / 127.5\n# return tf.expand_dims(image_resized, 0).numpy()\n#\n#\n# def load_image_from_url(url):\n# response = requests.get(url)\n# image = Image.open(BytesIO(response.content))\n# image = preprocess_image(image)\n# return image\n#\n#\n# img_url = 
\"https://p0.pikrepo.com/preview/853/907/close-up-photo-of-gray-elephant.jpg\"\n# image = load_image_from_url(img_url)\n# #\n# # plt.imshow((image[0] + 1) / 2)\n# # plt.show()\n# predictions = model.predict(image)\n# print(predictions)\n\n# with open(\"models/ilsvrc2012_wordnet_lemmas.txt\", \"r\") as f:\n# lines = f.readlines()\n# imagenet_int_to_str = [line.rstrip() for line in lines]\n#\n# predicted_label = imagenet_int_to_str[int(np.argmax(predictions))]\n# print(predicted_label)\n\n\n" ]
[ [ "tensorflow.keras.optimizers.Adam", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.keras.metrics.SparseCategoricalAccuracy", "tensorflow.image.resize", "tensorflow.keras.layers.Rescaling", "tensorflow.cast", "tensorflow.GradientTape", "tensorflow.keras.layers.Dense", "tensorflow.keras.optimizers.schedules.InverseTimeDecay", "tensorflow.keras.metrics.Mean", "tensorflow.keras.utils.image_dataset_from_directory" ] ]
kineticengines/text-to-text-transfer-transformer
[ "97cdc174f138e1aa5c189593ed2be77236dcb323" ]
[ "t5/data/preprocessors_test.py" ]
[ "# Copyright 2020 The T5 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for from t5.preprocessors.\"\"\"\n\nimport functools\n\nfrom absl.testing import absltest\nimport gin\nfrom t5.data import preprocessors as prep\nfrom t5.data import test_utils\nfrom t5.data import utils\nfrom t5.data.dataset_providers import Feature\nimport tensorflow as tf\n\nmock = absltest.mock\nassert_dataset = test_utils.assert_dataset\n\n\nclass PreprocessorsTest(tf.test.TestCase):\n def test_regular_noise_mask(self):\n length = 800\n span_length = 2\n noise_density = 0.25\n noise_mask = prep.regular_noise_mask(length=length,\n noise_density=noise_density,\n seeds=[(0, 1), (2, 3)],\n min_span_length=span_length,\n max_span_length=span_length)\n num_masked = tf.reduce_sum(tf.cast(noise_mask, tf.int32))\n self.assertEqual(self.evaluate(num_masked), length * noise_density)\n\n def test_random_prefix_noise_mask(self):\n for _ in range(100):\n length = 10\n noise_density = 0.5\n noise_mask = prep.random_prefix_noise_mask(\n length=length, noise_density=noise_density, seeds=[(0, 1)])\n first = noise_mask[0]\n last = noise_mask[-1]\n self.assertTrue(self.evaluate(first))\n self.assertFalse(self.evaluate(last))\n\n def test_random_spans_noise_mask(self):\n length = 32\n noise_density = 0.25\n mean_noise_span_length = 2.0\n # there should be 4 noise spans with a total length of 8.\n noise_mask = prep.random_spans_noise_mask(length, noise_density,\n [(1, 2), (3, 4)],\n mean_noise_span_length)\n output = self.evaluate(tf.cast(noise_mask, tf.int32))\n expected_output = [\n 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 1, 0, 0, 1\n ]\n self.assertAllEqual(output, expected_output)\n\n def test_noise_token_to_sentinel(self):\n vocabulary = test_utils.MockVocabulary({'foo': [10]}, vocab_size=1000)\n tokens = tf.constant([10, 11, 12, 13, 14, 15])\n noise_mask = tf.constant([True, True, False, False, True, False])\n expected_output = [999, 999, 12, 13, 999, 15]\n output = self.evaluate(\n prep.noise_token_to_sentinel(tokens, noise_mask, vocabulary, ()))\n self.assertAllEqual(output, expected_output)\n\n def test_noise_span_to_sentinel(self):\n vocabulary = test_utils.MockVocabulary({'foo': [10]}, vocab_size=1000)\n tokens = tf.constant([10, 11, 12, 13, 14, 15])\n noise_mask = tf.constant([True, True, False, False, True, False])\n expected_output = [999, 12, 13, 999, 15]\n output = self.evaluate(\n prep.noise_span_to_sentinel(tokens, noise_mask, vocabulary, ()))\n self.assertAllEqual(output, expected_output)\n\n def test_nonnoise_span_to_sentinel(self):\n vocabulary = test_utils.MockVocabulary({'foo': [10]}, vocab_size=1000)\n tokens = tf.constant([10, 11, 12, 13, 14, 15])\n noise_mask = tf.constant([True, True, False, False, True, False])\n expected_output = [10, 11, 999, 14, 999]\n output = self.evaluate(\n prep.nonnoise_span_to_sentinel(tokens, noise_mask, vocabulary, ()))\n self.assertAllEqual(output, expected_output)\n\n def 
test_noise_span_to_unique_sentinel(self):\n vocabulary = test_utils.MockVocabulary({'foo': [10]}, vocab_size=1000)\n tokens = tf.constant([10, 11, 12, 13, 14, 15])\n noise_mask = tf.constant([True, True, False, False, True, False])\n expected_output = [999, 12, 13, 998, 15]\n output = self.evaluate(\n prep.noise_span_to_unique_sentinel(tokens, noise_mask, vocabulary,\n ()))\n self.assertAllEqual(output, expected_output)\n\n def test_drop_noise_tokens(self):\n vocabulary = test_utils.MockVocabulary({'foo': [10]}, vocab_size=1000)\n tokens = tf.constant([10, 11, 12, 13, 14, 15])\n noise_mask = tf.constant([True, True, False, False, True, False])\n expected_output = [12, 13, 15]\n output = self.evaluate(\n prep.drop_noise_tokens(tokens, noise_mask, vocabulary, ()))\n self.assertAllEqual(output, expected_output)\n\n def test_drop_nonnoise_tokens(self):\n vocabulary = test_utils.MockVocabulary({'foo': [10]}, vocab_size=1000)\n tokens = tf.constant([10, 11, 12, 13, 14, 15])\n noise_mask = tf.constant([True, True, False, False, True, False])\n expected_output = [10, 11, 14]\n output = self.evaluate(\n prep.drop_nonnoise_tokens(tokens, noise_mask, vocabulary, ()))\n self.assertAllEqual(output, expected_output)\n\n def test_permute_noise_tokens(self):\n tf.random.set_seed(55)\n vocabulary = test_utils.MockVocabulary({'foo': [10]}, vocab_size=1000)\n tokens = tf.constant([10, 11, 12, 13, 14, 15])\n noise_mask = tf.constant([True, True, False, False, True, False])\n expected_output = [10, 14, 12, 13, 11, 15]\n output = self.evaluate(\n prep.permute_noise_tokens(tokens, noise_mask, vocabulary,\n [(0, 1)]))\n self.assertAllEqual(output, expected_output)\n\n def test_noise_token_to_gathered_token(self):\n vocabulary = test_utils.MockVocabulary({'foo': [10]}, vocab_size=1000)\n tokens = tf.constant([10, 11, 12, 13, 14, 15])\n noise_mask = tf.constant([True, True, False, False, True, False])\n expected_output = [13, 14, 12, 13, 10, 15]\n output = self.evaluate(\n prep.noise_token_to_gathered_token(tokens, noise_mask, vocabulary,\n [(55, 56)]))\n self.assertAllEqual(output, expected_output)\n\n def test_noise_token_to_random_token(self):\n vocabulary = test_utils.MockVocabulary({'foo': [10]}, vocab_size=1000)\n tokens = tf.constant([10, 11, 12, 13, 14, 15])\n noise_mask = tf.constant([True, True, False, False, True, False])\n expected_output = [961, 553, 12, 13, 60, 15]\n\n output = self.evaluate(\n prep.noise_token_to_random_token(tokens,\n noise_mask,\n vocabulary,\n seeds=[(55, 56)]))\n self.assertAllEqual(output, expected_output)\n\n def test_noise_token_to_random_token_or_sentinel(self):\n vocabulary = test_utils.MockVocabulary({'foo': [10]}, vocab_size=1000)\n tokens = tf.constant(list(range(10)))\n noise_mask = tf.constant(\n [True, True, False, False, True, False, True, True, True, True])\n expected_output = [999, 348, 2, 3, 108, 5, 999, 999, 999, 999]\n output = self.evaluate(\n prep.noise_token_to_random_token_or_sentinel(tokens,\n noise_mask,\n vocabulary,\n seeds=[(55, 56),\n (57, 58)],\n random_prob=0.2))\n self.assertAllEqual(output, expected_output)\n\n def test_rekey(self):\n og_dataset = tf.data.Dataset.from_tensors({\n 'text': 'That is good.',\n 'other': 'That is bad.'\n })\n dataset = prep.rekey(og_dataset, {\n 'inputs': 'other',\n 'targets': 'text'\n })\n assert_dataset(dataset, {\n 'inputs': 'That is bad.',\n 'targets': 'That is good.'\n })\n\n dataset = prep.rekey(og_dataset, {'targets': 'text'})\n assert_dataset(dataset, {'targets': 'That is good.'})\n\n dataset = 
prep.rekey(og_dataset, {'inputs': 'text'})\n assert_dataset(dataset, {'inputs': 'That is good.'})\n\n dataset = prep.rekey(og_dataset)\n assert_dataset(dataset, {\n 'text': 'That is good.',\n 'other': 'That is bad.'\n })\n\n dataset = prep.rekey(og_dataset, {'inputs': 'text', 'targets': None})\n assert_dataset(dataset, {'inputs': 'That is good.', 'targets': ''})\n\n def test_translate(self):\n og_dataset = tf.data.Dataset.from_tensor_slices({\n 'en': ['That is good.'],\n 'de': ['Das ist gut.']\n })\n\n dataset = prep.translate(og_dataset, 'en', 'de')\n assert_dataset(\n dataset, {\n 'inputs': 'translate English to German: That is good.',\n 'targets': 'Das ist gut.',\n })\n\n def test_summarize(self):\n og_dataset = tf.data.Dataset.from_tensor_slices({\n 'article': ['An article.'],\n 'highlights': ['A summary.']\n })\n\n dataset = prep.summarize(og_dataset, 'article', 'highlights')\n assert_dataset(\n dataset,\n {\n 'inputs': 'summarize: An article.',\n 'targets': 'A summary.'\n },\n )\n\n def assertStringEqual(self, a, b):\n self.assertTrue(tf.equal(a, b), '%s != %s' % (a, b))\n\n def test_pad_punctuation(self):\n self.assertStringEqual(\n ' \" This is a string with \" punctuation ( 1845 - 1986 ) \" . ',\n prep._pad_punctuation(\n '\"This is a string with \"punctuation (1845-1986) \".'))\n\n def test_span_answer(self):\n self.assertStringEqual(\n 'start: 2 end: 3',\n prep._span_answer(tf.constant('Called the Denver Broncos.'),\n tf.constant('Denver Broncos')))\n # Not found.\n self.assertStringEqual(\n '',\n prep._span_answer(tf.constant('Called the Denver Broncos.'),\n tf.constant('Denver Bronscos')))\n\n def test_squad(self):\n og_dataset = tf.data.Dataset.from_tensors({\n 'id': 'testid',\n 'context': 'Some context.',\n 'question': 'A question?',\n 'answers': {\n 'text': ['The answer.', 'Another answer.'],\n }\n })\n\n dataset = prep.squad(og_dataset)\n assert_dataset(\n dataset, {\n 'id': 'testid',\n 'inputs': 'question: A question ? context: Some context . ',\n 'targets': 'The answer . ',\n 'context': 'Some context . ',\n 'question': 'A question ? ',\n 'answers': ['The answer . ', 'Another answer . '],\n })\n\n def test_pad_nonspaced_languages(self):\n dataset = tf.data.Dataset.from_tensor_slices(\n {'text': ['Hello there. 你好吗?']})\n dataset = prep.pad_nonspaced_languages(dataset)\n assert_dataset(dataset, {\n 'text': 'Hello there. 你 好 吗 ?',\n })\n\n def test_triviaqa(self):\n answers = ['key', 'keys']\n contexts = [\n 'The answer to all questions is the key.',\n 'The answer to all questions are the keys.'\n ]\n og_dataset = tf.data.Dataset.from_tensors({\n 'question': 'What is the answer?',\n 'entity_pages': {\n 'wiki_context': contexts\n },\n 'answer': {\n 'normalized_aliases': answers,\n 'normalized_value': 'key'\n }\n })\n\n dataset = prep.trivia_qa(og_dataset)\n assert_dataset(dataset, [{\n 'inputs':\n 'question: What is the answer ? context: The answer to all questions is the key . ',\n 'targets': 'key'\n }, {\n 'inputs':\n 'question: What is the answer ? context: The answer to all questions are the keys . ',\n 'targets': 'key'\n }, {\n 'inputs':\n 'question: What is the answer ? context: The answer to all questions are the keys . 
',\n 'targets': 'keys'\n }])\n\n def test_squad_span_space_tokenized(self):\n answers = ['the answer', 'answer']\n d = tf.data.Dataset.from_tensors(\n {\n 'id': 'a',\n 'context': 'context with the answer.',\n 'question': 'Say what?',\n 'answers': {\n 'text': answers,\n },\n }, )\n og_dataset = d.concatenate(\n tf.data.Dataset.from_tensors(\n { # Filter this out because answer is not in context.\n 'id': 'b',\n 'context': 'context without answers.',\n 'question': 'Say what?',\n 'answers': {\n 'text': answers,\n }\n }))\n\n dataset = prep.squad_span_space_tokenized(og_dataset)\n assert_dataset(\n dataset, {\n 'id': 'a',\n 'inputs':\n 'question: Say what ? context: context with the answer . ',\n 'targets': 'start: 2 end: 3',\n 'context': 'context with the answer . ',\n 'question': 'Say what ? ',\n 'answers': answers,\n })\n\n def test_glue(self):\n test_idx = 10\n input_data = {\n 'q1': 'How so?',\n 'q2': 'Why not?',\n 'q3': 'Who?',\n 'idx': test_idx,\n 'label': 0,\n }\n og_dataset = tf.data.Dataset.from_tensors(input_data)\n benchmark_name = 'qqp'\n label_names = ['not_duplicate', 'duplicate']\n\n dataset = prep.glue(og_dataset, benchmark_name, label_names)\n assert_dataset(\n dataset,\n {\n 'inputs': 'qqp q1: How so? q2: Why not? q3: Who?',\n 'targets': 'not_duplicate',\n 'idx': test_idx,\n },\n )\n\n # Test `feature_names` argument.\n dataset = prep.glue(og_dataset,\n benchmark_name,\n label_names,\n feature_names=['q3', 'q1'])\n assert_dataset(\n dataset,\n {\n 'inputs': 'qqp q3: Who? q1: How so?',\n 'targets': 'not_duplicate',\n 'idx': test_idx,\n },\n )\n\n # Test target is <unk> when label is -1\n input_data['label'] = -1\n og_dataset = tf.data.Dataset.from_tensors(input_data)\n dataset = prep.glue(og_dataset, benchmark_name, label_names)\n assert_dataset(\n dataset,\n {\n 'inputs': 'qqp q1: How so? q2: Why not? q3: Who?',\n 'targets': '<unk>',\n 'idx': test_idx,\n },\n )\n\n # Test id_key argument\n input_data = {\n 'q1': 'How so?',\n 'q2': 'Why not?',\n 'q3': 'Who?',\n 'uid': test_idx,\n 'label': 0,\n }\n og_dataset = tf.data.Dataset.from_tensors(input_data)\n dataset = prep.glue(og_dataset,\n benchmark_name,\n label_names,\n feature_names=['q1', 'q2', 'q3'],\n id_key='uid')\n assert_dataset(\n dataset,\n {\n 'inputs': 'qqp q1: How so? q2: Why not? q3: Who?',\n 'targets': 'not_duplicate',\n 'idx': test_idx,\n },\n )\n\n def test_multirc(self):\n og_dataset = tf.data.Dataset.from_tensors({\n 'paragraph':\n '<b>Sent 1: </b>Once upon a time, there was a squirrel named Joey.<br><b>Sent 2: </b>Joey loved to go outside and play with his cousin Jimmy.',\n 'question':\n 'Why was Joey surprised the morning he woke up for breakfast?',\n 'answer': 'There was only pie to eat',\n 'label': 1,\n 'idx': {\n 'paragraph': 5,\n 'question': 1,\n 'answer': 3\n }\n })\n\n dataset = prep.glue(\n og_dataset,\n 'multirc',\n label_names=['False', 'True'],\n feature_names=('question', 'answer', 'paragraph'),\n )\n assert_dataset(\n dataset,\n {\n 'inputs':\n 'multirc question: Why was Joey surprised the morning he woke up for breakfast? answer: There was only pie to eat paragraph: Sent 1: Once upon a time, there was a squirrel named Joey. 
Sent 2: Joey loved to go outside and play with his cousin Jimmy.',\n 'targets': 'True',\n 'idx/paragraph': 5,\n 'idx/question': 1,\n 'idx/answer': 3,\n },\n )\n\n def test_stsb(self):\n test_idx = 10\n og_dataset = tf.data.Dataset.from_tensors(\n {\n 'sentence1': ['Big news.'],\n 'sentence2': ['No idea.'],\n 'label': [2.8],\n 'idx': test_idx,\n }, )\n\n dataset = prep.stsb(og_dataset)\n assert_dataset(\n dataset,\n {\n 'inputs': 'stsb sentence1: Big news. sentence2: No idea.',\n 'targets': '2.8',\n 'idx': test_idx,\n },\n )\n\n # Test when floating point label is not in [0., 0.2, ..., 4.8, 5.0]\n og_dataset = tf.data.Dataset.from_tensor_slices({\n 'sentence1': ['Big news.'],\n 'sentence2': ['No idea.'],\n 'label': [1.66],\n 'idx': [test_idx],\n })\n dataset = prep.stsb(og_dataset)\n assert_dataset(\n dataset,\n {\n 'inputs': 'stsb sentence1: Big news. sentence2: No idea.',\n 'targets': '1.6',\n 'idx': test_idx,\n },\n )\n\n def test_multi_translate(self):\n languages = ['en', 'de', 'fr']\n translations = ['That is good.', 'Das ist gut.', 'Ca c\\'est bon.']\n og_dataset = tf.data.Dataset.from_tensors({\n 'translations': {\n 'language': languages,\n 'translation': translations\n }\n })\n\n dataset = prep.multi_translate(og_dataset, 'en', 'de')\n assert_dataset(\n dataset, {\n 'inputs': 'translate English to German: That is good.',\n 'targets': 'Das ist gut.',\n })\n\n # Test that it skips over the whole (single-entry) dataset when we ask for\n # a language which is not in the language list\n dataset = prep.multi_translate(og_dataset, 'en', 'sk')\n assert_dataset(dataset, [])\n\n def test_fill_in_the_blank(self):\n num_tries = 1000\n original = 'This is a long test with lots of words to see if it works ok.'\n dataset = tf.data.Dataset.from_tensor_slices(\n {'text': [original] * num_tries})\n dataset = prep.fill_in_the_blank(dataset)\n for data in test_utils.dataset_as_text(dataset):\n # Remove the prefix from the start of the input string\n self.assertTrue(data['inputs'].startswith('fill: '))\n inp = data['inputs'].replace('fill: ', '')\n # Split output into chunks according to X locations.\n out_split = data['targets'].split('X')\n # Make sure that there is at least one blank\n self.assertGreater(len(out_split), 1)\n # Remove leading/trailing whitespace and any empty chunks\n out_split = [o.strip() for o in out_split if o]\n # Replace 'X' with entries from out_split by popping from the front\n reconstructed = ''.join(\n [i if i != 'X' else out_split.pop(0) for i in inp])\n self.assertEqual(reconstructed, original)\n\n def test_fill_in_the_blank_sized(self):\n def _validate_data(data, valid_bins, og_length=15):\n # Remove the prefix from the start of the input string\n self.assertTrue(data['inputs'].startswith('fill: '))\n inp = data['inputs'].replace('fill: ', '')\n # Split input into chunks according to blank locations.\n inp_split = inp.split('_')\n # Make sure that there is exactly one blank (could be at beginning/end).\n self.assertLen(inp_split, 3)\n # Make sure reconstruction is accurate.\n reconstructed = ''.join([inp_split[0], data['targets']] +\n inp_split[2:])\n self.assertEqual(reconstructed, original)\n # Make sure blank size is correctly chosen.\n blank_bin = int(inp_split[1])\n self.assertIn(blank_bin, valid_bins)\n blank_size = len(data['targets'].split())\n self.assertGreaterEqual(blank_size, min(og_length, valid_bins[0]))\n self.assertLessEqual(blank_size, valid_bins[-1])\n return blank_size, blank_bin\n\n num_tries = 250\n original = 'This is a long test with lots of words 
to see if it works ok.'\n dataset = tf.data.Dataset.from_tensor_slices(\n {'text': [original] * num_tries})\n dataset = prep.fill_in_the_blank_sized(dataset, [1, 4])\n num_outputs = 0\n for data in test_utils.dataset_as_text(dataset):\n blank_size, blank_bin = _validate_data(data, [1, 4])\n if blank_size <= 2:\n self.assertEqual(blank_bin, 1)\n else:\n self.assertEqual(blank_bin, 4)\n num_outputs += 1\n self.assertEqual(num_tries, num_outputs)\n\n # Check case where bin size is larger than text.\n dataset = tf.data.Dataset.from_tensor_slices(\n {'text': [original] * num_tries})\n dataset = prep.fill_in_the_blank_sized(dataset, [1024])\n self.assertEmpty(list(test_utils.dataset_as_text(dataset)))\n\n def test_random_split_text(self):\n num_tries = 10\n original = '%s' % list(range(100))\n dataset = tf.data.Dataset.from_tensor_slices(\n {'text': [original] * num_tries})\n dataset = prep.random_split_text(dataset)\n out = []\n for data in test_utils.dataset_as_text(dataset):\n out.append(data['text'])\n reconstructed = ' '.join(out)\n ref = ' '.join([original] * num_tries)\n self.assertEqual(reconstructed, ref)\n\n def test_split_tokens(self):\n original = list(range(2, 102))\n og_dataset = tf.data.Dataset.from_tensors({'targets': original})\n\n # Verify splits with no max segments.\n def _verify_split(length, n_expected_outputs):\n ds = prep.split_tokens(og_dataset,\n unused_vocabulary=None,\n max_tokens_per_segment=length)\n outputs = list(test_utils.dataset_as_text(ds))\n self.assertLen(outputs, n_expected_outputs)\n reconstructed = []\n for ex in outputs[:-1]:\n t = ex['targets']\n self.assertLen(t, length)\n reconstructed.extend(t)\n final_t = outputs[-1]['targets']\n self.assertLessEqual(len(final_t), length)\n reconstructed.extend(final_t)\n self.assertEqual(reconstructed, original)\n\n _verify_split(25, 4)\n _verify_split(30, 4)\n _verify_split(100, 1)\n _verify_split(1000, 1)\n\n def test_split_tokens_additional_features_passthrough(self):\n original = list(range(2, 102))\n original_aux = list(range(4, 104))\n original_passthrough = list(range(20))\n og_dataset = tf.data.Dataset.from_tensors({\n 'targets':\n original,\n 'aux':\n original_aux,\n 'passthrough':\n original_passthrough\n })\n\n # Verify splits with no max segments.\n def _verify_split(length, n_expected_outputs):\n ds = prep.split_tokens(og_dataset,\n unused_vocabulary=None,\n max_tokens_per_segment=length,\n additional_feature_keys=['aux'],\n passthrough_feature_keys=['passthrough'])\n outputs = list(test_utils.dataset_as_text(ds))\n self.assertLen(outputs, n_expected_outputs)\n reconstructed = []\n reconstructed_aux = []\n for ex in outputs[:-1]:\n t = ex['targets']\n self.assertLen(t, length)\n reconstructed.extend(t)\n\n a = ex['aux']\n self.assertLen(a, length)\n reconstructed_aux.extend(a)\n final_t = outputs[-1]['targets']\n self.assertLessEqual(len(final_t), length)\n reconstructed.extend(final_t)\n self.assertEqual(reconstructed, original)\n\n final_a = outputs[-1]['aux']\n self.assertLessEqual(len(final_a), length)\n reconstructed_aux.extend(final_a)\n self.assertEqual(reconstructed_aux, original_aux)\n\n for ex in outputs:\n self.assertAllEqual(original_passthrough, ex['passthrough'])\n\n _verify_split(25, 4)\n _verify_split(30, 4)\n _verify_split(100, 1)\n _verify_split(1000, 1)\n\n def test_trim_tokens_at_front(self):\n sequence_length = {'inputs': 4}\n inputs = tf.data.Dataset.from_tensors(\n {'inputs': tf.constant([10, 11, 12, 13, 14, 15])})\n output = prep.trim_tokens_at_front(inputs,\n 
sequence_length=sequence_length)\n\n expected_output = [{'inputs': tf.constant([13, 14, 15])}]\n test_utils.assert_dataset(output, expected_output)\n\n def test_split_text_to_words(self):\n dataset = tf.data.Dataset.from_tensor_slices(\n {'text': ['That good.', 'That.']})\n dataset = prep._split_text_to_words(dataset)\n assert_dataset(dataset, {\n 'text': 'That good.',\n 'words': ['That', 'good.']\n })\n\n def test_definite_pronoun_resolution_simple(self):\n # Test where the pronoun is in the middle of the sentence. Also test the\n # case where the string pronoun is a substring of another word in the\n # sentence.\n og_dataset = tf.data.Dataset.from_tensors({\n 'sentence':\n 'Mitchell asked Tom if he could lend some money.',\n 'pronoun':\n 'he',\n 'candidates': ['Mitchell', 'Tom'],\n 'label':\n 1,\n })\n dataset = prep.definite_pronoun_resolution_simple(og_dataset)\n assert_dataset(\n dataset, {\n 'inputs':\n 'wsc: Mitchell asked Tom if *he* could lend some money.',\n 'targets': 'Tom',\n })\n\n # Test multiple word pronouns. The Definite Pronoun Resolution Dataset is\n # weird.\n og_dataset = tf.data.Dataset.from_tensors({\n 'sentence':\n 'Bill beat Tom at Scrabble because that newbie had all the luck.',\n 'pronoun': 'that newbie',\n 'candidates': ['Bill', 'Tom'],\n 'label': 0,\n })\n dataset = prep.definite_pronoun_resolution_simple(og_dataset)\n assert_dataset(\n dataset, {\n 'inputs':\n 'wsc: Bill beat Tom at Scrabble because *that newbie* had all the luck.',\n 'targets': 'Bill',\n })\n\n # Test pronoun at end of sentence.\n og_dataset = tf.data.Dataset.from_tensors({\n 'sentence':\n 'Carl borrowed a book from Richard, but the book was unreadable to him.',\n 'pronoun':\n 'him',\n 'candidates': ['Carl', 'Richard'],\n 'label':\n 0,\n })\n dataset = prep.definite_pronoun_resolution_simple(og_dataset)\n assert_dataset(\n dataset, {\n 'inputs':\n 'wsc: Carl borrowed a book from Richard, but the book was unreadable to *him*.',\n 'targets': 'Carl',\n })\n\n def test_wsc_simple(self):\n og_dataset = tf.data.Dataset.from_tensors({\n 'text': 'Mitchell asked Tom if he could lend some money.',\n 'span1_text': 'Tom',\n 'span2_text': 'he',\n 'span2_index': 4,\n 'idx': 1,\n })\n\n dataset = prep.wsc_simple(og_dataset, correct_referent_only=False)\n assert_dataset(\n dataset, {\n 'inputs':\n 'wsc: Mitchell asked Tom if *he* could lend some money.',\n 'targets': 'Tom',\n 'label': 0,\n 'idx': 1,\n })\n\n # Test including only examples with the correct referent.\n og_dataset = tf.data.Dataset.from_tensor_slices({\n 'text': [\n 'Mitchell asked Tom if he could lend some money.',\n 'Mitchell asked Tom if he could lend some money.',\n ],\n 'span1_text': [\n 'Tom',\n 'Mitchell',\n ],\n 'span2_text': [\n 'he',\n 'he',\n ],\n 'span2_index': [4, 4],\n 'label': [1, 0],\n 'idx': [1, 2]\n })\n dataset = prep.wsc_simple(og_dataset, correct_referent_only=True)\n assert_dataset(dataset, [{\n 'inputs': 'wsc: Mitchell asked Tom if *he* could lend some money.',\n 'targets': 'Tom',\n 'label': True,\n 'idx': 1,\n }])\n\n def test_wnli_simple(self):\n og_dataset = tf.data.Dataset.from_tensor_slices({\n 'sentence1': [\n 'Lily spoke to Donna breaking her silence.',\n 'The fish ate the worm. 
It was tasty.',\n 'Edward dropped adhesive tape onto his window sill, and when he pulled the tape off, some of the glue was stuck on it.',\n \"Al stole Bob's wallet and car, and then he was driving it really fast to get away.\",\n ],\n 'sentence2': [\n \"Lily spoke to Donna breaking Donna's silence.\",\n 'The worm was tasty.',\n 'Some of the glue was stuck on the window sill.',\n 'He was driving the car really fast to get away.',\n ],\n 'idx': [1, 2, 3, 4],\n 'label': [1, 0, 0, 1],\n })\n dataset = prep.wnli_simple(og_dataset)\n assert_dataset(dataset, [\n {\n 'inputs': 'wsc: Lily spoke to Donna breaking *her* silence.',\n 'targets': 'Donna',\n 'premise': 'Lily spoke to Donna breaking her silence.',\n 'hypothesis': \"Lily spoke to Donna breaking Donna's silence.\",\n 'label': 1,\n 'idx': 1,\n },\n {\n 'inputs': 'wsc: The fish ate the worm. *It* was tasty.',\n 'targets': 'The worm',\n 'premise': 'The fish ate the worm. It was tasty.',\n 'hypothesis': 'The worm was tasty.',\n 'label': 0,\n 'idx': 2,\n },\n {\n 'inputs':\n 'wsc: Edward dropped adhesive tape onto his window sill, and when he pulled the tape off, some of the glue was stuck on *it* .',\n 'targets': 'the window sill',\n 'premise':\n 'Edward dropped adhesive tape onto his window sill, and when he pulled the tape off, some of the glue was stuck on it.',\n 'hypothesis': 'Some of the glue was stuck on the window sill.',\n 'label': 0,\n 'idx': 3,\n },\n {\n 'inputs':\n \"wsc: Al stole Bob's wallet and car, and then he was driving *it* really fast to get away.\",\n 'targets': 'the car',\n 'premise':\n \"Al stole Bob's wallet and car, and then he was driving it really fast to get away.\",\n 'hypothesis':\n 'He was driving the car really fast to get away.',\n 'label': 1,\n 'idx': 4,\n },\n ])\n\n def test_next_sentence_prediction(self):\n\n og_dataset = tf.data.Dataset.from_tensor_slices({\n 'text': [\n 'This is the first sentence. This is the second sentence.',\n 'This is the third sentence. This is the fourth sentence.',\n ]\n })\n\n # Test neighboring sentences.\n dataset = prep.next_sentence_prediction(og_dataset,\n label_sentences=False,\n p_neighbors=1,\n buffer_size=1)\n assert_dataset(\n dataset,\n [\n {\n 'inputs':\n 'nsp: This is the first sentence. This is the second sentence.',\n 'targets': 'next',\n },\n {\n 'inputs':\n 'nsp: This is the third sentence. This is the fourth sentence.',\n 'targets': 'next',\n },\n ],\n )\n\n # Test non-neighboring sentences.\n dataset = prep.next_sentence_prediction(og_dataset,\n label_sentences=False,\n p_neighbors=0,\n buffer_size=1)\n assert_dataset(\n dataset,\n [\n {\n 'inputs':\n 'nsp: This is the first sentence. This is the fourth sentence.',\n 'targets': 'not_next',\n },\n {\n 'inputs':\n 'nsp: This is the third sentence. This is the second sentence.',\n 'targets': 'not_next',\n },\n ],\n )\n\n # Test labeling sentences.\n dataset = prep.next_sentence_prediction(og_dataset,\n label_sentences=True,\n p_neighbors=1,\n buffer_size=1)\n assert_dataset(\n dataset,\n [\n {\n 'inputs':\n 'nsp: sentence1: This is the first sentence. sentence2: This is the second sentence.',\n 'targets': 'next',\n },\n {\n 'inputs':\n 'nsp: sentence1: This is the third sentence. 
sentence2: This is the fourth sentence.',\n 'targets': 'next',\n },\n ],\n )\n\n def test_lm(self):\n dataset = tf.data.Dataset.from_tensor_slices(\n {'text': ['That is good.']})\n dataset = prep.lm(dataset)\n assert_dataset(dataset, {'inputs': '', 'targets': 'That is good.'})\n\n def test_triviaqa_truncate_text(self):\n\n vocab = test_utils.sentencepiece_vocab()\n\n def tokenize_and_prepare_dataset(inputs, targets):\n tokenized_inputs = vocab.encode(inputs)\n tokenized_targets = vocab.encode(targets)\n\n dataset = tf.data.Dataset.from_tensors({\n 'inputs':\n tokenized_inputs,\n 'targets':\n tokenized_targets,\n })\n\n return dataset, tokenized_targets\n\n inputs = 'This is a very very long string which must contain the answer.'\n targets = 'long string'\n\n og_dataset, tokenized_targets = tokenize_and_prepare_dataset(\n inputs, targets)\n\n for _ in range(0, 10):\n dataset = prep.trivia_qa_truncate_inputs(\n og_dataset,\n output_features=None,\n sequence_length={'inputs': 20})\n\n for data in test_utils.dataset_as_text(dataset):\n self.assertLen(data['inputs'], 20)\n self.assertContainsSubset(tokenized_targets, data['inputs'])\n\n # Dummy input which exists in the vocab to be able to compare strings after\n # decoding.\n inputs = 'w h d n r t v'\n targets = 'h d'\n\n og_dataset, _ = tokenize_and_prepare_dataset(inputs, targets)\n\n for _ in range(0, 5):\n dataset = prep.trivia_qa_truncate_inputs(\n og_dataset,\n output_features=None,\n sequence_length={'inputs': 5})\n\n for data in test_utils.dataset_as_text(dataset):\n self.assertLen(data['inputs'], 5)\n truncated_inputs = vocab.decode(data['inputs'].tolist())\n new_targets = vocab.decode(data['targets'].tolist())\n self.assertRegex(truncated_inputs, '.*' + targets + '.*')\n self.assertEqual(targets, new_targets)\n\n def test_triviaqa_truncate(self):\n\n sequence_length = {\n 'inputs': 10,\n }\n\n # Answer starts from the 0th position of the inputs.\n dataset = tf.data.Dataset.from_tensors({\n 'inputs': tf.range(0, 30),\n 'targets': tf.range(0, 5)\n })\n\n dataset = prep.trivia_qa_truncate_inputs(\n dataset, output_features=None, sequence_length=sequence_length)\n\n assert_dataset(dataset, {\n 'inputs': tf.range(0, 10),\n 'targets': tf.range(0, 5)\n })\n\n # Answer is in the last n elements of the targets.\n dataset = tf.data.Dataset.from_tensors({\n 'inputs': tf.range(0, 30),\n 'targets': tf.range(27, 30)\n })\n\n dataset = prep.trivia_qa_truncate_inputs(\n dataset, output_features=None, sequence_length=sequence_length)\n\n assert_dataset(dataset, {\n 'inputs': tf.range(20, 30),\n 'targets': tf.range(27, 30)\n })\n\n # Answer is not in inputs. 
Example is dropped from the dataset.\n        no_overlap_dataset = tf.data.Dataset.from_tensors({\n            'inputs':\n                tf.range(0, 30),\n            'targets':\n                tf.range(27, 32)\n        })\n\n        dataset = prep.trivia_qa_truncate_inputs(\n            no_overlap_dataset,\n            output_features=None,\n            sequence_length=sequence_length)\n\n        i = 0\n        for data in test_utils.dataset_as_text(dataset):\n            i = i + 1\n\n        self.assertEqual(i, 0)\n\n        # Answer is in the middle of the inputs.\n        for _ in range(0, 10):\n            og_dataset = tf.data.Dataset.from_tensors({\n                'inputs':\n                    tf.range(0, 30),\n                'targets':\n                    tf.range(10, 15),\n            })\n\n            dataset = prep.trivia_qa_truncate_inputs(\n                og_dataset,\n                output_features=None,\n                sequence_length=sequence_length)\n            for data in test_utils.dataset_as_text(dataset):\n                self.assertContainsSubset(data['targets'], data['inputs'])\n                self.assertLen(data['inputs'], 10)\n\n    def test_record(self):\n        og_dataset = tf.data.Dataset.from_tensors({\n            'query':\n                'It was @placeholder.',\n            'entities': ['A', 'B', 'C'],\n            'passage': [\n                'This is the passage\\n@highlight\\nAnother sentence.\\n@highlight\\nThird sentence.'\n            ],\n            'answers': ['A', 'C'],\n            'idx': {\n                'passage': 1,\n                'query': 2,\n            },\n        })\n\n        dataset = prep.record(og_dataset)\n        assert_dataset(dataset, [\n            {\n                'inputs':\n                    'record query: It was @placeholder. entities: A, B, C passage: This is the passage. Another sentence. Third sentence.',\n                'targets': 'A',\n                'idx/passage': 1,\n                'idx/query': 2,\n                'answers': ['A', 'C'],\n            },\n            {\n                'inputs':\n                    'record query: It was @placeholder. entities: A, B, C passage: This is the passage. Another sentence. Third sentence.',\n                'targets': 'C',\n                'idx/passage': 1,\n                'idx/query': 2,\n                'answers': ['A', 'C'],\n            },\n        ])\n\n        # Test a dataset without answers, as would appear in the test set.\n        og_test_dataset = tf.data.Dataset.from_tensors({\n            'query':\n                'It was @placeholder.',\n            'entities': ['A', 'B', 'C'],\n            'passage': [\n                'This is the passage\\n@highlight\\nAnother sentence.\\n@highlight\\nThird sentence.'\n            ],\n            'answers':\n                tf.constant([], dtype=tf.string),\n            'idx': {\n                'passage': 1,\n                'query': 2,\n            },\n        })\n\n        # Test all answers.\n        dataset = prep.record(og_test_dataset)\n        assert_dataset(dataset, [\n            {\n                'inputs':\n                    'record query: It was @placeholder. entities: A, B, C passage: This is the passage. Another sentence. 
Third sentence.',\n 'targets': '<unk>',\n 'idx/passage': 1,\n 'idx/query': 2,\n 'answers': []\n },\n ])\n\n def test_take(self):\n og_dataset = tf.data.Dataset.from_tensor_slices({'inputs': [1] * 100})\n dataset = prep.take(og_dataset, 5)\n assert_dataset(dataset, [{'inputs': 1} for _ in range(5)])\n dataset = prep.take(og_dataset, -1)\n assert_dataset(dataset, [{'inputs': 1} for _ in range(100)])\n\n def test_parse_tsv(self):\n og_dataset = tf.data.Dataset.from_tensor_slices(['a\\tb', 'c\\td'])\n dataset = prep.parse_tsv(og_dataset, field_names=['f1', 'f2'])\n assert_dataset(dataset, [{\n 'f1': 'a',\n 'f2': 'b'\n }, {\n 'f1': 'c',\n 'f2': 'd'\n }])\n\n def test_tokenize(self):\n og_dataset = tf.data.Dataset.from_tensors({\n 'prefix': 'This is',\n 'suffix': 'a test.'\n })\n output_features = {\n 'prefix': Feature(test_utils.MockVocabulary({'This is': [0, 1]})),\n 'suffix': Feature(test_utils.MockVocabulary({'a test.': [2, 3]})),\n }\n\n assert_dataset(\n prep.tokenize(og_dataset, output_features=output_features), {\n 'prefix': [0, 1],\n 'prefix_plaintext': 'This is',\n 'suffix': [2, 3],\n 'suffix_plaintext': 'a test.'\n })\n assert_dataset(\n prep.tokenize(og_dataset,\n output_features=output_features,\n copy_plaintext=False), {\n 'prefix': [0, 1],\n 'suffix': [2, 3]\n })\n\n def test_denoise(self):\n vocab = test_utils.sentencepiece_vocab()\n target_tokens = vocab.encode('The quick brown fox.')\n\n # This is what it encodes to.\n self.assertEqual(\n target_tokens,\n [3, 2, 20, 4, 3, 2, 8, 13, 2, 3, 2, 23, 7, 19, 22, 3, 2, 7, 2])\n\n og_dataset = tf.data.Dataset.from_tensor_slices({\n 'targets': [target_tokens],\n })\n\n output_features = {\n 'targets': Feature(vocab),\n }\n\n # These are the parameters of denoise in the operative config of 'base'.\n # Except noise_density, bumped up from 0.15 to 0.3 in order to demonstrate\n # multiple corrupted spans.\n with utils.map_seed_manager(42):\n denoised_dataset = prep.denoise(\n og_dataset,\n output_features,\n noise_density=0.3,\n noise_mask_fn=prep.random_spans_noise_mask,\n inputs_fn=prep.noise_span_to_unique_sentinel,\n targets_fn=prep.nonnoise_span_to_unique_sentinel)\n\n # Two spans corrupted, [2] and [22, 3, 2, 7, 2], replaced by unique\n # sentinels 25 and 24 respectively.\n assert_dataset(denoised_dataset, [\n {\n 'inputs': [3, 2, 20, 4, 25, 2, 8, 13, 2, 3, 2, 23, 7, 19, 24],\n 'targets': [25, 3, 24, 22, 3, 2, 7, 2],\n },\n ])\n\n def test_denoise_nested_decorators(self):\n \"\"\"Test whether gin and utils.map_over_dataset decorators are compatible.\"\"\"\n bindings = \"\"\"\n preprocessors.unsupervised.preprocessors = [@preprocessors.denoise]\n preprocessors.denoise.noise_density = 0.15\n preprocessors.denoise.noise_mask_fn = @preprocessors.iid_noise_mask\n preprocessors.denoise.inputs_fn = @noise_token_to_sentinel\n \"\"\"\n gin.parse_config(bindings)\n og_dataset = tf.data.Dataset.from_tensor_slices({'targets': [1, 2, 3]})\n output_features = {\n 'targets': Feature(test_utils.sentencepiece_vocab())\n }\n # Test denoise function when it is used as a gin-configurable of another\n # gin-configurable, prep.unsupervised.\n dataset = prep.unsupervised(og_dataset,\n output_features=output_features)\n self.assertIsInstance(dataset, tf.data.Dataset)\n\n def test_prefix_lm(self):\n vocab = test_utils.sentencepiece_vocab()\n inp = list(range(1, 101))\n og_dataset = tf.data.Dataset.from_tensor_slices({'targets': [inp]})\n og_dataset = og_dataset.repeat(100)\n output_features = {'targets': Feature(vocab)}\n output_dataset = prep.prefix_lm(\n 
og_dataset,\n {\n 'inputs': 100,\n 'targets': 100\n },\n output_features,\n )\n input_lengths = set()\n for ex in output_dataset.as_numpy_iterator():\n self.assertListEqual(\n ex['inputs'].tolist() + ex['targets'].tolist(), inp)\n input_lengths.add(len(ex['inputs']))\n self.assertGreater(len(input_lengths), 1)\n\n def test_rank_classification(self):\n dataset = tf.data.Dataset.from_tensors({\n 'left': 'the sky is blue',\n 'right': 'cats are so cute',\n 'label_idx': 1,\n })\n preprocessor = functools.partial(\n prep.rank_classification,\n dataset,\n inputs_fn=lambda features: [features['right'], features['left']],\n targets_fn=lambda features: ['class 0', 'class 1'],\n label_key='label_idx')\n\n test_utils.assert_dataset(preprocessor(mode='train'),\n [{\n 'idx': 0,\n 'inputs': 'the sky is blue',\n 'targets': 'class 1',\n 'is_correct': True,\n }])\n\n test_utils.assert_dataset(preprocessor(mode='eval'),\n [{\n 'idx': 0,\n 'inputs': 'cats are so cute',\n 'targets': 'class 0',\n 'is_correct': False,\n }, {\n 'idx': 0,\n 'inputs': 'the sky is blue',\n 'targets': 'class 1',\n 'is_correct': True,\n }])\n\n test_utils.assert_dataset(preprocessor(mode='fewshot_eval'), [\n {\n 'idx': [0, 0],\n 'inputs': ['cats are so cute', 'the sky is blue'],\n 'targets': ['class 0', 'class 1'],\n 'is_correct': [False, True]\n },\n ])\n\n def test_rank_classification_multilabel(self):\n dataset = tf.data.Dataset.from_tensors({\n 'left': 'the sky is blue',\n 'right': 'cats are so cute',\n 'label_idx': [1, 2],\n })\n\n preprocessor = functools.partial(\n prep.rank_classification,\n dataset,\n inputs_fn=lambda features:\n [features['right'], features['left'], 'X'],\n targets_fn=lambda features: ['class 0', 'class 1', 'class 2'],\n label_key='label_idx')\n\n test_utils.assert_dataset(preprocessor(mode='train'), [\n {\n 'idx': 0,\n 'inputs': 'the sky is blue',\n 'targets': 'class 1',\n 'is_correct': True,\n },\n {\n 'idx': 0,\n 'inputs': 'X',\n 'targets': 'class 2',\n 'is_correct': True,\n },\n ])\n\n test_utils.assert_dataset(preprocessor(mode='eval'), [\n {\n 'idx': 0,\n 'inputs': 'cats are so cute',\n 'targets': 'class 0',\n 'is_correct': False,\n },\n {\n 'idx': 0,\n 'inputs': 'the sky is blue',\n 'targets': 'class 1',\n 'is_correct': True,\n },\n {\n 'idx': 0,\n 'inputs': 'X',\n 'targets': 'class 2',\n 'is_correct': True,\n },\n ])\n\n test_utils.assert_dataset(preprocessor(mode='fewshot_eval'), [\n {\n 'idx': [0, 0, 0],\n 'inputs': ['cats are so cute', 'the sky is blue', 'X'],\n 'targets': ['class 0', 'class 1', 'class 2'],\n 'is_correct': [False, True, True]\n },\n ])\n\n def test_rank_classification_errors(self):\n dataset = tf.data.Dataset.from_tensors({\n 'left': 'the sky is blue',\n 'right': 'cats are so cute',\n 'label': [0, 2],\n })\n\n with self.assertRaisesRegex(\n tf.errors.InvalidArgumentError,\n '.*`inputs_fn` and `targets_fn` must return the same size tensors.*'\n ):\n list(\n prep.rank_classification(\n dataset,\n inputs_fn=lambda features: tf.stack([features['right']]),\n targets_fn=lambda features: tf.stack(\n ['class 0', 'class 1'])))\n\n with self.assertRaisesRegex(\n tf.errors.InvalidArgumentError,\n 'Label values must be less than the number of classes.'):\n list(\n prep.rank_classification(\n dataset,\n inputs_fn=lambda features: tf.stack(\n [features['right'], features['left']]),\n targets_fn=lambda features: tf.stack(\n ['class 0', 'class 1'])))\n\n def test_rank_classification_formatter(self):\n input_examples = [\n {\n 'premise': 'The farmland needed irrigation.',\n 'question': 'effect',\n 
'choice1': 'a canal was constructed',\n 'choice2': 'the crops grew tall',\n 'label': 0,\n },\n {\n 'premise': 'I decided to stay home last night.',\n 'question': 'cause',\n 'choice1': 'I wanted to see people',\n 'choice2': 'I was too tired',\n 'label': 1,\n },\n ]\n\n input_ds = tf.data.Dataset.from_generator(lambda:\n (x for x in input_examples),\n output_types={\n 'premise': tf.string,\n 'question': tf.string,\n 'choice1': tf.string,\n 'choice2': tf.string,\n 'label': tf.int32,\n },\n output_shapes={\n 'premise': [],\n 'question': [],\n 'choice1': [],\n 'choice2': [],\n 'label': [],\n })\n\n # all options\n dataset = prep.rank_classification_formatter(\n input_ds,\n inputs_formats='{premise} What is the {question}? X',\n targets_formats=['I think {choice1}.', 'I think {choice2}.'],\n mode='eval')\n\n test_utils.assert_dataset(dataset, [\n {\n 'idx': 0,\n 'inputs':\n 'The farmland needed irrigation. What is the effect? X',\n 'targets': 'I think a canal was constructed.',\n 'is_correct': True\n },\n {\n 'idx': 0,\n 'inputs':\n 'The farmland needed irrigation. What is the effect? X',\n 'targets': 'I think the crops grew tall.',\n 'is_correct': False\n },\n {\n 'idx': 1,\n 'inputs':\n 'I decided to stay home last night. What is the cause? X',\n 'targets': 'I think I wanted to see people.',\n 'is_correct': False\n },\n {\n 'idx': 1,\n 'inputs':\n 'I decided to stay home last night. What is the cause? X',\n 'targets': 'I think I was too tired.',\n 'is_correct': True\n },\n ])\n\n # Reverse inputs and targets for supporting the use case when there is\n # one target, but multiple inputs to select from.\n dataset = prep.rank_classification_formatter(\n input_ds,\n inputs_formats=['I think {choice1}.', 'I think {choice2}.'],\n targets_formats='{premise} What is the {question}? X',\n mode='eval')\n\n test_utils.assert_dataset(dataset, [\n {\n 'idx': 0,\n 'targets':\n 'The farmland needed irrigation. What is the effect? X',\n 'inputs': 'I think a canal was constructed.',\n 'is_correct': True\n },\n {\n 'idx': 0,\n 'targets':\n 'The farmland needed irrigation. What is the effect? X',\n 'inputs': 'I think the crops grew tall.',\n 'is_correct': False\n },\n {\n 'idx': 1,\n 'targets':\n 'I decided to stay home last night. What is the cause? X',\n 'inputs': 'I think I wanted to see people.',\n 'is_correct': False\n },\n {\n 'idx': 1,\n 'targets':\n 'I decided to stay home last night. What is the cause? X',\n 'inputs': 'I think I was too tired.',\n 'is_correct': True\n },\n ])\n\n # train mode\n dataset = prep.rank_classification_formatter(\n input_ds,\n inputs_formats='{premise} What is the {question}? X',\n targets_formats=['I think {choice1}.', 'I think {choice2}.'],\n mode='train')\n\n test_utils.assert_dataset(dataset, [\n {\n 'idx': 0,\n 'inputs':\n 'The farmland needed irrigation. What is the effect? X',\n 'targets': 'I think a canal was constructed.',\n 'is_correct': True\n },\n {\n 'idx': 1,\n 'inputs':\n 'I decided to stay home last night. What is the cause? X',\n 'targets': 'I think I was too tired.',\n 'is_correct': True\n },\n ])\n\n # fewshot_eval mode\n dataset = prep.rank_classification_formatter(\n input_ds,\n inputs_formats='{premise} What is the {question}? X',\n targets_formats=['I think {choice1}.', 'I think {choice2}.'],\n mode='fewshot_eval')\n\n test_utils.assert_dataset(dataset, [\n {\n 'idx': [0, 0],\n 'inputs': [\n 'The farmland needed irrigation. What is the effect? X',\n 'The farmland needed irrigation. What is the effect? 
X',\n ],\n 'targets': [\n 'I think a canal was constructed.',\n 'I think the crops grew tall.',\n ],\n 'is_correct': [True, False]\n },\n {\n 'idx': [1, 1],\n 'inputs': [\n 'I decided to stay home last night. What is the cause? X',\n 'I decided to stay home last night. What is the cause? X',\n ],\n 'targets': [\n 'I think I wanted to see people.',\n 'I think I was too tired.',\n ],\n 'is_correct': [False, True]\n },\n ])\n\n def test_nested_key_rank_classification_formatter(self):\n input_ds = tf.data.Dataset.from_tensors({\n 'answerKey': 0,\n 'fact1': 'creating paper requires cutting down trees',\n 'question': {\n 'choice_A': 'forests',\n 'choice_B': 'canyons',\n 'sub_question': {\n 'stem': 'What is the ultimate source of greeting cards?'\n }\n }\n })\n\n dataset = prep.rank_classification_formatter(\n input_ds,\n inputs_formats='{fact1}. {question/sub_question/stem} X 0',\n targets_formats=[\n 'Correct Answer: {question/choice_A} X 1 Incorrect Answer: '\n '{question/choice_B} X 1',\n 'Correct Answer: {question/choice_B} X 1 Incorrect Answer: '\n '{question/choice_A} X 1',\n ],\n mode='eval',\n label_key='answerKey')\n\n test_utils.assert_dataset(dataset, [\n {\n 'idx':\n 0,\n 'inputs':\n 'creating paper requires cutting down trees. What is the '\n 'ultimate source of greeting cards? X 0',\n 'targets':\n 'Correct Answer: forests X 1 Incorrect Answer: canyons X 1',\n 'is_correct':\n True,\n },\n {\n 'idx':\n 0,\n 'inputs':\n 'creating paper requires cutting down trees. What is the '\n 'ultimate source of greeting cards? X 0',\n 'targets':\n 'Correct Answer: canyons X 1 Incorrect Answer: forests X 1',\n 'is_correct':\n False,\n },\n ])\n\n with self.assertRaisesRegex(\n ValueError,\n 'Final value of key \\'question/sub_question\\' must be a tf.string. '\n 'Got: dict'):\n prep.rank_classification_formatter(\n input_ds,\n inputs_formats='{fact1}. {question/sub_question} X 0',\n targets_formats=['test1', 'test2'],\n mode='eval',\n label_key='answerKey')\n\n with self.assertRaises(TypeError):\n prep.rank_classification_formatter(\n input_ds,\n inputs_formats='{fact1}. 
{answerKey} X 0',\n targets_formats=['test1', 'test2'],\n mode='eval',\n label_key='answerKey')\n\n def test_select_random_chunk(self):\n dataset = tf.data.Dataset.from_tensors({\n 'targets': [0, 1, 2, 3],\n 'inputs': [4, 5, 6, 7]\n })\n dataset = prep.select_random_chunk(dataset,\n feature_key='targets',\n max_length=4)\n output = list(dataset.as_numpy_iterator())\n self.assertEqual(1, len(output))\n output = output[0]\n self.assertSequenceEqual(['targets'], list(output.keys()))\n self.assertGreater(len(output['targets']), 0)\n\n def test_select_random_chunk_uniform_start(self):\n dataset = tf.data.Dataset.from_tensors({\n 'targets': [0, 1, 2, 3],\n 'inputs': [4, 5, 6, 7]\n })\n dataset = prep.select_random_chunk(dataset,\n feature_key='targets',\n max_length=4,\n uniform_random_start=True)\n output = list(dataset.as_numpy_iterator())\n self.assertEqual(1, len(output))\n output = output[0]\n self.assertSequenceEqual(['targets'], list(output.keys()))\n self.assertGreater(len(output['targets']), 0)\n\n def test_select_random_chunk_additional_features(self):\n dataset = tf.data.Dataset.from_tensors({\n 'targets': [0, 1, 2, 3],\n 'inputs': [4, 5, 6, 7]\n })\n dataset = prep.select_random_chunk(dataset,\n feature_key='targets',\n additional_feature_keys=['inputs'],\n max_length=3)\n output = list(dataset.as_numpy_iterator())\n self.assertEqual(1, len(output))\n output = output[0]\n self.assertSequenceEqual(['inputs', 'targets'],\n sorted(list(output.keys())))\n self.assertAllEqual(output['inputs'] - 4, output['targets'])\n\n def test_select_random_chunk_different_sizes(self):\n dataset = tf.data.Dataset.from_tensors({\n 'targets': [0, 1, 2, 3],\n 'inputs': [4, 5]\n })\n with self.assertRaises(tf.errors.InvalidArgumentError):\n prep.select_random_chunk(dataset,\n feature_key='targets',\n additional_feature_keys=['inputs'],\n max_length=4)\n\n\nif __name__ == '__main__':\n absltest.main()\n" ]
[ [ "tensorflow.equal", "tensorflow.stack", "tensorflow.range", "tensorflow.data.Dataset.from_tensors", "tensorflow.cast", "tensorflow.data.Dataset.from_generator", "tensorflow.constant", "tensorflow.random.set_seed", "tensorflow.data.Dataset.from_tensor_slices" ] ]
Xiaoxiong-Liu/gluon-ts
[ "097c492769258dd70b7f223f826b17b0051ceee9" ]
[ "src/gluonts/nursery/spliced_binned_pareto/tcn.py" ]
[ "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\n# Implementation taken and modified from\n# https://github.com/White-Link/UnsupervisedScalableRepresentationLearningTimeSeries, which was created\n# with the following license.\n\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\n# Implementation of causal CNNs partly taken and modified from\n# https://github.com/locuslab/TCN/blob/master/TCN/tcn.py, originally created\n# with the following license.\n\n# MIT License\n\n# Copyright (c) 2018 CMU Locus Lab\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\nimport torch\n\n\nclass Chomp1d(torch.nn.Module):\n \"\"\"Removes leading or trailing elements of a time series.\n\n Takes as input a three-dimensional tensor (`B`, `C`, `L`) where `B` is the\n batch size, `C` is the number of input channels, and `L` is the length of\n the input. 
Outputs a three-dimensional tensor (`B`, `C`, `L - s`) where `s`\n    is the number of elements to remove.\n\n    Args:\n        chomp_size : Number of elements to remove.\n        last : If True, removes the last elements in the time dimension;\n            If False, removes the first elements.\n    \"\"\"\n\n    def __init__(self, chomp_size: int, last: bool = True):\n        super(Chomp1d, self).__init__()\n        self.chomp_size = chomp_size\n        self.last = last\n\n    def forward(self, x):\n        if self.last:\n            x_chomped = x[:, :, : -self.chomp_size]\n        else:\n            x_chomped = x[:, :, self.chomp_size :]\n\n        return x_chomped\n\n\nclass TCNBlock(torch.nn.Module):\n    \"\"\"Temporal Convolutional Network block.\n\n    Composed sequentially of two causal convolutions (with leaky ReLU activation functions),\n    and a parallel residual connection.\n\n    Takes as input a three-dimensional tensor (`B`, `C`, `L`) where `B` is the\n    batch size, `C` is the number of input channels, and `L` is the length of\n    the input. Outputs a three-dimensional tensor (`B`, `C`, `L`).\n\n    Args:\n        in_channels : Number of input channels.\n        out_channels : Number of output channels.\n        kernel_size : Kernel size of the applied non-residual convolutions.\n        dilation : Dilation parameter of non-residual convolutions.\n        bias : If True, adds a learnable bias to the convolutions.\n        fwd_time : If True, the network's \"causal\" direction is from past to future (forward);\n            if False, from future to past (backward).\n        final : If True, applies a final leaky ReLU activation after the\n            residual connection.\n    \"\"\"\n\n    def __init__(\n        self,\n        in_channels: int,\n        out_channels: int,\n        kernel_size: int,\n        dilation: int,\n        bias: bool = True,\n        fwd_time: bool = True,\n        final: bool = False,\n    ):\n\n        super(TCNBlock, self).__init__()\n\n        in_channels = int(in_channels)\n        kernel_size = int(kernel_size)\n        out_channels = int(out_channels)\n        dilation = int(dilation)\n\n        # Computes left padding so that the applied convolutions are causal\n        padding = int((kernel_size - 1) * dilation)\n\n        # First causal convolution\n        conv1_pre = torch.nn.Conv1d(\n            in_channels=in_channels,\n            out_channels=out_channels,\n            kernel_size=kernel_size,\n            padding=padding,\n            dilation=dilation,\n            bias=bias,\n        )\n        conv1 = torch.nn.utils.weight_norm(conv1_pre)\n\n        # The truncation makes the convolution causal\n        chomp1 = Chomp1d(chomp_size=padding, last=fwd_time)\n\n        relu1 = torch.nn.LeakyReLU()\n\n        # Second causal convolution\n        conv2_pre = torch.nn.Conv1d(\n            in_channels=out_channels,\n            out_channels=out_channels,\n            kernel_size=kernel_size,\n            padding=padding,\n            dilation=dilation,\n            bias=bias,\n        )\n        conv2 = torch.nn.utils.weight_norm(conv2_pre)\n        chomp2 = Chomp1d(chomp_size=padding, last=fwd_time)\n        relu2 = torch.nn.LeakyReLU()\n\n        # Causal network\n        self.causal = torch.nn.Sequential(\n            conv1, chomp1, relu1, conv2, chomp2, relu2\n        )\n\n        # Residual connection\n        self.upordownsample = (\n            torch.nn.Conv1d(\n                in_channels=in_channels,\n                out_channels=out_channels,\n                kernel_size=1,\n            )\n            if in_channels != out_channels\n            else None\n        )\n\n        # Final activation function\n        self.activation = torch.nn.LeakyReLU() if final else None\n\n    def forward(self, x):\n        out_causal = self.causal(x)\n        res = x if self.upordownsample is None else self.upordownsample(x)\n        if self.activation is None:\n            return out_causal + res\n        else:\n            return self.activation(out_causal + res)\n\n\nclass TCN(torch.nn.Module):\n    \"\"\"Temporal Convolutional Network.\n\n    Composed of a sequence of causal convolution blocks.\n\n    Takes as input a three-dimensional tensor (`B`, `C`, `L`) where `B` is the\n    batch size, `C` is the number of input 
channels, and `L` is the length of\n    the input. Outputs a three-dimensional tensor (`B`, `C_out`, `L`).\n\n    Args:\n        in_channels : Number of input channels.\n        out_channels : Number of output channels.\n        kernel_size : Kernel size of the applied non-residual convolutions.\n        channels : Number of channels processed in the hidden layers of the\n            network.\n        layers : Depth of the network.\n        bias : If True, adds a learnable bias to the convolutions.\n        fwd_time : If True, the network's causal direction is from past to future (forward);\n            if False, from future to past (backward).\n    \"\"\"\n\n    def __init__(\n        self,\n        in_channels: int,\n        out_channels: int,\n        kernel_size: int,\n        channels: int,\n        layers: int,\n        bias: bool = True,\n        fwd_time: bool = True,\n    ):\n\n        super(TCN, self).__init__()\n\n        layers = int(layers)\n\n        net_layers = []  # List of sequential TCN blocks\n        dilation_size = 1  # Initial dilation size\n\n        for i in range(layers):\n            in_channels_block = in_channels if i == 0 else channels\n            net_layers.append(\n                TCNBlock(\n                    in_channels=in_channels_block,\n                    out_channels=channels,\n                    kernel_size=kernel_size,\n                    dilation=dilation_size,\n                    bias=bias,\n                    fwd_time=fwd_time,\n                    final=False,\n                )\n            )\n            dilation_size *= 2  # Doubles the dilation size at each step\n\n        # Last layer\n        net_layers.append(\n            TCNBlock(\n                in_channels=channels,\n                out_channels=out_channels,\n                kernel_size=kernel_size,\n                dilation=dilation_size,\n                bias=bias,\n                fwd_time=fwd_time,\n                final=True,\n            )\n        )\n\n        self.network = torch.nn.Sequential(*net_layers)\n\n    def forward(self, x):\n        return self.network(x)\n" ]
[ [ "torch.nn.utils.weight_norm", "torch.nn.Conv1d", "torch.nn.Sequential", "torch.nn.LeakyReLU" ] ]
marcbue/spikeinterface
[ "d3462eeabcb9f0b9816004dd47355e40f4de1ac5" ]
[ "spikeinterface/comparison/groundtruthstudy.py" ]
[ "from pathlib import Path\nimport os\nimport shutil\nimport numpy as np\nimport pandas as pd\n\nfrom spikeinterface.core import load_extractor\nfrom spikeinterface.extractors import NpzSortingExtractor\nfrom spikeinterface.sorters import sorter_dict, run_sorters\n\nfrom spikeinterface import WaveformExtractor\nfrom spikeinterface.toolkit import compute_quality_metrics\n\nfrom .comparisontools import _perf_keys\nfrom .groundtruthcomparison import compare_sorter_to_ground_truth\n\nfrom .studytools import (setup_comparison_study, get_rec_names, get_recordings,\n iter_output_folders, iter_computed_names, iter_computed_sorting, collect_run_times)\n\n\nclass GroundTruthStudy:\n def __init__(self, study_folder=None):\n self.study_folder = Path(study_folder)\n self._is_scanned = False\n self.computed_names = None\n self.rec_names = None\n self.sorter_names = None\n\n self.scan_folder()\n\n self.comparisons = None\n self.exhaustive_gt = None\n\n def __repr__(self):\n t = 'Groud truth study\\n'\n t += ' ' + str(self.study_folder) + '\\n'\n t += ' recordings: {} {}\\n'.format(len(self.rec_names), self.rec_names)\n if len(self.sorter_names):\n t += ' sorters: {} {}\\n'.format(len(self.sorter_names), self.sorter_names)\n\n return t\n\n def scan_folder(self):\n self.rec_names = get_rec_names(self.study_folder)\n # scan computed names\n self.computed_names = list(iter_computed_names(self.study_folder)) # list of pair (rec_name, sorter_name)\n self.sorter_names = np.unique([e for _, e in iter_computed_names(self.study_folder)]).tolist()\n self._is_scanned = True\n\n @classmethod\n def create(cls, study_folder, gt_dict, **job_kwargs):\n setup_comparison_study(study_folder, gt_dict, **job_kwargs)\n return cls(study_folder)\n\n def run_sorters(self, sorter_list, mode_if_folder_exists='keep', **kwargs):\n\n sorter_folders = self.study_folder / 'sorter_folders'\n recording_dict = get_recordings(self.study_folder)\n\n run_sorters(sorter_list, recording_dict, sorter_folders,\n with_output=False, mode_if_folder_exists=mode_if_folder_exists, **kwargs)\n\n # results are copied so the heavy sorter_folders can be removed\n self.copy_sortings()\n\n def _check_rec_name(self, rec_name):\n if not self._is_scanned:\n self.scan_folder()\n if len(self.rec_names) > 1 and rec_name is None:\n raise Exception(\"Pass 'rec_name' parameter to select which recording to use.\")\n elif len(self.rec_names) == 1:\n rec_name = self.rec_names[0]\n else:\n rec_name = self.rec_names[self.rec_names.index(rec_name)]\n return rec_name\n\n def get_ground_truth(self, rec_name=None):\n rec_name = self._check_rec_name(rec_name)\n sorting = load_extractor(self.study_folder / 'ground_truth' / rec_name)\n return sorting\n\n def get_recording(self, rec_name=None):\n rec_name = self._check_rec_name(rec_name)\n rec = load_extractor(self.study_folder / 'raw_files' / rec_name)\n return rec\n\n def get_sorting(self, sort_name, rec_name=None):\n rec_name = self._check_rec_name(rec_name)\n\n selected_sorting = None\n if sort_name in self.sorter_names:\n for r_name, sorter_name, sorting in iter_computed_sorting(self.study_folder):\n if sort_name == sorter_name and r_name == rec_name:\n selected_sorting = sorting\n return selected_sorting\n\n def copy_sortings(self):\n\n sorter_folders = self.study_folder / 'sorter_folders'\n sorting_folders = self.study_folder / 'sortings'\n log_olders = self.study_folder / 'sortings' / 'run_log'\n\n log_olders.mkdir(parents=True, exist_ok=True)\n\n for rec_name, sorter_name, output_folder in 
iter_output_folders(sorter_folders):\n            SorterClass = sorter_dict[sorter_name]\n            fname = rec_name + '[#]' + sorter_name\n            npz_filename = sorting_folders / (fname + '.npz')\n\n            try:\n                sorting = SorterClass.get_result_from_folder(output_folder)\n                NpzSortingExtractor.write_sorting(sorting, npz_filename)\n            except Exception:\n                if npz_filename.is_file():\n                    npz_filename.unlink()\n            if (output_folder / 'spikeinterface_log.json').is_file():\n                shutil.copyfile(output_folder / 'spikeinterface_log.json',\n                                sorting_folders / 'run_log' / (fname + '.json'))\n\n        self.scan_folder()\n\n    def run_comparisons(self, exhaustive_gt=False, **kwargs):\n        self.comparisons = {}\n        for rec_name, sorter_name, sorting in iter_computed_sorting(self.study_folder):\n            gt_sorting = self.get_ground_truth(rec_name)\n            sc = compare_sorter_to_ground_truth(gt_sorting, sorting, exhaustive_gt=exhaustive_gt, **kwargs)\n            self.comparisons[(rec_name, sorter_name)] = sc\n        self.exhaustive_gt = exhaustive_gt\n\n    def aggregate_run_times(self):\n        return collect_run_times(self.study_folder)\n\n    def aggregate_performance_by_unit(self):\n        assert self.comparisons is not None, 'run_comparisons first'\n\n        perf_by_unit = []\n        for rec_name, sorter_name, sorting in iter_computed_sorting(self.study_folder):\n            comp = self.comparisons[(rec_name, sorter_name)]\n\n            perf = comp.get_performance(method='by_unit', output='pandas')\n            perf['rec_name'] = rec_name\n            perf['sorter_name'] = sorter_name\n            perf = perf.reset_index()\n            perf_by_unit.append(perf)\n\n        perf_by_unit = pd.concat(perf_by_unit)\n        perf_by_unit = perf_by_unit.set_index(['rec_name', 'sorter_name', 'gt_unit_id'])\n\n        return perf_by_unit\n\n    def aggregate_count_units(self, well_detected_score=None, redundant_score=None, overmerged_score=None):\n        assert self.comparisons is not None, 'run_comparisons first'\n\n        index = pd.MultiIndex.from_tuples(self.computed_names, names=['rec_name', 'sorter_name'])\n\n        count_units = pd.DataFrame(index=index, columns=['num_gt', 'num_sorter', 'num_well_detected', 'num_redundant',\n                                                         'num_overmerged'])\n\n        if self.exhaustive_gt:\n            count_units['num_false_positive'] = None\n            count_units['num_bad'] = None\n\n        for rec_name, sorter_name, sorting in iter_computed_sorting(self.study_folder):\n            gt_sorting = self.get_ground_truth(rec_name)\n            comp = self.comparisons[(rec_name, sorter_name)]\n\n            count_units.loc[(rec_name, sorter_name), 'num_gt'] = len(gt_sorting.get_unit_ids())\n            count_units.loc[(rec_name, sorter_name), 'num_sorter'] = len(sorting.get_unit_ids())\n            count_units.loc[(rec_name, sorter_name), 'num_well_detected'] = \\\n                comp.count_well_detected_units(well_detected_score)\n            if self.exhaustive_gt:\n                count_units.loc[(rec_name, sorter_name), 'num_overmerged'] = \\\n                    comp.count_overmerged_units(overmerged_score)\n                count_units.loc[(rec_name, sorter_name), 'num_redundant'] = \\\n                    comp.count_redundant_units(redundant_score)\n                count_units.loc[(rec_name, sorter_name), 'num_false_positive'] = \\\n                    comp.count_false_positive_units(redundant_score)\n                count_units.loc[(rec_name, sorter_name), 'num_bad'] = comp.count_bad_units()\n\n        return count_units\n\n    def aggregate_dataframes(self, copy_into_folder=True, **karg_thresh):\n        dataframes = {}\n        dataframes['run_times'] = self.aggregate_run_times().reset_index()\n        perfs = self.aggregate_performance_by_unit()\n\n        dataframes['perf_by_unit'] = perfs.reset_index()\n        dataframes['count_units'] = self.aggregate_count_units(**karg_thresh).reset_index()\n\n        if copy_into_folder:\n            tables_folder = 
self.study_folder / 'tables'\n            tables_folder.mkdir(parents=True, exist_ok=True)\n\n            for name, df in dataframes.items():\n                df.to_csv(str(tables_folder / (name + '.csv')), sep='\\t', index=False)\n\n        return dataframes\n\n    def compute_metrics(self, rec_name, metric_names=['snr'],\n                        ms_before=3., ms_after=4., max_spikes_per_unit=500,\n                        n_jobs=-1, total_memory='1G', **snr_kwargs):\n\n        rec = self.get_recording(rec_name)\n        gt_sorting = self.get_ground_truth(rec_name)\n\n        # waveform extractor\n        waveform_folder = self.study_folder / 'metrics' / f'waveforms_{rec_name}'\n        if waveform_folder.is_dir():\n            shutil.rmtree(waveform_folder)\n        we = WaveformExtractor.create(rec, gt_sorting, waveform_folder)\n        we.set_params(ms_before=ms_before, ms_after=ms_after, max_spikes_per_unit=max_spikes_per_unit)\n        we.run(n_jobs=n_jobs, total_memory=total_memory)\n\n        # metrics\n        metrics = compute_quality_metrics(we, metric_names=metric_names)\n        filename = self.study_folder / 'metrics' / f'metrics_{rec_name}.txt'\n        metrics.to_csv(filename, sep='\\t', index=True)\n\n        return metrics\n\n    def get_metrics(self, rec_name=None, **metric_kwargs):\n        \"\"\"\n        Load or compute unit metrics for a given recording.\n        \"\"\"\n        rec_name = self._check_rec_name(rec_name)\n        metrics_folder = self.study_folder / 'metrics'\n        metrics_folder.mkdir(parents=True, exist_ok=True)\n\n        filename = self.study_folder / 'metrics' / f'metrics_{rec_name}.txt'\n        if filename.is_file():\n            metrics = pd.read_csv(filename, sep='\\t', index_col=0)\n            gt_sorting = self.get_ground_truth(rec_name)\n            metrics.index = gt_sorting.unit_ids\n        else:\n            metrics = self.compute_metrics(rec_name, **metric_kwargs)\n\n        metrics.index.name = 'unit_id'\n        # add rec_name column\n        metrics['rec_name'] = rec_name\n\n        return metrics\n\n    def get_units_snr(self, rec_name=None, **metric_kwargs):\n        \"\"\"\n        Load or compute the SNR of the ground-truth units for a given recording.\n        \"\"\"\n        metric = self.get_metrics(rec_name=rec_name, **metric_kwargs)\n        return metric['snr']\n\n    def concat_all_snr(self):\n        snr = []\n        for rec_name in self.rec_names:\n            df = self.get_units_snr(rec_name)\n            df = df.reset_index()\n            snr.append(df)\n        snr = pd.concat(snr)\n        snr = snr.set_index(['rec_name', 'gt_unit_id'])\n        return snr\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame", "pandas.MultiIndex.from_tuples", "pandas.concat" ] ]
mrjavoman/Image-Super-Resolution-via-Iterative-Refinement
[ "d353bbcbc667e7ad5da739c7d1b343a44afb88c9" ]
[ "sr.py" ]
[ "import torch\nimport data as Data\nimport model as Model\nimport argparse\nimport logging\nimport core.logger as Logger\nimport core.metrics as Metrics\nfrom tensorboardX import SummaryWriter\nimport os\nimport numpy as np\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--config', type=str, default='config/sr_sr3_16_128.json',\n help='JSON file for configuration')\n parser.add_argument('-p', '--phase', type=str, choices=['train', 'val'],\n help='Run either train(training) or val(generation)', default='train')\n parser.add_argument('-gpu', '--gpu_ids', type=str, default=None)\n parser.add_argument('-debug', '-d', action='store_true')\n\n # parse configs\n args = parser.parse_args()\n opt = Logger.parse(args)\n # Convert to NoneDict, which return None for missing key.\n opt = Logger.dict_to_nonedict(opt)\n\n # logging\n torch.backends.cudnn.enabled = True\n torch.backends.cudnn.benchmark = True\n\n Logger.setup_logger(None, opt['path']['log'],\n 'train', level=logging.INFO, screen=True)\n Logger.setup_logger('val', opt['path']['log'], 'val', level=logging.INFO)\n logger = logging.getLogger('base')\n logger.info(Logger.dict2str(opt))\n tb_logger = SummaryWriter(log_dir=opt['path']['tb_logger'])\n\n # dataset\n for phase, dataset_opt in opt['datasets'].items():\n if phase == 'train' and args.phase != 'val':\n train_set = Data.create_dataset(dataset_opt, phase)\n train_loader = Data.create_dataloader(\n train_set, dataset_opt, phase)\n elif phase == 'val':\n val_set = Data.create_dataset(dataset_opt, phase)\n val_loader = Data.create_dataloader(\n val_set, dataset_opt, phase)\n logger.info('Initial Dataset Finished')\n\n # model\n diffusion = Model.create_model(opt)\n logger.info('Initial Model Finished')\n\n # Train\n current_step = diffusion.begin_step\n current_epoch = diffusion.begin_epoch\n n_iter = opt['train']['n_iter']\n\n if opt['path']['resume_state']:\n logger.info('Resuming training from epoch: {}, iter: {}.'.format(\n current_epoch, current_step))\n\n diffusion.set_new_noise_schedule(\n opt['model']['beta_schedule'][opt['phase']], schedule_phase=opt['phase'])\n if opt['phase'] == 'train':\n while current_step < n_iter:\n current_epoch += 1\n for _, train_data in enumerate(train_loader):\n current_step += 1\n if current_step > n_iter:\n break\n diffusion.feed_data(train_data)\n diffusion.optimize_parameters()\n # log\n if current_step % opt['train']['print_freq'] == 0:\n logs = diffusion.get_current_log()\n message = '<epoch:{:3d}, iter:{:8,d}> '.format(\n current_epoch, current_step)\n for k, v in logs.items():\n message += '{:s}: {:.4e} '.format(k, v)\n tb_logger.add_scalar(k, v, current_step)\n logger.info(message)\n\n # validation\n if current_step % opt['train']['val_freq'] == 0:\n avg_psnr = 0.0\n idx = 0\n result_path = '{}/{}'.format(opt['path']\n ['results'], current_epoch)\n os.makedirs(result_path, exist_ok=True)\n\n diffusion.set_new_noise_schedule(\n opt['model']['beta_schedule']['val'], schedule_phase='val')\n for _, val_data in enumerate(val_loader):\n idx += 1\n diffusion.feed_data(val_data)\n diffusion.test(continous=False)\n visuals = diffusion.get_current_visuals()\n sr_img = Metrics.tensor2img(visuals['SR']) # uint8\n hr_img = Metrics.tensor2img(visuals['HR']) # uint8\n lr_img = Metrics.tensor2img(visuals['LR']) # uint8\n fake_img = Metrics.tensor2img(visuals['INF']) # uint8\n\n # generation\n Metrics.save_img(\n hr_img, '{}/{}_{}_hr.png'.format(result_path, current_step, idx))\n Metrics.save_img(\n sr_img, 
'{}/{}_{}_sr.png'.format(result_path, current_step, idx))\n Metrics.save_img(\n lr_img, '{}/{}_{}_lr.png'.format(result_path, current_step, idx))\n Metrics.save_img(\n fake_img, '{}/{}_{}_inf.png'.format(result_path, current_step, idx))\n tb_logger.add_image(\n 'Iter_{}'.format(current_step),\n np.transpose(np.concatenate(\n (fake_img, sr_img, hr_img), axis=1), [2, 0, 1]),\n idx)\n avg_psnr += Metrics.calculate_psnr(\n sr_img, hr_img)\n\n avg_psnr = avg_psnr / idx\n diffusion.set_new_noise_schedule(\n opt['model']['beta_schedule']['train'], schedule_phase='train')\n # log\n logger.info('# Validation # PSNR: {:.4e}'.format(avg_psnr))\n logger_val = logging.getLogger('val') # validation logger\n logger_val.info('<epoch:{:3d}, iter:{:8,d}> psnr: {:.4e}'.format(\n current_epoch, current_step, avg_psnr))\n # tensorboard logger\n tb_logger.add_scalar('psnr', avg_psnr, current_step)\n\n if current_step % opt['train']['save_checkpoint_freq'] == 0:\n logger.info('Saving models and training states.')\n diffusion.save_network(current_epoch, current_step)\n # save model\n logger.info('End of training.')\n else:\n logger.info('Begin Model Evaluation.')\n avg_psnr = 0.0\n avg_ssim = 0.0\n idx = 0\n result_path = '{}'.format(opt['path']['results'])\n os.makedirs(result_path, exist_ok=True)\n for _, val_data in enumerate(val_loader):\n idx += 1\n diffusion.feed_data(val_data)\n diffusion.test(continous=True)\n visuals = diffusion.get_current_visuals()\n\n hr_img = Metrics.tensor2img(visuals['HR']) # uint8\n lr_img = Metrics.tensor2img(visuals['LR']) # uint8\n fake_img = Metrics.tensor2img(visuals['INF']) # uint8\n\n sr_img_mode = 'grid'\n if sr_img_mode == 'single':\n # single img series\n sr_img = visuals['SR'] # uint8\n sample_num = sr_img.shape[0]\n for iter in range(0, sample_num):\n Metrics.save_img(\n Metrics.tensor2img(sr_img[iter]), '{}/{}_{}_sr_{}.png'.format(result_path, current_step, idx, iter))\n else:\n # grid img\n sr_img = Metrics.tensor2img(visuals['SR']) # uint8\n Metrics.save_img(\n sr_img, '{}/{}_{}_sr_process.png'.format(result_path, current_step, idx))\n Metrics.save_img(\n Metrics.tensor2img(visuals['SR'][-1]), '{}/{}_{}_sr.png'.format(result_path, current_step, idx))\n\n Metrics.save_img(\n hr_img, '{}/{}_{}_hr.png'.format(result_path, current_step, idx))\n Metrics.save_img(\n lr_img, '{}/{}_{}_lr.png'.format(result_path, current_step, idx))\n Metrics.save_img(\n fake_img, '{}/{}_{}_inf.png'.format(result_path, current_step, idx))\n\n # generation\n avg_psnr += Metrics.calculate_psnr(\n Metrics.tensor2img(visuals['SR'][-1]), hr_img)\n avg_ssim += Metrics.calculate_ssim(\n Metrics.tensor2img(visuals['SR'][-1]), hr_img)\n avg_psnr = avg_psnr / idx\n avg_ssim = avg_ssim / idx\n\n # log\n logger.info('# Validation # PSNR: {:.4e}'.format(avg_psnr))\n logger.info('# Validation # SSIM: {:.4e}'.format(avg_ssim))\n logger_val = logging.getLogger('val') # validation logger\n logger_val.info('<epoch:{:3d}, iter:{:8,d}> psnr: {:.4e}, ssim:{:.4e}'.format(\n current_epoch, current_step, avg_psnr, avg_ssim))\n" ]
[ [ "numpy.concatenate" ] ]
ShiKaiWi/python-practice
[ "2ce82bd778b9a4022bdd26d0a3e1bee2ebec6f51" ]
[ "CVlib/GaussianFilter.py" ]
[ "import numpy as np\nimport pylab as plt\nimport mahotas as mh\n\nclass GaussianFilter:\n def __init__(self,img,sigma = 1,windsize = 3):\n self.img = mh.imread(img)\n self.M,self.N = self.img.shape\n self.windsize = windsize \n self.sigma = sigma\n self.gaussian_kernel = self.kernel()\n self.halfsize = self.windsize // 2\n\n def convolution(self,window):\n size = self.gaussian_kernel.size\n if size != window.size:\n return None\n return np.sum(self.gaussian_kernel * window)\n\n def kernel(self):\n N = self.windsize // 2\n x = np.linspace(-N,N,2*N+1)\n y = np.linspace(-N,N,2*N+1)\n xv,yv = np.meshgrid(x,y,indexing='xy')\n H = np.exp(-(np.square(xv)+np.square(yv))/(2*self.sigma*self.sigma))\n H = H / H.sum()\n return np.reshape(H,(self.windsize*self.windsize,1))\n\n def filter(self):\n imgnew = np.zeros((self.M,self.N))\n w = self.halfsize\n for i in range(0,self.M):\n for j in range(0,self.N):\n if i<w or j<w or i>self.M-1-w or j>self.N-1-w:\n imgnew[i][j] = self.img[i][j]\n continue\n imgnew[i][j]= self.convolution(np.reshape(self.img[i-w:i+1+w,j-w:j+1+w],(self.windsize*self.windsize,1)))\n return imgnew \n\n def demo(self):\n plt.imshow(self.filter())\n plt.gray()\n plt.show()\n\n" ]
[ [ "numpy.sum", "numpy.zeros", "numpy.reshape", "numpy.square", "numpy.meshgrid", "numpy.linspace" ] ]
mihirp1998/EmbLang
[ "169b0468ccda554896973bcc226afb3e762a70e7" ]
[ "vis_imagine_static_voxels/lib_classes/modules/embnet2.py" ]
[ "\nfrom lib_classes.modules.utils_basic import *\nfrom lib_classes.modules import utils_improc\nimport constants as const\nimport ipdb\nst = ipdb.set_trace\nfrom sklearn.decomposition import PCA\n\n\nclass SimpleNetBlock(tf.keras.Model):\n def __init__(self,out_chans, blk_num,istrain):\n super(SimpleNetBlock, self).__init__()\n\n self.out_chans = out_chans\n self.istrain = istrain\n self.blk_num = blk_num\n\n \n self.conv2d = tf.keras.layers.Conv2D(out_chans*(2**self.blk_num) ,kernel_size=3, strides=2, activation=tf.nn.leaky_relu,\\\n padding='VALID',kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=1e-3))\n self.batchnorm = tf.keras.layers.BatchNormalization()\n \n self.conv2d_1 = tf.keras.layers.Conv2D(out_chans*(2**self.blk_num) ,kernel_size=3, dilation_rate=2, activation=tf.nn.leaky_relu,\\\n padding='VALID',kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=1e-3))\n self.batchnorm_1 = tf.keras.layers.BatchNormalization()\n\n self.conv2d_transpose = tf.keras.layers.Conv2DTranspose(out_chans, kernel_size=[4,4], strides=2,padding='SAME',\\\n activation=tf.nn.leaky_relu,kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=1e-3))\n self.batchnorm_2 = tf.keras.layers.BatchNormalization()\n\n def call(self,feat,blk_num):\n feat = tf.pad(tensor=feat, paddings=[[0,0],[1,1],[1,1],[0,0]], mode='SYMMETRIC')\n feat = self.conv2d(feat)\n print_shape(feat)\n feat = self.batchnorm(feat, self.istrain)\n \n feat = tf.pad(tensor=feat, paddings=[[0,0],[2,2],[2,2],[0,0]], mode='SYMMETRIC')\n feat = self.conv2d_1(feat)\n print_shape(feat)\n feat = self.batchnorm_1(feat, self.istrain)\n if blk_num > 0:\n upfeat = self.conv2d_transpose(feat)\n print_shape(upfeat)\n upfeat = self.batchnorm_2(upfeat, self.istrain)\n else:\n upfeat = feat\n return feat, upfeat\n\nclass SimpleNet(tf.keras.Model):\n # slim = tf.contrib.slim\n def __init__(self,out_chans,istrain):\n super(SimpleNet, self).__init__()\n nblocks = 2\n \n self.out_chans = out_chans\n self.nblocks = nblocks\n self.SimpleNetBlocks = []\n self.istrain = istrain\n self.conv2d = tf.keras.layers.Conv2D( out_chans ,kernel_size=5, activation=None)\n for blk_num in range(self.nblocks):\n self.SimpleNetBlocks.append(SimpleNetBlock(out_chans,blk_num, self.istrain))\n\n def call(self,input):\n print(\"rgb\")\n print_shape(input)\n B, H, W, C = input.shape.as_list()\n normalizer_fn = None\n weights_initializer = tf.compat.v1.initializers.truncated_normal(stddev=1e-3)\n\n upfeats = list()\n feat = input\n # tf.compat.v1.summary.histogram(feat.name, feat)\n for blk_num in range(self.nblocks):\n feat, upfeat = self.SimpleNetBlocks[blk_num](feat, blk_num)\n upfeats.append(upfeat)\n upfeat = tf.concat(upfeats, axis = 3)\n # st()\n upfeat = tf.pad(tensor=upfeat, paddings=[[0,0],[2,2],[2,2],[0,0]], mode='SYMMETRIC')\n emb = self.conv2d(upfeat)\n # emb = slim.conv2d(upfeat, out_chans, kernel_size=1, activation_fn=None,\n # normalizer_fn=None, scope='conv_final')\n print_shape(emb)\n print(\"rgb_trans\")\n return emb\n\n\nclass embnet2(tf.keras.Model):\n def __init__(self,istrain):\n super(embnet2, self).__init__()\n self.simpleNet = SimpleNet(const.emb_dim,istrain=istrain)\n self.beta = tf.Variable(1.2, dtype=tf.float32, name='margin_beta')\n\n def batch_norm(x, istrain):\n # return tf.identity(x)\n # decay of 0.99 can take ~1k steps to learn (according to my plots)\n return self.batchnorm(x, decay=0.9, \n is_training=istrain,\n # updates_collections=None,\n center=True,\n scale=True,\n reuse=False)\n def get_distance(self,x):\n n = 
x.shape.as_list()[0]\n        square = tf.reduce_sum(input_tensor=x**2, axis=1, keepdims=True)\n        dis_square = square + tf.transpose(a=square) - 2.0 * tf.matmul(x, tf.transpose(a=x)) + EPS\n        # st()\n        return tf.sqrt(dis_square + tf.eye(n))\n\n    def reduce_emb(self,emb, inbound=None, together=False):\n        ## emb -- [S,H/2,W/2,C], inbound -- [S,H/2,W/2,1]\n        ## Reduce number of chans to 3 with PCA. For vis.\n        S,H,W,C = emb.shape.as_list()\n        keep = 3\n        if together:\n            # emb = tf.py_function(self.pca_embed_together, [emb,keep], tf.float32)\n            emb = tf.convert_to_tensor(self.pca_embed_together(emb,keep))\n\n        else:\n            emb = tf.py_function(self.pca_embed, [emb,keep], tf.float32)\n        emb.set_shape([S,H,W,keep])\n        emb = normalize(emb) - 0.5\n        if inbound is not None:\n            emb_inbound = emb*inbound\n        else:\n            emb_inbound = None\n        return emb, emb_inbound\n\n    def pca_embed_together(self,emb, keep):\n        ## emb -- [S,H/2,W/2,C]\n        ## keep is the number of principal components to keep\n        ## Helper function for reduce_emb.\n        S, H, W, K = np.shape(emb)\n        if np.isnan(emb).any():\n            # Bail out early: PCA cannot be fit on NaN inputs.\n            return np.zeros([S,H,W,keep], dtype=np.float32)\n        pixelskd = np.reshape(emb, (S*H*W, K))\n        P = PCA(keep)\n        P.fit(pixelskd)\n        pixels3d = P.transform(pixelskd)\n        out_img = np.reshape(pixels3d, [S,H,W,keep]).astype(np.float32)\n        if np.isnan(out_img).any():\n            out_img = np.zeros([S,H,W,keep], dtype=np.float32)\n        return out_img\n\n    def distance_sampling(self,x, cutoff, nonzero_loss_cutoff, n_split):\n        n, d = x.shape.as_list()\n        split = n/n_split\n        # st()\n        distance = tf.maximum(self.get_distance(x), cutoff)\n        log_weights = ((2.0 - float(d)) * tf.math.log(distance)\n                       - (float(d-3)/2) * tf.math.log(1.0 - 0.25*(distance**2)))\n        # st()\n        weights = tf.exp(log_weights - tf.reduce_max(input_tensor=log_weights))\n\n        mask = np.ones(weights.shape)\n        for i in range(0, n):\n            for idx_split in range(n_split):\n                #mask[i,i] = 0\n                # st()\n                mask[i,int((i+split*idx_split)%n)] = 0\n        # st()\n        mask = tf.constant(mask, tf.float32)\n        weights = weights * mask * tf.cast((distance < nonzero_loss_cutoff), tf.float32)\n        weights = weights / tf.reduce_sum(input_tensor=weights, axis=1, keepdims=True)\n        #a_indices = tf.random.uniform([n, 1], maxval=n, dtype=tf.int32)\n        a_indices = tf.random.shuffle(tf.range(start=0, limit=n, delta=1, dtype=tf.int32))\n        a_indices = tf.reshape(a_indices, [n, 1])\n        #positive samples: interval equal to split\n        # st()\n        split_indices = int(split)*tf.random.uniform([n,1], minval=1, maxval=n_split, dtype=tf.int32)\n        p_indices = tf.floormod((a_indices + split_indices), tf.constant(n, dtype=tf.int32))\n        weights_sampled = tf.gather_nd(weights, a_indices)\n        n_indices = tf.random.categorical(tf.math.log(weights_sampled), 1)\n        n_indices = tf.reshape(n_indices, [n, 1])\n        #print(a_indices.shape.as_list(), p_indices.shape.as_list(), n_indices.shape.as_list())\n        return a_indices, p_indices, n_indices #shape: [n, 1]\n\n\n    # def SimpleNetBlock(feat, blk_num, out_chans, istrain):\n    #     from tensorflow.contrib.slim import conv2d, conv2d_transpose\n\n    #     with tf.compat.v1.variable_scope('Block%d' % blk_num):\n    #         feat = tf.pad(tensor=feat, paddings=[[0,0],[1,1],[1,1],[0,0]], mode='SYMMETRIC')\n    #         feat = conv2d(feat, out_chans*(2**blk_num), stride=2, scope='conv')\n    #         print_shape(feat)\n    #         feat = batch_norm(feat, istrain)\n        \n    #         feat = tf.pad(tensor=feat, paddings=[[0,0],[2,2],[2,2],[0,0]], mode='SYMMETRIC')\n    #         feat = conv2d(feat, out_chans*(2**blk_num), rate=2, scope='dilconv')\n    #         print_shape(feat)\n    #         feat = batch_norm(feat, istrain)\n    #         if blk_num > 0:\n    #             upfeat = 
conv2d_transpose(feat, out_chans, kernel_size=[4,4], stride=2,\n # padding='SAME', scope='deconv')\n # print_shape(upfeat)\n # upfeat = batch_norm(upfeat, istrain)\n # else:\n # upfeat = feat\n # return feat, upfeat\n def margin_loss(self,emb, n_sampling, n_split):\n alpha = 0.2\n cutoff = 0.5\n nonzero_loss_cutoff = 1.4\n a_indices, p_indices, n_indices = self.distance_sampling(emb, cutoff, nonzero_loss_cutoff, n_split)\n emb_a = tf.gather_nd(emb, a_indices)\n emb_p = tf.gather_nd(emb, p_indices)\n emb_n = tf.gather_nd(emb, n_indices)\n d_ap = tf.sqrt(tf.reduce_sum(input_tensor=(emb_p - emb_a)**2, axis=1) + 1e-8)\n d_an = tf.sqrt(tf.reduce_sum(input_tensor=(emb_n - emb_a)**2, axis=1) + 1e-8)\n\n loss_p = tf.maximum(d_ap - self.beta + alpha, 0.0)\n loss_n = tf.maximum(self.beta - d_an + alpha, 0.0)\n\n pair_num = tf.reduce_sum(input_tensor=tf.cast(loss_p > 0.0, tf.float32)+tf.cast(loss_n > 0.0, tf.float32))\n loss = tf.reduce_sum(input_tensor=loss_p + loss_n)/pair_num\n\n return loss\n def emb_vis(self,rgb, emb, emb_pred, inbound):\n ## emb,emb_pred -- [S,H/2,W/2,C] where C is length of emb vector per pixel.\n ## rgb -- [S,H/2,W/2,3], inbound -- [S,H/2,W/2,1]\n S,H,W,C = emb.shape.as_list()\n embs = tf.concat([emb, emb_pred], axis=0)\n inbounds = tf.concat([inbound, inbound], axis=0)\n # emb, emb_inbound = reduce_emb(emb, inbound)\n # emb_pred, emb_pred_inbound = reduce_emb(emb_pred, inbound)\n \n embs, embs_inbound = self.reduce_emb(embs, inbounds, together=True)\n # emb_inbound, emb_pred_inbound = tf.split(embs_inbound, 2, axis=0)\n emb, emb_pred = tf.split(embs, 2, axis=0)\n rgb_emb_vis = tf.concat([rgb, emb, emb_pred], axis=2)\n # utils_improc.summ_rgb('rgb_emb_embpred', rgb_emb_vis)\n # return emb_inbound, emb_pred_inbound\n return emb, emb_pred\n\n\n # def EmbNet3D(emb_pred, emb, istrain):\n # total_loss = 0.0\n\n # with tf.variable_scope('emb3D'):\n # print 'EmbNet3D...'\n\n # B, H, W, D, C = emb_pred.shape.as_list()\n # # assert(C==hyp.emb_dim)\n \n # loss = margin_loss_3D(emb, emb_pred)\n # emb_pca, emb_pred_pca = emb_vis(rgb, emb, emb_pred)\n # total_loss = utils_misc.add_loss(total_loss, loss,\n # hyp.emb_coeff, 'margin_3D')\n\n # # smooth_loss = edge_aware_smooth_loss(emb, rgb)\n # # smooth_loss += edge_aware_smooth_loss(emb_pred, rgb)\n # # total_loss = utils_misc.add_loss(total_loss, smooth_loss,\n # # hyp.emb_smooth_coeff, 'smooth')\n\n # # l1_loss = l1_on_axis(emb-emb_pred)\n # # utils_improc.summ_oned('l1_loss', l1_loss)\n # # # l1_loss = reduce_masked_mean(l1_loss, inbound)\n # # total_loss = utils_misc.add_loss(total_loss, l1_loss,\n # # hyp.emb_l1_coeff, 'l1')\n\n # # # emb = emb / l2_on_axis(emb, axis=3)\n # # # emb_pred = emb_pred / l2_on_axis(emb_pred, axis=3)\n # # return total_loss, emb, emb_pred, emb_pca, emb_pred_pca\n # # # return total_loss\n # return total_loss\n\n\n # def margin_loss_3D(emb0, emb1):\n # # emb0 and emb1 are B x H x W x D x C\n # B,H,W,D,C = emb0.shape.as_list()\n # loss = 0.0\n # emb0_all = []\n # emb1_all = []\n # for s in range(B):\n # n_sampling = 960\n # sample_indicies = tf.random.uniform([n_sampling, 1], maxval=H*W*D, dtype=tf.int32)\n # emb0_s_ = tf.reshape(emb0[s], [H*W*D, C])\n # emb1_s_ = tf.reshape(emb1[s], [H*W*D, C])\n # emb0_s_ = tf.gather_nd(emb0_s_, sample_indicies)\n # emb1_s_ = tf.gather_nd(emb1_s_, sample_indicies)\n # # these are N x D\n # emb0_all.append(emb0_s_)\n # emb1_all.append(emb1_s_)\n # emb0_all = tf.concat(emb0_all, axis=0)\n # emb1_all = tf.concat(emb1_all, axis=0)\n # emb_all = tf.concat([emb0_all, emb1_all], 
axis=0)\n # n_split = 2\n # loss = margin_loss(emb_all, n_sampling, n_split) / float(B)\n # return loss\n def margin_loss_2D(self,emb, emb_pred):\n ## emb,emb_pred,emb_aug -- [S,H/2,W/2,C]\n ## Use lifted_struct_loss between emb,emb_pred,emb_aug treating\n ## every s in S as a separate loss.\n\n # losstype = hyp.emb_loss\n # assert losstype in {'lifted', 'npairs'}\n # losstype = 'lifted'\n B,H,W,C = emb.shape.as_list()\n losstype = 'margin'\n # S,H,W,C = emb.shape.as_list()\n loss = 0.0\n emb_all = []\n emb_pred_all = []\n for s in range(B):\n n_sampling = 960\n sample_indicies = tf.random.uniform([n_sampling, 1], maxval=H*W, dtype=tf.int32)\n emb_s_ = tf.reshape(emb[s], [H*W, C])\n emb_s_ = tf.gather_nd(emb_s_, sample_indicies)\n emb_pred_s_ = tf.reshape(emb_pred[s], [H*W, C])\n emb_pred_s_ = tf.gather_nd(emb_pred_s_, sample_indicies)\n emb_all.append(emb_s_)\n emb_pred_all.append(emb_pred_s_)\n\n emb_all = tf.concat(emb_all, axis=0)\n emb_pred_all = tf.concat(emb_pred_all, axis=0)\n emb_all = tf.concat([emb_all, emb_pred_all], axis=0)\n n_split = 2\n loss = self.margin_loss(emb_all, n_sampling, n_split) / float(B)\n return loss\n\n\n\n @tf.function\n def call(self,rgb, emb_pred):\n # rgb is [S,H,W,3]\n # inbound is [S,H,W,1]\n # emb_pred -- [S,H/2,W/2,C] where C is length of emb vector per pixel.\n\n ## Compute embs for `rgb` using EmbNet(SimpleNet) and\n ## compare/loss against `emb_pred`. Use loss only within\n ## the mask `inbound`.\n\n total_loss = 0.0\n # st()\n with tf.compat.v1.name_scope('emb'):\n # print 'EmbNet...'\n\n B, H, W, C = emb_pred.shape.as_list()\n assert(C==const.emb_dim)\n \n # inbound = tf.image.resize_nearest_neighbor(inbound, [H, W])\n inbound = tf.ones([B,H,W,1])\n\n # if hyp.emb_use_aug:\n # # ignore/replace emb_pred\n # rgb_aug = random_color_augs(rgb)\n # rgb_all = tf.concat([rgb, rgb_aug], axis=0)\n # emb_all = SimpleNet(rgb_all, istrain, C)\n # emb, emb_pred = tf.split(emb_all, 2, axis=0)\n # inbound = tf.ones_like(inbound)\n # emb_aug = None # support old code that used BOTH aug and pred\n # else:\n emb = self.simpleNet(rgb)\n \n emb = emb / (EPS + l2_on_axis(emb, axis=3))\n emb_pred = emb_pred / (EPS + l2_on_axis(emb_pred, axis=3))\n # st()\n emb_aug = None # support old code that used BOTH aug and pred\n \n rgb = tf.image.resize(rgb, [H, W], method=tf.image.ResizeMethod.BILINEAR)\n\n loss = self.margin_loss_2D(emb, emb_pred)\n # emb_pca, emb_pred_pca = self.emb_vis(rgb, emb, emb_pred, inbound)\n\n total_loss = add_loss(total_loss, loss,\n const.emb_coeff, 'metric')\n\n # loss = metric_loss(rgb, emb, emb_pred, emb_aug, inbound)\n # emb_pca, emb_pred_pca = emb_vis(rgb, emb, emb_pred, inbound)\n # total_loss = utils_misc.add_loss(total_loss, loss,\n # hyp.emb_coeff, 'metric')\n\n # smooth_loss = edge_aware_smooth_loss(emb, rgb)\n # smooth_loss += edge_aware_smooth_loss(emb_pred, rgb)\n # total_loss = utils_misc.add_loss(total_loss, smooth_loss,\n # hyp.emb_smooth_coeff, 'smooth')\n\n l1_loss_im = l1_on_chans(emb-emb_pred)\n # utils_improc.summ_oned('l1_loss', l1_loss_im*inbound)\n l1_loss = reduce_masked_mean(l1_loss_im, inbound)\n total_loss = add_loss(total_loss, l1_loss,\n const.emb_l1_coeff, 'l1')\n\n # loss_3D = margin_loss_3D(emb3D_g, emb3D_e)\n # total_loss = utils_misc.add_loss(total_loss, loss_3D,\n # hyp.emb_3D_coeff, '3D')\n\n # dx, dy, dz = gradient3D(emb3D_e, absolute=True)\n # smooth_vox = tf.reduce_mean(dx+dy+dx, axis=4, keepdims=True)\n # smooth_loss = tf.reduce_mean(smooth_vox)\n # total_loss = utils_misc.add_loss(total_loss, smooth_loss, 
hyp.emb_smooth3D_coeff, 'smooth3D')\n # total_loss, emb, emb_pred, inbound, emb_pca, emb_pred_pca\n # emb = emb / l2_on_axis(emb, axis=3)\n # emb_pred = emb_pred / l2_on_axis(emb_pred, axis=3)\n return total_loss,rgb,emb,emb_pred\n # return total_loss" ]
[ [ "sklearn.decomposition.PCA" ] ]
jsikyoon/dreamer
[ "c422d14bba523083c69a862d8c16b41d686c5028" ]
[ "models.py" ]
[ "import numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras import layers as tfkl\nfrom tensorflow_probability import distributions as tfd\nfrom tensorflow.keras.mixed_precision import experimental as prec\n\nimport tools\nfrom trxls import TrXL\n\n\nclass RSSM(tools.Module):\n\n def __init__(self, stoch=30, deter=200, hidden=200, act=tf.nn.elu,\n model='trxl',\n pre_lnorm=False, gate='plus',\n n_layer=2, n_head=10, mem_len=64):\n super().__init__()\n self._activation = act\n self._stoch_size = stoch\n self._deter_size = deter\n self._hidden_size = hidden\n\n # memory module\n self._model = model\n self._deter = deter\n self._n_layer = n_layer\n self._mem_len = mem_len\n self._num_var = n_layer * mem_len * deter\n\n assert model in ['gru', 'trxl']\n\n if self._model=='gru':\n self._cell = tfkl.GRUCell(self._deter_size)\n else:\n self._cell = TrXL(pre_lnorm=pre_lnorm,\n gate=gate,\n n_layer=n_layer,\n d_model=deter,\n n_head=n_head,\n d_head=deter//n_head,\n d_inner=deter,\n mem_len=mem_len)\n\n def initial(self, batch_size):\n dtype = prec.global_policy().compute_dtype\n if self._model=='gru':\n deter = self._cell.get_initial_state(None, batch_size, dtype)\n else:\n deter = tf.zeros([self._n_layer,\n self._mem_len,\n batch_size,\n self._deter], dtype)\n deter = tf.transpose(deter, perm=[2,1,0,3])\n deter = tf.reshape(deter, [deter.shape[0], -1])\n deter = tf.concat([tf.zeros([batch_size, self._deter], dtype),\n deter],\n axis=-1)\n return dict(\n mean=tf.zeros([batch_size, self._stoch_size], dtype),\n std=tf.zeros([batch_size, self._stoch_size], dtype),\n stoch=tf.zeros([batch_size, self._stoch_size], dtype),\n deter=deter)\n\n @tf.function\n def observe(self, embed, action, state=None):\n if state is None:\n state = self.initial(tf.shape(action)[0])\n embed = tf.transpose(embed, [1, 0, 2])\n action = tf.transpose(action, [1, 0, 2])\n post, prior = tools.static_scan(\n lambda prev, inputs: self.obs_step(prev[0], *inputs),\n (action, embed), (state, state))\n post = {k: tf.transpose(v, [1, 0, 2]) for k, v in post.items()}\n prior = {k: tf.transpose(v, [1, 0, 2]) for k, v in prior.items()}\n return post, prior\n\n @tf.function\n def imagine(self, action, state=None):\n if state is None:\n state = self.initial(tf.shape(action)[0])\n assert isinstance(state, dict), state\n action = tf.transpose(action, [1, 0, 2])\n prior = tools.static_scan(self.img_step, action, state)\n prior = {k: tf.transpose(v, [1, 0, 2]) for k, v in prior.items()}\n return prior\n\n def get_feat(self, state):\n if self._model=='gru':\n return tf.concat([state['stoch'], state['deter']], -1)\n else:\n deter = tf.split(state['deter'],\n [self._deter, self._num_var], axis=-1)[0]\n return tf.concat([state['stoch'], deter], -1)\n\n def get_dist(self, state):\n return tfd.MultivariateNormalDiag(state['mean'], state['std'])\n\n @tf.function\n def obs_step(self, prev_state, prev_action, embed):\n prior = self.img_step(prev_state, prev_action)\n if self._model=='gru':\n x = tf.concat([prior['deter'], embed], -1)\n else:\n deter = tf.split(prior['deter'],\n [self._deter, self._num_var], axis=-1)[0]\n x = tf.concat([deter, embed], -1)\n x = self.get('obs1', tfkl.Dense, self._hidden_size, self._activation)(x)\n x = self.get('obs2', tfkl.Dense, 2 * self._stoch_size, None)(x)\n mean, std = tf.split(x, 2, -1)\n std = tf.nn.softplus(std) + 0.1\n stoch = self.get_dist({'mean': mean, 'std': std}).sample()\n post = {'mean': mean, 'std': std, 'stoch': stoch, 'deter': prior['deter']}\n return post, prior\n\n @tf.function\n def 
img_step(self, prev_state, prev_action):\n x = tf.concat([prev_state['stoch'], prev_action], -1)\n x = self.get('img1', tfkl.Dense, self._hidden_size, self._activation)(x)\n if self._model=='gru':\n x, deter = self._cell(x, [prev_state['deter']])\n deter = deter[0] # Keras wraps the state in a list.\n else:\n deter = tf.split(prev_state['deter'],\n [self._deter, self._num_var], axis=-1)[1]\n deter = tf.reshape(deter, [deter.shape[0], self._mem_len,\n self._n_layer, self._deter])\n deter = tf.transpose(deter, perm=[2,1,0,3])\n x, deter = self._cell(dec_inp=tf.expand_dims(x, axis=0),\n mems=deter)\n deter = tf.transpose(deter, perm=[2,1,0,3])\n deter = tf.reshape(deter, [deter.shape[0], -1])\n deter = tf.concat([x, deter], axis=-1)\n x = self.get('img2', tfkl.Dense, self._hidden_size, self._activation)(x)\n x = self.get('img3', tfkl.Dense, 2 * self._stoch_size, None)(x)\n mean, std = tf.split(x, 2, -1)\n std = tf.nn.softplus(std) + 0.1\n stoch = self.get_dist({'mean': mean, 'std': std}).sample()\n prior = {'mean': mean, 'std': std, 'stoch': stoch, 'deter': deter}\n return prior\n\n\nclass ConvEncoder(tools.Module):\n\n def __init__(self, depth=32, act=tf.nn.relu):\n self._act = act\n self._depth = depth\n\n def __call__(self, obs):\n kwargs = dict(strides=2, activation=self._act)\n x = tf.reshape(obs['image'], (-1,) + tuple(obs['image'].shape[-3:]))\n x = self.get('h1', tfkl.Conv2D, 1 * self._depth, 4, **kwargs)(x)\n x = self.get('h2', tfkl.Conv2D, 2 * self._depth, 4, **kwargs)(x)\n x = self.get('h3', tfkl.Conv2D, 4 * self._depth, 4, **kwargs)(x)\n x = self.get('h4', tfkl.Conv2D, 8 * self._depth, 4, **kwargs)(x)\n shape = tf.concat([tf.shape(obs['image'])[:-3], [32 * self._depth]], 0)\n return tf.reshape(x, shape)\n\n\nclass ConvDecoder(tools.Module):\n\n def __init__(self, depth=32, act=tf.nn.relu, shape=(64, 64, 3)):\n self._act = act\n self._depth = depth\n self._shape = shape\n\n def __call__(self, features):\n kwargs = dict(strides=2, activation=self._act)\n x = self.get('h1', tfkl.Dense, 32 * self._depth, None)(features)\n x = tf.reshape(x, [-1, 1, 1, 32 * self._depth])\n x = self.get('h2', tfkl.Conv2DTranspose, 4 * self._depth, 5, **kwargs)(x)\n x = self.get('h3', tfkl.Conv2DTranspose, 2 * self._depth, 5, **kwargs)(x)\n x = self.get('h4', tfkl.Conv2DTranspose, 1 * self._depth, 6, **kwargs)(x)\n x = self.get('h5', tfkl.Conv2DTranspose, self._shape[-1], 6, strides=2)(x)\n mean = tf.reshape(x, tf.concat([tf.shape(features)[:-1], self._shape], 0))\n return tfd.Independent(tfd.Normal(mean, 1), len(self._shape))\n\n\nclass DenseDecoder(tools.Module):\n\n def __init__(self, shape, layers, units, dist='normal', act=tf.nn.elu):\n self._shape = shape\n self._layers = layers\n self._units = units\n self._dist = dist\n self._act = act\n\n def __call__(self, features):\n x = features\n for index in range(self._layers):\n x = self.get(f'h{index}', tfkl.Dense, self._units, self._act)(x)\n x = self.get(f'hout', tfkl.Dense, np.prod(self._shape))(x)\n x = tf.reshape(x, tf.concat([tf.shape(features)[:-1], self._shape], 0))\n if self._dist == 'normal':\n return tfd.Independent(tfd.Normal(x, 1), len(self._shape))\n if self._dist == 'binary':\n return tfd.Independent(tfd.Bernoulli(x), len(self._shape))\n raise NotImplementedError(self._dist)\n\n\nclass ActionDecoder(tools.Module):\n\n def __init__(\n self, size, layers, units, dist='tanh_normal', act=tf.nn.elu,\n min_std=1e-4, init_std=5, mean_scale=5):\n self._size = size\n self._layers = layers\n self._units = units\n self._dist = dist\n self._act = 
act\n    self._min_std = min_std\n    self._init_std = init_std\n    self._mean_scale = mean_scale\n\n  def __call__(self, features):\n    raw_init_std = np.log(np.exp(self._init_std) - 1)\n    x = features\n    for index in range(self._layers):\n      x = self.get(f'h{index}', tfkl.Dense, self._units, self._act)(x)\n    if self._dist == 'tanh_normal':\n      # https://www.desmos.com/calculator/rcmcf5jwe7\n      x = self.get(f'hout', tfkl.Dense, 2 * self._size)(x)\n      mean, std = tf.split(x, 2, -1)\n      mean = self._mean_scale * tf.tanh(mean / self._mean_scale)\n      std = tf.nn.softplus(std + raw_init_std) + self._min_std\n      dist = tfd.Normal(mean, std)\n      dist = tfd.TransformedDistribution(dist, tools.TanhBijector())\n      dist = tfd.Independent(dist, 1)\n      dist = tools.SampleDist(dist)\n    elif self._dist == 'onehot':\n      x = self.get(f'hout', tfkl.Dense, self._size)(x)\n      dist = tools.OneHotDist(x)\n    else:\n      raise NotImplementedError(self._dist)\n    return dist\n" ]
[ [ "tensorflow.keras.layers.GRUCell", "tensorflow.zeros", "tensorflow.shape", "tensorflow.reshape", "tensorflow.tanh", "tensorflow.expand_dims", "numpy.exp", "tensorflow.concat", "numpy.prod", "tensorflow.keras.mixed_precision.experimental.global_policy", "tensorflow.transpose", "tensorflow.split", "tensorflow.nn.softplus" ] ]
JE-Chen/je_old_repo
[ "a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5" ]
[ "LogSystem_JE/venv/Lib/site-packages/tqdm/gui.py" ]
[ "\"\"\"\r\nGUI progressbar decorator for iterators.\r\nIncludes a default `range` iterator printing to `stderr`.\r\n\r\nUsage:\r\n>>> from tqdm.gui import trange, tqdm\r\n>>> for i in trange(10):\r\n... ...\r\n\"\"\"\r\n# future division is important to divide integers and get as\r\n# a result precise floating numbers (instead of truncated int)\r\nfrom __future__ import division, absolute_import\r\n# import compatibility functions and utilities\r\nfrom .utils import _range\r\n# to inherit from the tqdm class\r\nfrom .std import tqdm as std_tqdm\r\nfrom .std import TqdmExperimentalWarning\r\nfrom warnings import warn\r\n\r\n\r\n__author__ = {\"github.com/\": [\"casperdcl\", \"lrq3000\"]}\r\n__all__ = ['tqdm_gui', 'tgrange', 'tqdm', 'trange']\r\n\r\n\r\nclass tqdm_gui(std_tqdm): # pragma: no cover\r\n \"\"\"\r\n Experimental GUI version of tqdm!\r\n \"\"\"\r\n\r\n # TODO: @classmethod: write() on GUI?\r\n\r\n def __init__(self, *args, **kwargs):\r\n import matplotlib as mpl\r\n import matplotlib.pyplot as plt\r\n from collections import deque\r\n kwargs['gui'] = True\r\n\r\n super(tqdm_gui, self).__init__(*args, **kwargs)\r\n\r\n # Initialize the GUI display\r\n if self.disable or not kwargs['gui']:\r\n return\r\n\r\n warn('GUI is experimental/alpha', TqdmExperimentalWarning, stacklevel=2)\r\n self.mpl = mpl\r\n self.plt = plt\r\n self.sp = None\r\n\r\n # Remember if external environment uses toolbars\r\n self.toolbar = self.mpl.rcParams['toolbar']\r\n self.mpl.rcParams['toolbar'] = 'None'\r\n\r\n self.mininterval = max(self.mininterval, 0.5)\r\n self.fig, ax = plt.subplots(figsize=(9, 2.2))\r\n # self.fig.subplots_adjust(bottom=0.2)\r\n total = self.__len__() # avoids TypeError on None #971\r\n if total is not None:\r\n self.xdata = []\r\n self.ydata = []\r\n self.zdata = []\r\n else:\r\n self.xdata = deque([])\r\n self.ydata = deque([])\r\n self.zdata = deque([])\r\n self.line1, = ax.plot(self.xdata, self.ydata, color='b')\r\n self.line2, = ax.plot(self.xdata, self.zdata, color='k')\r\n ax.set_ylim(0, 0.001)\r\n if total is not None:\r\n ax.set_xlim(0, 100)\r\n ax.set_xlabel('percent')\r\n self.fig.legend((self.line1, self.line2), ('cur', 'est'),\r\n loc='center right')\r\n # progressbar\r\n self.hspan = plt.axhspan(0, 0.001,\r\n xmin=0, xmax=0, color='g')\r\n else:\r\n # ax.set_xlim(-60, 0)\r\n ax.set_xlim(0, 60)\r\n ax.invert_xaxis()\r\n ax.set_xlabel('seconds')\r\n ax.legend(('cur', 'est'), loc='lower left')\r\n ax.grid()\r\n # ax.set_xlabel('seconds')\r\n ax.set_ylabel((self.unit if self.unit else 'it') + '/s')\r\n if self.unit_scale:\r\n plt.ticklabel_format(style='sci', axis='y',\r\n scilimits=(0, 0))\r\n ax.yaxis.get_offset_text().set_x(-0.15)\r\n\r\n # Remember if external environment is interactive\r\n self.wasion = plt.isinteractive()\r\n plt.ion()\r\n self.ax = ax\r\n\r\n def __iter__(self):\r\n # TODO: somehow allow the following:\r\n # if not self.gui:\r\n # return super(tqdm_gui, self).__iter__()\r\n iterable = self.iterable\r\n if self.disable:\r\n for obj in iterable:\r\n yield obj\r\n return\r\n\r\n # ncols = self.ncols\r\n mininterval = self.mininterval\r\n maxinterval = self.maxinterval\r\n miniters = self.miniters\r\n dynamic_miniters = self.dynamic_miniters\r\n last_print_t = self.last_print_t\r\n last_print_n = self.last_print_n\r\n n = self.n\r\n # dynamic_ncols = self.dynamic_ncols\r\n smoothing = self.smoothing\r\n avg_time = self.avg_time\r\n time = self._time\r\n\r\n for obj in iterable:\r\n yield obj\r\n # Update and possibly print the progressbar.\r\n # Note: 
does not call self.update(1) for speed optimisation.\r\n n += 1\r\n # check counter first to avoid calls to time()\r\n if n - last_print_n >= self.miniters:\r\n miniters = self.miniters # watch monitoring thread changes\r\n delta_t = time() - last_print_t\r\n if delta_t >= mininterval:\r\n cur_t = time()\r\n delta_it = n - last_print_n\r\n # EMA (not just overall average)\r\n if smoothing and delta_t and delta_it:\r\n rate = delta_t / delta_it\r\n avg_time = self.ema(rate, avg_time, smoothing)\r\n self.avg_time = avg_time\r\n\r\n self.n = n\r\n self.display()\r\n\r\n # If no `miniters` was specified, adjust automatically\r\n # to the max iteration rate seen so far between 2 prints\r\n if dynamic_miniters:\r\n if maxinterval and delta_t >= maxinterval:\r\n # Adjust miniters to time interval by rule of 3\r\n if mininterval:\r\n # Set miniters to correspond to mininterval\r\n miniters = delta_it * mininterval / delta_t\r\n else:\r\n # Set miniters to correspond to maxinterval\r\n miniters = delta_it * maxinterval / delta_t\r\n elif smoothing:\r\n # EMA-weight miniters to converge\r\n # towards the timeframe of mininterval\r\n rate = delta_it\r\n if mininterval and delta_t:\r\n rate *= mininterval / delta_t\r\n miniters = self.ema(rate, miniters, smoothing)\r\n else:\r\n # Maximum nb of iterations between 2 prints\r\n miniters = max(miniters, delta_it)\r\n\r\n # Store old values for next call\r\n self.n = self.last_print_n = last_print_n = n\r\n self.last_print_t = last_print_t = cur_t\r\n self.miniters = miniters\r\n\r\n # Closing the progress bar.\r\n # Update some internal variables for close().\r\n self.last_print_n = last_print_n\r\n self.n = n\r\n self.miniters = miniters\r\n self.close()\r\n\r\n def update(self, n=1):\r\n # if not self.gui:\r\n # return super(tqdm_gui, self).close()\r\n if self.disable:\r\n return\r\n\r\n if n < 0:\r\n self.last_print_n += n # for auto-refresh logic to work\r\n self.n += n\r\n\r\n # check counter first to reduce calls to time()\r\n if self.n - self.last_print_n >= self.miniters:\r\n delta_t = self._time() - self.last_print_t\r\n if delta_t >= self.mininterval:\r\n cur_t = self._time()\r\n delta_it = self.n - self.last_print_n # >= n\r\n # elapsed = cur_t - self.start_t\r\n # EMA (not just overall average)\r\n if self.smoothing and delta_t and delta_it:\r\n rate = delta_t / delta_it\r\n self.avg_time = self.ema(\r\n rate, self.avg_time, self.smoothing)\r\n\r\n self.display()\r\n\r\n # If no `miniters` was specified, adjust automatically to the\r\n # maximum iteration rate seen so far between two prints.\r\n # e.g.: After running `tqdm.update(5)`, subsequent\r\n # calls to `tqdm.update()` will only cause an update after\r\n # at least 5 more iterations.\r\n if self.dynamic_miniters:\r\n if self.maxinterval and delta_t >= self.maxinterval:\r\n if self.mininterval:\r\n self.miniters = delta_it * self.mininterval \\\r\n / delta_t\r\n else:\r\n self.miniters = delta_it * self.maxinterval \\\r\n / delta_t\r\n elif self.smoothing:\r\n self.miniters = self.smoothing * delta_it * \\\r\n (self.mininterval / delta_t\r\n if self.mininterval and delta_t\r\n else 1) + \\\r\n (1 - self.smoothing) * self.miniters\r\n else:\r\n self.miniters = max(self.miniters, delta_it)\r\n\r\n # Store old values for next call\r\n self.last_print_n = self.n\r\n self.last_print_t = cur_t\r\n return True\r\n\r\n def close(self):\r\n # if not self.gui:\r\n # return super(tqdm_gui, self).close()\r\n if self.disable:\r\n return\r\n\r\n self.disable = True\r\n\r\n with self.get_lock():\r\n 
self._instances.remove(self)\r\n\r\n # Restore toolbars\r\n self.mpl.rcParams['toolbar'] = self.toolbar\r\n # Return to non-interactive mode\r\n if not self.wasion:\r\n self.plt.ioff()\r\n if not self.leave:\r\n self.plt.close(self.fig)\r\n\r\n def display(self):\r\n n = self.n\r\n cur_t = self._time()\r\n elapsed = cur_t - self.start_t\r\n delta_it = n - self.last_print_n\r\n delta_t = cur_t - self.last_print_t\r\n\r\n # Inline due to multiple calls\r\n total = self.total\r\n xdata = self.xdata\r\n ydata = self.ydata\r\n zdata = self.zdata\r\n ax = self.ax\r\n line1 = self.line1\r\n line2 = self.line2\r\n # instantaneous rate\r\n y = delta_it / delta_t\r\n # overall rate\r\n z = n / elapsed\r\n # update line data\r\n xdata.append(n * 100.0 / total if total else cur_t)\r\n ydata.append(y)\r\n zdata.append(z)\r\n\r\n # Discard old values\r\n # xmin, xmax = ax.get_xlim()\r\n # if (not total) and elapsed > xmin * 1.1:\r\n if (not total) and elapsed > 66:\r\n xdata.popleft()\r\n ydata.popleft()\r\n zdata.popleft()\r\n\r\n ymin, ymax = ax.get_ylim()\r\n if y > ymax or z > ymax:\r\n ymax = 1.1 * y\r\n ax.set_ylim(ymin, ymax)\r\n ax.figure.canvas.draw()\r\n\r\n if total:\r\n line1.set_data(xdata, ydata)\r\n line2.set_data(xdata, zdata)\r\n try:\r\n poly_lims = self.hspan.get_xy()\r\n except AttributeError:\r\n self.hspan = self.plt.axhspan(\r\n 0, 0.001, xmin=0, xmax=0, color='g')\r\n poly_lims = self.hspan.get_xy()\r\n poly_lims[0, 1] = ymin\r\n poly_lims[1, 1] = ymax\r\n poly_lims[2] = [n / total, ymax]\r\n poly_lims[3] = [poly_lims[2, 0], ymin]\r\n if len(poly_lims) > 4:\r\n poly_lims[4, 1] = ymin\r\n self.hspan.set_xy(poly_lims)\r\n else:\r\n t_ago = [cur_t - i for i in xdata]\r\n line1.set_data(t_ago, ydata)\r\n line2.set_data(t_ago, zdata)\r\n\r\n ax.set_title(self.format_meter(\r\n n, total, elapsed, 0,\r\n self.desc, self.ascii, self.unit, self.unit_scale,\r\n 1 / self.avg_time if self.avg_time else None, self.bar_format,\r\n self.postfix, self.unit_divisor),\r\n fontname=\"DejaVu Sans Mono\", fontsize=11)\r\n self.plt.pause(1e-9)\r\n\r\n\r\ndef tgrange(*args, **kwargs):\r\n \"\"\"\r\n A shortcut for `tqdm.gui.tqdm(xrange(*args), **kwargs)`.\r\n On Python3+, `range` is used instead of `xrange`.\r\n \"\"\"\r\n return tqdm_gui(_range(*args), **kwargs)\r\n\r\n\r\n# Aliases\r\ntqdm = tqdm_gui\r\ntrange = tgrange\r\n" ]
[ [ "matplotlib.pyplot.ticklabel_format", "matplotlib.pyplot.subplots", "matplotlib.pyplot.isinteractive", "matplotlib.pyplot.ion", "matplotlib.pyplot.axhspan" ] ]
jungtaekkim/bayeso-benchmarks
[ "3650aaeeaa123da14f0f839da664b071ee17bf9a" ]
[ "tests/test_inf_dim_ackley.py" ]
[ "#\n# author: Jungtaek Kim ([email protected])\n# last updated: February 8, 2021\n#\n\nimport numpy as np\nimport pytest\n\nfrom bayeso_benchmarks.inf_dim_ackley import *\n\nclass_fun = Ackley\n\nTEST_EPSILON = 1e-5\n\n\ndef test_init():\n obj_fun = class_fun(2)\n\n with pytest.raises(TypeError) as error:\n class_fun()\n with pytest.raises(AssertionError) as error:\n class_fun('abc')\n with pytest.raises(AssertionError) as error:\n class_fun(2.1)\n with pytest.raises(AssertionError) as error:\n class_fun(2, seed='abc')\n with pytest.raises(AssertionError) as error:\n class_fun(2, seed=2.1)\n\ndef test_validate_properties():\n obj_fun = class_fun(5)\n obj_fun.validate_properties()\n\ndef test_output():\n obj_fun = class_fun(3)\n bounds = obj_fun.get_bounds()\n\n grids = obj_fun.sample_grids(3)\n truths_grids = np.array([\n [2.15703112e+01],\n [2.11187470e+01],\n [2.15703112e+01],\n [2.11187470e+01],\n [2.02411230e+01],\n [2.11187470e+01],\n [2.15703112e+01],\n [2.11187470e+01],\n [2.15703112e+01],\n [2.11187470e+01],\n [2.02411230e+01],\n [2.11187470e+01],\n [2.02411230e+01],\n [4.44089210e-16],\n [2.02411230e+01],\n [2.11187470e+01],\n [2.02411230e+01],\n [2.11187470e+01],\n [2.15703112e+01],\n [2.11187470e+01],\n [2.15703112e+01],\n [2.11187470e+01],\n [2.02411230e+01],\n [2.11187470e+01],\n [2.15703112e+01],\n [2.11187470e+01],\n [2.15703112e+01],\n ])\n \n print(grids)\n print(obj_fun.output(grids))\n print(np.abs(obj_fun.output(grids) - truths_grids) < TEST_EPSILON)\n assert np.all(np.abs(obj_fun.output(grids) - truths_grids) < TEST_EPSILON)\n" ]
[ [ "numpy.array" ] ]
tzole1155/moai
[ "d1afb3aaf8ddcd7a1c98b84d6365afb846ae3180" ]
[ "moai/validation/single.py" ]
[ "import moai.utils.engine as mieng\n\nimport torch\nimport omegaconf.omegaconf\nimport typing\nimport logging\nimport inspect\nimport itertools\n\nlog = logging.getLogger(__name__)\n\n__all__ = ['Metric']\n\nclass Metric(mieng.Single):\n def __init__(self,\n metrics: omegaconf.DictConfig,\n **kwargs: typing.Mapping[str, typing.Any],\n ):\n super(Metric, self).__init__(\n items=metrics, \n name=\"metric\",\n )\n loop = ((key, params) for key, params in kwargs.items() if hasattr(indicators, key))\n for k, p in loop:\n last_module = self.metric\n sig = inspect.signature(last_module.forward)\n for keys in zip(*list(p[prop] for prop in itertools.chain(sig.parameters, ['out']))):\n self.execs.append(lambda tensor_dict, metric_dict, k=keys, p=sig.parameters.keys(), f=last_module:\n metric_dict.update({\n k[-1]: f(**dict(zip(p, \n list(tensor_dict[i] for i in k[:-1])\n )))\n })\n )\n\n def forward(self,\n tensors: typing.Dict[str, torch.Tensor]\n ) -> typing.Dict[str, torch.Tensor]:\n metrics = { } \n for exe in self.execs:\n exe(tensors, metrics)\n returned = { }\n for k, m in metrics.items():\n returned[k] = torch.mean(m) if len(m.size()) > 0 else m \n return returned" ]
[ [ "torch.mean" ] ]
shoshijak/NTPoly
[ "04ee94f743727775bbc97120325c57bf393932e9" ]
[ "UnitTests/test_matrix.py" ]
[ "\"\"\"\nA test suite for local matrices.\n\"\"\"\nimport unittest\nimport NTPolySwig as nt\n\nfrom scipy.io import mmwrite, mmread\n\n\nclass TestParameters:\n '''An internal class for holding test parameters.'''\n\n def __init__(self, rows, columns, sparsity):\n '''Default constructor\n @param[in] rows matrix rows.\n @param[in] columns matrix columns.\n @param[in] sparsity matrix sparsity.\n '''\n # Matrix rows.\n self.rows = rows\n # Matrix columns.\n self.columns = columns\n # Matrix sparsity.\n self.sparsity = sparsity\n\n def create_matrix(self, square=False, complex=False):\n '''\n Function to create a matrix for a given set of parameters.\n '''\n from scipy.sparse import random, csr_matrix\n\n r = self.rows\n c = self.columns\n s = self.sparsity\n if square:\n r = c\n if complex:\n mat = random(r, c, s, format=\"csr\")\n mat += 1j * random(r, c, s, format=\"csr\")\n else:\n mat = random(r, c, s, format=\"csr\")\n\n return csr_matrix(mat)\n\n\nclass TestLocalMatrix(unittest.TestCase):\n '''A test class for local matrices.'''\n from os import environ\n from os.path import join\n # Parameters for the matrices\n parameters = []\n # Location of the scratch directory.\n scratch_dir = environ['SCRATCHDIR']\n file1 = join(scratch_dir, \"matrix1.mtx\")\n file2 = join(scratch_dir, \"matrix2.mtx\")\n file3 = join(scratch_dir, \"matrix3.mtx\")\n SMatrix = nt.Matrix_lsr\n MatrixMemoryPool = nt.MatrixMemoryPool_r\n complex = False\n\n def _compare_mat(self, val1, val2):\n from helpers import THRESHOLD\n from scipy.sparse.linalg import norm\n\n normval = abs(norm(val1 - val2))\n self.assertLessEqual(normval, THRESHOLD)\n\n def _compare(self, val1, val2):\n from helpers import THRESHOLD\n from scipy.linalg import norm\n\n normval = abs(norm(val1 - val2))\n self.assertLessEqual(normval, THRESHOLD)\n\n def setUp(self):\n '''Set up a test.'''\n self.parameters = []\n self.parameters.append(TestParameters(2, 4, 0.0))\n self.parameters.append(TestParameters(8, 8, 0.0))\n self.parameters.append(TestParameters(2, 2, 1.0))\n self.parameters.append(TestParameters(4, 4, 1.0))\n self.parameters.append(TestParameters(19, 19, 1.0))\n self.parameters.append(TestParameters(4, 2, 1.0))\n self.parameters.append(TestParameters(2, 4, 1.0))\n self.parameters.append(TestParameters(4, 4, 0.2))\n self.parameters.append(TestParameters(8, 8, 1.0))\n\n def test_read(self):\n '''Test routines to read and write matrices.'''\n for param in self.parameters:\n matrix1 = param.create_matrix(complex=self.complex)\n mmwrite(self.file1, matrix1)\n matrix2 = self.SMatrix(self.file1)\n matrix2.WriteToMatrixMarket(self.file2)\n ResultMat = mmread(self.file2)\n self._compare_mat(matrix1, ResultMat)\n\n def test_readcircular(self):\n '''Test routines to read a matrix produced by ntpoly.'''\n for param in self.parameters:\n matrix1 = param.create_matrix(complex=self.complex)\n mmwrite(self.file1, matrix1)\n matrix2 = self.SMatrix(self.file1)\n matrix2.WriteToMatrixMarket(self.file2)\n matrix3 = self.SMatrix(self.file2)\n matrix3.WriteToMatrixMarket(self.file3)\n ResultMat = mmread(self.file3)\n\n self._compare_mat(matrix1, ResultMat)\n\n def test_readsymmetric(self):\n '''Test routines to read and write matrices.'''\n for param in self.parameters:\n matrix1 = param.create_matrix(complex=self.complex, square=True)\n matrix1 = matrix1 + matrix1.H\n mmwrite(self.file1, matrix1)\n matrix2 = self.SMatrix(self.file1)\n matrix2.WriteToMatrixMarket(self.file2)\n ResultMat = mmread(self.file2)\n\n self._compare_mat(matrix1, ResultMat)\n\n def 
test_addition(self):\n '''Test routines to add together matrices.'''\n from random import uniform\n for param in self.parameters:\n matrix1 = param.create_matrix(complex=self.complex)\n matrix2 = param.create_matrix(complex=self.complex)\n mmwrite(self.file1, matrix1)\n mmwrite(self.file2, matrix2)\n alpha = uniform(1.0, 2.0)\n CheckMat = alpha * matrix1 + matrix2\n matrix1 = self.SMatrix(self.file1)\n matrix2 = self.SMatrix(self.file2)\n matrix2.Increment(matrix1, alpha, 0.0)\n matrix2.WriteToMatrixMarket(self.file2)\n ResultMat = mmread(self.file2)\n\n self._compare_mat(CheckMat, ResultMat)\n\n def test_addzero(self):\n '''Test routines to add together a matrix and zero.'''\n for param in self.parameters:\n matrix1 = param.create_matrix(complex=self.complex)\n mmwrite(self.file1, matrix1)\n\n CheckMat = matrix1\n matrix1 = self.SMatrix(self.file1)\n matrix2 = self.SMatrix(matrix1.GetColumns(), matrix1.GetRows())\n matrix2.Increment(matrix1, 1.0, 0.0)\n matrix2.WriteToMatrixMarket(self.file2)\n ResultMat = mmread(self.file2)\n self._compare_mat(CheckMat, ResultMat)\n\n def test_addzeroreverse(self):\n '''Test routines to add together a matrix and zero.'''\n for param in self.parameters:\n matrix1 = param.create_matrix(complex=self.complex)\n mmwrite(self.file1, matrix1)\n\n CheckMat = matrix1\n matrix1 = self.SMatrix(self.file1)\n matrix2 = self.SMatrix(matrix1.GetColumns(), matrix1.GetRows())\n matrix1.Increment(matrix2, 1.0, 0.0)\n matrix1.WriteToMatrixMarket(self.file2)\n ResultMat = mmread(self.file2)\n self._compare_mat(CheckMat, ResultMat)\n\n def test_dot(self):\n '''Test routines to dot two matrices.'''\n from numpy import sum, multiply\n for param in self.parameters:\n matrix1 = param.create_matrix(complex=self.complex)\n matrix2 = param.create_matrix(complex=self.complex)\n mmwrite(self.file1, matrix1)\n mmwrite(self.file2, matrix2)\n check = sum(multiply(matrix1.todense(), matrix2.todense()))\n matrix1 = self.SMatrix(self.file1)\n matrix2 = self.SMatrix(self.file2)\n result = matrix2.Dot(matrix1)\n\n self._compare(result, check)\n\n def test_transpose(self):\n '''Test routines to transpose a matrix.'''\n for param in self.parameters:\n matrix1 = param.create_matrix(complex=self.complex)\n mmwrite(self.file1, matrix1)\n\n matrix2 = self.SMatrix(self.file1)\n matrix2T = self.SMatrix(matrix2.GetRows(), matrix2.GetColumns())\n matrix2T.Transpose(matrix2)\n matrix2T.WriteToMatrixMarket(self.file2)\n\n CheckMat = matrix1.T\n ResultMat = mmread(self.file2)\n\n self._compare_mat(CheckMat, ResultMat)\n\n def test_pairwise(self):\n '''Test routines to pairwise multiply two matrices.'''\n from scipy.sparse import csr_matrix\n from numpy import multiply\n for param in self.parameters:\n matrix1 = param.create_matrix(complex=self.complex)\n matrix2 = param.create_matrix(complex=self.complex)\n mmwrite(self.file1, matrix1)\n mmwrite(self.file2, matrix2)\n CheckMat = csr_matrix(\n multiply(matrix1.todense(), matrix2.todense()))\n\n ntmatrix1 = self.SMatrix(self.file1)\n ntmatrix2 = self.SMatrix(self.file2)\n ntmatrix3 = self.SMatrix(\n ntmatrix1.GetColumns(), ntmatrix1.GetRows())\n ntmatrix3.PairwiseMultiply(ntmatrix1, ntmatrix2)\n ntmatrix3.WriteToMatrixMarket(self.file3)\n\n ResultMat = mmread(self.file3)\n self._compare_mat(CheckMat, ResultMat)\n\n def test_multiply(self):\n '''Test routines to multiply two matrices.'''\n from random import uniform\n for param in self.parameters:\n matrix1 = param.create_matrix(complex=self.complex)\n matrix2 = param.create_matrix(complex=self.complex).H\n 
mmwrite(self.file1, matrix1)\n mmwrite(self.file2, matrix2)\n alpha = uniform(1.0, 2.0)\n beta = 0.0\n if abs(beta) > 0.0:\n CheckMat = alpha * matrix1.dot(matrix2) + beta * matrix1\n else:\n CheckMat = alpha * matrix1.dot(matrix2)\n\n ntmatrix1 = self.SMatrix(self.file1)\n ntmatrix2 = self.SMatrix(self.file2)\n ntmatrix3 = self.SMatrix(ntmatrix2.GetColumns(),\n ntmatrix1.GetRows())\n memory_pool = self.MatrixMemoryPool(ntmatrix2.GetColumns(),\n ntmatrix1.GetRows())\n ntmatrix3.Gemm(ntmatrix1, ntmatrix2, False, False, alpha, beta,\n 0.0, memory_pool)\n ntmatrix3.WriteToMatrixMarket(self.file3)\n\n ResultMat = mmread(self.file3)\n self._compare_mat(CheckMat, ResultMat)\n\n def test_multiply_zero(self):\n '''Test routines to multiply two matrices where one is zero.'''\n from random import uniform\n for param in self.parameters:\n matrix1 = param.create_matrix(complex=self.complex)\n matrix2 = 0 * param.create_matrix(complex=self.complex).H\n mmwrite(self.file1, matrix1)\n mmwrite(self.file2, matrix2)\n alpha = uniform(1.0, 2.0)\n beta = 0.0\n if abs(beta) > 0.0:\n CheckMat = alpha * matrix1.dot(matrix2) + beta * matrix1\n else:\n CheckMat = alpha * matrix1.dot(matrix2)\n\n ntmatrix1 = self.SMatrix(self.file1)\n ntmatrix2 = self.SMatrix(self.file2)\n ntmatrix3 = self.SMatrix(ntmatrix2.GetColumns(),\n ntmatrix1.GetRows())\n memory_pool = self.MatrixMemoryPool(ntmatrix2.GetColumns(),\n ntmatrix1.GetRows())\n ntmatrix3.Gemm(ntmatrix1, ntmatrix2, False, False, alpha, beta,\n 0.0, memory_pool)\n ntmatrix3.WriteToMatrixMarket(self.file3)\n\n ResultMat = mmread(self.file3)\n self._compare_mat(CheckMat, ResultMat)\n\n def test_get_row(self):\n '''Test function that extracts a row from the matrix'''\n from random import randint\n for param in self.parameters:\n if param.rows == 0:\n continue\n matrix1 = param.create_matrix(complex=self.complex)\n mmwrite(self.file1, matrix1)\n row_num = randint(0, param.rows - 1)\n CheckMat = matrix1[row_num, :]\n\n ntmatrix1 = self.SMatrix(self.file1)\n ntmatrix2 = self.SMatrix(ntmatrix1.GetColumns(), 1)\n ntmatrix1.ExtractRow(row_num, ntmatrix2)\n ntmatrix2.WriteToMatrixMarket(self.file2)\n\n ResultMat = mmread(self.file2)\n self._compare_mat(CheckMat, ResultMat)\n\n def test_get_column(self):\n '''Test function that extracts a column from the matrix'''\n from random import randint\n for param in self.parameters:\n if param.columns == 0:\n continue\n matrix1 = param.create_matrix(complex=self.complex)\n mmwrite(self.file1, matrix1)\n column_num = randint(0, param.columns - 1)\n CheckMat = matrix1[:, column_num]\n\n ntmatrix1 = self.SMatrix(self.file1)\n ntmatrix2 = self.SMatrix(1, ntmatrix1.GetRows())\n ntmatrix1.ExtractColumn(column_num, ntmatrix2)\n ntmatrix2.WriteToMatrixMarket(self.file2)\n\n ResultMat = mmread(self.file2)\n self._compare_mat(CheckMat, ResultMat)\n\n\nclass TestLocalMatrix_c(TestLocalMatrix):\n '''Specialization for complex matrices'''\n SMatrix = nt.Matrix_lsc\n MatrixMemoryPool = nt.MatrixMemoryPool_c\n complex = True\n\n def test_conjugatetranspose(self):\n '''Test routines to compute the conjugate transpose of a matrix.'''\n for param in self.parameters:\n matrix1 = param.create_matrix(complex=self.complex)\n mmwrite(self.file1, matrix1)\n\n matrix2 = self.SMatrix(self.file1)\n matrix2T = self.SMatrix(matrix2.GetRows(), matrix2.GetColumns())\n matrix2T.Transpose(matrix2)\n matrix2T.Conjugate()\n matrix2T.WriteToMatrixMarket(self.file2)\n\n CheckMat = matrix1.H\n ResultMat = mmread(self.file2)\n self._compare_mat(CheckMat, ResultMat)\n\n def 
test_dot(self):\n '''Test routines to dot two matrices.'''\n from numpy import sum, multiply, conj\n for param in self.parameters:\n matrix1 = param.create_matrix(complex=self.complex)\n matrix2 = param.create_matrix(complex=self.complex)\n mmwrite(self.file1, matrix1)\n mmwrite(self.file2, matrix2)\n check = sum(multiply(conj(matrix1.todense()), matrix2.todense()))\n matrix1 = self.SMatrix(self.file1)\n matrix2 = self.SMatrix(self.file2)\n result = matrix1.Dot(matrix2)\n\n self._compare(result, check)\n\n\n###############################################################################\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "scipy.sparse.random", "scipy.linalg.norm", "scipy.sparse.csr_matrix", "scipy.io.mmread", "scipy.io.mmwrite" ] ]
suryadheeshjith/Frustum-PointNet
[ "10e7b1c0ee8183c4791e67c44e7e2ba6c265486c" ]
[ "mayavi/test_drawline.py" ]
[ "import numpy\nfrom mayavi.mlab import *\n\ndef test_plot3d():\n \"\"\"Generates a pretty set of lines.\"\"\"\n n_mer, n_long = 6, 11\n pi = numpy.pi\n dphi = pi / 1000.0\n phi = numpy.arange(0.0, 2 * pi + 0.5 * dphi, dphi)\n mu = phi * n_mer\n x = numpy.cos(mu) * (1 + numpy.cos(n_long * mu / n_mer) * 0.5)\n y = numpy.sin(mu) * (1 + numpy.cos(n_long * mu / n_mer) * 0.5)\n z = numpy.sin(n_long * mu / n_mer) * 0.5\n\n l = plot3d(x, y, z, numpy.sin(mu), tube_radius=0.025, colormap='Spectral')\n return l\n\ntest_plot3d()\ninput()\n" ]
[ [ "numpy.arange", "numpy.sin", "numpy.cos" ] ]
m-mirz/proloaf
[ "4109665b2e6eb1dbdc37dae4a3c0afd2ca6af87f" ]
[ "source/fc_prep.py" ]
[ "# Copyright 2021 The ProLoaF Authors. All Rights Reserved.\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# ==============================================================================\n\"\"\"\nPreprocesses your input data for use with ProLoaF\n\nTransforms the data to a common format (pandas.DataFrame as csv) for all stations.\n\nNotes\n-----\n- This script can load xlsx or csv files.\n- If your data does not match the criteria, you can use a custom script that saves your\ndata as a pandas.DataFrame with datetimeindex to a csv file with a “;” as separator to\naccomplish the same thing.\n\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport sys\nimport json\nimport os\n\nMAIN_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\nsys.path.append(MAIN_PATH)\n\nfrom utils.config_util import read_config, parse_basic\n#Import customized functions below this point\n\nimport utils.datatuner as dt\n\ndef load_raw_data_xlsx(files):\n \"\"\"\n Load data from an xlsx file\n\n After loading, the date column in the raw data is converted to a UTC datetime\n\n Parameters\n ----------\n files : list\n A list of files to read. 
See the Notes section for more information\n\n Returns\n -------\n list\n A list containing a DataFrame for each file that was read\n\n Notes\n -----\n - Files is an array of maps containing the following data with the keyword (keyword)\n + ('file_name') the name of the xlsx file\n + ('date_column') the name of the date_column in the raw_data\n + ('time_zone') specifier for the timezone the raw data is recorded in\n + ('sheet_name') name or list of names of the sheets that are to be read\n + ('combine') boolean, all datasheets with true are combined into one, all others are read individually\n + ('start_column') Columns between this and ('end_column') are loaded\n + ('end_column')\n\n \"\"\"\n print('Importing XLSX Data...')\n\n combined_files = []\n individual_files = []\n\n for xlsx_file in files:\n print('importing ' + xlsx_file['file_name'])\n # if isinstance(file_name, str):\n # file_name = [file_name,'UTC']\n date_column = xlsx_file['date_column']\n raw_data = pd.read_excel(INPATH + xlsx_file['file_name'], xlsx_file['sheet_name'],\n parse_dates=[date_column])\n\n # convert load data to UTC\n if(xlsx_file['time_zone'] != 'UTC'):\n raw_data[date_column] = pd.to_datetime(raw_data[date_column]).dt.tz_localize(xlsx_file['time_zone'], ambiguous=\"infer\").dt.tz_convert('UTC').dt.strftime('%Y-%m-%d %H:%M:%S')\n else:\n if (xlsx_file['dayfirst']):\n raw_data[date_column] = pd.to_datetime(raw_data[date_column], format='%d-%m-%Y %H:%M:%S').dt.tz_localize(None)\n else:\n raw_data[date_column] = pd.to_datetime(raw_data[date_column], format='%Y-%m-%d %H:%M:%S').dt.tz_localize(None)\n\n if(xlsx_file['data_abs']):\n raw_data.loc[:, xlsx_file['start_column']:xlsx_file['end_column']] = raw_data.loc[:, xlsx_file['start_column']:xlsx_file['end_column']].abs()\n # rename column IDs, specifically Time, this will be used later as the df index\n raw_data.rename(columns={date_column: 'Time'}, inplace=True)\n raw_data.head() # now the data is positive and set to UTC\n raw_data.info()\n # interpolating for missing entries created by asfreq and original missing values if any\n raw_data.interpolate(method='time', inplace=True)\n\n if(xlsx_file['combine']):\n combined_files.append(raw_data)\n else:\n individual_files.append(raw_data)\n if(len(combined_files) > 0):\n individual_files.append(pd.concat(combined_files))\n return individual_files\n\ndef load_raw_data_csv(files):\n \"\"\"\n Load data from a csv file\n\n After loading, the date column in the raw data is converted to a UTC datetime\n\n Parameters\n ----------\n files : list\n A list of files to read. 
See the Notes section for more information\n\n Returns\n -------\n list\n A list containing a DataFrame for each file that was read\n\n Notes\n -----\n - Files is an array of maps containing the following data with the keyword (keyword)\n + ('file_name') the name of the load_file\n + ('date_column') the name of the date_column in the raw_data\n + ('dayfirst') specifier for the formatting of the read time\n + ('sep') separator used in this file\n + ('combine') boolean, all datasheets with true are combined into one, all others are read individually\n + ('use_columns') list of columns that are loaded\n\n \"\"\"\n\n print('Importing CSV Data...')\n\n\n combined_files = []\n individual_files = []\n\n for csv_file in files:\n print('Importing ' + csv_file['file_name'] + ' ...')\n date_column = csv_file['date_column']\n raw_data = pd.read_csv(INPATH + csv_file['file_name'], sep=csv_file['sep'], usecols=csv_file['use_columns'], parse_dates=[date_column] , dayfirst=csv_file['dayfirst'])\n # pd.read_csv(INPATH + name, sep=sep, usecols=cols, parse_dates=[date_column] , dayfirst=dayfirst)\n if (csv_file['time_zone'] != 'UTC'):\n raw_data[date_column] = pd.to_datetime(raw_data[date_column]).dt.tz_localize(csv_file['time_zone'], ambiguous=\"infer\").dt.tz_convert('UTC').dt.strftime('%Y-%m-%d %H:%M:%S')\n else:\n if (csv_file['dayfirst']):\n raw_data[date_column] = pd.to_datetime(raw_data[date_column], format='%d-%m-%Y %H:%M:%S').dt.tz_localize(None)\n else:\n raw_data[date_column] = pd.to_datetime(raw_data[date_column], format='%Y-%m-%d %H:%M:%S').dt.tz_localize(None)\n\n print('...Importing finished. ')\n raw_data.rename(columns={date_column: 'Time'}, inplace=True)\n\n if(csv_file['combine']):\n combined_files.append(raw_data)\n else:\n individual_files.append(raw_data)\n\n if(len(combined_files) > 0):\n individual_files.append(pd.concat(combined_files, sort = False))\n #for frame in individual_files:\n # frame.rename(columns={date_column: 'Time'}, inplace=True)\n return individual_files\n\ndef set_to_hours(df):\n \"\"\"\n Sets the index of the DataFrame to 'Time' and the frequency to hours.\n\n Parameters\n ----------\n df : pandas.DataFrame\n The DataFrame whose index and frequency are to be changed\n\n Returns\n -------\n df\n The modified DataFrame\n\n \"\"\"\n\n df['Time'] = pd.to_datetime(df['Time'])\n df = df.set_index('Time')\n df = df.asfreq(freq='H')\n return df\n\n\nif __name__ == '__main__':\n\n ARGS = parse_basic()\n config_file = os.path.join(MAIN_PATH, 'targets', ARGS.station, 'preprocessing.json')\n PAR = read_config(config_path=config_file)\n\n # DEFINES\n if PAR['local'] == True:\n INPATH = os.path.join(MAIN_PATH, PAR['raw_path'])\n else:\n INPATH = PAR['raw_path']\n if ('xlsx_files' in PAR):\n XLSX_FILES = PAR['xlsx_files']\n if ('csv_files' in PAR):\n CSV_FILES = PAR['csv_files']\n OUTFILE = os.path.join(MAIN_PATH, PAR['data_path'])\n\n # Prepare Load Data\n df_list = []\n if ('xlsx_files' in PAR):\n xlsx_data = load_raw_data_xlsx(XLSX_FILES)\n for data in xlsx_data:\n hourly_data = set_to_hours(df=data)\n dt.fill_if_missing(hourly_data)\n df_list.append(hourly_data)\n\n if ('csv_files' in PAR):\n csv_data = load_raw_data_csv(CSV_FILES)\n for data in csv_data:\n hourly_data = set_to_hours(df=data)\n dt.fill_if_missing(hourly_data)\n print(hourly_data)\n df_list.append(hourly_data)\n\n print(df_list)\n # When concatenating, the arrays are filled with NaNs if the index is not available.\n # Since the DataFrames were already interpolated there are non \"natural\" NaNs left so\n # 
dropping all rows with NaNs finds the maximum overlap in indices\n # # Merge load and weather data to one df\n df = pd.concat(df_list, axis = 1)\n\n df.dropna(inplace = True)\n\n if not df.index.equals(pd.date_range(min(df.index),max(df.index),freq = df.index.freq)):\n raise ValueError(\"DateTime index is not continuous\")\n if not df.isnull().values.any():\n print('No missing data \\n')\n df.head()\n\n ## http://blog.davidkaleko.com/feature-engineering-cyclical-features.html\n df['hour_sin'] = np.sin(df.index.hour * (2. * np.pi / 24))\n df['hour_cos'] = np.cos(df.index.hour * (2. * np.pi / 24))\n df['mnth_sin'] = np.sin((df.index.month - 1) * (2. * np.pi / 12))\n df['mnth_cos'] = np.cos((df.index.month - 1) * (2. * np.pi / 12))\n # fetch back the datetime again\n\n # add one-hot encoding for Hour & Month\n hours = pd.get_dummies(df.index.hour, prefix='hour').set_index(df.index) # one-hot encoding of hours\n month = pd.get_dummies(df.index.month, prefix='month').set_index(df.index) # one-hot encoding of month\n weekday = pd.get_dummies(df.index.dayofweek, prefix='weekday').set_index(df.index) # one-hot encoding of month\n df = pd.concat([df, hours, month, weekday], axis=1)\n\n # store new df as csv\n df.head()\n df.to_csv(OUTFILE, sep=';', index=True)\n" ]
[ [ "pandas.read_csv", "pandas.read_excel", "numpy.cos", "pandas.to_datetime", "pandas.concat", "numpy.sin", "pandas.get_dummies" ] ]
robot-perception-group/AutonomousBlimpDRL
[ "a10a88b2e9c9f9a83435cff2e4bc7e16e83cfeee" ]
[ "RL/rl/rllib_script/test_agent/test_agent.py" ]
[ "import os\nimport pickle\n\nimport numpy as np\nimport ray\nimport sys\nimport rl.rllib_script.agent.model.ray_model\nfrom blimp_env.envs import ResidualPlanarNavigateEnv\nfrom ray.rllib.agents import ppo\nfrom ray.tune.logger import pretty_print\n\ncheckpoint_path = os.path.expanduser(\n \"~/catkin_ws/src/AutonomousBlimpDRL/RL/rl/trained_model/PPO_ResidualPlanarNavigateEnv_9d24f_00000_0_2022-02-21_17-09-14/checkpoint_001080/checkpoint-1080\"\n)\n\nauto_start_simulation = True # start simulation\nduration = int(0.5 * 3600 * 10 * 7) + 24193600\n\nnum_workers = 7\n\nreal_experiment = True # no reset\nevaluation_mode = False # fix robotid, don't support multiworker\nonline_training = False # if training during test\n\n\nif float(sys.argv[1]) == 0.0:\n run_pid = True\nelif float(sys.argv[1]) == 1.0:\n run_pid = False\n\nwindspeed = 0.5 * float(sys.argv[2])\nbuoyancy = 0.93 + 0.07 * float(sys.argv[3])\n\nif float(sys.argv[4]) == 0.0:\n traj = \"square\"\nelif float(sys.argv[4]) == 1.0:\n traj = \"coil\"\n\n\ntrigger_dist = 7\ninit_alt = 100\n\n###########################################\n\nENV = ResidualPlanarNavigateEnv\n\nrun_base_dir = os.path.dirname(os.path.dirname(checkpoint_path))\nconfig_path = os.path.join(run_base_dir, \"params.pkl\")\nwith open(config_path, \"rb\") as f:\n config = pickle.load(f)\n\nif run_pid:\n beta = 0.0\n disable_servo = True\nelse:\n beta = 0.5\n disable_servo = False\n\n\nenv_config = config[\"env_config\"]\nenv_config.update(\n {\n \"DBG\": False,\n \"evaluation_mode\": evaluation_mode,\n \"real_experiment\": real_experiment,\n \"seed\": 123,\n \"duration\": duration,\n \"beta\": beta,\n \"success_threshhold\": trigger_dist, # [meters]\n }\n)\nenv_config[\"simulation\"].update(\n {\n \"gui\": False,\n \"auto_start_simulation\": auto_start_simulation,\n \"enable_meshes\": True,\n \"enable_wind\": True,\n \"enable_wind_sampling\": True,\n \"wind_speed\": windspeed,\n \"wind_direction\": (1, 0),\n \"enable_buoyancy_sampling\": True,\n \"buoyancy_range\": [buoyancy, buoyancy],\n \"position\": (0, 0, init_alt),\n }\n)\n\nobs_config = {\n \"noise_stdv\": 0.05,\n}\nif \"observation\" in env_config:\n env_config[\"observation\"].update(obs_config)\nelse:\n env_config[\"observation\"] = obs_config\n\nact_config = {\n \"act_noise_stdv\": 0.5,\n \"disable_servo\": disable_servo,\n}\nif \"action\" in env_config:\n env_config[\"action\"].update(act_config)\nelse:\n env_config[\"action\"] = act_config\n\n\ndef generate_coil(points, radius, speed=5):\n li = []\n nwp_layer = 8\n for i in range(points):\n x = radius * np.sin(i * 2 * np.pi / nwp_layer)\n y = radius * np.cos(i * 2 * np.pi / nwp_layer)\n wp = (x, y, -init_alt - 2 * i, speed)\n li.append(wp)\n return li\n\n\ncoil = generate_coil(8 * 2 - 1, 30)\nsquare = [\n (40, 40, -init_alt, 3),\n (40, -40, -init_alt, 3),\n (-40, -40, -init_alt, 3),\n (-40, 40, -init_alt, 3),\n]\n\nif traj == \"coil\":\n wp_list = coil\nelif traj == \"square\":\n wp_list = square\ntarget_config = {\n \"type\": \"MultiGoal\",\n \"target_name_space\": \"goal_\",\n \"trigger_dist\": trigger_dist,\n \"wp_list\": wp_list,\n \"enable_random_goal\": False,\n}\nif \"target\" in env_config:\n env_config[\"target\"].update(target_config)\nelse:\n env_config[\"target\"] = target_config\n\n\nif online_training:\n config.update(\n {\n \"create_env_on_driver\": False,\n \"num_workers\": num_workers,\n \"num_gpus\": 1,\n \"explore\": False,\n \"env_config\": env_config,\n \"horizon\": 400,\n \"rollout_fragment_length\": 400,\n \"train_batch_size\": 5600,\n 
\"sgd_minibatch_size\": 512,\n \"lr\": 5e-4,\n \"lr_schedule\": None,\n \"num_sgd_iter\": 16,\n }\n )\nelse:\n config.update(\n {\n \"create_env_on_driver\": False,\n \"num_workers\": num_workers,\n \"num_gpus\": 1,\n \"explore\": False,\n \"env_config\": env_config,\n \"horizon\": 400,\n \"rollout_fragment_length\": 400,\n \"train_batch_size\": 5600,\n \"sgd_minibatch_size\": 512,\n \"lr\": 0,\n \"lr_schedule\": None,\n \"num_sgd_iter\": 0,\n }\n )\n\nprint(config)\nray.shutdown()\nray.init()\nagent = ppo.PPOTrainer(config=config, env=ENV)\nagent.restore(checkpoint_path)\nfor _ in range(int(duration)):\n result = agent.train()\n print(pretty_print(result))\n if result[\"timesteps_total\"] >= duration:\n break\nprint(\"done\")\n" ]
[ [ "numpy.sin", "numpy.cos" ] ]
MilanSusa/Skin-Cancer-Detection-Inference-API
[ "f4a62982ee6dfb3e2d56bdfc65fcc885aab69935" ]
[ "app.py" ]
[ "import os\nimport shutil\n\nfrom flask import Flask, request, jsonify\nfrom werkzeug.utils import secure_filename\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.metrics import top_k_categorical_accuracy\nfrom keras_preprocessing.image import ImageDataGenerator\nfrom keras.applications.mobilenet import preprocess_input\n\nFOLDER_REL_DIR = 'static' + os.path.sep + 'images' + os.path.sep\nFOLDER_ABS_DIR = os.path.join(os.getcwd(), FOLDER_REL_DIR)\n\nALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = FOLDER_ABS_DIR\napp.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024\n\n\ndef top_3_accuracy(y_true, y_pred):\n return top_k_categorical_accuracy(y_true, y_pred, k=3)\n\n\ndef top_2_accuracy(y_true, y_pred):\n return top_k_categorical_accuracy(y_true, y_pred, k=2)\n\n\nMODEL = load_model('./pretrained_models/mobilenet.h5',\n custom_objects={\n 'top_2_accuracy': top_2_accuracy,\n 'top_3_accuracy': top_3_accuracy\n })\n\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\ndef remove_image():\n for filename in os.listdir(FOLDER_ABS_DIR):\n file_path = os.path.join(FOLDER_ABS_DIR, filename)\n try:\n if os.path.isfile(file_path) or os.path.islink(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n print(f'Failed to delete {file_path}: {e}')\n\n\[email protected]('/api/v1/inference', methods=['POST'])\ndef perform_inference():\n file = request.files['file']\n\n if file and allowed_file(file.filename):\n remove_image()\n\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n\n test_img_gen = ImageDataGenerator(preprocessing_function=preprocess_input)\n test_data_gen = test_img_gen.flow_from_directory(directory='static',\n target_size=(224, 224),\n color_mode='rgb')\n\n preds = MODEL.predict_generator(generator=test_data_gen,\n steps=1)\n remove_image()\n data = {\n \"akiec\": str(preds[0][0]),\n \"bcc\": str(preds[0][1]),\n \"bkl\": str(preds[0][2]),\n \"df\": str(preds[0][3]),\n \"mel\": str(preds[0][4]),\n \"nv\": str(preds[0][5]),\n \"vasc\": str(preds[0][6])\n }\n\n return jsonify({\"data\": data})\n\n\nif __name__ == '__main__':\n app.run()\n" ]
[ [ "tensorflow.keras.metrics.top_k_categorical_accuracy", "tensorflow.keras.models.load_model" ] ]
alisure-fork/BASNet
[ "0cc349a3190d92a2fe991107f711abdcce3531ec" ]
[ "src/MyThink_MIC5_Decoder8.py" ]
[ "import os\nimport glob\nimport torch\nimport numpy as np\nfrom PIL import Image\nfrom skimage import io\nfrom alisuretool.Tools import Tools\nfrom torch.utils.data import DataLoader\nfrom src.MyTrain_MIC5_Decoder8 import BASNet, DatasetUSOD\n\n\ndef one_decoder():\n # --------- 1. get path ---------\n has_mask = True\n more_obj = False\n # model_dir = './saved_models/my_train_mic5_decoder8_aug_mask_norm_5bce_d2/120_train_3.043.pth'\n # prediction_dir = Tools.new_dir('./test_data/my_train_mic5_decoder8_aug_mask_norm_5bce_d2_120_image_decoder')\n model_dir = './saved_models/my_train_mic5_decoder8_aug_mask_norm_5bce_d3/115_train_3.046.pth'\n prediction_dir = Tools.new_dir('./test_data/my_train_mic5_decoder8_aug_mask_norm_5bce_d3_115_image_decoder')\n\n # --------- 2. data loader ---------\n image_dir = '/mnt/4T/Data/SOD/DUTS/DUTS-TR/DUTS-TR-Image/'\n img_name_list = glob.glob(image_dir + '*.jpg')\n test_dataset = DatasetUSOD(img_name_list=img_name_list, is_train=False)\n test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=8)\n\n # --------- 3. model define ---------\n Tools.print(\"...load BASNet...\")\n net = BASNet(3, clustering_num_list=[128, 256, 512], pretrained=False, has_mask=has_mask, more_obj=more_obj)\n if torch.cuda.is_available():\n net.cuda()\n net.load_state_dict(torch.load(model_dir), strict=False)\n\n # --------- 4. inference for each image ---------\n net.eval()\n for i_test, (inputs_test, _) in enumerate(test_dataloader):\n Tools.print(\"inference: {} {}\".format(i_test, img_name_list[i_test]))\n inputs_test = inputs_test.type(torch.FloatTensor).cuda()\n\n return_m, return_d = net(inputs_test)\n\n top_k_value, top_k_index = torch.topk(return_m[\"m1\"][\"smc_logits\"], 1, 1)\n smc_result = top_k_index.cpu().detach().numpy()[0][0]\n\n img_name = img_name_list[i_test]\n result_path = os.path.join(prediction_dir, str(smc_result))\n result_path = Tools.new_dir(result_path)\n\n # 1\n result_name = os.path.join(result_path, os.path.split(img_name)[1])\n im_data = io.imread(img_name)\n io.imsave(result_name, im_data)\n\n # 2\n cam1 = return_d[\"label\"][\"cam_norm_1_up\"].squeeze().cpu().data.numpy()\n cam2 = return_d[\"label\"][\"cam_norm_2_up\"].squeeze().cpu().data.numpy()\n cam3 = return_d[\"label\"][\"cam_norm_3_up\"].squeeze().cpu().data.numpy()\n\n im1 = Image.fromarray(cam1 * 255).convert('RGB')\n im2 = Image.fromarray(cam2 * 255).convert('RGB')\n im3 = Image.fromarray(cam3 * 255).convert('RGB')\n\n imo1 = im1.resize((im_data.shape[1], im_data.shape[0]), resample=Image.BILINEAR)\n imo2 = im2.resize((im_data.shape[1], im_data.shape[0]), resample=Image.BILINEAR)\n imo3 = im3.resize((im_data.shape[1], im_data.shape[0]), resample=Image.BILINEAR)\n\n imo1.save(os.path.join(result_path, '{}_{}_{}.png'.format(\n os.path.splitext(os.path.basename(img_name))[0], 1, smc_result)))\n imo2.save(os.path.join(result_path, '{}_{}_{}.png'.format(\n os.path.splitext(os.path.basename(img_name))[0], 2, smc_result)))\n imo3.save(os.path.join(result_path, '{}_{}_{}.png'.format(\n os.path.splitext(os.path.basename(img_name))[0], 3, smc_result)))\n\n # 3\n camf = return_d[\"label\"][\"cam_norm_up\"].squeeze().cpu().data.numpy()\n imf = Image.fromarray(camf * 255).convert('RGB')\n imof = imf.resize((im_data.shape[1], im_data.shape[0]), resample=Image.BILINEAR)\n imof.save(os.path.join(result_path, '{}_{}_{}.png'.format(\n os.path.splitext(os.path.basename(img_name))[0], \"f\", smc_result)))\n\n # 4\n label = 
return_d[\"label\"][\"label\"].squeeze().cpu().data.numpy()\n im_label = Image.fromarray((np.asarray(label, dtype=np.uint8) + 1) * 127).convert('RGB')\n imo_label = im_label.resize((im_data.shape[1], im_data.shape[0]), resample=Image.BILINEAR)\n imo_label.save(os.path.join(result_path, '{}_{}_{}.png'.format(\n os.path.splitext(os.path.basename(img_name))[0], \"l\", smc_result)))\n\n # 5\n for key in [\"d1\", \"d2\", \"d3\"]:\n d_out_up_sigmoid = return_d[key][\"out_up_sigmoid\"].squeeze().cpu().data.numpy()\n im_d_out_up_sigmoid = Image.fromarray(d_out_up_sigmoid * 255).convert('RGB')\n imo_d_out_up_sigmoid = im_d_out_up_sigmoid.resize((im_data.shape[1], im_data.shape[0]),\n resample=Image.BILINEAR)\n imo_d_out_up_sigmoid.save(os.path.join(result_path, '{}_{}_{}.png'.format(\n os.path.splitext(os.path.basename(img_name))[0], key, smc_result)))\n pass\n\n pass\n\n pass\n\n\nif __name__ == '__main__':\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\n one_decoder()\n pass\n\n" ]
[ [ "torch.utils.data.DataLoader", "torch.load", "numpy.asarray", "torch.topk", "torch.cuda.is_available" ] ]
walkagain/name_generator
[ "e7b43c917b8a68563518e65b8d63a6c40fc2285d" ]
[ "name_generator_rnn.py" ]
[ "# -*- coding:utf-8 -*-\r\nfrom __future__ import print_function, unicode_literals, division\r\nfrom io import open\r\nimport glob\r\nimport os\r\nimport unicodedata\r\nimport string\r\nimport argparse\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport random\r\n\r\nimport time\r\nimport math\r\n\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.ticker as ticker\r\n\r\n\r\nall_letters = string.ascii_letters + \" .,;'-\"\r\nn_letters = len(all_letters) + 1 # plus EOS marker\r\n\r\ncategory_line = {}\r\nall_category = []\r\nn_categories = 0\r\ntrain_category = None\r\n\r\nsave_dir=\"data/save\"\r\n\r\ndef parse():\r\n parser = argparse.ArgumentParser(description=\"rnn model for name generator\")\r\n parser.add_argument('-it', '--iteration', type=int, default=100000, help=\"iterations of training\")\r\n parser.add_argument('-p', '--print_every', type=int, default=5000, help=\"print the training result every iterations\")\r\n parser.add_argument('-pl', '--plot_every', type=int, default=500, help=\"plotting the loss every iterations\")\r\n parser.add_argument('-s', '--save_every', type=int, default=5000, help=\"save model params every iterations\")\r\n parser.add_argument('-tr', '--train', action='store_true', help=\"Train the model with dataset\")\r\n parser.add_argument('-te', '--test', action='store_true', help=\"test the saved model\")\r\n parser.add_argument('-lm', '--load_model', help=\"load the saved model(e.g.model/name_generator_model_100000.tar)\")\r\n parser.add_argument('-fn', '--filename', help=\"dataset file for training (e.g.data/names/*.txt)\")\r\n parser.add_argument('-sl', '--single_letter', help=\"generate name with a letter, e.g. -sl A\")\r\n parser.add_argument('-ml', '--multi_letters', help=\"generate names with letters, e.g. -ml ACD\")\r\n parser.add_argument('-lr', '--learning_rate', type=float, default=0.0001, help=\"learning rate for training\")\r\n parser.add_argument('-c', '--category', type=str, choices=['Arabic', 'Chinese', 'Czech', 'Dutch', 'English',\r\n 'French', 'German', 'Greek', 'Irish', 'Italian',\r\n 'Japanese', 'Korean', 'Polish', 'Portuguese', 'Russian',\r\n 'Scottish', 'Spanish', 'Vietnamese'],\r\n help=\"language category to train or test\")\r\n\r\n args = parser.parse_args()\r\n return args\r\n\r\n\r\n# search specify file type\r\ndef findFiles(path):\r\n return glob.glob(path)\r\n\r\n# turn unicode string to ascii plain, thanks to https://stackoverflow.com/a/518232/2809427\r\ndef Unicode2Ascii(s):\r\n return \"\".join(\r\n c for c in unicodedata.normalize(\"NFD\", s)\r\n if c in all_letters\r\n and \"MN\" != unicodedata.category(c))\r\n\r\n# read line from file and split by '\\n'\r\ndef readLines(filePath):\r\n lines = open(filePath, encoding=\"utf-8\").read().strip().split('\\n')\r\n return [Unicode2Ascii(line) for line in lines]\r\n\r\n# create dataset from files\r\n\"\"\"\r\nargs: filename with regular expression like data/names/*.txt\r\n\"\"\"\r\ndef loadTrainingDataset(filenames):\r\n global category_line\r\n global all_category\r\n global n_categories\r\n for fileName in findFiles(filenames):\r\n category = os.path.splitext(os.path.basename(fileName))[0]\r\n all_category.append(category)\r\n lines = readLines(fileName)\r\n category_line[category] = lines\r\n\r\n n_categories = len(all_category)\r\n if n_categories == 0:\r\n raise RuntimeError('Data not found. 
Make sure that you downloaded data '\r\n 'from https://download.pytorch.org/tutorial/data.zip and extract it to '\r\n 'the current directory.')\r\n\r\n # print(all_category)\r\n return category_line, all_category, n_categories\r\n\r\nclass RNN(nn.Module):\r\n def __init__(self, input_size, hidden_size, output_size):\r\n super(RNN, self).__init__()\r\n self.hidden_size = hidden_size\r\n self.i2h = nn.Linear(n_categories + input_size + hidden_size, hidden_size)\r\n self.i2o = nn.Linear(n_categories + input_size + hidden_size, output_size)\r\n self.o2o = nn.Linear(output_size + hidden_size, output_size)\r\n self.dropout = nn.Dropout(0.1)\r\n\r\n self.softmax = nn.LogSoftmax(dim=1)\r\n\r\n def forward(self, categories, input, hidden):\r\n in_combined = torch.cat((categories, input, hidden), dim=1)\r\n hidden = self.i2h(in_combined)\r\n output = self.i2o(in_combined)\r\n\r\n out_combined = torch.cat((output, hidden), dim=1)\r\n output = self.o2o(out_combined)\r\n\r\n output = self.softmax(self.dropout(output))\r\n return output, hidden\r\n\r\n def InitHidden(self):\r\n return torch.zeros(1, self.hidden_size)\r\n\r\n\r\n# prepare data for training\r\n# choose a item from list randomly\r\ndef randomChoice(l):\r\n return l[random.randint(0, len(l) -1)]\r\n\r\n# choose training data pairs\r\ndef randomTrainingPairs(category=None):\r\n global train_category\r\n if category is None:\r\n category = randomChoice(all_category)\r\n train_category = category\r\n name = randomChoice(category_line[category])\r\n return category, name\r\n\r\n\r\n# one-hot vector for category\r\ndef CategoryTensor(category):\r\n tensor = torch.zeros(1, n_categories)\r\n idx = all_category.index(category)\r\n tensor[0][idx] = 1\r\n return tensor\r\n\r\n# one-hot matrix for input, ont include EOS\r\ndef InputTensor(line):\r\n tensor = torch.zeros(len(line), 1, n_letters)\r\n for idx in range(len(line)):\r\n letter = line[idx]\r\n tensor[idx][0][all_letters.find(letter)] = 1\r\n\r\n return tensor\r\n\r\n# longTensor for second letter to EOS\r\ndef TargetTensor(line):\r\n letter_indexes = [all_letters.find(line[idx]) for idx in range(1, len(line))]\r\n letter_indexes.append(n_letters - 1) # add index of EOS\r\n return torch.LongTensor(letter_indexes)\r\n\r\n# make category, input and target tensors from random category, line pairs\r\n\r\ndef randomTrainingSample(category=None):\r\n category, line = randomTrainingPairs(category)\r\n category_tensor = CategoryTensor(category)\r\n input_line_tensor = InputTensor(line)\r\n target_line_tensor = TargetTensor(line)\r\n\r\n return category_tensor, input_line_tensor, target_line_tensor\r\n\r\ndef train(category_tensor, input_line_tensor, target_line_tensor):\r\n target_line_tensor.unsqueeze_(-1)\r\n hidden = rnn.InitHidden()\r\n rnn.zero_grad()\r\n loss = 0\r\n\r\n for i in range(input_line_tensor.size(0)):\r\n output, hidden = rnn(category_tensor, input_line_tensor[i], hidden)\r\n per_loss = criterion(output, target_line_tensor[i])\r\n\r\n loss += per_loss\r\n\r\n loss.backward()\r\n\r\n for p in rnn.parameters():\r\n p.data.add_(-lr, p.grad.data)\r\n\r\n return output, loss.item() / input_line_tensor.size(0)\r\n\r\ndef TimeCalulate(since):\r\n now = time.time()\r\n interval = now - since\r\n m = math.floor(interval/60)\r\n s = interval - 60 * m\r\n return \"%dm %ds\" %(m,s)\r\n\r\ndef runTrainingModel(n_iters=100000, print_every=5000, plot_every=500, save_every=5000, category=None, modelFile=None):\r\n all_losses = []\r\n total_loss = 0 # Reset every plot_every iters\r\n start = 
time.time()\r\n\r\n checkpoint = None\r\n start_iteration = 1\r\n if modelFile:\r\n checkpoint = torch.load(modelFile)\r\n rnn.load_state_dict(checkpoint[\"rnn\"])\r\n start_iteration = checkpoint[\"iteration\"]\r\n\r\n for iter in range(start_iteration, n_iters + 1):\r\n output, loss = train(*randomTrainingSample(category))\r\n total_loss += loss\r\n\r\n if iter % print_every == 0:\r\n print('%s (%d %d%%) %.4f' % (TimeCalulate(start), iter, iter / n_iters * 100, loss))\r\n\r\n if iter % plot_every == 0:\r\n all_losses.append((total_loss / plot_every) if (iter - start_iteration >= plot_every) else loss)\r\n total_loss = 0\r\n\r\n if iter % save_every == 0:\r\n directory = os.path.join(save_dir, 'model')\r\n if not os.path.exists(directory):\r\n os.makedirs(directory)\r\n torch.save({\r\n 'iteration': iter,\r\n 'rnn': rnn.state_dict(),\r\n 'category': train_category,\r\n 'loss': loss\r\n }, os.path.join(directory, '{}_{}.tar'.format('name_generator_model', iter)))\r\n\r\n return all_losses\r\n\r\n# sample from a category and starting letter\r\ndef Sample(category, start_letter='A', modelFile=None, max_lenght = 20):\r\n if modelFile:\r\n checkpoint = torch.load(modelFile)\r\n rnn.load_state_dict(checkpoint[\"rnn\"])\r\n if category is None:\r\n category = checkpoint[\"category\"]\r\n\r\n hidden = rnn.InitHidden()\r\n category_tensor = CategoryTensor(category)\r\n input_tensor = InputTensor(start_letter)\r\n output_name = start_letter\r\n for i in range(max_lenght):\r\n output, hidden = rnn(category_tensor, input_tensor[0], hidden)\r\n topv, topi = output.topk(1)\r\n\r\n idx = topi[0][0]\r\n if idx == n_letters - 1: break\r\n else:\r\n letter = all_letters[idx]\r\n output_name += letter\r\n input_tensor = InputTensor(letter)\r\n return output_name\r\n\r\ndef Sampeles(category, start_letters=\"ABC\", modelFile=None):\r\n names = []\r\n for letter in start_letters:\r\n names.append(Sample(category, letter, modelFile))\r\n return names\r\n\r\ndef run(args):\r\n modelFile = None\r\n if args.load_model:\r\n modelFile = args.load_model\r\n\r\n category = None\r\n if args.category:\r\n category = args.category\r\n if args.test:\r\n if modelFile is None:\r\n raise RuntimeError('Please choose a saved model to load')\r\n\r\n if args.single_letter:\r\n start_letter = args.single_letter\r\n print(Sample(category, start_letter, modelFile))\r\n elif args.multi_letters:\r\n print(Sampeles(category, args.multi_letters, modelFile))\r\n\r\n else:\r\n raise RuntimeError(\"please specify evaluate mode\")\r\n\r\n elif args.train:\r\n runTrainingModel(category=category, modelFile=modelFile)\r\n\r\n else:\r\n raise RuntimeError(\"please specify running mode[test/train]\")\r\n\r\nif __name__==\"__main__\":\r\n\r\n args = parse()\r\n filename = \"data/names/*.txt\"\r\n if args.filename:\r\n filename = args.filename\r\n loadTrainingDataset(filename)\r\n\r\n criterion = nn.NLLLoss()\r\n lr = 0.0001\r\n if args.learning_rate:\r\n lr = args.learning_rate\r\n\r\n rnn = RNN(n_letters, 128, n_letters)\r\n run(args)" ]
[ [ "torch.nn.NLLLoss", "torch.nn.Linear", "torch.load", "torch.nn.LogSoftmax", "torch.zeros", "torch.LongTensor", "torch.cat", "torch.nn.Dropout" ] ]
dan1keen/dissertation_counter
[ "1265ee9563d349849c9a68d204e0f427e33f0f48" ]
[ "kalman_tracker/main.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport glob\n#from moviepy.editor import VideoFileClip\nfrom collections import deque\nfrom sklearn.utils.linear_assignment_ import linear_assignment\n\nfrom kalman_tracker import helpers\nfrom kalman_tracker import detector\nfrom kalman_tracker import tracker\nimport cv2\n\n# Global variables to be used by funcitons of VideoFileClop\nframe_count = 0 # frame counter\n\nmax_age = 15 # no.of consecutive unmatched detection before\n # a track is deleted\n\nmin_hits =1 # no. of consecutive matches needed to establish a track\n\ntracker_list =[] # list for trackers\n# list for track ID\ntrack_id_list= deque(['1', '2', '3', '4', '5', '6', '7', '7', '8', '9', '10'])\n\ndef assign_detections_to_trackers(trackers, detections, iou_thrd = 0.3):\n '''\n From current list of trackers and new detections, output matched detections,\n unmatchted trackers, unmatched detections.\n '''\n\n IOU_mat = np.zeros((len(trackers), len(detections)), dtype=np.float32)\n for t, trk in enumerate(trackers):\n # trk = convert_to_cv2bbox(trk)\n for d, det in enumerate(detections):\n # det = convert_to_cv2bbox(det)\n IOU_mat[t, d] = helpers.box_iou2(trk, det)\n\n # Produces matches\n # Solve the maximizing the sum of IOU assignment problem using the\n # Hungarian algorithm (also known as Munkres algorithm)\n\n matched_idx = linear_assignment(-IOU_mat)\n\n unmatched_trackers, unmatched_detections = [], []\n for t, trk in enumerate(trackers):\n if (t not in matched_idx[:, 0]):\n unmatched_trackers.append(t)\n\n for d, det in enumerate(detections):\n if (d not in matched_idx[:, 1]):\n unmatched_detections.append(d)\n\n matches = []\n # For creating trackers we consider any detection with an\n # overlap less than iou_thrd to signifiy the existence of\n # an untracked object\n\n for m in matched_idx:\n if (IOU_mat[m[0], m[1]] < iou_thrd):\n unmatched_trackers.append(m[0])\n unmatched_detections.append(m[1])\n else:\n matches.append(m.reshape(1, 2))\n\n if (len(matches) == 0):\n matches = np.empty((0, 2), dtype=int)\n else:\n matches = np.concatenate(matches, axis=0)\n\n return matches, np.array(unmatched_detections), np.array(unmatched_trackers)\n\ndef pipeline(img, det):\n '''\n Pipeline function for detection and tracking\n '''\n global frame_count\n global tracker_list\n global max_age\n global min_hits\n global track_id_list\n\n frame_count+=1\n\n img_dim = (img.shape[1], img.shape[0])\n z_box = det.get_localization(img) # measurement\n x_box = []\n font = cv2.FONT_HERSHEY_SIMPLEX\n\n if len(tracker_list) > 0:\n for trk in tracker_list:\n x_box.append(trk.box)\n\n matched, unmatched_dets, unmatched_trks \\\n = assign_detections_to_trackers(x_box, z_box, iou_thrd = 0.3)\n\n # Deal with matched detections\n if matched.size > 0:\n for trk_idx, det_idx in matched:\n z = z_box[det_idx]\n z = np.expand_dims(z, axis=0).T\n tmp_trk = tracker_list[trk_idx]\n tmp_trk.kalman_filter(z)\n xx = tmp_trk.x_state.T[0].tolist()\n xx = [xx[0], xx[2], xx[4], xx[6]]\n x_box[trk_idx] = xx\n tmp_trk.box = xx\n tmp_trk.hits += 1\n\n # Deal with unmatched detections\n if len(unmatched_dets) > 0:\n for idx in unmatched_dets:\n z = z_box[idx]\n z = np.expand_dims(z, axis=0).T\n tmp_trk = tracker.Tracker() # Create a new tracker\n x = np.array([[z[0], 0, z[1], 0, z[2], 0, z[3], 0]]).T\n tmp_trk.x_state = x\n tmp_trk.predict_only()\n xx = tmp_trk.x_state\n xx = xx.T[0].tolist()\n xx = [xx[0], xx[2], xx[4], xx[6]]\n tmp_trk.box = xx\n tmp_trk.id = track_id_list.popleft() # assign an ID for the 
tracker\n print(tmp_trk.id, 'wasd')\n tracker_list.append(tmp_trk)\n x_box.append(xx)\n\n # Deal with unmatched tracks\n if len(unmatched_trks) > 0:\n for trk_idx in unmatched_trks:\n tmp_trk = tracker_list[trk_idx]\n tmp_trk.no_losses += 1\n tmp_trk.predict_only()\n xx = tmp_trk.x_state\n xx = xx.T[0].tolist()\n xx = [xx[0], xx[2], xx[4], xx[6]]\n tmp_trk.box = xx\n x_box[trk_idx] = xx\n\n # The list of tracks to be annotated\n good_tracker_list = []\n for trk in tracker_list:\n if ((trk.hits >= min_hits) and (trk.no_losses <= max_age)):\n good_tracker_list.append(trk)\n x_cv2 = trk.box\n img = helpers.draw_box_label(trk.id, img, x_cv2) # Draw the bounding boxes on the images\n tracker_coordinate = (x_cv2[0] + x_cv2[2]) / 2\n # if (tracker_coordinate >= roi):\n # counter.append(trk)\n # Book keeping\n deleted_tracks = filter(lambda x: x.no_losses > max_age, tracker_list)\n\n for trk in deleted_tracks:\n track_id_list.append(trk.id)\n\n tracker_list = [x for x in tracker_list if x.no_losses <= max_age]\n # cv2.line(img, (roi, 0), (roi, height), (0, 0, 0xFF), 5)\n # cv2.line(img, (0, roi), (width, roi), (0, 0, 0xFF), 5)\n\n cv2.putText(img,\n 'Detected Pedestrians: ' + str(len(good_tracker_list)),\n (10, 35),\n font,\n 0.8,\n (0, 0xFF, 0xFF),\n 2,\n cv2.LINE_4)\n\n # cv2.putText(\n # img,\n # 'ROI Line',\n # (545, roi - 10),\n # font,\n # 0.6,\n # (0, 0, 0xFF),\n # 2,\n # cv2.LINE_AA,\n # )\n\n cv2.imshow(\"frame\", img)\n return img\n\nif __name__ == \"__main__\":\n det = detector.PersonDetector()\n cap = cv2.VideoCapture('inputs/example_01.mp4')\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n out = cv2.VideoWriter('output.avi', fourcc, 8.0, (640, 480))\n roi = 200\n counter = []\n\n if cap.isOpened():\n # get cap property\n width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n while (True):\n\n ret, img = cap.read()\n # print(img)\n\n np.asarray(img)\n font = cv2.FONT_HERSHEY_SIMPLEX\n # trackers_count = pipeline(img)[1]\n\n new_img = pipeline(img)\n out.write(new_img)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n cap.release()\n cv2.destroyAllWindows()\n" ]
[ [ "numpy.empty", "numpy.asarray", "numpy.expand_dims", "sklearn.utils.linear_assignment_.linear_assignment", "numpy.array", "numpy.concatenate" ] ]
s10singh97/GSQuantify2018
[ "a18df022414659cafdbc010df31db5a4f957a1d6" ]
[ "1.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndataset = pd.read_csv('train.csv')\nX = dataset.iloc[:, 1:4].values\ny = dataset.iloc[:, 0].values\n\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nlabelencoder_X_1 = LabelEncoder()\nX[:, 2] = labelencoder_X_1.fit_transform(X[:, 2])\nonehotencoder = OneHotEncoder(categorical_features = [2])\nX = onehotencoder.fit_transform(X).toarray()\nX = X[:, 1:]\n\nfrom sklearn.tree import DecisionTreeRegressor\nregressor = DecisionTreeRegressor(random_state = 0)\nregressor.fit(X, y)\n\ndataset_test = pd.read_csv('private_test_x.csv')\nX_test = dataset_test.iloc[:, 1:4].values\n\nX_test[:, 2] = labelencoder_X_1.transform(X_test[:, 2])\nX_test = onehotencoder.transform(X_test).toarray()\nX_test = X_test[:, 1:]\n\ny_expected = dataset_test.iloc[:, 0].values\ny_pred = regressor.predict(X_test)\ny_pred = y_pred.astype(np.int64)\n\nxx = dataset_test.iloc[:, 1:4].values\nxx = xx.tolist()\n#xx[:, 0:2] = xx[:, 0:2].astype(np.int64)\noutput = np.column_stack((y_pred, xx))\nheaders = [\"Usage\", \"Timestep\", \"InventoryCode\", \"Domain\"]\nop = np.row_stack((headers, output))\ndf = pd.DataFrame(op)\ndf.to_csv(\"privatetest.csv\", index = False, header = None)\n\nfinal = pd.read_csv(\"privatetest.csv\")" ]
[ [ "pandas.read_csv", "pandas.DataFrame", "sklearn.tree.DecisionTreeRegressor", "numpy.column_stack", "numpy.row_stack", "sklearn.preprocessing.LabelEncoder", "sklearn.preprocessing.OneHotEncoder" ] ]
nicehiro/multiagent-particle-envs
[ "9028a9f73306b4044d352dd46356ed451ca82c7b" ]
[ "multiagent/environment.py" ]
[ "import gym\nfrom gym import spaces\nfrom gym.envs.registration import EnvSpec\nimport numpy as np\nfrom gym.spaces import MultiDiscrete\n\n\nclass MultiAgentEnv(gym.Env):\n \"\"\"Environment for all agents in the multiagent world.\n currently code assumes that no agents will be created/destroyed at runtime!\n \"\"\"\n metadata = {\n 'render.modes' : ['human', 'rgb_array']\n }\n\n def __init__(self, world, reset_callback=None, reward_callback=None,\n observation_callback=None, info_callback=None,\n done_callback=None, shared_viewer=True):\n\n self.world = world\n self.agents = self.world.policy_agents\n # set required vectorized gym env property\n self.n = len(world.policy_agents)\n # scenario callbacks\n self.reset_callback = reset_callback\n self.reward_callback = reward_callback\n self.observation_callback = observation_callback\n self.info_callback = info_callback\n self.done_callback = done_callback\n # environment parameters\n self.discrete_action_space = True\n # if true, action is a number 0...N, otherwise action is a one-hot N-dimensional vector\n self.discrete_action_input = False\n # if true, even the action is continuous, action will be performed discretely\n self.force_discrete_action = world.discrete_action if hasattr(world, 'discrete_action') else False\n # if true, every agent has the same reward\n self.shared_reward = world.collaborative if hasattr(world, 'collaborative') else False\n self.time = 0\n\n # configure spaces\n self.action_space = []\n self.observation_space = []\n for agent in self.agents:\n total_action_space = []\n # physical action space\n if self.discrete_action_space:\n u_action_space = spaces.Discrete(world.dim_p * 2 + 1)\n else:\n u_action_space = spaces.Box(low=-agent.u_range, high=+agent.u_range, shape=(world.dim_p,), dtype=np.float32)\n if agent.movable:\n total_action_space.append(u_action_space)\n # communication action space\n if self.discrete_action_space:\n c_action_space = spaces.Discrete(world.dim_c)\n else:\n c_action_space = spaces.Box(low=0.0, high=1.0, shape=(world.dim_c,), dtype=np.float32)\n if not agent.silent:\n total_action_space.append(c_action_space)\n # total action space\n if len(total_action_space) > 1:\n # all action spaces are discrete, so simplify to MultiDiscrete action space\n if all([isinstance(act_space, spaces.Discrete) for act_space in total_action_space]):\n act_space = MultiDiscrete([[0, act_space.n - 1] for act_space in total_action_space])\n else:\n act_space = spaces.Tuple(total_action_space)\n self.action_space.append(act_space)\n else:\n self.action_space.append(total_action_space[0])\n # observation space\n obs_dim = len(observation_callback(agent, self.world))\n self.observation_space.append(spaces.Box(low=-np.inf, high=+np.inf, shape=(obs_dim,), dtype=np.float32))\n agent.action.c = np.zeros(self.world.dim_c)\n\n # rendering\n self.shared_viewer = shared_viewer\n if self.shared_viewer:\n self.viewers = [None]\n else:\n self.viewers = [None] * self.n\n self._reset_render()\n\n def step(self, action_n):\n \"\"\"Make a step for every movable agent.\n \"\"\"\n obs_n = []\n reward_n = []\n done_n = []\n info_n = {'n': []}\n self.agents = self.world.policy_agents\n # set action for each agent\n for i, agent in enumerate(self.agents):\n self._set_action(action_n[i], agent, self.action_space[i])\n # advance world state\n self.world.step()\n # record observation for each agent\n for agent in self.agents:\n obs_n.append(self._get_obs(agent))\n reward_n.append(self._get_reward(agent))\n 
done_n.append(self._get_done(agent))\n\n info_n['n'].append(self._get_info(agent))\n\n # all agents get total reward in cooperative case\n reward = np.sum(reward_n)\n if self.shared_reward:\n reward_n = [reward] * self.n\n\n return obs_n, reward_n, done_n, info_n\n\n def reset(self):\n \"\"\"Reset environment and agents.\n \"\"\"\n # reset world\n self.reset_callback(self.world)\n # reset renderer\n self._reset_render()\n # record observations for each agent\n obs_n = []\n self.agents = self.world.policy_agents\n for agent in self.agents:\n obs_n.append(self._get_obs(agent))\n return obs_n\n\n def _get_info(self, agent):\n \"\"\"Get info used for benchmarking.\n \"\"\"\n if self.info_callback is None:\n return {}\n return self.info_callback(agent, self.world)\n\n def _get_obs(self, agent):\n \"\"\"Get observation for a particular agent.\n \"\"\"\n if self.observation_callback is None:\n return np.zeros(0)\n return self.observation_callback(agent, self.world)\n\n def _get_done(self, agent):\n \"\"\"Get dones for a particular agent.\n TODO: Unused right now -- agents are allowed to go beyond the viewing screen.\n \"\"\"\n if self.done_callback is None:\n return False\n return self.done_callback(agent, self.world)\n\n def _get_reward(self, agent):\n \"\"\"Get reward for a particular agent.\n \"\"\"\n if self.reward_callback is None:\n return 0.0\n return self.reward_callback(agent, self.world)\n\n def _set_action(self, action, agent, action_space, time=None):\n \"\"\"Set env action for a particular agent.\n \"\"\"\n agent.action.u = np.zeros(self.world.dim_p)\n agent.action.c = np.zeros(self.world.dim_c)\n # process action\n if isinstance(action_space, MultiDiscrete):\n act = []\n size = action_space.high - action_space.low + 1\n index = 0\n for s in size:\n act.append(action[index:(index+s)])\n index += s\n action = act\n else:\n action = [action]\n\n if agent.movable:\n # physical action\n if self.discrete_action_input:\n agent.action.u = np.zeros(self.world.dim_p)\n # process discrete action\n if action[0] == 1: agent.action.u[0] = -1.0\n if action[0] == 2: agent.action.u[0] = +1.0\n if action[0] == 3: agent.action.u[1] = -1.0\n if action[0] == 4: agent.action.u[1] = +1.0\n else:\n if self.force_discrete_action:\n d = np.argmax(action[0])\n action[0][:] = 0.0\n action[0][d] = 1.0\n if self.discrete_action_space:\n agent.action.u[0] += action[0][1] - action[0][2]\n agent.action.u[1] += action[0][3] - action[0][4]\n else:\n agent.action.u = action[0]\n sensitivity = 5.0\n if agent.accel is not None:\n sensitivity = agent.accel\n agent.action.u *= sensitivity\n action = action[1:]\n if not agent.silent:\n # communication action\n if self.discrete_action_input:\n agent.action.c = np.zeros(self.world.dim_c)\n agent.action.c[action[0]] = 1.0\n else:\n agent.action.c = action[0]\n action = action[1:]\n # make sure we used all elements of action\n assert len(action) == 0\n\n # reset rendering assets\n def _reset_render(self):\n self.render_geoms = None\n self.render_geoms_xform = None\n\n # render environment\n def render(self, mode='human'):\n if mode == 'human':\n alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n message = ''\n for agent in self.world.agents:\n comm = []\n for other in self.world.agents:\n if other is agent: continue\n if np.all(other.state.c == 0):\n word = '_'\n else:\n word = alphabet[np.argmax(other.state.c)]\n message += (other.name + ' to ' + agent.name + ': ' + word + ' ')\n print(message)\n\n for i in range(len(self.viewers)):\n # create viewers (if necessary)\n if 
self.viewers[i] is None:\n # import rendering only if we need it (and don't import for headless machines)\n #from gym.envs.classic_control import rendering\n from multiagent import rendering\n self.viewers[i] = rendering.Viewer(700,700)\n\n # create rendering geometry\n if self.render_geoms is None:\n # import rendering only if we need it (and don't import for headless machines)\n #from gym.envs.classic_control import rendering\n from multiagent import rendering\n self.render_geoms = []\n self.render_geoms_xform = []\n for entity in self.world.entities:\n geom = rendering.make_circle(entity.size)\n xform = rendering.Transform()\n if 'agent' in entity.name:\n geom.set_color(*entity.color, alpha=0.5)\n else:\n geom.set_color(*entity.color)\n geom.add_attr(xform)\n self.render_geoms.append(geom)\n self.render_geoms_xform.append(xform)\n\n # add geoms to viewer\n for viewer in self.viewers:\n viewer.geoms = []\n for geom in self.render_geoms:\n viewer.add_geom(geom)\n\n results = []\n for i in range(len(self.viewers)):\n from multiagent import rendering\n # update bounds to center around agent\n cam_range = 1\n if self.shared_viewer:\n pos = np.zeros(self.world.dim_p)\n else:\n pos = self.agents[i].state.p_pos\n self.viewers[i].set_bounds(pos[0]-cam_range,pos[0]+cam_range,pos[1]-cam_range,pos[1]+cam_range)\n # update geometry positions\n for e, entity in enumerate(self.world.entities):\n self.render_geoms_xform[e].set_translation(*entity.state.p_pos)\n # render to display or array\n results.append(self.viewers[i].render(return_rgb_array = mode=='rgb_array'))\n\n return results\n\n def _make_receptor_locations(self, agent):\n \"\"\"Create receptor field locations in local coordinate frame.\n \"\"\"\n receptor_type = 'polar'\n range_min = 0.05 * 2.0\n range_max = 1.00\n dx = []\n # circular receptive field\n if receptor_type == 'polar':\n for angle in np.linspace(-np.pi, +np.pi, 8, endpoint=False):\n for distance in np.linspace(range_min, range_max, 3):\n dx.append(distance * np.array([np.cos(angle), np.sin(angle)]))\n # add origin\n dx.append(np.array([0.0, 0.0]))\n # grid receptive field\n if receptor_type == 'grid':\n for x in np.linspace(-range_max, +range_max, 5):\n for y in np.linspace(-range_max, +range_max, 5):\n dx.append(np.array([x,y]))\n return dx\n\n\nclass BatchMultiAgentEnv(gym.Env):\n \"\"\"Vectorized wrapper for a batch of multi-agent environments.\n assumes all environments have the same observation and action space\n \"\"\"\n metadata = {\n 'runtime.vectorized': True,\n 'render.modes' : ['human', 'rgb_array']\n }\n\n def __init__(self, env_batch):\n self.env_batch = env_batch\n\n @property\n def n(self):\n return np.sum([env.n for env in self.env_batch])\n\n @property\n def action_space(self):\n return self.env_batch[0].action_space\n\n @property\n def observation_space(self):\n return self.env_batch[0].observation_space\n\n def step(self, action_n, time):\n obs_n = []\n reward_n = []\n done_n = []\n info_n = {'n': []}\n i = 0\n for env in self.env_batch:\n obs, reward, done, _ = env.step(action_n[i:(i+env.n)], time)\n i += env.n\n obs_n += obs\n # reward = [r / len(self.env_batch) for r in reward]\n reward_n += reward\n done_n += done\n return obs_n, reward_n, done_n, info_n\n\n def reset(self):\n obs_n = []\n for env in self.env_batch:\n obs_n += env.reset()\n return obs_n\n\n # render environment\n def render(self, mode='human', close=True):\n results_n = []\n for env in self.env_batch:\n results_n += env.render(mode, close)\n return results_n\n" ]
[ [ "numpy.sum", "numpy.zeros", "numpy.cos", "numpy.argmax", "numpy.all", "numpy.array", "numpy.sin", "numpy.linspace" ] ]
artanzand/neural_style_transfer
[ "134ff775a706e1c08d836b43e11986b6f2d00543" ]
[ "stylize.py" ]
[ "# author: Artan Zandian\r\n# date: 2022-01-22\r\n\r\n\"\"\"\r\nReads two source images, one as the initial content image and second as the target style image,\r\nand applies Neural Style Transfer on the content image to create a stylized rendering of the content\r\nimage based on the texture and style of the style image.\r\nUsage: python stylize.py --content <content image> --style <style image> --save <save directory> --similarity <direction> --epochs <num_iter>\r\nOptions:\r\n--content=<image_path> file path of the content image - initial \r\n--style=<csv_path> file path of the style image - target\r\n--save=<save_path> file path to save the stylized image without image format\r\n--similarity=<direction> Whether the generated image is similar to \"content\", \"style\", \"balanced\"\r\n--epochs=<num_iter> number of epochs - 2,000 for speed, 10,000 for quality\r\n\"\"\"\r\n\r\n\r\nfrom PIL import Image\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\n\r\nfrom docopt import docopt\r\n\r\nopt = docopt(__doc__)\r\n\r\n\r\ndef main(content, style, save, similarity=\"balanced\", epochs=500):\r\n \"\"\"\r\n The main function reads two source images, one as the initial content image\r\n and second as the target style image, and applies Neural Style Transfer on\r\n the content image to create a stylized rendering of the content image based on\r\n the texture and style of the style image.\r\n Parameters\r\n ----------\r\n content: str\r\n The image path to the content image to start from\r\n style: str\r\n The image path to the target style image\r\n save: str\r\n The path to save the image without image type\r\n similarity: str, optional\r\n whether the generate image is similar to 'content', 'style' or 'balanced'\r\n epochs: int, optional\r\n number of iterations to train the generate image.\r\n Returns\r\n -------\r\n image\r\n saved stylized image\r\n \"\"\"\r\n # Exception handelings\r\n try:\r\n type(int(epochs)) == int\r\n except Exception:\r\n raise (\"epochs should be an integer value!\")\r\n\r\n try:\r\n # Limit the image size to increase performance\r\n image_size = 400\r\n\r\n # capture content image size to reshape at end\r\n content_image = Image.open(content)\r\n content_width, content_height = content_image.size\r\n\r\n # Load pretrained VGG19 model\r\n vgg = tf.keras.applications.VGG19(\r\n include_top=False,\r\n input_shape=(image_size, image_size, 3),\r\n weights=\"imagenet\",\r\n )\r\n # Lock in the model weights\r\n vgg.trainable = False\r\n\r\n # Load Content and Style images\r\n content_image = preprocess_image(content, image_size)\r\n style_image = preprocess_image(style, image_size)\r\n\r\n # Randomly initialize Generated image\r\n # Define the generated image as as tensorflow variable to optimize\r\n generated_image = tf.Variable(\r\n tf.image.convert_image_dtype(content_image, tf.float32)\r\n )\r\n # Add random noise to initial generated image\r\n noise = tf.random.uniform(tf.shape(generated_image), -0.25, 0.25)\r\n generated_image = tf.add(generated_image, noise)\r\n generated_image = tf.clip_by_value(\r\n generated_image, clip_value_min=0.0, clip_value_max=1.0\r\n )\r\n\r\n # Define output layers\r\n style_layers = get_style_layers(similarity=similarity)\r\n content_layer = [(\"block5_conv4\", 1)] # The last layer of VGG19\r\n\r\n vgg_model_outputs = get_layer_outputs(vgg, style_layers + content_layer)\r\n\r\n # Content encoder\r\n # Define activation encoding for the content image (a_C)\r\n # Assign content image as the input of 
VGG19\r\n preprocessed_content = tf.Variable(\r\n tf.image.convert_image_dtype(content_image, tf.float32)\r\n )\r\n a_C = vgg_model_outputs(preprocessed_content)\r\n\r\n # Style encoder\r\n # Define activation encoding for the style image (a_S)\r\n # Assign style image as the input of VGG19\r\n preprocessed_style = tf.Variable(\r\n tf.image.convert_image_dtype(style_image, tf.float32)\r\n )\r\n a_S = vgg_model_outputs(preprocessed_style)\r\n\r\n # Initialize the optimizer\r\n optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)\r\n # Need to redefine the clipped image as a tf.variable to be optimized\r\n generated_image = tf.Variable(generated_image)\r\n\r\n # Check if GPU is available\r\n print(\"Num GPUs Available: \", len(tf.config.list_physical_devices(\"GPU\")))\r\n\r\n # Train the model\r\n epochs = int(epochs)\r\n for i in range(epochs):\r\n train_step(\r\n generated_image, vgg_model_outputs, style_layers, optimizer, a_C, a_S\r\n )\r\n if i % 500 == 0:\r\n print(f\"Epoch {i} >>>\")\r\n\r\n # Resize to original size and save\r\n image = tensor_to_image(generated_image)\r\n image = image.resize((content_width, content_height))\r\n image.save(save + \".jpg\")\r\n print(\"Image saved.\")\r\n\r\n except Exception as message:\r\n print(message)\r\n\r\n\r\ndef get_layer_outputs(vgg, layer_names):\r\n \"\"\"\r\n Creates a vgg model that returns a list of intermediate output values.\r\n \"\"\"\r\n outputs = [vgg.get_layer(layer[0]).output for layer in layer_names]\r\n\r\n model = tf.keras.Model([vgg.input], outputs)\r\n return model\r\n\r\n\r\ndef get_style_layers(similarity=\"balanced\"):\r\n \"\"\"\r\n Assigns weights to style layer outputs to define whether the generated image\r\n is similar to \"content\", \"style\", or \"balanced\". The function is picking the\r\n last convolutional layer in each of the five blocks of the VGG network. 
The\r\n activations of each of these layers along with the content layer (last layer)\r\n will be the outputs of the neural style transfer network.\r\n Parameters\r\n ----------\r\n similarity: str, optional\r\n a string identifying the similarity to either content, style or both\r\n Returns\r\n -------\r\n style_layers\r\n a list of tuples identifying the name of style layer along with their weights\r\n \"\"\"\r\n if similarity == \"balanced\":\r\n style_layers = [\r\n (\"block1_conv1\", 0.2),\r\n (\"block2_conv1\", 0.2),\r\n (\"block3_conv1\", 0.2),\r\n (\"block4_conv1\", 0.2),\r\n (\"block5_conv1\", 0.2),\r\n ]\r\n elif similarity == \"content\":\r\n style_layers = [\r\n (\"block1_conv1\", 0.02),\r\n (\"block2_conv1\", 0.08),\r\n (\"block3_conv1\", 0.2),\r\n (\"block4_conv1\", 0.3),\r\n (\"block5_conv1\", 0.4),\r\n ]\r\n elif similarity == \"style\":\r\n style_layers = [\r\n (\"block1_conv1\", 0.4),\r\n (\"block2_conv1\", 0.3),\r\n (\"block3_conv1\", 0.2),\r\n (\"block4_conv1\", 0.08),\r\n (\"block5_conv1\", 0.02),\r\n ]\r\n else:\r\n raise Exception(\r\n \"Please provide either of 'content', 'style' or 'balanced' for --similarity\"\r\n )\r\n\r\n return style_layers\r\n\r\n\r\ndef preprocess_image(image_path, image_size):\r\n \"\"\"\r\n loads the image and makes it compatible with VGG input size\r\n Parameters\r\n ----------\r\n image_path: str\r\n directory path of the image\r\n Returns\r\n -------\r\n image\r\n loaded and standardaized image\r\n \"\"\"\r\n # Load and resize Content and Style images to a square image\r\n image = np.array(Image.open(image_path).resize((image_size, image_size)))\r\n # Add one dim for VGG compatibility\r\n image = tf.constant(np.reshape(image, ((1,) + image.shape)))\r\n\r\n return image\r\n\r\n\r\ndef tensor_to_image(tensor):\r\n \"\"\"\r\n Converts the calculated final vector into a PIL image\r\n Parameters\r\n ----------\r\n tensor: Tensor\r\n Returns\r\n -------\r\n Image\r\n A PIL image\r\n \"\"\"\r\n tensor = tensor * 255\r\n tensor = np.array(tensor, dtype=np.uint8)\r\n if np.ndim(tensor) > 3:\r\n tensor = tensor[0]\r\n return Image.fromarray(tensor)\r\n\r\n\r\[email protected]()\r\ndef train_step(generated_image, vgg_model_outputs, style_layers, optimizer, a_C, a_S):\r\n \"\"\"\r\n Uses precomputed encoded images a_S and a_C as constants, calculates\r\n a_G as the encoding of the newly generated image, and uses the three\r\n to compute the cost function, and respectively, one gradient step.\r\n Parameters\r\n ----------\r\n generated_image: tensor\r\n image in shape of a vector\r\n \"\"\"\r\n with tf.GradientTape() as tape:\r\n\r\n # a_G as the vgg_model_outputs for the current generated image\r\n a_G = vgg_model_outputs(generated_image)\r\n\r\n # Compute content cost\r\n J_content = compute_content_cost(a_C, a_G)\r\n\r\n # Compute style cost\r\n J_style = compute_style_cost(a_S, a_G, style_layers)\r\n\r\n # Compute total cost\r\n J = total_cost(J_content, J_style, alpha=10, beta=40)\r\n\r\n grad = tape.gradient(J, generated_image)\r\n\r\n optimizer.apply_gradients([(grad, generated_image)])\r\n generated_image.assign(\r\n tf.clip_by_value(generated_image, clip_value_min=0.0, clip_value_max=1.0)\r\n )\r\n\r\n\r\ndef compute_content_cost(content_output, generated_output):\r\n \"\"\"\r\n Computes the content cost.\r\n Parameters\r\n ----------\r\n a_C: tensor\r\n hidden layer activations representing content of the image C - dimension (1, n_H, n_W, n_C)\r\n a_G: tensor\r\n hidden layer activations representing content of the image G - dimension (1, 
n_H, n_W, n_C)\r\n Returns\r\n -------\r\n J_content: float64\r\n the content cost between a_C and a_G\r\n \"\"\"\r\n # Exclude the last layer output\r\n a_C = content_output[-1]\r\n a_G = generated_output[-1]\r\n\r\n # Retrieve dimensions from a_G\r\n _, n_H, n_W, n_C = a_G.get_shape().as_list()\r\n\r\n # Reshape a_C and a_G\r\n a_C_unrolled = tf.reshape(a_C, shape=(1, -1, n_C))\r\n a_G_unrolled = tf.reshape(a_G, shape=(1, -1, n_C))\r\n\r\n # compute the cost with tensorflow\r\n J_content = (1 / (4 * n_C * n_H * n_W)) * tf.reduce_sum(\r\n tf.square(tf.subtract(a_C_unrolled, a_G_unrolled))\r\n )\r\n\r\n return J_content\r\n\r\n\r\ndef compute_layer_style_cost(a_S, a_G):\r\n \"\"\"\r\n Computes the style cost of one layer.\r\n Parameters\r\n ----------\r\n a_C: tensor\r\n hidden layer activations representing content of the image C - dimension (1, n_H, n_W, n_C)\r\n a_G: tensor\r\n hidden layer activations representing content of the image G - dimension (1, n_H, n_W, n_C)\r\n Returns\r\n -------\r\n J_style_layer\r\n A scalar value representing style cost for a layer\r\n \"\"\"\r\n\r\n # Retrieve dimensions from a_G\r\n _, n_H, n_W, n_C = a_G.get_shape().as_list()\r\n\r\n # Reshape the images from (1, n_H, n_W, n_C) to have them of shape (n_C, n_H * n_W)\r\n a_S = tf.reshape(tf.transpose(a_S, perm=[3, 0, 1, 2]), shape=(n_C, -1))\r\n a_G = tf.reshape(tf.transpose(a_G, perm=[3, 0, 1, 2]), shape=(n_C, -1))\r\n\r\n # Computing gram_matrices for both images S and G\r\n GS = tf.matmul(a_S, tf.transpose(a_S))\r\n GG = tf.matmul(a_G, tf.transpose(a_G))\r\n\r\n # Computing the loss\r\n J_style_layer = (1 / (2 * n_C * n_H * n_W) ** 2) * tf.reduce_sum(\r\n tf.square(tf.subtract(GS, GG))\r\n )\r\n\r\n return J_style_layer\r\n\r\n\r\ndef compute_style_cost(style_image_output, generated_image_output, style_layers):\r\n \"\"\"\r\n Computes the overall style cost from several chosen layers\r\n Parameters\r\n ----------\r\n style_image_output: tensor\r\n output of VGG model for the style image (activations of style layers & content layer)\r\n generated_image_output: tensor\r\n output of VGG model for the generated image (activations of style layers & content layer)\r\n style_layers : list of tuples\r\n containing the names of the layers we would like to extract style from and a coefficient for each of them\r\n Returns\r\n -------\r\n J_style\r\n A scalar value representing style cost\r\n \"\"\"\r\n\r\n # initialize the cost\r\n J_style = 0\r\n\r\n # Excluding the last element of the array which contains the content layer image\r\n a_S = style_image_output[:-1] # a_S is the hidden layer activations\r\n a_G = generated_image_output[:-1] # a_G is the hidden layer activations\r\n\r\n for i, weight in zip(range(len(a_S)), style_layers):\r\n # Compute style_cost for the current layer\r\n J_style_layer = compute_layer_style_cost(a_S[i], a_G[i])\r\n\r\n # Add weight * J_style_layer of this layer to overall style cost\r\n J_style += weight[1] * J_style_layer\r\n\r\n return J_style\r\n\r\n\r\[email protected]()\r\ndef total_cost(J_content, J_style, alpha=10, beta=40):\r\n \"\"\"\r\n Computes the total cost function. 
Because the main purpose of the algorithm\r\n is on matching the style of a target photo a bigger weight (beta) is given to\r\n the style image.\r\n Parameters\r\n ----------\r\n J_content: float\r\n content cost computed in compute_content_cost\r\n J_style: float\r\n style cost computed in compute_style_cost\r\n alpha: float\r\n hyperparameter weighting the importance of the content cost\r\n beta: float\r\n hyperparameter weighting the importance of the style cost\r\n Returns\r\n -------\r\n J\r\n total cost\r\n \"\"\"\r\n J = alpha * J_content + beta * J_style\r\n\r\n return J\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main(\r\n opt[\"--content\"],\r\n opt[\"--style\"],\r\n opt[\"--save\"],\r\n opt[\"--similarity\"],\r\n opt[\"--epochs\"],\r\n )\r\n" ]
[ [ "tensorflow.keras.optimizers.Adam", "tensorflow.shape", "tensorflow.reshape", "tensorflow.subtract", "tensorflow.function", "numpy.reshape", "tensorflow.keras.Model", "tensorflow.add", "tensorflow.keras.applications.VGG19", "tensorflow.GradientTape", "tensorflow.image.convert_image_dtype", "tensorflow.clip_by_value", "numpy.ndim", "tensorflow.Variable", "numpy.array", "tensorflow.transpose", "tensorflow.config.list_physical_devices" ] ]
oreh/gseapy
[ "d3212afb2e8d61f37957d685da6ef28f723d98e6" ]
[ "gseapy/gsea_plot.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.transforms as transforms\nfrom matplotlib.colors import Normalize\n\n\n\nclass _MidpointNormalize(Normalize):\n def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):\n self.midpoint = midpoint\n Normalize.__init__(self, vmin, vmax, clip)\n\n def __call__(self, value, clip=None):\n # I'm ignoring masked values and all kinds of edge cases to make a\n # simple example...\n x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]\n return np.ma.masked_array(np.interp(value, x, y))\n\n\n\n\n\n\ndef gsea_plot(rank_metric, enrich_term, hit_ind, nes, pval, fdr, RES,\n phenoPos=None, phenoNeg=None, figsize =(6.5,6), **kwarg):\n \"\"\"This is the main function for reproducing the gsea plot.\n \n :param rank_metric: rankings, rank_metric['rank'].values.\n :param enrich_term: gene_set name\n :param hit_ind: hit indexs of rank_metric['gene_name'] presented in gene set S.\n :param nes: Normalized enrichment scores.\n :param pval: nominal p-value.\n :param fdr: false discoveray rate.\n :param RES: ranking enrichment scores of all genes in rank_metric['gene_name'].\n :param phenoPos: phenotype lable, positive correlated.\n :param phenoNeg: phenotype lable, negative correlated.\n :param figsize: matplotlib figsize.\n :return: fig object of gsea plot.\n \"\"\" \n \n # center color map at midpoint = 0\n norm = _MidpointNormalize(midpoint=0)\n \n #dataFrame of ranked matrix scores \n x = rank_metric.index.values \n #figsize = (6,6)\n phenoP_label = phenoPos + ' (Positively Correlated)'\n phenoN_label = phenoNeg + ' (Negatively Correlated)'\n zero_score_ind = np.abs(rank_metric['rank']).argmin()\n z_score_label = 'Zero score at ' + str(zero_score_ind)\n nes_label = 'NES: '+ \"{:.3f}\".format(float(nes))\n pval_label = 'Pval: '+ \"{:.3f}\".format(float(pval))\n fdr_label = 'FDR: '+ \"{:.3f}\".format(float(fdr)) \n im_matrix = rank_metric.ix[:,1:].T\n\n #in most case, we will have mangy plots, so do not display plots\n #It's also convinient to run this script on command line. 
\n plt.ioff() \n #GSEA Plots\n gs = plt.GridSpec(16,1)\n fig = plt.figure(figsize=figsize)\n #Ranked Metric Scores Plot\n ax1 = fig.add_subplot(gs[11:])\n ax1.fill_between(x, y1= rank_metric['rank'], y2=0, color='#C9D3DB')\n ax1.set_ylabel(\"Ranked list metric\",fontsize=14) \n ax1.text(.05, .9, phenoP_label, color='red', horizontalalignment='left', verticalalignment='top',\n transform=ax1.transAxes)\n ax1.text(.95, .05, phenoN_label, color='Blue', horizontalalignment='right', verticalalignment='bottom',\n transform=ax1.transAxes)\n\n # the x coords of this transformation are data, and the y coord are axes\n trans1 = transforms.blended_transform_factory(ax1.transData, ax1.transAxes)\n ax1.vlines(zero_score_ind, 0, 1, linewidth=.5, transform=trans1, linestyles='--', color='grey')\n ax1.text(zero_score_ind, 0.5, z_score_label, horizontalalignment='center', verticalalignment='center',\n transform=trans1) \n ax1.set_xlabel(\"Rank in Ordered Dataset\", fontsize=14)\n ax1.spines['top'].set_visible(False)\n ax1.tick_params(axis='both', which='both', top='off', right='off', left='off')\n ax1.locator_params(axis='y', nbins=5) \n ax1.yaxis.set_major_formatter(plt.FuncFormatter(lambda tick_loc,tick_num : '{:.1f}'.format(tick_loc) ))\n \n # use round method to control float number\n #ax1.yaxis.set_major_formatter(plt.FuncFormatter(lambda tick_loc,tick_num : round(tick_loc, 1) ))\n \n #gene hits\n ax2 = fig.add_subplot(gs[8:10], sharex=ax1)\n\n # the x coords of this transformation are data, and the y coord are axes\n trans2 = transforms.blended_transform_factory(ax2.transData, ax2.transAxes)\n ax2.vlines(hit_ind, 0, 1,linewidth=.5,transform=trans2)\n ax2.spines['bottom'].set_visible(False)\n ax2.tick_params(axis='both', which='both', bottom='off', top='off', \n labelbottom='off', right='off', left='off',labelleft='off')\n #colormap\n ax3 = fig.add_subplot(gs[10],sharex=ax1)\n ax3.imshow(im_matrix, aspect='auto', norm=norm, cmap=plt.cm.seismic, interpolation='none') # cm.coolwarm\n ax3.spines['bottom'].set_visible(False)\n ax3.tick_params(axis='both', which='both', bottom='off', top='off', \n labelbottom='off', right='off', left='off',labelleft='off')\n\n # Enrichment score plot\n ax4 = fig.add_subplot(gs[:8],sharex=ax1)\n ax4.plot(x,RES,linewidth=4,color ='#88C544')\n ax4.text(.1, .1, fdr_label, transform=ax4.transAxes)\n ax4.text(.1, .2, pval_label, transform=ax4.transAxes)\n ax4.text(.1, .3, nes_label, transform=ax4.transAxes)\n\n # the y coords of this transformation are data, and the x coord are axes\n trans4 = transforms.blended_transform_factory(ax4.transAxes, ax4.transData)\n ax4.hlines(0, 0, 1, linewidth=.5, transform=trans4, color='grey')\n ax4.set_ylabel(\"Enrichment score (ES)\", fontsize=14)\n ax4.set_xlim(min(x), max(x))\n ax4.tick_params(axis='both', which='both', bottom='off', top='off', labelbottom='off', right='off')\n ax4.locator_params(axis='y', nbins=5)\n # FuncFormatter need two argment, I don't know why. this lambda function used to format yaxis tick labels.\n ax4.yaxis.set_major_formatter(plt.FuncFormatter(lambda tick_loc,tick_num : '{:.1f}'.format(tick_loc)) )\n \n #fig adjustment\n fig.suptitle(enrich_term, fontsize=16)\n fig.subplots_adjust(hspace=0)\n #fig.tight_layout()\n plt.close(fig)\n \n return fig\n \n" ]
[ [ "matplotlib.colors.Normalize.__init__", "numpy.interp", "matplotlib.pyplot.ioff", "matplotlib.pyplot.figure", "numpy.abs", "matplotlib.pyplot.GridSpec", "matplotlib.pyplot.close", "matplotlib.transforms.blended_transform_factory" ] ]
TangleSpace/hotstepper
[ "4d8a278d94f19fee2bc4d3ba25628fa69ed3653d" ]
[ "hotstepper/mixins/operations.py" ]
[ "import numpy as np\nfrom hotstepper.core.data_model import DataModel\nfrom hotstepper.utilities.helpers import get_epoch_start\n\n\ndef apply_math_function(caller,other,math_function, sample_points=None):\n \"\"\"\n Apply the supplied function to two objects evaluated at the union of all their unique step keys.\n\n For example, math_function = numpy.multiply will multiple the step values from each steps object together at each of the step keys that forms the union set of all step keys.\n Simply, a list of unique step keys will be generated based on those from each steps object and the provided function will be applied across all steps values at each of those keys to generate a new steps object.\n\n If the second argument is a scalar, this value will be broadcast to match the number of step keys in the longest step or steps object.\n\n Parameters\n ===========\n caller : Step, Steps\n The parent object with step values to perform the math operation on\n\n other : int, float, Step, Steps\n Steps or scalar value to be combined with the caller object values evaluated at the common union of step keys using the provided math_function.\n\n math_function : math_like function, e.g. numpy.add, operator.__mul__\n A binary math function that accepts two arguments and returns an array the same length as the longest input, e.g +,-,*,/, np.add, np.multiply etc.\n\n sample_points : array_like of int,float or datetime_like, Optional\n Specifiy the specific points the math_function function is to be evaluated across all provided step functions.\n\n Returns\n ==========\n Steps\n A new steps object containing the result of the function application across the provided objects.\n\n See Also\n ============\n filter_values\n apply_reduction_function\n\n Examples\n ==========\n\n \"\"\"\n\n return _apply_aggreduce_function(\n steps_to_combine=[caller,other],\n agg_reduce_function=math_function,\n sample_points=sample_points,\n is_agg_function=True\n )\n\n\ndef apply_reduction_function(steps_to_combine,reduction_function,sample_points=None):\n \"\"\"\n Apply the supplied function to all provided objects evaluated at the union of all their unique step keys.\n\n For example, reduction_function = numpy.mean will find the mean across all objects evaluated at each step key that is from the union set of all keys.\n Simply, a list of unique step keys will be generated based on those from each steps object and the provided function will be applied across all steps values at each of those keys to generate a new steps object.\n\n If the second argument is a scalar, this value will be broadcast to match the number of step keys in the longest step or steps object.\n\n Parameters\n ===========\n steps_to_combine : int, float, Step, Steps\n Objects and/or numbers to apply the reduction function at each of the unique keys.\n\n reduction_function : math_like function\n A reduction function that returns a scalar for each input array, e.g mean, variance, np.mean, np.std, np.median etc.\n\n sample_points: array_like of int,float or datetime_like, Optional\n Specifiy the specific points the reduction_function function is to be evaluated across all provided step functions.\n\n Returns\n ==========\n Steps\n A new steps object containing the result of the function application across the provided objects.\n\n See Also\n ============\n filter_values\n apply_math_function\n\n Examples\n ==========\n\n \"\"\"\n\n return _apply_aggreduce_function(\n steps_to_combine=steps_to_combine,\n agg_reduce_function=reduction_function,\n 
sample_points=sample_points,\n is_agg_function=False\n )\n\n\ndef _apply_aggreduce_function(steps_to_combine,agg_reduce_function,sample_points=None, is_agg_function=True):\n \"\"\"\n Apply the supplied function to all provided objects evaluated at the union of their unique step keys.\n\n For example, aggregation_function = numpy.mean will find the mean across all objects evaluated at each step key that is from the union set of all keys from each steps function.\n Simply, a list of unique step keys will be generated based on those from each steps object and the provided function will be applied across all steps values at each of those keys to generate a new steps object.\n\n If the second argument is a scalar, this value will be broadcast to match the number of step keys in the longest step or steps object.\n\n Parameters\n ===========\n steps_to_combine : int, float, Step, Steps\n Any value to compare each step component against.\n\n agg_reduce_function : math_like function, e.g. aggreation functions like numpy.add, operator.__mul__ or reduction functions like mean, std etc.\n A reduction function that returns a scalar for each input array, e.g mean, variance, np.mean, np.std, np.median etc.\n\n sample_points : array_like of int,float or datetime_like, Optional\n Specifiy the specific points the agg_reduce_function function is to be evaluated across all provided step functions.\n\n is_agg_function : bool, Optional\n Flag to indicate if the is_agg_function is either an aggregation type such as the mathematical operations +,-,/,* or a reduction type such as mean, max, median.\n\n Returns\n ==========\n Steps\n A new steps object containing the result of the function application across the provided objects.\n\n See Also\n ============\n filter_values\n apply_reduction_function\n apply_math_function\n\n Examples\n ==========\n\n \"\"\"\n \n #used to check if objects are implementing the AbstractSteps interface\n ty = type(steps_to_combine[0])\n base_parent = ty.__base__\n\n if sample_points is None:\n keys = np.sort(np.unique(np.concatenate([s.step_keys() for s in steps_to_combine if isinstance(s,base_parent)])))\n else:\n keys = sample_points\n\n #to handle int float as well as AbstractSteps in one go\n get_stack_value = lambda x: x.step(keys,False) if isinstance(x,base_parent) else np.full(len(keys),x)\n stack = np.array([get_stack_value(s) for s in steps_to_combine])\n\n if is_agg_function:\n result = np.diff(agg_reduce_function(*stack),prepend=0)\n else:\n result = np.diff(agg_reduce_function(stack,axis=0),prepend=0)\n \n step_data = np.empty((keys.shape[0],3))\n step_data[:,DataModel.START.value] = keys\n step_data[:,DataModel.DIRECTION.value] = 1\n step_data[:,DataModel.WEIGHT.value] = result\n\n #filter out values that create issues\n step_data = step_data[~np.isnan(step_data[:,DataModel.WEIGHT.value])]\n step_data = step_data[step_data[:,DataModel.WEIGHT.value]!=0]\n step_data = step_data[step_data[:,DataModel.WEIGHT.value]!=np.PINF]\n step_data = step_data[step_data[:,DataModel.WEIGHT.value]!=np.NINF]\n \n #promote the Steps key type if any of the steps to combine are using datetime\n any_using_datetime = (np.array([s.using_datetime() for s in steps_to_combine if isinstance(s,base_parent)])==True).any()\n\n ty = type(steps_to_combine[0])\n result_step = ty(use_datetime=any_using_datetime,basis=steps_to_combine[0].basis())\n\n if step_data.shape[0] > 0:\n return result_step.add_steps(step_data)\n else:\n return result_step\n\n\ndef filter_values(caller,other, operation_func, 
normalise_value = 0):\n \"\"\"\n This function is used to create a filtered version of the steps by removing steps not evaluating to true from applying the comparison function at all step change locations.\n\n Parameters\n ===========\n other : int, float\n Any value to compare each steps value against.\n\n operation_func : binary boolean function\n A binary comparison function that returns a bool, e.g >,<,==.\n\n normalise_value: int, float, Optional\n A value to assign at the step keys that are included in the return object. If a value of zero is used, the return object will have the value of the step function between the included step keys.\n\n Returns\n ==========\n Steps\n A new steps object containing the result of the function application across the provided objects.\n\n See Also\n ============\n apply_reduction_function\n apply_math_function\n\n \"\"\"\n\n if type(other) in [float,int]:\n\n caller_step_data = caller.steps()\n mask = np.where(operation_func(caller_step_data[:,DataModel.WEIGHT.value],other), True,False)\n\n if np.alltrue(mask):\n if normalise_value == 0:\n return caller\n else:\n ty = type(caller)\n return ty(use_datetime=caller.using_datetime(),\n basis=caller.basis(),\n start=caller.first(),\n end=caller.last(),\n weight=normalise_value\n )\n\n new_steps = _filter_by_mask(caller_step_data,mask,normalise_value)\n else:\n caller_step_data = caller.steps()\n other_step_values = other(caller.step_keys())\n\n mask = np.where(operation_func(caller_step_data[:,DataModel.WEIGHT.value],other_step_values), True,False)\n\n if np.alltrue(mask):\n if normalise_value == 0:\n return caller\n else:\n ty = type(caller)\n return ty(use_datetime=caller.using_datetime(),\n basis=caller.basis(),\n start=caller.first(),\n end=caller.last(),\n weight=normalise_value\n )\n \n new_steps = _filter_by_mask(caller_step_data,mask,normalise_value)\n\n #we have the data, now return an object matching the caller, something that implements the AbstractSteps interface\n ty = type(caller)\n result_step = ty(use_datetime=caller.using_datetime(),basis=caller.basis())\n\n if len(new_steps) > 0:\n return result_step.add_steps(np.array(new_steps))\n else:\n return result_step\n\n\ndef _filter_by_mask(step_data,mask,normalise_value = 0):\n\n if np.alltrue(mask):\n return step_data\n\n new_steps = []\n\n st = None\n adj = 0\n for i ,s in enumerate(step_data[:,DataModel.START.value]):\n if mask[i]:\n if st is None:\n st = i\n if normalise_value == 0:\n new_steps.append([s,1,step_data[i,DataModel.WEIGHT.value]])\n else:\n new_steps.append([s,1,normalise_value])\n elif st is not None and (i > st) and normalise_value == 0:\n new_steps.append([s,1,step_data[i,DataModel.DIRECTION.value]])\n adj += step_data[i,DataModel.DIRECTION.value]\n else:\n if st is not None and st != get_epoch_start(False):\n if normalise_value == 0:\n new_steps.append([s,1,-1*(step_data[st,DataModel.WEIGHT.value] + adj)])\n adj = 0\n else:\n new_steps.append([s,1,-1*normalise_value])\n st = None\n \n return new_steps\n" ]
[ [ "numpy.alltrue", "numpy.array", "numpy.empty", "numpy.isnan" ] ]
cyberflax2020/21-S1-2-C-Cinema-Code
[ "6c3358168996529cbb0745a7c3f5aa257d790360" ]
[ "Build_Body_Samples.py" ]
[ "import csv\nimport numpy as np\nimport mediapipe as mp\nimport cv2\n\nclass_name = \"Speaking\"\n\nmp_drawing = mp.solutions.drawing_utils # Drawing helpers\nmp_holistic = mp.solutions.holistic # Mediapipe Solutions\n\nstr_source = input(\"dir:\")\ncap = cv2.VideoCapture(str_source)\n# Initiate holistic model\nwith mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=0.5) as holistic:\n while cap.isOpened():\n ret, frame = cap.read()\n\n # Recolor Feed\n image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n image.flags.writeable = False\n\n # Make Detections\n results = holistic.process(image)\n\n # Recolor image back to BGR for rendering\n image.flags.writeable = True\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n\n # 1. Draw face landmarks\n mp_drawing.draw_landmarks(image, results.face_landmarks, mp_holistic.FACE_CONNECTIONS,\n mp_drawing.DrawingSpec(color=(80, 110, 10), thickness=1, circle_radius=1),\n mp_drawing.DrawingSpec(color=(80, 256, 121), thickness=1, circle_radius=1)\n )\n\n # 2. Right hand\n mp_drawing.draw_landmarks(image, results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS,\n mp_drawing.DrawingSpec(color=(80, 22, 10), thickness=2, circle_radius=4),\n mp_drawing.DrawingSpec(color=(80, 44, 121), thickness=2, circle_radius=2)\n )\n\n # 3. Left Hand\n mp_drawing.draw_landmarks(image, results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS,\n mp_drawing.DrawingSpec(color=(121, 22, 76), thickness=2, circle_radius=4),\n mp_drawing.DrawingSpec(color=(121, 44, 250), thickness=2, circle_radius=2)\n )\n\n # 4. Pose Detections\n mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS,\n mp_drawing.DrawingSpec(color=(245, 117, 66), thickness=2, circle_radius=4),\n mp_drawing.DrawingSpec(color=(245, 66, 230), thickness=2, circle_radius=2)\n )\n # Export coordinates\n try:\n # Extract Pose landmarks\n pose = results.pose_landmarks.landmark\n pose_row = list(\n np.array([[landmark.x, landmark.y, landmark.z, landmark.visibility] for landmark in pose]).flatten())\n\n row = pose_row\n\n # Append class name\n row.insert(0, class_name)\n\n # Export to CSV\n with open('body_coords.csv', mode='a', newline='') as f:\n csv_writer = csv.writer(f, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n csv_writer.writerow(row)\n\n except:\n pass\n\n cv2.imshow('Video', image)\n\n if cv2.waitKey(10) & 0xFF == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()\n" ]
[ [ "numpy.array" ] ]
wdxtub/deep-learning-note
[ "47b83a039b80d4757e0436d5cbd2fa3037de3904" ]
[ "mlds/1-numpy/4_numpy_100.py" ]
[ "import numpy as np\nimport time\n\nprint('1. 创建大小为 10 的空向量')\na = np.zeros(10)\nprint(a)\n\nprint('2. 查看矩阵占据的内存大小')\nprint('用元素个数乘以每个元素的大小')\nprint(f'占据 {a.size * a.itemsize} 字节')\n\nprint('3. 创建一个向量,值从 10 到 49')\na = np.arange(10, 50)\nprint(a)\n\nprint('4. 翻转一个向量')\na = a[::-1]\nprint(a)\n\nprint('5. 创建一个 3x3 的矩阵,值从 0 到 8')\na = np.arange(9).reshape(3,3)\nprint(a)\n\nprint('6. 从 [1, 2, 0, 0, 4, 0] 中寻找非零元素索引')\nnz = np.nonzero([1, 2, 0, 0, 4, 0])\nprint(nz)\n\nprint('7. 创建 3x3 单位矩阵(对角线元素为 1 的方阵)')\na = np.eye(3)\nprint(a)\n\nprint('8. 创建一个 3x3x3 的随机矩阵')\na = np.random.random((3, 3, 3))\nprint(a)\n\nprint('9. 创建一个 10x10 的矩阵并寻找最大最小值')\na = np.random.random((10, 10))\na_min, a_max = a.min(), a.max()\nprint('min', a_min, ', max', a_max)\n\nprint('10. 创建一个长度为 30 的向量,并求均值')\na = np.random.random(30)\nprint('mean', a.mean())\n\nprint('11. 创建一个边界为 1 其他为 0 的二维矩阵')\na = np.ones((10, 10))\na[1:-1,1:-1] = 0\nprint(a)\n\nprint('12. 为已经存在的矩阵填充 0 的边界')\na = np.ones((5, 5))\nprint(a)\na = np.pad(a, pad_width=1, mode='constant', constant_values=0)\nprint(a)\n\nprint('13. 给出下列计算的结果')\nprint('0 * np.nan =', 0 * np.nan)\nprint('np.nan == np.nan =', np.nan == np.nan)\nprint('np.inf > np.nan =', np.inf > np.nan)\nprint('np.nan - np.nan =', np.nan - np.nan)\nprint('np.nan in set([np.nan]) =', np.nan in set([np.nan]))\nprint('0.3 == 3 * 0.1 =', 0.3 == 3 * 0.1)\n\nprint('14. 创建一个 5x5 的矩阵,对角线下的数值为 1 2 3 4')\na = np.diag(1 + np.arange(4), k=-1)\nprint(a)\n\nprint('15. 创建一个 8x8 矩阵,其中 0 和 1 间隔分布')\na = np.zeros((8, 8), dtype=int)\na[1::2, ::2] = 1\na[::2, 1::2] = 1\nprint(a)\n\nprint('16. 使用 tile 函数创建一个 8x8 矩阵,其中 0 和 1 间隔分布')\na = np.tile(np.array([[0, 1], [1, 0]]), (4, 4))\nprint(a)\n\nprint('17. 假设有一个 (6, 7, 8) 大小的矩阵,那么第 100 个元素的索引是多少')\nprint(np.unravel_index(100, (6, 7, 8)))\n\nprint('18. 归一化一个随机 5x5 矩阵')\na = np.random.random((5, 5))\na = (a - np.mean(a)) / np.std(a)\nprint(a)\n\nprint('19. 点乘一个 5x3 和 3x2 的矩阵')\na = np.dot(np.ones((5, 3)), np.ones((3, 2)))\nprint(a)\n\nprint('20. 给定一个一维数组,不新增空间,把 3~8 之间的数字变成负数')\na = np.arange(10)\na[(3 < a) & (a <= 8)] *= -1\nprint(a)\n\nprint('21. 两个数组求交集')\na1 = np.random.randint(0, 10, 10)\na2 = np.random.randint(0, 10, 10)\nprint(np.intersect1d(a1, a2))\n\nprint('22. 获取 2020 年 6 月的所有日期')\na = np.arange('2020-06', '2020-07', dtype='datetime64[D]')\nprint(a)\n\nprint('23. 用 5 种方法去掉小数部分')\na = np.random.uniform(0, 10, 10)\nprint('a', a)\nprint('1:', a - a%1)\nprint('2:', np.floor(a))\nprint('3:', np.ceil(a) - 1)\nprint('4:', a.astype(int))\nprint('5:', np.trunc(a))\n\nprint('24. 创建一个 5x5 的矩阵,每一行都是从 0 到 4')\na = np.zeros((5, 5))\na += np.arange(5)\nprint(a)\n\nprint('25. 创建一个大小为 10,值从 0 到 1 的向量(不包括 0 和 1)')\na = np.linspace(0, 1, 11, endpoint=False)[1:]\nprint(a)\n\nprint('26. 创建一个大小为 10 的随机向量并排序')\na = np.random.random(10)\na.sort()\nprint(a)\n\nprint('27. 如何用比 np.sum 更快的方法对一个小数组求和')\na = np.arange(10)\nprint('a', a)\nstart = time.time()\nprint('add.reduct', np.add.reduce(a))\nend = time.time()\nprint('add.reduce time:', end-start)\nstart = time.time()\nprint('np.sum', np.sum(a))\nend = time.time()\nprint('np.sum time:', end - start)\n\nprint('28. 比较两个数组是否相等')\na = np.random.randint(0, 10, 10)\nb = np.random.randint(0, 10, 10)\nprint(np.allclose(a, b))\nprint(np.array_equal(a, b))\n\nprint('29. 将一个 10x2 的笛卡尔坐标系的点转成极坐标')\na = np.random.random((10, 2))\nx, y = a[:, 0], a[:, 1]\nr = np.sqrt(x**2 + y**2)\nt = np.arctan2(y, x)\nprint(r)\nprint(t)\n\nprint('30. 
创建一个大小为 10 的随机向量,并将最大的替换成 0')\na = np.random.random(10)\nprint('before', a)\na[a.argmax()] = 0\nprint('after', a)\n\nprint('31. 不用额外空间将 float 矩阵变成 int 矩阵')\na = np.arange(10, dtype=np.float32)\na = a.astype(np.int32, copy=False)\nprint(a)\n\nprint('32. 在一个 2 维矩阵中随机放 p 个元素')\nn, p = 10, 3\na = np.zeros((n, n))\nnp.put(a, np.random.choice(range(n*n), p, replace=False), 1)\nprint(a)\n\nprint('33. 矩阵的每行减去每行的均值')\na = np.random.randint(0, 10, (5, 10))\nprint('before', a)\nb = a - a.mean(axis=1, keepdims=True)\nprint('after', b)\n\nprint('34. 根据第 i 列给矩阵排序')\na = np.random.randint(0, 10, (3, 3))\nprint('before', a)\nprint('after', a[a[:, 1].argsort()])\n\nprint('35. 交换矩阵的两行')\na = np.arange(25).reshape(5, 5)\na[[0,1]] = a[[1, 0]]\nprint(a)\n\nprint('36. 如何计算一个数组的滑动窗口')\ndef moving_averate(a, n=3):\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n-1:] / n\na = np.arange(20)\nprint(moving_averate(a, n=4))\n\nprint('37. 如何找到数组中出现次数最多的元素')\na = np.random.randint(0, 10, 50)\nprint(np.bincount(a).argmax())\n\nprint('38. 如何获取数组中最大的 n 个数')\na = np.arange(1000)\nnp.random.shuffle(a)\nn = 5\nstart = time.time()\nprint('slow', a[np.argsort(a)[-n:]])\nend = time.time()\nprint('slow time', end - start)\nstart = time.time()\nprint('fast', a[np.argpartition(-a, n)[:n]])\nend = time.time()\nprint('fast time', end - start)" ]
[ [ "numpy.ones", "numpy.sum", "numpy.intersect1d", "numpy.argsort", "numpy.trunc", "numpy.unravel_index", "numpy.allclose", "numpy.add.reduce", "numpy.argpartition", "numpy.linspace", "numpy.nonzero", "numpy.mean", "numpy.random.uniform", "numpy.eye", "numpy.sqrt", "numpy.bincount", "numpy.ceil", "numpy.zeros", "numpy.arange", "numpy.std", "numpy.pad", "numpy.arctan2", "numpy.random.shuffle", "numpy.cumsum", "numpy.floor", "numpy.random.random", "numpy.array_equal", "numpy.array", "numpy.random.randint" ] ]
BitGo/statsmodels
[ "31a73250495d63dfc853625ce1d2b3566d3ac95a" ]
[ "statsmodels/tsa/vector_ar/tests/test_var.py" ]
[ "\"\"\"\nTest VAR Model\n\"\"\"\nfrom __future__ import print_function\n# pylint: disable=W0612,W0231\nfrom statsmodels.compat.python import (iteritems, StringIO, lrange, BytesIO,\n range)\nfrom nose.tools import assert_raises\nimport nose\nimport os\nimport sys\n\nimport numpy as np\n\nimport statsmodels.api as sm\nimport statsmodels.tsa.vector_ar.util as util\nimport statsmodels.tools.data as data_util\nfrom statsmodels.tsa.vector_ar.var_model import VAR\n\n\nfrom numpy.testing import (assert_almost_equal, assert_equal, assert_,\n assert_allclose)\n\nDECIMAL_12 = 12\nDECIMAL_6 = 6\nDECIMAL_5 = 5\nDECIMAL_4 = 4\nDECIMAL_3 = 3\nDECIMAL_2 = 2\n\nclass CheckVAR(object):\n # just so pylint won't complain\n res1 = None\n res2 = None\n\n def test_params(self):\n assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_3)\n\n def test_neqs(self):\n assert_equal(self.res1.neqs, self.res2.neqs)\n\n def test_nobs(self):\n assert_equal(self.res1.avobs, self.res2.nobs)\n\n def test_df_eq(self):\n assert_equal(self.res1.df_eq, self.res2.df_eq)\n\n def test_rmse(self):\n results = self.res1.results\n for i in range(len(results)):\n assert_almost_equal(results[i].mse_resid**.5,\n eval('self.res2.rmse_'+str(i+1)), DECIMAL_6)\n\n def test_rsquared(self):\n results = self.res1.results\n for i in range(len(results)):\n assert_almost_equal(results[i].rsquared,\n eval('self.res2.rsquared_'+str(i+1)), DECIMAL_3)\n\n def test_llf(self):\n results = self.res1.results\n assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_2)\n for i in range(len(results)):\n assert_almost_equal(results[i].llf,\n eval('self.res2.llf_'+str(i+1)), DECIMAL_2)\n\n def test_aic(self):\n assert_almost_equal(self.res1.aic, self.res2.aic)\n\n def test_bic(self):\n assert_almost_equal(self.res1.bic, self.res2.bic)\n\n def test_hqic(self):\n assert_almost_equal(self.res1.hqic, self.res2.hqic)\n\n def test_fpe(self):\n assert_almost_equal(self.res1.fpe, self.res2.fpe)\n\n def test_detsig(self):\n assert_almost_equal(self.res1.detomega, self.res2.detsig)\n\n def test_bse(self):\n assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)\n\ndef get_macrodata():\n data = sm.datasets.macrodata.load().data[['realgdp','realcons','realinv']]\n names = data.dtype.names\n nd = data.view((float,3), type=np.ndarray)\n nd = np.diff(np.log(nd), axis=0)\n return nd.ravel().view(data.dtype, type=np.ndarray)\n\ndef generate_var():\n from rpy2.robjects import r\n import pandas.rpy.common as prp\n r.source('tests/var.R')\n return prp.convert_robj(r['result'], use_pandas=False)\n\ndef write_generate_var():\n result = generate_var()\n np.savez('tests/results/vars_results.npz', **result)\n\nclass RResults(object):\n \"\"\"\n Simple interface with results generated by \"vars\" package in R.\n \"\"\"\n\n def __init__(self):\n #data = np.load(resultspath + 'vars_results.npz')\n from .results.results_var_data import var_results\n data = var_results.__dict__\n\n self.names = data['coefs'].dtype.names\n self.params = data['coefs'].view((float, len(self.names)), type=np.ndarray)\n self.stderr = data['stderr'].view((float, len(self.names)), type=np.ndarray)\n\n self.irf = data['irf'].item()\n self.orth_irf = data['orthirf'].item()\n\n self.nirfs = int(data['nirfs'][0])\n self.nobs = int(data['obs'][0])\n self.totobs = int(data['totobs'][0])\n\n crit = data['crit'].item()\n self.aic = crit['aic'][0]\n self.sic = self.bic = crit['sic'][0]\n self.hqic = crit['hqic'][0]\n self.fpe = crit['fpe'][0]\n\n self.detomega = data['detomega'][0]\n self.loglike = 
data['loglike'][0]\n\n self.nahead = int(data['nahead'][0])\n self.ma_rep = data['phis']\n\n self.causality = data['causality']\n\ndef close_plots():\n try:\n import matplotlib.pyplot as plt\n plt.close('all')\n except ImportError:\n pass\n\n_orig_stdout = None\n\ndef setup_module():\n global _orig_stdout\n _orig_stdout = sys.stdout\n sys.stdout = StringIO()\n\ndef teardown_module():\n sys.stdout = _orig_stdout\n close_plots()\n\ndef have_matplotlib():\n try:\n import matplotlib\n return True\n except ImportError:\n return False\n\nclass CheckIRF(object):\n\n ref = None; res = None; irf = None\n k = None\n\n #---------------------------------------------------------------------------\n # IRF tests\n\n def test_irf_coefs(self):\n self._check_irfs(self.irf.irfs, self.ref.irf)\n self._check_irfs(self.irf.orth_irfs, self.ref.orth_irf)\n\n\n def _check_irfs(self, py_irfs, r_irfs):\n for i, name in enumerate(self.res.names):\n ref_irfs = r_irfs[name].view((float, self.k), type=np.ndarray)\n res_irfs = py_irfs[:, :, i]\n assert_almost_equal(ref_irfs, res_irfs)\n\n\n def test_plot_irf(self):\n if not have_matplotlib():\n raise nose.SkipTest\n\n import matplotlib.pyplot as plt\n self.irf.plot()\n plt.close('all')\n self.irf.plot(plot_stderr=False)\n plt.close('all')\n\n self.irf.plot(impulse=0, response=1)\n plt.close('all')\n self.irf.plot(impulse=0)\n plt.close('all')\n self.irf.plot(response=0)\n plt.close('all')\n\n self.irf.plot(orth=True)\n plt.close('all')\n self.irf.plot(impulse=0, response=1, orth=True)\n close_plots()\n\n\n def test_plot_cum_effects(self):\n if not have_matplotlib():\n raise nose.SkipTest\n # I need close after every plot to avoid segfault, see #3158\n import matplotlib.pyplot as plt\n plt.close('all')\n self.irf.plot_cum_effects()\n plt.close('all')\n self.irf.plot_cum_effects(plot_stderr=False)\n plt.close('all')\n self.irf.plot_cum_effects(impulse=0, response=1)\n plt.close('all')\n\n self.irf.plot_cum_effects(orth=True)\n plt.close('all')\n self.irf.plot_cum_effects(impulse=0, response=1, orth=True)\n close_plots()\n\n\nclass CheckFEVD(object):\n\n fevd = None\n\n #---------------------------------------------------------------------------\n # FEVD tests\n\n def test_fevd_plot(self):\n if not have_matplotlib():\n raise nose.SkipTest\n\n self.fevd.plot()\n close_plots()\n\n def test_fevd_repr(self):\n self.fevd\n\n def test_fevd_summary(self):\n self.fevd.summary()\n\n def test_fevd_cov(self):\n # test does not crash\n # not implemented\n # covs = self.fevd.cov()\n\n pass\n\nclass TestVARResults(CheckIRF, CheckFEVD):\n\n @classmethod\n def setupClass(cls):\n cls.p = 2\n\n cls.data = get_macrodata()\n cls.model = VAR(cls.data)\n cls.names = cls.model.endog_names\n\n cls.ref = RResults()\n cls.k = len(cls.ref.names)\n cls.res = cls.model.fit(maxlags=cls.p)\n\n cls.irf = cls.res.irf(cls.ref.nirfs)\n cls.nahead = cls.ref.nahead\n\n cls.fevd = cls.res.fevd()\n\n def test_constructor(self):\n # make sure this works with no names\n ndarr = self.data.view((float, 3), type=np.ndarray)\n model = VAR(ndarr)\n res = model.fit(self.p)\n\n def test_names(self):\n assert_equal(self.model.endog_names, self.ref.names)\n\n model2 = VAR(self.data)\n assert_equal(model2.endog_names, self.ref.names)\n\n def test_get_eq_index(self):\n assert(type(self.res.names) is list)\n\n for i, name in enumerate(self.names):\n idx = self.res.get_eq_index(i)\n idx2 = self.res.get_eq_index(name)\n\n assert_equal(idx, i)\n assert_equal(idx, idx2)\n\n assert_raises(Exception, self.res.get_eq_index, 
'foo')\n\n def test_repr(self):\n # just want this to work\n foo = str(self.res)\n bar = repr(self.res)\n\n def test_params(self):\n assert_almost_equal(self.res.params, self.ref.params, DECIMAL_3)\n\n def test_cov_params(self):\n # do nothing for now\n self.res.cov_params\n\n def test_cov_ybar(self):\n self.res.cov_ybar()\n\n def test_tstat(self):\n self.res.tvalues\n\n def test_pvalues(self):\n self.res.pvalues\n\n def test_summary(self):\n summ = self.res.summary()\n\n\n def test_detsig(self):\n assert_almost_equal(self.res.detomega, self.ref.detomega)\n\n def test_aic(self):\n assert_almost_equal(self.res.aic, self.ref.aic)\n\n def test_bic(self):\n assert_almost_equal(self.res.bic, self.ref.bic)\n\n def test_hqic(self):\n assert_almost_equal(self.res.hqic, self.ref.hqic)\n\n def test_fpe(self):\n assert_almost_equal(self.res.fpe, self.ref.fpe)\n\n def test_lagorder_select(self):\n ics = ['aic', 'fpe', 'hqic', 'bic']\n\n for ic in ics:\n res = self.model.fit(maxlags=10, ic=ic, verbose=True)\n\n assert_raises(Exception, self.model.fit, ic='foo')\n\n def test_nobs(self):\n assert_equal(self.res.nobs, self.ref.nobs)\n\n def test_stderr(self):\n assert_almost_equal(self.res.stderr, self.ref.stderr, DECIMAL_4)\n\n def test_loglike(self):\n assert_almost_equal(self.res.llf, self.ref.loglike)\n\n def test_ma_rep(self):\n ma_rep = self.res.ma_rep(self.nahead)\n assert_almost_equal(ma_rep, self.ref.ma_rep)\n\n #--------------------------------------------------\n # Lots of tests to make sure stuff works...need to check correctness\n\n def test_causality(self):\n causedby = self.ref.causality['causedby']\n\n for i, name in enumerate(self.names):\n variables = self.names[:i] + self.names[i + 1:]\n result = self.res.test_causality(name, variables, kind='f')\n assert_almost_equal(result['pvalue'], causedby[i], DECIMAL_4)\n\n rng = lrange(self.k)\n rng.remove(i)\n result2 = self.res.test_causality(i, rng, kind='f')\n assert_almost_equal(result['pvalue'], result2['pvalue'], DECIMAL_12)\n\n # make sure works\n result = self.res.test_causality(name, variables, kind='wald')\n\n # corner cases\n _ = self.res.test_causality(self.names[0], self.names[1])\n _ = self.res.test_causality(0, 1)\n\n assert_raises(Exception,self.res.test_causality, 0, 1, kind='foo')\n\n def test_select_order(self):\n result = self.model.fit(10, ic='aic', verbose=True)\n result = self.model.fit(10, ic='fpe', verbose=True)\n\n # bug\n model = VAR(self.model.endog)\n model.select_order()\n\n def test_is_stable(self):\n # may not necessarily be true for other datasets\n assert(self.res.is_stable(verbose=True))\n\n def test_acf(self):\n # test that it works...for now\n acfs = self.res.acf(10)\n\n # defaults to nlags=lag_order\n acfs = self.res.acf()\n assert(len(acfs) == self.p + 1)\n\n def test_acorr(self):\n acorrs = self.res.acorr(10)\n\n def test_forecast(self):\n point = self.res.forecast(self.res.y[-5:], 5)\n\n def test_forecast_interval(self):\n y = self.res.y[:-self.p:]\n point, lower, upper = self.res.forecast_interval(y, 5)\n\n def test_plot_sim(self):\n if not have_matplotlib():\n raise nose.SkipTest\n\n self.res.plotsim(steps=100)\n close_plots()\n\n def test_plot(self):\n if not have_matplotlib():\n raise nose.SkipTest\n\n self.res.plot()\n close_plots()\n\n def test_plot_acorr(self):\n if not have_matplotlib():\n raise nose.SkipTest\n\n self.res.plot_acorr()\n close_plots()\n\n def test_plot_forecast(self):\n if not have_matplotlib():\n raise nose.SkipTest\n\n self.res.plot_forecast(5)\n close_plots()\n\n def 
test_reorder(self):\n #manually reorder\n data = self.data.view((float,3), type=np.ndarray)\n names = self.names\n data2 = np.append(np.append(data[:,2,None], data[:,0,None], axis=1), data[:,1,None], axis=1)\n names2 = []\n names2.append(names[2])\n names2.append(names[0])\n names2.append(names[1])\n res2 = VAR(data2).fit(maxlags=self.p)\n\n #use reorder function\n res3 = self.res.reorder(['realinv','realgdp', 'realcons'])\n\n #check if the main results match\n assert_almost_equal(res2.params, res3.params)\n assert_almost_equal(res2.sigma_u, res3.sigma_u)\n assert_almost_equal(res2.bic, res3.bic)\n assert_almost_equal(res2.stderr, res3.stderr)\n\n def test_pickle(self):\n fh = BytesIO()\n #test wrapped results load save pickle\n self.res.save(fh)\n fh.seek(0,0)\n res_unpickled = self.res.__class__.load(fh)\n assert_(type(res_unpickled) is type(self.res))\n\n\nclass E1_Results(object):\n \"\"\"\n Results from Lutkepohl (2005) using E2 dataset\n \"\"\"\n\n def __init__(self):\n # Lutkepohl p. 120 results\n\n # I asked the author about these results and there is probably rounding\n # error in the book, so I adjusted these test results to match what is\n # coming out of the Python (double-checked) calculations\n self.irf_stderr = np.array([[[.125, 0.546, 0.664 ],\n [0.032, 0.139, 0.169],\n [0.026, 0.112, 0.136]],\n\n [[0.129, 0.547, 0.663],\n [0.032, 0.134, 0.163],\n [0.026, 0.108, 0.131]],\n\n [[0.084, .385, .479],\n [.016, .079, .095],\n [.016, .078, .103]]])\n\n self.cum_irf_stderr = np.array([[[.125, 0.546, 0.664 ],\n [0.032, 0.139, 0.169],\n [0.026, 0.112, 0.136]],\n\n [[0.149, 0.631, 0.764],\n [0.044, 0.185, 0.224],\n [0.033, 0.140, 0.169]],\n\n [[0.099, .468, .555],\n [.038, .170, .205],\n [.033, .150, .185]]])\n\n self.lr_stderr = np.array([[.134, .645, .808],\n [.048, .230, .288],\n [.043, .208, .260]])\n\nbasepath = os.path.split(sm.__file__)[0]\nresultspath = basepath + '/tsa/vector_ar/tests/results/'\n\ndef get_lutkepohl_data(name='e2'):\n lut_data = basepath + '/tsa/vector_ar/data/'\n path = lut_data + '%s.dat' % name\n\n return util.parse_lutkepohl_data(path)\n\ndef test_lutkepohl_parse():\n files = ['e%d' % i for i in range(1, 7)]\n\n for f in files:\n get_lutkepohl_data(f)\n\nclass TestVARResultsLutkepohl(object):\n \"\"\"\n Verify calculations using results from Lutkepohl's book\n \"\"\"\n\n def __init__(self):\n self.p = 2\n sdata, dates = get_lutkepohl_data('e1')\n\n data = data_util.struct_to_ndarray(sdata)\n adj_data = np.diff(np.log(data), axis=0)\n # est = VAR(adj_data, p=2, dates=dates[1:], names=names)\n\n self.model = VAR(adj_data[:-16], dates=dates[1:-16], freq='BQ-MAR')\n self.res = self.model.fit(maxlags=self.p)\n self.irf = self.res.irf(10)\n self.lut = E1_Results()\n\n def test_approx_mse(self):\n # 3.5.18, p. 
99\n mse2 = np.array([[25.12, .580, 1.300],\n [.580, 1.581, .586],\n [1.300, .586, 1.009]]) * 1e-4\n\n assert_almost_equal(mse2, self.res.forecast_cov(3)[1],\n DECIMAL_3)\n\n def test_irf_stderr(self):\n irf_stderr = self.irf.stderr(orth=False)\n for i in range(1, 1 + len(self.lut.irf_stderr)):\n assert_almost_equal(np.round(irf_stderr[i], 3),\n self.lut.irf_stderr[i-1])\n\n def test_cum_irf_stderr(self):\n stderr = self.irf.cum_effect_stderr(orth=False)\n for i in range(1, 1 + len(self.lut.cum_irf_stderr)):\n assert_almost_equal(np.round(stderr[i], 3),\n self.lut.cum_irf_stderr[i-1])\n\n def test_lr_effect_stderr(self):\n stderr = self.irf.lr_effect_stderr(orth=False)\n orth_stderr = self.irf.lr_effect_stderr(orth=True)\n assert_almost_equal(np.round(stderr, 3), self.lut.lr_stderr)\n\ndef test_get_trendorder():\n results = {\n 'c' : 1,\n 'nc' : 0,\n 'ct' : 2,\n 'ctt' : 3\n }\n\n for t, trendorder in iteritems(results):\n assert(util.get_trendorder(t) == trendorder)\n\n\ndef test_var_constant():\n # see 2043\n import datetime\n from pandas import DataFrame, DatetimeIndex\n\n series = np.array([[2., 2.], [1, 2.], [1, 2.], [1, 2.], [1., 2.]])\n data = DataFrame(series)\n\n d = datetime.datetime.now()\n delta = datetime.timedelta(days=1)\n index = []\n for i in range(data.shape[0]):\n index.append(d)\n d += delta\n\n data.index = DatetimeIndex(index)\n\n model = VAR(data)\n assert_raises(ValueError, model.fit, 1)\n\ndef test_var_trend():\n # see 2271\n data = get_macrodata().view((float,3), type=np.ndarray)\n\n model = sm.tsa.VAR(data)\n results = model.fit(4) #, trend = 'c')\n irf = results.irf(10)\n\n\n data_nc = data - data.mean(0)\n model_nc = sm.tsa.VAR(data_nc)\n results_nc = model_nc.fit(4, trend = 'nc')\n assert_raises(ValueError, model.fit, 4, trend='t')\n\n\ndef test_irf_trend():\n # test for irf with different trend see #1636\n # this is a rough comparison by adding trend or subtracting mean to data\n # to get similar AR coefficients and IRF\n data = get_macrodata().view((float,3), type=np.ndarray)\n\n model = sm.tsa.VAR(data)\n results = model.fit(4) #, trend = 'c')\n irf = results.irf(10)\n\n\n data_nc = data - data.mean(0)\n model_nc = sm.tsa.VAR(data_nc)\n results_nc = model_nc.fit(4, trend = 'nc')\n irf_nc = results_nc.irf(10)\n\n assert_allclose(irf_nc.stderr()[1:4], irf.stderr()[1:4], rtol=0.01)\n\n trend = 1e-3 * np.arange(len(data)) / (len(data) - 1)\n # for pandas version, currently not used, if data is a pd.DataFrame\n #data_t = pd.DataFrame(data.values + trend[:,None], index=data.index, columns=data.columns)\n data_t = data + trend[:,None]\n\n model_t = sm.tsa.VAR(data_t)\n results_t = model_t.fit(4, trend = 'ct')\n irf_t = results_t.irf(10)\n\n assert_allclose(irf_t.stderr()[1:4], irf.stderr()[1:4], rtol=0.03)\n\n\nif __name__ == '__main__':\n import nose\n nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],\n exit=False)\n" ]
[ [ "numpy.testing.assert_almost_equal", "pandas.DatetimeIndex", "numpy.append", "numpy.testing.assert_equal", "numpy.savez", "pandas.rpy.common.convert_robj", "pandas.DataFrame", "numpy.log", "matplotlib.pyplot.close", "numpy.array", "numpy.round" ] ]
hnwarid/DQLabAcademy
[ "e03d82f97536ae103b6abc65db0ae16520fb68c7" ]
[ "1_PythonDataProcessing/3_14_index_method.py" ]
[ "import pandas as pd\n# Baca file sample_tsv.tsv untuk 10 baris pertama saja\ndf = pd.read_csv(\"https://storage.googleapis.com/dqlab-dataset/sample_tsv.tsv\", sep=\"\\t\", nrows=10)\n# Cetak data frame awal\nprint(\"Dataframe awal:\\n\", df)\n# Set index baru\ndf.index = [\"Pesanan ke-\" + str(i) for i in range(1, 11)]\n# Cetak data frame dengan index baru\nprint(\"Dataframe dengan index baru:\\n\", df)" ]
[ [ "pandas.read_csv" ] ]
vibhatha/cylon
[ "3f2c5b08935a4332b820818ca113cb44f7ac5da3" ]
[ "python/examples/op_benchmark/null_handling_benchmark.py" ]
[ "##\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##\n\nimport pyarrow as pa\nimport numpy as np\nimport pandas as pd\nimport pycylon as cn\nfrom pycylon import CylonContext\nfrom pycylon import Table\nfrom bench_util import get_dataframe\nimport time\nimport argparse\n\n\"\"\"\nRun benchmark:\n\n>>> python python/examples/op_benchmark/null_handling_benchmark.py --start_size 1_000_000 \\\n --step_size 1_000_000 \\\n --end_size 10_000_000 \\\n --num_cols 2 \\\n --stats_file /tmp/dropna_bench.csv \\\n --repetitions 1 \\\n --duplication_factor 0.9\n\"\"\"\n\n\ndef dropna_op(num_rows: int, num_cols: int, duplication_factor: float):\n ctx: CylonContext = CylonContext(config=None, distributed=False)\n\n df = get_dataframe(num_rows=num_rows, num_cols=num_cols, duplication_factor=duplication_factor, with_null=True)\n\n ct = Table.from_pandas(ctx, df)\n\n pandas_time = time.time()\n df.dropna(axis=1)\n pandas_time = time.time() - pandas_time\n\n cylon_time = time.time()\n ct.dropna(axis=0)\n cylon_time = time.time() - cylon_time\n\n pandas_eval_time = time.time()\n pd.eval('df.dropna(axis=1)')\n pandas_eval_time = time.time() - pandas_eval_time\n\n return pandas_time, cylon_time, pandas_eval_time\n\n\ndef bench_dropna(start: int, end: int, step: int, num_cols: int, repetitions: int, stats_file: str,\n duplication_factor: float):\n all_data = []\n schema = [\"num_records\", \"num_cols\", \"pandas\", \"cylon\", \"pandas[eval]\", \"speed up\", \"speed up [eval]\"]\n assert repetitions >= 1\n assert start > 0\n assert step > 0\n assert num_cols > 0\n\n for records in range(start, end + step, step):\n print(f\"DropNa Op : Records={records}, Columns={num_cols}\")\n times = []\n for idx in range(repetitions):\n pandas_time, cylon_time, pandas_eval_time = dropna_op(num_rows=records, num_cols=num_cols,\n duplication_factor=duplication_factor)\n times.append([pandas_time, cylon_time, pandas_eval_time])\n times = np.array(times).sum(axis=0) / repetitions\n print(f\"DropNa Op : Records={records}, Columns={num_cols}, \"\n f\"Pandas Time : {times[0]}, Cylon Time : {times[1]}, Pandas Eval Time : {times[2]}\")\n all_data.append([records, num_cols, times[0], times[1], times[2], times[0] / times[1], times[2]/ times[1]])\n pdf = pd.DataFrame(all_data, columns=schema)\n print(pdf)\n pdf.to_csv(stats_file)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--start_size\",\n help=\"initial data size\",\n type=int)\n parser.add_argument(\"-e\", \"--end_size\",\n help=\"end data size\",\n type=int)\n parser.add_argument(\"-d\", \"--duplication_factor\",\n help=\"random data duplication factor\",\n type=float)\n parser.add_argument(\"-s\", \"--step_size\",\n help=\"Step size\",\n type=int)\n parser.add_argument(\"-c\", \"--num_cols\",\n help=\"number of columns\",\n type=int)\n parser.add_argument(\"-r\", \"--repetitions\",\n help=\"number of experiments to be repeated\",\n type=int)\n parser.add_argument(\"-f\", \"--stats_file\",\n help=\"stats file to be saved\",\n 
type=str)\n\n args = parser.parse_args()\n print(f\"Start Data Size : {args.start_size}\")\n print(f\"End Data Size : {args.end_size}\")\n print(f\"Step Data Size : {args.step_size}\")\n print(f\"Data Duplication Factor : {args.duplication_factor}\")\n print(f\"Number of Columns : {args.num_cols}\")\n print(f\"Number of Repetitions : {args.repetitions}\")\n print(f\"Stats File : {args.stats_file}\")\n bench_dropna(start=args.start_size,\n end=args.end_size,\n step=args.step_size,\n num_cols=args.num_cols,\n repetitions=args.repetitions,\n stats_file=args.stats_file,\n duplication_factor=args.duplication_factor)\n" ]
[ [ "numpy.array", "pandas.eval", "pandas.DataFrame" ] ]
mieldehabanero/stable-baselines3
[ "b37052cbf059b6f81314f5b98205e4a3403e4112" ]
[ "tests/test_dict_env.py" ]
[ "import gym\nimport numpy as np\nimport pytest\nfrom gym import spaces\n\nfrom stable_baselines3 import A2C, DDPG, DQN, PPO, SAC, TD3\nfrom stable_baselines3.common.env_util import make_vec_env\nfrom stable_baselines3.common.envs import BitFlippingEnv, SimpleMultiObsEnv\nfrom stable_baselines3.common.evaluation import evaluate_policy\nfrom stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv, VecFrameStack, VecNormalize\n\n\nclass DummyDictEnv(gym.Env):\n \"\"\"Custom Environment for testing purposes only\"\"\"\n\n metadata = {\"render.modes\": [\"human\"]}\n\n def __init__(\n self,\n use_discrete_actions=False,\n channel_last=False,\n nested_dict_obs=False,\n vec_only=False,\n ):\n super().__init__()\n if use_discrete_actions:\n self.action_space = spaces.Discrete(3)\n else:\n self.action_space = spaces.Box(low=-1, high=1, shape=(2,), dtype=np.float32)\n N_CHANNELS = 1\n HEIGHT = 64\n WIDTH = 64\n\n if channel_last:\n obs_shape = (HEIGHT, WIDTH, N_CHANNELS)\n else:\n obs_shape = (N_CHANNELS, HEIGHT, WIDTH)\n\n self.observation_space = spaces.Dict(\n {\n # Image obs\n \"img\": spaces.Box(low=0, high=255, shape=obs_shape, dtype=np.uint8),\n # Vector obs\n \"vec\": spaces.Box(low=-1, high=1, shape=(2,), dtype=np.float32),\n # Discrete obs\n \"discrete\": spaces.Discrete(4),\n }\n )\n\n # For checking consistency with normal MlpPolicy\n if vec_only:\n self.observation_space = spaces.Dict(\n {\n # Vector obs\n \"vec\": spaces.Box(low=-1, high=1, shape=(2,), dtype=np.float32),\n }\n )\n\n if nested_dict_obs:\n # Add dictionary observation inside observation space\n self.observation_space.spaces[\"nested-dict\"] = spaces.Dict({\"nested-dict-discrete\": spaces.Discrete(4)})\n\n def seed(self, seed=None):\n if seed is not None:\n self.observation_space.seed(seed)\n\n def step(self, action):\n reward = 0.0\n done = False\n return self.observation_space.sample(), reward, done, {}\n\n def compute_reward(self, achieved_goal, desired_goal, info):\n return np.zeros((len(achieved_goal),))\n\n def reset(self):\n return self.observation_space.sample()\n\n def render(self, mode=\"human\"):\n pass\n\n\[email protected](\"policy\", [\"MlpPolicy\", \"CnnPolicy\"])\ndef test_policy_hint(policy):\n # Common mistake: using the wrong policy\n with pytest.raises(ValueError):\n PPO(policy, BitFlippingEnv(n_bits=4))\n\n\[email protected](\"model_class\", [PPO, A2C])\ndef test_goal_env(model_class):\n env = BitFlippingEnv(n_bits=4)\n # check that goal env works for PPO/A2C that cannot use HER replay buffer\n model = model_class(\"MultiInputPolicy\", env, n_steps=64).learn(250)\n evaluate_policy(model, model.get_env())\n\n\[email protected](\"model_class\", [PPO, A2C, DQN, DDPG, SAC, TD3])\ndef test_consistency(model_class):\n \"\"\"\n Make sure that dict obs with vector only vs using flatten obs is equivalent.\n This ensures notable that the network architectures are the same.\n \"\"\"\n use_discrete_actions = model_class == DQN\n dict_env = DummyDictEnv(use_discrete_actions=use_discrete_actions, vec_only=True)\n dict_env = gym.wrappers.TimeLimit(dict_env, 100)\n env = gym.wrappers.FlattenObservation(dict_env)\n dict_env.seed(10)\n obs = dict_env.reset()\n\n kwargs = {}\n n_steps = 256\n\n if model_class in {A2C, PPO}:\n kwargs = dict(\n n_steps=128,\n )\n else:\n # Avoid memory error when using replay buffer\n # Reduce the size of the features and make learning faster\n kwargs = dict(\n buffer_size=250,\n train_freq=8,\n gradient_steps=1,\n )\n if model_class == DQN:\n kwargs[\"learning_starts\"] = 
0\n\n dict_model = model_class(\"MultiInputPolicy\", dict_env, gamma=0.5, seed=1, **kwargs)\n action_before_learning_1, _ = dict_model.predict(obs, deterministic=True)\n dict_model.learn(total_timesteps=n_steps)\n\n normal_model = model_class(\"MlpPolicy\", env, gamma=0.5, seed=1, **kwargs)\n action_before_learning_2, _ = normal_model.predict(obs[\"vec\"], deterministic=True)\n normal_model.learn(total_timesteps=n_steps)\n\n action_1, _ = dict_model.predict(obs, deterministic=True)\n action_2, _ = normal_model.predict(obs[\"vec\"], deterministic=True)\n\n assert np.allclose(action_before_learning_1, action_before_learning_2)\n assert np.allclose(action_1, action_2)\n\n\[email protected](\"model_class\", [PPO, A2C, DQN, DDPG, SAC, TD3])\[email protected](\"channel_last\", [False, True])\ndef test_dict_spaces(model_class, channel_last):\n \"\"\"\n Additional tests for PPO/A2C/SAC/DDPG/TD3/DQN to check observation space support\n with mixed observation.\n \"\"\"\n use_discrete_actions = model_class not in [SAC, TD3, DDPG]\n env = DummyDictEnv(use_discrete_actions=use_discrete_actions, channel_last=channel_last)\n env = gym.wrappers.TimeLimit(env, 100)\n\n kwargs = {}\n n_steps = 256\n\n if model_class in {A2C, PPO}:\n kwargs = dict(\n n_steps=128,\n policy_kwargs=dict(\n net_arch=[32],\n features_extractor_kwargs=dict(cnn_output_dim=32),\n ),\n )\n else:\n # Avoid memory error when using replay buffer\n # Reduce the size of the features and make learning faster\n kwargs = dict(\n buffer_size=250,\n policy_kwargs=dict(\n net_arch=[32],\n features_extractor_kwargs=dict(cnn_output_dim=32),\n ),\n train_freq=8,\n gradient_steps=1,\n )\n if model_class == DQN:\n kwargs[\"learning_starts\"] = 0\n\n model = model_class(\"MultiInputPolicy\", env, gamma=0.5, seed=1, **kwargs)\n\n model.learn(total_timesteps=n_steps)\n\n evaluate_policy(model, env, n_eval_episodes=5, warn=False)\n\n\[email protected](\"model_class\", [PPO, A2C])\ndef test_multiprocessing(model_class):\n use_discrete_actions = model_class not in [SAC, TD3, DDPG]\n\n def make_env():\n env = DummyDictEnv(use_discrete_actions=use_discrete_actions, channel_last=False)\n env = gym.wrappers.TimeLimit(env, 100)\n return env\n\n env = make_vec_env(make_env, n_envs=2, vec_env_cls=SubprocVecEnv)\n\n kwargs = {}\n n_steps = 256\n\n if model_class in {A2C, PPO}:\n kwargs = dict(\n n_steps=128,\n policy_kwargs=dict(\n net_arch=[32],\n features_extractor_kwargs=dict(cnn_output_dim=32),\n ),\n )\n\n model = model_class(\"MultiInputPolicy\", env, gamma=0.5, seed=1, **kwargs)\n\n model.learn(total_timesteps=n_steps)\n\n\[email protected](\"model_class\", [PPO, A2C, DQN, DDPG, SAC, TD3])\[email protected](\"channel_last\", [False, True])\ndef test_dict_vec_framestack(model_class, channel_last):\n \"\"\"\n Additional tests for PPO/A2C/SAC/DDPG/TD3/DQN to check observation space support\n for Dictionary spaces and VecEnvWrapper using MultiInputPolicy.\n \"\"\"\n use_discrete_actions = model_class not in [SAC, TD3, DDPG]\n channels_order = {\"vec\": None, \"img\": \"last\" if channel_last else \"first\"}\n env = DummyVecEnv(\n [lambda: SimpleMultiObsEnv(random_start=True, discrete_actions=use_discrete_actions, channel_last=channel_last)]\n )\n\n env = VecFrameStack(env, n_stack=3, channels_order=channels_order)\n\n kwargs = {}\n n_steps = 256\n\n if model_class in {A2C, PPO}:\n kwargs = dict(\n n_steps=128,\n policy_kwargs=dict(\n net_arch=[32],\n features_extractor_kwargs=dict(cnn_output_dim=32),\n ),\n )\n else:\n # Avoid memory error when using replay 
buffer\n # Reduce the size of the features and make learning faster\n kwargs = dict(\n buffer_size=250,\n policy_kwargs=dict(\n net_arch=[32],\n features_extractor_kwargs=dict(cnn_output_dim=32),\n ),\n train_freq=8,\n gradient_steps=1,\n )\n if model_class == DQN:\n kwargs[\"learning_starts\"] = 0\n\n model = model_class(\"MultiInputPolicy\", env, gamma=0.5, seed=1, **kwargs)\n\n model.learn(total_timesteps=n_steps)\n\n evaluate_policy(model, env, n_eval_episodes=5, warn=False)\n\n\[email protected](\"model_class\", [PPO, A2C, DQN, DDPG, SAC, TD3])\ndef test_vec_normalize(model_class):\n \"\"\"\n Additional tests for PPO/A2C/SAC/DDPG/TD3/DQN to check observation space support\n for GoalEnv and VecNormalize using MultiInputPolicy.\n \"\"\"\n env = DummyVecEnv([lambda: gym.wrappers.TimeLimit(DummyDictEnv(use_discrete_actions=model_class == DQN), 100)])\n env = VecNormalize(env, norm_obs_keys=[\"vec\"])\n\n kwargs = {}\n n_steps = 256\n\n if model_class in {A2C, PPO}:\n kwargs = dict(\n n_steps=128,\n policy_kwargs=dict(\n net_arch=[32],\n ),\n )\n else:\n # Avoid memory error when using replay buffer\n # Reduce the size of the features and make learning faster\n kwargs = dict(\n buffer_size=250,\n policy_kwargs=dict(\n net_arch=[32],\n ),\n train_freq=8,\n gradient_steps=1,\n )\n if model_class == DQN:\n kwargs[\"learning_starts\"] = 0\n\n model = model_class(\"MultiInputPolicy\", env, gamma=0.5, seed=1, **kwargs)\n\n model.learn(total_timesteps=n_steps)\n\n evaluate_policy(model, env, n_eval_episodes=5, warn=False)\n\n\ndef test_dict_nested():\n \"\"\"\n Make sure we throw an appropiate error with nested Dict observation spaces\n \"\"\"\n # Test without manual wrapping to vec-env\n env = DummyDictEnv(nested_dict_obs=True)\n\n with pytest.raises(NotImplementedError):\n _ = PPO(\"MultiInputPolicy\", env, seed=1)\n\n # Test with manual vec-env wrapping\n\n with pytest.raises(NotImplementedError):\n env = DummyVecEnv([lambda: DummyDictEnv(nested_dict_obs=True)])\n" ]
[ [ "numpy.allclose" ] ]
OliviaWang123456/ncnet
[ "d45920d57ea1c01befb96785a2f1af8bd50e7390" ]
[ "lib/pf_dataset.py" ]
[ "from __future__ import print_function, division\nimport os\nimport torch\nfrom torch.autograd import Variable\nfrom skimage import io\nimport pandas as pd\nimport numpy as np\nfrom torch.utils.data import Dataset\nfrom lib.transformation import AffineTnf\n \nclass PFPascalDataset(Dataset):\n \n \"\"\"\n \n Proposal Flow PASCAL image pair dataset\n \n\n Args:\n csv_file (string): Path to the csv file with image names and transformations.\n dataset_path (string): Directory with the images.\n output_size (2-tuple): Desired output size\n transform (callable): Transformation for post-processing the training pair (eg. image normalization)\n \n \"\"\"\n\n def __init__(self, csv_file, dataset_path, output_size=(240,240), transform=None, category=None, pck_procedure='pf'):\n\n self.category_names=['aeroplane','bicycle','bird','boat','bottle','bus','car','cat','chair','cow','diningtable','dog','horse','motorbike','person','pottedplant','sheep','sofa','train','tvmonitor']\n self.out_h, self.out_w = output_size\n self.pairs = pd.read_csv(csv_file)\n self.category = self.pairs.iloc[:,2].values.astype('float') #as_matrix\n if category is not None:\n cat_idx = np.nonzero(self.category==category)[0]\n self.category=self.category[cat_idx]\n self.pairs=self.pairs.iloc[cat_idx,:]\n self.img_A_names = self.pairs.iloc[:,0]\n self.img_B_names = self.pairs.iloc[:,1]\n self.point_A_coords = self.pairs.iloc[:, 3:5]\n self.point_B_coords = self.pairs.iloc[:, 5:]\n self.dataset_path = dataset_path \n self.transform = transform\n # no cuda as dataset is called from CPU threads in dataloader and produces confilct\n self.affineTnf = AffineTnf(out_h=self.out_h, out_w=self.out_w, use_cuda = False) \n self.pck_procedure = pck_procedure\n \n def __len__(self):\n return len(self.pairs)\n\n def __getitem__(self, idx):\n # get pre-processed images\n image_A,im_size_A = self.get_image(self.img_A_names,idx)\n image_B,im_size_B = self.get_image(self.img_B_names,idx)\n\n # get pre-processed point coords\n point_A_coords = self.get_points(self.point_A_coords,idx)\n point_B_coords = self.get_points(self.point_B_coords,idx)\n \n # compute PCK reference length L_pck (equal to max bounding box side in image_A)\n #L_pck = torch.FloatTensor([torch.max(point_A_coords.max(1)[0]-point_A_coords.min(1)[0])])\n N_pts = torch.sum(torch.ne(point_A_coords[0,:],-1))\n\n if self.pck_procedure=='pf':\n L_pck = torch.FloatTensor([torch.max(point_A_coords[:,:N_pts].max(1)[0]-point_A_coords[:,:N_pts].min(1)[0])])\n elif self.pck_procedure=='scnet':\n #modification to follow the evaluation procedure of SCNet\n point_A_coords[0,0:N_pts]=point_A_coords[0,0:N_pts]*224/im_size_A[1]\n point_A_coords[1,0:N_pts]=point_A_coords[1,0:N_pts]*224/im_size_A[0]\n\n point_B_coords[0,0:N_pts]=point_B_coords[0,0:N_pts]*224/im_size_B[1]\n point_B_coords[1,0:N_pts]=point_B_coords[1,0:N_pts]*224/im_size_B[0]\n\n im_size_A[0:2]=torch.FloatTensor([224,224])\n im_size_B[0:2]=torch.FloatTensor([224,224])\n\n L_pck = torch.FloatTensor([224.0])\n \n sample = {'source_image': image_A, 'target_image': image_B, 'source_im_size': im_size_A, 'target_im_size': im_size_B, 'source_points': point_A_coords, 'target_points': point_B_coords, 'L_pck': L_pck}\n \n if self.transform:\n sample = self.transform(sample)\n\n return sample\n\n def get_image(self,img_name_list,idx):\n img_name = os.path.join(self.dataset_path, img_name_list.iloc[idx])\n image = io.imread(img_name)\n \n # get image size\n im_size = np.asarray(image.shape)\n \n # convert to torch Variable\n image = 
np.expand_dims(image.transpose((2,0,1)),0)\n image = torch.Tensor(image.astype(np.float32))\n image_var = Variable(image,requires_grad=False)\n \n # Resize image using bilinear sampling with identity affine tnf\n image = self.affineTnf(image_var).data.squeeze(0)\n \n im_size = torch.Tensor(im_size.astype(np.float32))\n \n return (image, im_size)\n \n def get_points(self,point_coords_list,idx):\n X=np.fromstring(point_coords_list.iloc[idx,0],sep=';')\n Y=np.fromstring(point_coords_list.iloc[idx,1],sep=';')\n Xpad = -np.ones(20); Xpad[:len(X)]=X\n Ypad = -np.ones(20); Ypad[:len(X)]=Y\n point_coords = np.concatenate((Xpad.reshape(1,20),Ypad.reshape(1,20)),axis=0)\n \n # make arrays float tensor for subsequent processing\n point_coords = torch.Tensor(point_coords.astype(np.float32))\n return point_coords\n\n " ]
[ [ "numpy.ones", "torch.FloatTensor", "pandas.read_csv", "torch.autograd.Variable", "numpy.asarray", "torch.ne", "numpy.nonzero", "numpy.fromstring" ] ]
jongtack/tensorflow
[ "2d5f0ac61fe4e4160fbb68d8031418528111dae9" ]
[ "tensorflow/python/ops/rnn.py" ]
[ "# Copyright 2015 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"RNN helpers for TensorFlow models.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import rnn_cell\nfrom tensorflow.python.ops import variable_scope as vs\n\n\ndef rnn(cell, inputs, initial_state=None, dtype=None,\n sequence_length=None, scope=None):\n \"\"\"Creates a recurrent neural network specified by RNNCell \"cell\".\n\n The simplest form of RNN network generated is:\n state = cell.zero_state(...)\n outputs = []\n states = []\n for input_ in inputs:\n output, state = cell(input_, state)\n outputs.append(output)\n states.append(state)\n return (outputs, states)\n\n However, a few other options are available:\n\n An initial state can be provided.\n If sequence_length is provided, dynamic calculation is performed.\n\n Dynamic calculation returns, at time t:\n (t >= max(sequence_length)\n ? (zeros(output_shape), zeros(state_shape))\n : cell(input, state)\n\n Thus saving computational time when unrolling past the max sequence length.\n\n Args:\n cell: An instance of RNNCell.\n inputs: A length T list of inputs, each a tensor of shape\n [batch_size, cell.input_size].\n initial_state: (optional) An initial state for the RNN. This must be\n a tensor of appropriate type and shape [batch_size x cell.state_size].\n dtype: (optional) The data type for the initial state. 
Required if\n initial_state is not provided.\n sequence_length: An int64 vector (tensor) size [batch_size].\n scope: VariableScope for the created subgraph; defaults to \"RNN\".\n\n Returns:\n A pair (outputs, states) where:\n outputs is a length T list of outputs (one for each input)\n states is a length T list of states (one state following each input)\n\n Raises:\n TypeError: If \"cell\" is not an instance of RNNCell.\n ValueError: If inputs is None or an empty list.\n \"\"\"\n\n if not isinstance(cell, rnn_cell.RNNCell):\n raise TypeError(\"cell must be an instance of RNNCell\")\n if not isinstance(inputs, list):\n raise TypeError(\"inputs must be a list\")\n if not inputs:\n raise ValueError(\"inputs must not be empty\")\n\n outputs = []\n states = []\n with vs.variable_scope(scope or \"RNN\"):\n batch_size = array_ops.shape(inputs[0])[0]\n if initial_state is not None:\n state = initial_state\n else:\n if not dtype:\n raise ValueError(\"If no initial_state is provided, dtype must be.\")\n state = cell.zero_state(batch_size, dtype)\n\n if sequence_length: # Prepare variables\n zero_output_state = (\n array_ops.zeros(array_ops.pack([batch_size, cell.output_size]),\n inputs[0].dtype),\n array_ops.zeros(array_ops.pack([batch_size, cell.state_size]),\n state.dtype))\n max_sequence_length = math_ops.reduce_max(sequence_length)\n\n for time, input_ in enumerate(inputs):\n if time > 0: vs.get_variable_scope().reuse_variables()\n # pylint: disable=cell-var-from-loop\n def output_state():\n return cell(input_, state)\n # pylint: enable=cell-var-from-loop\n if sequence_length:\n (output, state) = control_flow_ops.cond(\n time >= max_sequence_length,\n lambda: zero_output_state, output_state)\n else:\n (output, state) = output_state()\n\n outputs.append(output)\n states.append(state)\n\n return (outputs, states)\n\n\ndef state_saving_rnn(cell, inputs, state_saver, state_name,\n sequence_length=None, scope=None):\n \"\"\"RNN that accepts a state saver for time-truncated RNN calculation.\n\n Args:\n cell: An instance of RNNCell.\n inputs: A length T list of inputs, each a tensor of shape\n [batch_size, cell.input_size].\n state_saver: A state saver object with methods `state` and `save_state`.\n state_name: The name to use with the state_saver.\n sequence_length: (optional) An int64 vector (tensor) size [batch_size].\n See the documentation for rnn() for more details about sequence_length.\n scope: VariableScope for the created subgraph; defaults to \"RNN\".\n\n Returns:\n A pair (outputs, states) where:\n outputs is a length T list of outputs (one for each input)\n states is a length T list of states (one state following each input)\n\n Raises:\n TypeError: If \"cell\" is not an instance of RNNCell.\n ValueError: If inputs is None or an empty list.\n \"\"\"\n initial_state = state_saver.state(state_name)\n (outputs, states) = rnn(cell, inputs, initial_state=initial_state,\n sequence_length=sequence_length, scope=scope)\n save_state = state_saver.save_state(state_name, states[-1])\n with ops.control_dependencies([save_state]):\n outputs[-1] = array_ops.identity(outputs[-1])\n\n return (outputs, states)\n\n\ndef _reverse_seq(input_seq, lengths):\n \"\"\"Reverse a list of Tensors up to specified lengths.\n\n Args:\n input_seq: Sequence of seq_len tensors of dimension (batch_size, depth)\n lengths: A tensor of dimension batch_size, containing lengths for each\n sequence in the batch. 
If \"None\" is specified, simply reverses\n the list.\n\n Returns:\n time-reversed sequence\n \"\"\"\n if lengths is None:\n return list(reversed(input_seq))\n\n # Join into (time, batch_size, depth)\n s_joined = array_ops.pack(input_seq)\n # Reverse along dimension 0\n s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)\n # Split again into list\n result = array_ops.unpack(s_reversed)\n return result\n\n\ndef bidirectional_rnn(cell_fw, cell_bw, inputs,\n initial_state_fw=None, initial_state_bw=None,\n dtype=None, sequence_length=None, scope=None):\n \"\"\"Creates a bidirectional recurrent neural network.\n\n Similar to the unidirectional case above (rnn) but takes input and builds\n independent forward and backward RNNs with the final forward and backward\n outputs depth-concatenated, such that the output will have the format\n [time][batch][cell_fw.output_size + cell_bw.output_size]. The input_size of\n forward and backward cell must match. The initial state for both directions\n is zero by default (but can be set optionally) and no intermediate states are\n ever returned -- the network is fully unrolled for the given (passed in)\n length(s) of the sequence(s) or completely unrolled if length(s) is not given.\n\n Args:\n cell_fw: An instance of RNNCell, to be used for forward direction.\n cell_bw: An instance of RNNCell, to be used for backward direction.\n inputs: A length T list of inputs, each a tensor of shape\n [batch_size, cell.input_size].\n initial_state_fw: (optional) An initial state for the forward RNN.\n This must be a tensor of appropriate type and shape\n [batch_size x cell.state_size].\n initial_state_bw: (optional) Same as for initial_state_fw.\n dtype: (optional) The data type for the initial state. Required if either\n of the initial states are not provided.\n sequence_length: (optional) An int64 vector (tensor) of size [batch_size],\n containing the actual lengths for each of the sequences.\n scope: VariableScope for the created subgraph; defaults to \"BiRNN\"\n\n Returns:\n A set of output `Tensors` where:\n outputs is a length T list of outputs (one for each input), which\n are depth-concatenated forward and backward outputs\n\n Raises:\n TypeError: If \"cell_fw\" or \"cell_bw\" is not an instance of RNNCell.\n ValueError: If inputs is None or an empty list.\n \"\"\"\n\n if not isinstance(cell_fw, rnn_cell.RNNCell):\n raise TypeError(\"cell_fw must be an instance of RNNCell\")\n if not isinstance(cell_bw, rnn_cell.RNNCell):\n raise TypeError(\"cell_bw must be an instance of RNNCell\")\n if not isinstance(inputs, list):\n raise TypeError(\"inputs must be a list\")\n if not inputs:\n raise ValueError(\"inputs must not be empty\")\n\n name = scope or \"BiRNN\"\n # Forward direction\n with vs.variable_scope(name + \"_FW\"):\n output_fw, _ = rnn(cell_fw, inputs, initial_state_fw, dtype)\n # Backward direction\n with vs.variable_scope(name + \"_BW\"):\n tmp, _ = rnn(\n cell_bw, _reverse_seq(inputs, sequence_length), initial_state_bw, dtype)\n output_bw = _reverse_seq(tmp, sequence_length)\n # Concat each of the forward/backward outputs\n outputs = [array_ops.concat(1, [fw, bw])\n for fw, bw in zip(output_fw, output_bw)]\n\n return outputs\n" ]
[ [ "tensorflow.python.ops.math_ops.reduce_max", "tensorflow.python.ops.array_ops.pack", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.variable_scope.variable_scope", "tensorflow.python.ops.array_ops.unpack", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.ops.array_ops.concat", "tensorflow.python.ops.array_ops.reverse_sequence", "tensorflow.python.ops.control_flow_ops.cond", "tensorflow.python.ops.variable_scope.get_variable_scope" ] ]
bozhenhhu/gvp-pytorch
[ "82af6b22eaf8311c15733117b0071408d24ed877" ]
[ "run_cpd.py" ]
[ "import argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--models-dir', metavar='PATH', default='./models/',\n help='directory to save trained models, default=./models/')\nparser.add_argument('--num-workers', metavar='N', type=int, default=4,\n help='number of threads for loading data, default=4')\nparser.add_argument('--max-nodes', metavar='N', type=int, default=3000,\n help='max number of nodes per batch, default=3000')\nparser.add_argument('--epochs', metavar='N', type=int, default=100,\n help='training epochs, default=100')\nparser.add_argument('--cath-data', metavar='PATH', default='./data/chain_set.jsonl',\n help='location of CATH dataset, default=./data/chain_set.jsonl')\nparser.add_argument('--cath-splits', metavar='PATH', default='./data/chain_set_splits.json',\n help='location of CATH split file, default=./data/chain_set_splits.json')\nparser.add_argument('--ts50', metavar='PATH', default='./data/ts50.json',\n help='location of TS50 dataset, default=./data/ts50.json')\nparser.add_argument('--train', action=\"store_true\", help=\"train a model\")\nparser.add_argument('--test-r', metavar='PATH', default=None,\n help='evaluate a trained model on recovery (without training)')\nparser.add_argument('--test-p', metavar='PATH', default=None,\n help='evaluate a trained model on perplexity (without training)')\nparser.add_argument('--n-samples', metavar='N', default=100,\n help='number of sequences to sample (if testing recovery), default=100')\n\nargs = parser.parse_args()\nassert sum(map(bool, [args.train, args.test_p, args.test_r])) == 1, \\\n \"Specify exactly one of --train, --test_r, --test_p\"\n\nimport torch\nimport torch.nn as nn\nimport gvp.data, gvp.models\nfrom datetime import datetime\nimport tqdm, os, json\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix\nimport torch_geometric\nfrom functools import partial\nprint = partial(print, flush=True)\n\nnode_dim = (100, 16)\nedge_dim = (32, 1)\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\nif not os.path.exists(args.models_dir): os.makedirs(args.models_dir)\nmodel_id = int(datetime.timestamp(datetime.now()))\ndataloader = lambda x: torch_geometric.data.DataLoader(x, \n num_workers=args.num_workers,\n batch_sampler=gvp.data.BatchSampler(\n x.node_counts, max_nodes=args.max_nodes))\n\ndef main():\n \n model = gvp.models.CPDModel((6, 3), node_dim, (32, 1), edge_dim).to(device)\n \n print(\"Loading CATH dataset\")\n cath = gvp.data.CATHDataset(path=\"data/chain_set.jsonl\",\n splits_path=\"data/chain_set_splits.json\") \n \n trainset, valset, testset = map(gvp.data.ProteinGraphDataset,\n (cath.train, cath.val, cath.test))\n \n if args.test_r or args.test_p:\n ts50set = gvp.data.ProteinGraphDataset(json.load(open(args.ts50)))\n model.load_state_dict(torch.load(args.test_r or args.test_p))\n \n if args.test_r:\n print(\"Testing on CATH testset\"); test_recovery(model, testset)\n print(\"Testing on TS50 set\"); test_recovery(model, ts50set)\n \n elif args.test_p:\n print(\"Testing on CATH testset\"); test_perplexity(model, testset)\n print(\"Testing on TS50 set\"); test_perplexity(model, ts50set)\n \n elif args.train:\n train(model, trainset, valset, testset)\n \n \ndef train(model, trainset, valset, testset):\n train_loader, val_loader, test_loader = map(dataloader,\n (trainset, valset, testset))\n optimizer = torch.optim.Adam(model.parameters())\n best_path, best_val = None, np.inf\n lookup = train_loader.dataset.num_to_letter\n for epoch in range(args.epochs):\n model.train()\n 
loss, acc, confusion = loop(model, train_loader, optimizer=optimizer)\n path = f\"{args.models_dir}/{model_id}_{epoch}.pt\"\n torch.save(model.state_dict(), path)\n print(f'EPOCH {epoch} TRAIN loss: {loss:.4f} acc: {acc:.4f}')\n print_confusion(confusion, lookup=lookup)\n \n model.eval()\n with torch.no_grad():\n loss, acc, confusion = loop(model, val_loader) \n print(f'EPOCH {epoch} VAL loss: {loss:.4f} acc: {acc:.4f}')\n print_confusion(confusion, lookup=lookup)\n \n if loss < best_val:\n best_path, best_val = path, loss\n print(f'BEST {best_path} VAL loss: {best_val:.4f}')\n \n print(f\"TESTING: loading from {best_path}\")\n model.load_state_dict(torch.load(best_path))\n \n model.eval()\n with torch.no_grad():\n loss, acc, confusion = loop(model, test_loader)\n print(f'TEST loss: {loss:.4f} acc: {acc:.4f}')\n print_confusion(confusion,lookup=lookup)\n\ndef test_perplexity(model, dataset):\n model.eval()\n with torch.no_grad():\n loss, acc, confusion = loop(model, dataloader(dataset))\n print(f'TEST perplexity: {np.exp(loss):.4f}')\n print_confusion(confusion, lookup=dataset.num_to_letter)\n\ndef test_recovery(model, dataset):\n recovery = []\n \n for protein in tqdm.tqdm(dataset):\n protein = protein.to(device)\n h_V = (protein.node_s, protein.node_v)\n h_E = (protein.edge_s, protein.edge_v) \n sample = model.sample(h_V, protein.edge_index, \n h_E, n_samples=args.n_samples)\n \n recovery_ = sample.eq(protein.seq).float().mean().cpu().numpy()\n recovery.append(recovery_)\n print(protein.name, recovery_, flush=True)\n\n recovery = np.median(recovery)\n print(f'TEST recovery: {recovery:.4f}')\n \ndef loop(model, dataloader, optimizer=None):\n\n confusion = np.zeros((20, 20))\n t = tqdm.tqdm(dataloader)\n loss_fn = nn.CrossEntropyLoss()\n total_loss, total_correct, total_count = 0, 0, 0\n \n for batch in t:\n if optimizer: optimizer.zero_grad()\n \n batch = batch.to(device)\n h_V = (batch.node_s, batch.node_v)\n h_E = (batch.edge_s, batch.edge_v)\n \n logits = model(h_V, batch.edge_index, h_E, seq=batch.seq)\n logits, seq = logits[batch.mask], batch.seq[batch.mask]\n loss_value = loss_fn(logits, seq)\n\n if optimizer:\n loss_value.backward()\n optimizer.step()\n\n num_nodes = int(batch.mask.sum())\n total_loss += float(loss_value) * num_nodes\n total_count += num_nodes\n pred = torch.argmax(logits, dim=-1).detach().cpu().numpy()\n true = seq.detach().cpu().numpy()\n total_correct += (pred == true).sum()\n confusion += confusion_matrix(true, pred, labels=range(20))\n t.set_description(\"%.5f\" % float(total_loss/total_count))\n \n torch.cuda.empty_cache()\n \n return total_loss / total_count, total_correct / total_count, confusion\n \ndef print_confusion(mat, lookup):\n counts = mat.astype(np.int32)\n mat = (counts.T / counts.sum(axis=-1, keepdims=True).T).T\n mat = np.round(mat * 1000).astype(np.int32)\n res = '\\n'\n for i in range(20):\n res += '\\t{}'.format(lookup[i])\n res += '\\tCount\\n'\n for i in range(20):\n res += '{}\\t'.format(lookup[i])\n res += '\\t'.join('{}'.format(n) for n in mat[i])\n res += '\\t{}\\n'.format(sum(counts[i]))\n print(res)\n \nif __name__== \"__main__\":\n main()" ]
[ [ "torch.cuda.empty_cache", "torch.load", "numpy.zeros", "torch.argmax", "torch.no_grad", "numpy.median", "torch.nn.CrossEntropyLoss", "numpy.exp", "torch.cuda.is_available", "numpy.round" ] ]
italogfernandes/machine-learning
[ "7a0cb2bdf7fcc44dee1241fdf0ff59a68d8e45db" ]
[ "Part 2 - Regression/Section 4 - Simple Linear Regression/simple_linear_regression.py" ]
[ "# Simple Linear Regression\n\n# Importing the libraries\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\n\n# Importing the dataset\ndataset = pd.read_csv('../datasets/Salary_Data.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, -1].values\n\n# Splitting the dataset into the Training set and Test set\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 1.0/3.0, random_state = 0)\n\n# Fitting Simple Linear Regression to the Training set\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)\n\n# Predicting the Test set results\ny_pred = regressor.predict(X_test)\n\n# Visualising the Training set results\nplt.scatter(X_train, y_train, color = 'red')\nplt.plot(X_train, regressor.predict(X_train), color = 'blue')\nplt.title('Salary vs Experience (Training set)')\nplt.xlabel('Years of Experience')\nplt.ylabel('Salary')\nplt.show()\n\n# Visualising the Test set results\nplt.scatter(X_test, y_test, color = 'red')\nplt.plot(X_train, regressor.predict(X_train), color = 'blue')\nplt.title('Salary vs Experience (Test set)')\nplt.xlabel('Years of Experience')\nplt.ylabel('Salary')\nplt.show()\n" ]
[ [ "pandas.read_csv", "sklearn.linear_model.LinearRegression", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.scatter" ] ]
FrankZhu7/play-with-data-science
[ "f527c7233fc9f33408e239b03ffd7a699a8b6923" ]
[ "SP500 volatility estimation with machine learning models/dm_test.py" ]
[ "# Author : John Tsang\n# Date : December 7th, 2017\n# Purpose : Implement the Diebold-Mariano Test (DM test) to compare\n# forecast accuracy\n# Input : 1) actual_lst: the list of actual values\n# 2) pred1_lst : the first list of predicted values\n# 3) pred2_lst : the second list of predicted values\n# 4) h : the number of stpes ahead\n# 5) crit : a string specifying the criterion\n# i) MSE : the mean squared error\n# ii) MAD : the mean absolute deviation\n# iii) MAPE : the mean absolute percentage error\n# iv) poly : use power function to weigh the errors\n# 6) poly : the power for crit power\n# (it is only meaningful when crit is \"poly\")\n# Condition: 1) length of actual_lst, pred1_lst and pred2_lst is equal\n# 2) h must be an integer and it must be greater than 0 and less than\n# the length of actual_lst.\n# 3) crit must take the 4 values specified in Input\n# 4) Each value of actual_lst, pred1_lst and pred2_lst must\n# be numerical values. Missing values will not be accepted.\n# 5) power must be a numerical value.\n# Return : a named-tuple of 2 elements\n# 1) p_value : the p-value of the DM test\n# 2) DM : the test statistics of the DM test\n##########################################################\n# References:\n#\n# Harvey, D., Leybourne, S., & Newbold, P. (1997). Testing the equality of\n# prediction mean squared errors. International Journal of forecasting,\n# 13(2), 281-291.\n#\n# Diebold, F. X. and Mariano, R. S. (1995), Comparing predictive accuracy,\n# Journal of business & economic statistics 13(3), 253-264.\n#\n##########################################################\ndef dm_test(actual_lst, pred1_lst, pred2_lst, h=1, crit=\"MSE\", power=2):\n # Routine for checking errors\n def error_check():\n rt = 0\n msg = \"\"\n # Check if h is an integer\n if (not isinstance(h, int)):\n rt = -1\n msg = \"The type of the number of steps ahead (h) is not an integer.\"\n return (rt, msg)\n # Check the range of h\n if (h < 1):\n rt = -1\n msg = \"The number of steps ahead (h) is not large enough.\"\n return (rt, msg)\n len_act = len(actual_lst)\n len_p1 = len(pred1_lst)\n len_p2 = len(pred2_lst)\n # Check if lengths of actual values and predicted values are equal\n if (len_act != len_p1 or len_p1 != len_p2 or len_act != len_p2):\n rt = -1\n msg = \"Lengths of actual_lst, pred1_lst and pred2_lst do not match.\"\n return (rt, msg)\n # Check range of h\n if (h >= len_act):\n rt = -1\n msg = \"The number of steps ahead is too large.\"\n return (rt, msg)\n # Check if criterion supported\n if (crit != \"MSE\" and crit != \"MAPE\" and crit != \"MAD\" and crit != \"poly\"):\n rt = -1\n msg = \"The criterion is not supported.\"\n return (rt, msg)\n # Check if every value of the input lists are numerical values\n from re import compile as re_compile\n comp = re_compile(\"^\\d+?\\.\\d+?$\")\n\n def compiled_regex(s):\n \"\"\" Returns True is string is a number. 
\"\"\"\n if comp.match(s) is None:\n return s.isdigit()\n return True\n\n for actual, pred1, pred2 in zip(actual_lst, pred1_lst, pred2_lst):\n is_actual_ok = compiled_regex(str(abs(actual)))\n is_pred1_ok = compiled_regex(str(abs(pred1)))\n is_pred2_ok = compiled_regex(str(abs(pred2)))\n if (not (is_actual_ok and is_pred1_ok and is_pred2_ok)):\n msg = \"An element in the actual_lst, pred1_lst or pred2_lst is not numeric.\"\n rt = -1\n return (rt, msg)\n return (rt, msg)\n\n # Error check\n error_code = error_check()\n # Raise error if cannot pass error check\n if (error_code[0] == -1):\n raise SyntaxError(error_code[1])\n return\n # Import libraries\n from scipy.stats import t\n import collections\n import pandas as pd\n import numpy as np\n\n # Initialise lists\n e1_lst = []\n e2_lst = []\n d_lst = []\n\n # convert every value of the lists into real values\n actual_lst = pd.Series(actual_lst).apply(lambda x: float(x)).tolist()\n pred1_lst = pd.Series(pred1_lst).apply(lambda x: float(x)).tolist()\n pred2_lst = pd.Series(pred2_lst).apply(lambda x: float(x)).tolist()\n\n # Length of lists (as real numbers)\n T = float(len(actual_lst))\n\n # construct d according to crit\n if (crit == \"MSE\"):\n for actual, p1, p2 in zip(actual_lst, pred1_lst, pred2_lst):\n e1_lst.append((actual - p1) ** 2)\n e2_lst.append((actual - p2) ** 2)\n for e1, e2 in zip(e1_lst, e2_lst):\n d_lst.append(e1 - e2)\n elif (crit == \"MAD\"):\n for actual, p1, p2 in zip(actual_lst, pred1_lst, pred2_lst):\n e1_lst.append(abs(actual - p1))\n e2_lst.append(abs(actual - p2))\n for e1, e2 in zip(e1_lst, e2_lst):\n d_lst.append(e1 - e2)\n elif (crit == \"MAPE\"):\n for actual, p1, p2 in zip(actual_lst, pred1_lst, pred2_lst):\n e1_lst.append(abs((actual - p1) / actual))\n e2_lst.append(abs((actual - p2) / actual))\n for e1, e2 in zip(e1_lst, e2_lst):\n d_lst.append(e1 - e2)\n elif (crit == \"poly\"):\n for actual, p1, p2 in zip(actual_lst, pred1_lst, pred2_lst):\n e1_lst.append(((actual - p1)) ** (power))\n e2_lst.append(((actual - p2)) ** (power))\n for e1, e2 in zip(e1_lst, e2_lst):\n d_lst.append(e1 - e2)\n\n # Mean of d\n mean_d = pd.Series(d_lst).mean()\n\n # Find autocovariance and construct DM test statistics\n def autocovariance(Xi, N, k, Xs):\n autoCov = 0\n T = float(N)\n for i in np.arange(0, N - k):\n autoCov += ((Xi[i + k]) - Xs) * (Xi[i] - Xs)\n return (1 / (T)) * autoCov\n\n gamma = []\n for lag in range(0, h):\n gamma.append(autocovariance(d_lst, len(d_lst), lag, mean_d)) # 0, 1, 2\n V_d = (gamma[0] + 2 * sum(gamma[1:])) / T\n DM_stat = V_d ** (-0.5) * mean_d\n harvey_adj = ((T + 1 - 2 * h + h * (h - 1) / T) / T) ** (0.5)\n DM_stat = harvey_adj * DM_stat\n # Find p-value\n p_value = 2 * t.cdf(-abs(DM_stat), df=T - 1)\n\n # Construct named tuple for return\n dm_return = collections.namedtuple('dm_return', 'DM p_value')\n\n rt = dm_return(DM=DM_stat, p_value=p_value)\n\n return rt" ]
[ [ "numpy.arange", "pandas.Series" ] ]
goktug97/PyYOLO
[ "69c6997e3e3762199ee04e7339725b51059e56f4" ]
[ "pyyolo/yolo.py" ]
[ "#!/usr/bin/env python3\n\nimport cv2\nfrom .cyolo import *\nimport numpy as np\n\n\nclass BBox(np.ndarray):\n def __new__(cls, x, y, w, h, prob, name):\n cls.name = \"\"\n cls.prob = 0\n obj = np.asarray([x, y, w, h]).view(cls)\n obj.x, obj.y, obj.w, obj.h = obj.view()\n obj.name = name\n obj.prob = prob\n return obj\n\n def __str__(self):\n x, y, w, h = self.view()\n string = f'x: {x}, y: {y}, w: {w}, h: {h}, '\n string += f'probability: {self.prob}, name: {self.name}'\n return string\n\n def to_xyxy(self):\n x, y, w, h = self.view()\n return np.array([x, y, x + w, y + h])\n\n def __array_finalize__(self, obj):\n if obj is None: return\n\n\n# https://github.com/AlexeyAB/darknet/blob/master/darknet.py\nclass YOLO(object):\n def __init__(self, config_path,\n weights_path,\n data_path,\n detection_threshold = 0.5,\n hier_threshold = 0.5,\n nms_threshold = 0.5):\n\n if not os.path.exists(config_path):\n raise ValueError(f'Invalid config path: {os.path.abspath(config_path)}')\n if not os.path.exists(weights_path):\n raise ValueError(f'Invalid weight path: {os.path.abspath(weights_path)}')\n if not os.path.exists(data_path):\n raise ValueError(f'Invalid data file path: {os.path.abspath(data_path)}')\n\n self.net_main = load_net_custom(config_path.encode(\"ascii\"),\n weights_path.encode(\"ascii\"),\n 0, 1)\n self.meta_main = load_meta(data_path.encode(\"ascii\"))\n\n self.height = lib.network_height(self.net_main)\n self.width = lib.network_width(self.net_main)\n\n with open(data_path) as metaFH:\n meta_contents = metaFH.read()\n import re\n match = re.search(\"names *= *(.*)$\",\n meta_contents,\n re.IGNORECASE | re.MULTILINE)\n if match:\n result = match.group(1)\n else:\n result = None\n if os.path.exists(result):\n with open(result) as namesFH:\n names_list = namesFH.read().strip().split(\"\\n\")\n self.alt_names = [x.strip() for x in names_list]\n\n self.threshold = detection_threshold\n self.hier_threshold = hier_threshold\n self.nms = nms_threshold\n\n\n def detect(self, image, rgb=False):\n original_h, original_w, _ = image.shape\n image = cv2.resize(image,\n (self.width, self.height),\n interpolation=cv2.INTER_CUBIC)[:,:,::-1]\n if not rgb:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n im, arr = array_to_image(image)\n\n num = c_int(0)\n pnum = pointer(num)\n\n predict_image(self.net_main, im)\n dets = get_network_boxes(\n self.net_main, im.w, im.h,\n self.threshold,\n self.hier_threshold,\n None, 0, pnum, 0)\n num = pnum[0]\n\n if self.nms:\n do_nms_sort(dets, num, self.meta_main.classes, self.nms)\n\n res = []\n for j in range(num):\n for i in range(self.meta_main.classes):\n if dets[j].prob[i] > 0:\n b = dets[j].bbox\n\n # coordinates as percentage\n x = (b.x-b.w/2)/self.width\n y = (b.y-b.h/2)/self.height\n w = b.w/self.width\n h = b.h/self.height\n\n # scale detections to input image\n x = int(round(x*original_w))\n y = int(round(y*original_h))\n w = int(round(w*original_w))\n h = int(round(h*original_h))\n\n bbox = BBox(x, y, w, h, dets[j].prob[i], self.alt_names[i])\n\n res.append(bbox)\n\n free_detections(dets, num)\n return res\n" ]
[ [ "numpy.array", "numpy.asarray" ] ]
emanuelevivoli/CompReGAN
[ "33589c3871bed8adcc157bf25a45b8d12ba1af66" ]
[ "data_utils.py" ]
[ "from os import listdir\nfrom os.path import join\n\nfrom PIL import Image\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data.dataset import Dataset\nfrom torchvision.transforms import Compose, RandomCrop, ToTensor, ToPILImage, CenterCrop, Resize, transforms\nfrom utils.jpeg_layer import jpeg_compression_transform, simg_jpeg_compression, jpeg_compression\n# from utils.custom_trasform import NRandomCrop\n\nfrom numpy import asarray, clip\n\ndef is_image_file(filename):\n return any(filename.endswith(extension) for extension in ['.png', '.jpg', '.jpeg', '.PNG', '.JPG', '.JPEG'])\n\n\ndef calculate_valid_crop_size(crop_size, upscale_factor):\n return crop_size - (crop_size % upscale_factor)\n\n\ndef train_hr_transform(crop_size):\n return Compose([\n RandomCrop(crop_size),\n ToTensor(),\n ])\n\ndef val_hr_transform(crop_size):\n return Compose([\n CenterCrop(crop_size),\n ToTensor(),\n ])\n\n# def train_multiple_hr_transform(crop_size, crop_numb, padding=0):\n# return Compose([\n# NRandomCrop(size=crop_size, n=crop_numb, padding=padding),\n# transforms.Lambda(\n# lambda crops: torch.stack([\n# transforms.ToTensor()(crop)\n# for crop in crops\n# ]))\n# ])\n\ndef jr_transform(quality_factor):\n return Compose([\n jpeg_compression_transform(quality_factor)\n ])\n\n\ndef display_transform():\n return Compose([\n ToPILImage(),\n # Resize(400),\n # CenterCrop(400),\n ToTensor()\n ])\n\n\ndef weight_init(m):\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n torch.nn.init.xavier_uniform_(m.weight)\n if m.bias is not None:\n torch.nn.init.xavier_uniform_(m.bias)\n else:\n m.bias.data.zero_()\n\nclass TrainDatasetFromFolder(Dataset):\n def __init__(self, dataset_dir, crop_size, upscale_factor, quality_factor, train=True, crop_numb=1, padding=0):\n super(TrainDatasetFromFolder, self).__init__()\n self.image_filenames = [join(dataset_dir, x) for x in listdir(dataset_dir) if is_image_file(x)] * crop_numb\n crop_size = calculate_valid_crop_size(crop_size, upscale_factor)\n # self.hr_transform = train_multiple_hr_transform(crop_size, crop_numb, padding)\n self.hr_transform = train_hr_transform(crop_size) if train else val_hr_transform(crop_size)\n self.quality_factor = quality_factor\n # self.jr_transform = jr_transform(quality_factor)\n\n def __getitem__(self, index):\n hr_image = self.hr_transform(Image.open(self.image_filenames[index]))\n jr_image = simg_jpeg_compression(hr_image, self.quality_factor)\n return jr_image, hr_image\n\n def __len__(self):\n return len(self.image_filenames)\n\n\nclass ValDatasetFromFolder(Dataset):\n def __init__(self, dataset_dir, crop_size, upscale_factor, quality_factor):\n super(ValDatasetFromFolder, self).__init__()\n self.upscale_factor = upscale_factor\n self.quality_factor = quality_factor\n self.image_filenames = [join(dataset_dir, x) for x in listdir(dataset_dir) if is_image_file(x)]\n self.crop_size = crop_size\n # self.jr_transform = jr_transform(quality_factor)\n\n def __getitem__(self, index):\n hr_image = Image.open(self.image_filenames[index])\n w, h = hr_image.size\n # crop_size = calculate_valid_crop_size(min(w, h), self.upscale_factor)\n\n hr_image = ToTensor()(CenterCrop(self.crop_size)(hr_image))\n jr_image = simg_jpeg_compression(hr_image, self.quality_factor)\n\n return jr_image, hr_image\n\n def __len__(self):\n return len(self.image_filenames)\n\ndef scalePixels(image):\n pixels = asarray(image.cpu())\n # convert from integers to floats\n pixels = pixels.astype('float32')\n # calculate global mean and standard deviation\n mean, 
std = pixels.mean(), pixels.std()\n print('Mean: %.3f, Standard Deviation: %.3f' % (mean, std))\n # global standardization of pixels\n pixels = (pixels - mean) / std\n # clip pixel values to [-1,1]\n pixels = clip(pixels, -1.0, 1.0)\n print('Min: %.3f, Max: %.3f' % (pixels.min(), pixels.max()))\n return torch.Tensor(pixels).cuda()" ]
[ [ "torch.nn.init.xavier_uniform_", "numpy.clip", "torch.Tensor" ] ]
rishabhsamb/fairlearn
[ "c039a3fb292a57d5d2995ded8400122e4c736985" ]
[ "fairlearn/metrics/_metric_frame.py" ]
[ "# Copyright (c) Microsoft Corporation and Fairlearn contributors.\n# Licensed under the MIT License.\n\nimport copy\nimport logging\nimport numpy as np\nimport pandas as pd\nfrom typing import Any, Callable, Dict, List, Optional, Union\nfrom sklearn.utils import check_consistent_length\nimport warnings\nfrom functools import wraps\n\nfrom fairlearn.metrics._input_manipulations import _convert_to_ndarray_and_squeeze\nfrom ._function_container import FunctionContainer, _SAMPLE_PARAMS_NOT_DICT\nfrom ._group_feature import GroupFeature\n\n\nlogger = logging.getLogger(__name__)\n\n_SUBGROUP_COUNT_WARNING_THRESHOLD = 20\n\n_SF_DICT_CONVERSION_FAILURE = \"DataFrame.from_dict() failed on sensitive features. \" \\\n \"Please ensure each array is strictly 1-D.\"\n_BAD_FEATURE_LENGTH = \"Received a feature of length {0} when length {1} was expected\"\n_SUBGROUP_COUNT_WARNING = \"Found {0} subgroups. Evaluation may be slow\"\n_FEATURE_LIST_NONSCALAR = \"Feature lists must be of scalar types\"\n_FEATURE_DF_COLUMN_BAD_NAME = \"DataFrame column names must be strings. Name '{0}' is of type {1}\"\n_DUPLICATE_FEATURE_NAME = \"Detected duplicate feature name: '{0}'\"\n_TOO_MANY_FEATURE_DIMS = \"Feature array has too many dimensions\"\n_SAMPLE_PARAM_KEYS_NOT_IN_FUNC_DICT = \\\n \"Keys in 'sample_params' do not match those in 'metric'\"\n\n\ndef _deprecate_metric_frame_init(new_metric_frame_init):\n \"\"\"Issue deprecation warnings for the `MetricFrame` constructor.\n\n Decorator to issue warnings if called with positional arguments\n or with the keyword argument `metric` instead of `metrics`.\n\n Parameters\n ----------\n new_metric_frame_init : callable\n New MetricFrame constructor.\n \"\"\"\n\n @wraps(new_metric_frame_init)\n def compatible_metric_frame_init(self, *args, metric=None, **kwargs):\n positional_names = [\"metrics\", \"y_true\", \"y_pred\"]\n version = \"0.10.0\"\n\n positional_dict = dict(zip(positional_names, args))\n\n # If more than 3 positional arguments are provided (apart from self), show\n # the error message applicable to the new constructor implementation (with `self`\n # being the only positional argument).\n if len(args) > 3:\n raise TypeError(f\"{new_metric_frame_init.__name__}() takes 1 positional \"\n f\"argument but {1+len(args)} positional arguments \"\n f\"were given\")\n\n # If 1-3 positional arguments are provided (apart fom self), issue warning.\n if len(args) > 0:\n args_msg = \", \".join([f\"'{name}'\" for name in positional_dict.keys()])\n warnings.warn(f\"You have provided {args_msg} as positional arguments. \"\n f\"Please pass them as keyword arguments. From version \"\n f\"{version} passing them as positional arguments \"\n f\"will result in an error.\",\n FutureWarning)\n\n # If a keyword argument `metric` is provided, issue warning.\n metric_arg_dict = {}\n if metric is not None:\n metric_arg_dict = {\"metrics\": metric}\n warnings.warn(f\"The positional argument 'metric' has been replaced \"\n f\"by a keyword argument 'metrics'. 
\"\n f\"From version {version} passing it as a positional argument \"\n f\"or as a keyword argument 'metric' will result in an error\",\n FutureWarning)\n\n # Call the new constructor with positional arguments passed as keyword arguments\n # and with the `metric` keyword argument renamed to `metrics`.\n new_metric_frame_init(self,\n **metric_arg_dict,\n **positional_dict,\n **kwargs)\n\n return compatible_metric_frame_init\n\n\nclass MetricFrame:\n \"\"\"Collection of disaggregated metric values.\n\n This data structure stores and manipulates disaggregated values for any number of underlying\n metrics. At least one sensitive feature must be supplied, which is used\n to split the data into subgroups. The underlying metric(s) is(are) calculated\n across the entire dataset (made available by the :attr:`.overall` property) and\n for each identified subgroup (made available by the :attr:`.by_group` property).\n\n The only limitations placed on the metric functions are that:\n\n * The first two arguments they take must be ``y_true`` and ``y_pred`` arrays\n * Any other arguments must correspond to sample properties (such as sample weights),\n meaning that their first dimension is the same as that of y_true and y_pred. These\n arguments will be split up along with the ``y_true`` and ``y_pred`` arrays\n\n The interpretation of the ``y_true`` and ``y_pred`` arrays is up to the\n underlying metric - it is perfectly possible to pass in lists of class\n probability tuples. We also support non-scalar return types for the\n metric function (such as confusion matrices) at the current time. However,\n the aggregation functions will not be well defined in this case.\n\n Group fairness metrics are obtained by methods that implement\n various aggregators over group-level metrics, such such as the\n maximum, minimum, or the worst-case difference or ratio.\n\n This data structure also supports the concept of 'control features.' Like the sensitive\n features, control features identify subgroups within the data, but\n aggregations are not performed over the control features. Instead, the\n aggregations produce a result for each subgroup identified by the control\n feature(s). The name 'control features' refers to the statistical practice\n of 'controlling' for a variable.\n\n Parameters\n ----------\n metrics : callable or dict\n The underlying metric functions which are to be calculated. This\n can either be a single metric function or a dictionary of functions.\n These functions must be callable as\n ``fn(y_true, y_pred, **sample_params)``.\n If there are any other arguments required (such as ``beta`` for\n :func:`sklearn.metrics.fbeta_score`) then\n :func:`functools.partial` must be used.\n\n **Note** that the values returned by various members of the class change\n based on whether this argument is a callable or a dictionary of\n callables. 
This distinction remains *even if* the dictionary only\n contains a single entry.\n\n y_true : List, pandas.Series, numpy.ndarray, pandas.DataFrame\n The ground-truth labels (for classification) or target values (for regression).\n\n y_pred : List, pandas.Series, numpy.ndarray, pandas.DataFrame\n The predictions.\n\n sensitive_features : List, pandas.Series, dict of 1d arrays, numpy.ndarray, pandas.DataFrame\n The sensitive features which should be used to create the subgroups.\n At least one sensitive feature must be provided.\n All names (whether on pandas objects or dictionary keys) must be strings.\n We also forbid DataFrames with column names of ``None``.\n For cases where no names are provided we generate names ``sensitive_feature_[n]``.\n\n control_features : List, pandas.Series, dict of 1d arrays, numpy.ndarray, pandas.DataFrame\n Control features are similar to sensitive features, in that they\n divide the input data into subgroups.\n Unlike the sensitive features, aggregations are not performed\n across the control features - for example, the ``overall`` property\n will have one value for each subgroup in the control feature(s),\n rather than a single value for the entire data set.\n Control features can be specified similarly to the sensitive features.\n However, their default names (if none can be identified in the\n input values) are of the format ``control_feature_[n]``.\n\n **Note** the types returned by members of the class vary based on whether\n control features are present.\n\n sample_params : dict\n Parameters for the metric function(s). If there is only one metric function,\n then this is a dictionary of strings and array-like objects, which are split\n alongside the ``y_true`` and ``y_pred`` arrays, and passed to the metric function.\n If there are multiple metric functions (passed as a dictionary), then this is\n a nested dictionary, with the first set of string keys identifying the\n metric function name, with the values being the string-to-array-like dictionaries.\n\n metric : callable or dict\n The underlying metric functions which are to be calculated. This\n can either be a single metric function or a dictionary of functions.\n These functions must be callable as\n ``fn(y_true, y_pred, **sample_params)``.\n If there are any other arguments required (such as ``beta`` for\n :func:`sklearn.metrics.fbeta_score`) then\n :func:`functools.partial` must be used.\n\n .. 
deprecated:: 0.7.0\n `metric` will be removed in version 0.10.0, use `metrics` instead.\n \"\"\"\n\n # The deprecation decorator does two things:\n # (1) turns first three positional arguments into keyword arguments\n # (2) renames the 'metric' keyword argument into 'metrics'\n @_deprecate_metric_frame_init\n def __init__(self,\n *,\n metrics: Union[Callable, Dict[str, Callable]],\n y_true,\n y_pred,\n sensitive_features,\n control_features: Optional = None,\n sample_params: Optional[Union[Dict[str, Any], Dict[str, Dict[str, Any]]]] = None):\n \"\"\"Read a placeholder comment.\"\"\"\n check_consistent_length(y_true, y_pred)\n y_t = _convert_to_ndarray_and_squeeze(y_true)\n y_p = _convert_to_ndarray_and_squeeze(y_pred)\n\n func_dict = self._process_functions(metrics, sample_params)\n\n # Now, prepare the sensitive features\n sf_list = self._process_features(\"sensitive_feature_\", sensitive_features, y_t)\n self._sf_names = [x.name for x in sf_list]\n\n # Prepare the control features\n # Adjust _sf_indices if needed\n cf_list = None\n self._cf_names = None\n if control_features is not None:\n cf_list = self._process_features(\"control_feature_\", control_features, y_t)\n self._cf_names = [x.name for x in cf_list]\n\n # Check for duplicate feature names\n nameset = set()\n namelist = self._sf_names\n if self._cf_names:\n namelist = namelist + self._cf_names\n for name in namelist:\n if name in nameset:\n raise ValueError(_DUPLICATE_FEATURE_NAME.format(name))\n nameset.add(name)\n\n self._overall = self._compute_overall(func_dict, y_t, y_p, cf_list)\n self._by_group = self._compute_by_group(func_dict, y_t, y_p, sf_list, cf_list)\n\n def _compute_overall(self, func_dict, y_true, y_pred, cf_list):\n if cf_list is None:\n result = pd.Series(index=func_dict.keys(), dtype='object')\n for func_name in func_dict:\n metric_value = func_dict[func_name].evaluate_all(y_true, y_pred)\n result[func_name] = metric_value\n else:\n result = self._compute_dataframe_from_rows(func_dict, y_true, y_pred, cf_list)\n return result\n\n def _compute_by_group(self, func_dict, y_true, y_pred, sf_list, cf_list):\n rows = copy.deepcopy(sf_list)\n if cf_list is not None:\n # Prepend the conditional features, so they are 'higher'\n rows = copy.deepcopy(cf_list) + rows\n\n return self._compute_dataframe_from_rows(func_dict, y_true, y_pred, rows)\n\n def _compute_dataframe_from_rows(self, func_dict, y_true, y_pred, rows):\n if len(rows) == 1:\n row_index = pd.Index(data=rows[0].classes, name=rows[0].name)\n else:\n row_index = pd.MultiIndex.from_product([x.classes for x in rows],\n names=[x.name for x in rows])\n\n if len(row_index) > _SUBGROUP_COUNT_WARNING_THRESHOLD:\n msg = _SUBGROUP_COUNT_WARNING.format(len(row_index))\n logger.warning(msg)\n\n result = pd.DataFrame(index=row_index, columns=func_dict.keys())\n for func_name in func_dict:\n for row_curr in row_index:\n mask = None\n if len(rows) > 1:\n mask = self._mask_from_tuple(row_curr, rows)\n else:\n # Have to force row_curr to be an unary tuple\n mask = self._mask_from_tuple((row_curr,), rows)\n\n # Only call the metric function if the mask is non-empty\n if sum(mask) > 0:\n curr_metric = func_dict[func_name].evaluate(y_true, y_pred, mask)\n result[func_name][row_curr] = curr_metric\n return result\n\n @property\n def overall(self) -> Union[Any, pd.Series, pd.DataFrame]:\n \"\"\"Return the underlying metrics evaluated on the whole dataset.\n\n Returns\n -------\n typing.Any or pandas.Series or pandas.DataFrame\n The exact type varies based on whether control 
featuers were\n provided and how the metric functions were specified.\n\n ======== ================ =================================\n Metrics Control Features Result Type\n ======== ================ =================================\n Callable None Return type of callable\n -------- ---------------- ---------------------------------\n Callable Provided Series, indexed by the subgroups\n of the conditional feature(s)\n -------- ---------------- ---------------------------------\n Dict None Series, indexed by the metric\n names\n -------- ---------------- ---------------------------------\n Dict Provided DataFrame. Columns are\n metric names, rows are subgroups\n of conditional feature(s)\n ======== ================ =================================\n\n The distinction applies even if the dictionary contains a\n single metric function. This is to allow for a consistent\n interface when calling programatically, while also reducing\n typing for those using Fairlearn interactively.\n \"\"\"\n if self._user_supplied_callable:\n if self.control_levels:\n return self._overall.iloc[:, 0]\n else:\n return self._overall.iloc[0]\n else:\n return self._overall\n\n @property\n def by_group(self) -> Union[pd.Series, pd.DataFrame]:\n \"\"\"Return the collection of metrics evaluated for each subgroup.\n\n The collection is defined by the combination of classes in the\n sensitive and control features. The exact type depends on\n the specification of the metric function.\n\n Returns\n -------\n pandas.Series or pandas.DataFrame\n When a callable is supplied to the constructor, the result is\n a :class:`pandas.Series`, indexed by the combinations of subgroups\n in the sensitive and control features.\n\n When the metric functions were specified with a dictionary (even\n if the dictionary only has a single entry), then the result is\n a :class:`pandas.DataFrame` with columns named after the metric\n functions, and rows indexed by the combinations of subgroups\n in the sensitive and control features.\n\n If a particular combination of subgroups was not present in the dataset\n (likely to occur as more sensitive and control features\n are specified), then the corresponding entry will be NaN.\n \"\"\"\n if self._user_supplied_callable:\n return self._by_group.iloc[:, 0]\n else:\n return self._by_group\n\n @property\n def control_levels(self) -> List[str]:\n \"\"\"Return a list of feature names which are produced by control features.\n\n If control features are present, then the rows of the :attr:`.by_group`\n property have a :class:`pandas.MultiIndex` index. 
This property\n identifies which elements of that index are control features.\n\n Returns\n -------\n List[str] or None\n List of names, which can be used in calls to\n :meth:`pandas.DataFrame.groupby` etc.\n \"\"\"\n return self._cf_names\n\n @property\n def sensitive_levels(self) -> List[str]:\n \"\"\"Return a list of the feature names which are produced by sensitive features.\n\n In cases where the :attr:`.by_group` property has a :class:`pandas.MultiIndex`\n index, this identifies which elements of the index are sensitive features.\n\n Returns\n -------\n List[str]\n List of names, which can be used in calls to\n :meth:`pandas.DataFrame.groupby` etc.\n \"\"\"\n return self._sf_names\n\n def group_max(self) -> Union[Any, pd.Series, pd.DataFrame]:\n \"\"\"Return the maximum value of the metric over the sensitive features.\n\n This method computes the maximum value over all combinations of\n sensitive features for each underlying metric function in the :attr:`.by_group`\n property (it will only succeed if all the underlying metric\n functions return scalar values). The exact return type depends on\n whether control features are present, and whether the metric functions\n were specified as a single callable or a dictionary.\n\n Returns\n -------\n typing.Any or pandas.Series or pandas.DataFrame\n The maximum value over sensitive features. The exact type\n follows the table in :attr:`.MetricFrame.overall`.\n \"\"\"\n if not self.control_levels:\n result = pd.Series(index=self._by_group.columns, dtype='object')\n for m in result.index:\n max_val = self._by_group[m].max()\n result[m] = max_val\n else:\n result = self._by_group.groupby(level=self.control_levels).max()\n\n if self._user_supplied_callable:\n if self.control_levels:\n return result.iloc[:, 0]\n else:\n return result.iloc[0]\n else:\n return result\n\n def group_min(self) -> Union[Any, pd.Series, pd.DataFrame]:\n \"\"\"Return the minimum value of the metric over the sensitive features.\n\n This method computes the minimum value over all combinations of\n sensitive features for each underlying metric function in the :attr:`.by_group`\n property (it will only succeed if all the underlying metric\n functions return scalar values). The exact return type depends on\n whether control features are present, and whether the metric functions\n were specified as a single callable or a dictionary.\n\n Returns\n -------\n typing.Any pandas.Series or pandas.DataFrame\n The minimum value over sensitive features. 
The exact type\n follows the table in :attr:`.MetricFrame.overall`.\n \"\"\"\n if not self.control_levels:\n result = pd.Series(index=self._by_group.columns, dtype='object')\n for m in result.index:\n min_val = self._by_group[m].min()\n result[m] = min_val\n else:\n result = self._by_group.groupby(level=self.control_levels).min()\n\n if self._user_supplied_callable:\n if self.control_levels:\n return result.iloc[:, 0]\n else:\n return result.iloc[0]\n else:\n return result\n\n def difference(self,\n method: str = 'between_groups') -> Union[Any, pd.Series, pd.DataFrame]:\n \"\"\"Return the maximum absolute difference between groups for each metric.\n\n This method calculates a scalar value for each underlying metric by\n finding the maximum absolute difference between the entries in each\n combination of sensitive features in the :attr:`.by_group` property.\n\n Similar to other methods, the result type varies with the\n specification of the metric functions, and whether control features\n are present or not.\n\n There are two allowed values for the ``method=`` parameter. The\n value ``between_groups`` computes the maximum difference between\n any two pairs of groups in the :attr:`.by_group` property (i.e.\n ``group_max() - group_min()``). Alternatively, ``to_overall``\n computes the difference between each subgroup and the\n corresponding value from :attr:`.overall` (if there are control\n features, then :attr:`.overall` is multivalued for each metric).\n The result is the absolute maximum of these values.\n\n Parameters\n ----------\n method : str\n How to compute the aggregate. Default is :code:`between_groups`\n\n Returns\n -------\n typing.Any or pandas.Series or pandas.DataFrame\n The exact type follows the table in :attr:`.MetricFrame.overall`.\n \"\"\"\n subtrahend = np.nan\n if method == 'between_groups':\n subtrahend = self.group_min()\n elif method == 'to_overall':\n subtrahend = self.overall\n else:\n raise ValueError(\"Unrecognised method '{0}' in difference() call\".format(method))\n\n return (self.by_group - subtrahend).abs().max(level=self.control_levels)\n\n def ratio(self,\n method: str = 'between_groups') -> Union[Any, pd.Series, pd.DataFrame]:\n \"\"\"Return the minimum ratio between groups for each metric.\n\n This method calculates a scalar value for each underlying metric by\n finding the minimum ratio (that is, the ratio is forced to be\n less than unity) between the entries in each\n column of the :attr:`.by_group` property.\n\n Similar to other methods, the result type varies with the\n specification of the metric functions, and whether control features\n are present or not.\n\n There are two allowed values for the ``method=`` parameter. The\n value ``between_groups`` computes the minimum ratio between\n any two pairs of groups in the :attr:`.by_group` property (i.e.\n ``group_min() / group_max()``). Alternatively, ``to_overall``\n computes the ratio between each subgroup and the\n corresponding value from :attr:`.overall` (if there are control\n features, then :attr:`.overall` is multivalued for each metric),\n expressing the ratio as a number less than 1.\n The result is the minimum of these values.\n\n Parameters\n ----------\n method : str\n How to compute the aggregate. 
Default is :code:`between_groups`\n\n Returns\n -------\n typing.Any or pandas.Series or pandas.DataFrame\n The exact type follows the table in :attr:`.MetricFrame.overall`.\n \"\"\"\n result = None\n if method == 'between_groups':\n result = self.group_min() / self.group_max()\n elif method == 'to_overall':\n if self._user_supplied_callable:\n tmp = self.by_group / self.overall\n result = tmp.transform(lambda x: min(x, 1/x)).min(level=self.control_levels)\n else:\n ratios = None\n\n if self.control_levels:\n # It's easiest to give in to the DataFrame columns preference\n ratios = self.by_group.unstack(level=self.control_levels) / \\\n self.overall.unstack(level=self.control_levels)\n else:\n ratios = self.by_group / self.overall\n\n def ratio_sub_one(x):\n if x > 1:\n return 1/x\n else:\n return x\n\n ratios = ratios.apply(lambda x: x.transform(ratio_sub_one))\n if not self.control_levels:\n result = ratios.min()\n else:\n result = ratios.min().unstack(0)\n else:\n raise ValueError(\"Unrecognised method '{0}' in ratio() call\".format(method))\n\n return result\n\n def _process_functions(self, metric, sample_params) -> Dict[str, FunctionContainer]:\n \"\"\"Get the underlying metrics into :class:`fairlearn.metrics.FunctionContainer` objects.\"\"\"\n self._user_supplied_callable = True\n func_dict = dict()\n if isinstance(metric, dict):\n self._user_supplied_callable = False\n s_p = dict()\n if sample_params is not None:\n if not isinstance(sample_params, dict):\n raise ValueError(_SAMPLE_PARAMS_NOT_DICT)\n\n sp_keys = set(sample_params.keys())\n mf_keys = set(metric.keys())\n if not sp_keys.issubset(mf_keys):\n raise ValueError(_SAMPLE_PARAM_KEYS_NOT_IN_FUNC_DICT)\n s_p = sample_params\n\n for name, func in metric.items():\n curr_s_p = None\n if name in s_p:\n curr_s_p = s_p[name]\n fc = FunctionContainer(func, name, curr_s_p)\n func_dict[fc.name_] = fc\n else:\n fc = FunctionContainer(metric, None, sample_params)\n func_dict[fc.name_] = fc\n return func_dict\n\n def _process_features(self, base_name, features, sample_array) -> List[GroupFeature]:\n \"\"\"Extract the features into :class:`fairlearn.metrics.GroupFeature` objects.\"\"\"\n result = []\n\n if isinstance(features, pd.Series):\n check_consistent_length(features, sample_array)\n result.append(GroupFeature(base_name, features, 0, None))\n elif isinstance(features, pd.DataFrame):\n for i in range(len(features.columns)):\n col_name = features.columns[i]\n if not isinstance(col_name, str):\n msg = _FEATURE_DF_COLUMN_BAD_NAME.format(col_name, type(col_name))\n raise ValueError(msg)\n column = features.iloc[:, i]\n check_consistent_length(column, sample_array)\n result.append(GroupFeature(base_name, column, i, None))\n elif isinstance(features, list):\n if np.isscalar(features[0]):\n f_arr = np.atleast_1d(np.squeeze(np.asarray(features)))\n assert len(f_arr.shape) == 1 # Sanity check\n check_consistent_length(f_arr, sample_array)\n result.append(GroupFeature(base_name, f_arr, 0, None))\n else:\n raise ValueError(_FEATURE_LIST_NONSCALAR)\n elif isinstance(features, dict):\n try:\n df = pd.DataFrame.from_dict(features)\n except ValueError as ve:\n raise ValueError(_SF_DICT_CONVERSION_FAILURE) from ve\n for i in range(len(df.columns)):\n col_name = df.columns[i]\n if not isinstance(col_name, str):\n msg = _FEATURE_DF_COLUMN_BAD_NAME.format(col_name, type(col_name))\n raise ValueError(msg)\n column = df.iloc[:, i]\n check_consistent_length(column, sample_array)\n result.append(GroupFeature(base_name, column, i, None))\n else:\n # Need to 
specify dtype to avoid inadvertent type conversions\n f_arr = np.squeeze(np.asarray(features, dtype=object))\n if len(f_arr.shape) == 1:\n check_consistent_length(f_arr, sample_array)\n result.append(GroupFeature(base_name, f_arr, 0, None))\n elif len(f_arr.shape) == 2:\n # Work similarly to pd.DataFrame(data=ndarray)\n for i in range(f_arr.shape[1]):\n col = f_arr[:, i]\n check_consistent_length(col, sample_array)\n result.append(GroupFeature(base_name, col, i, None))\n else:\n raise ValueError(_TOO_MANY_FEATURE_DIMS)\n\n return result\n\n def _mask_from_tuple(self, index_tuple, feature_list) -> np.ndarray:\n \"\"\"Generate a mask for the ``y_true``, ``y_pred`` and ``sample_params`` arrays.\n\n Given a tuple of feature values (which indexes the ``by_groups``\n DataFrame), generate a mask to select the corresponding samples\n from the input\n \"\"\"\n # Following are internal sanity checks\n assert isinstance(index_tuple, tuple)\n assert len(index_tuple) == len(feature_list)\n\n result = feature_list[0].get_mask_for_class(index_tuple[0])\n for i in range(1, len(index_tuple)):\n result = np.logical_and(\n result,\n feature_list[i].get_mask_for_class(index_tuple[i]))\n return result\n" ]
[ [ "pandas.Series", "pandas.MultiIndex.from_product", "numpy.isscalar", "sklearn.utils.check_consistent_length", "numpy.asarray", "pandas.Index", "pandas.DataFrame.from_dict" ] ]
aditya-vikram-parakala/MachineLearning_CSE574
[ "7816ebd6cc342d0c4405d45e771dd50e800c2463" ]
[ "logreg_hd_concat.py" ]
[ "\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport csv \nimport random\nimport math\nimport pandas as pd\n\n\n# In[2]:\n\n\nTrainingPercent = 80 # 80% of raw data \nValidationPercent = 10 # 10% of raw data\nTestPercent = 10 #10% of raw data \nIsSynthetic =False\ndef GenerateRawData(filePath, IsSynthetic): \n dataMatrix = [] \n with open(filePath, 'rU') as fi:\n reader = csv.reader(fi)\n for row in reader:\n dataRow = []\n for column in row:\n dataRow.append(float(column))\n dataMatrix.append(dataRow) \n \n #changedif IsSynthetic == False : #this is for deleting the columns in our data that contains 0's which would not contribute to calculation of the varience and is not invertable.\n changeddataMatrix = np.delete(dataMatrix, [0,10], axis=1)# we deletd 5 cols so left with 41 features out of 46 features.\n dataMatrix = np.transpose(changeddataMatrix) #we transpose the data matrix to simplify the further steps of matrix multiplication \n #print (\"Data Matrix Generated..\")\n return dataMatrix # each data row we have 1x41\n#print(Data_values.shape)\ndef GenerateTrainingDataMatrix(rawData, TrainingPercent = 80):\n T_len = int(math.ceil(len(rawData[0])*0.01*TrainingPercent))\n d2 = rawData[:,0:T_len] # generating the training data matrix\n #print(str(TrainingPercent) + \"% Training Data Generated..\")\n return d2\n\ndef GenerateValData(rawData, ValPercent, TrainingCount): #\n valSize = int(math.ceil(len(rawData[0])*ValPercent*0.01))\n V_End = TrainingCount + valSize\n dataMatrix = rawData[:,TrainingCount+1:V_End]\n #print (str(ValPercent) + \"% Val Data Generated..\") \n return dataMatrix\n\ndef GenerateValTargetVector(rawData, ValPercent, TrainingCount): \n valSize = int(math.ceil(len(rawData)*ValPercent*0.01))\n V_End = TrainingCount + valSize\n t =rawData[TrainingCount+1:V_End]\n #print (str(ValPercent) + \"% Val Target Data Generated..\")\n return t\n#Data_1= GenerateRawData(r'C:\\Users\\aditya vikram\\humandata_X_hd_concat.csv',IsSynthetic=False)\n#X = GenerateTrainingDataMatrix(Data_1, TrainingPercent )\n\ndef GetTargetVector(filePath):\n t = []\n with open(filePath, 'rU') as f:\n reader = csv.reader(f)\n for row in reader: \n t.append(int(row[0]))\n #print(\"Raw Training Generated..\")\n return t # we will get the values \n#target_values =GetTargetVector(r'C:\\Users\\aditya vikram\\humandata_t_hd_concat.csv')\n#y = GenerateValTargetVector(target_values, ValPercent, TrainingCount)\n\ndef GenerateTrainingTarget(rawTraining,TrainingPercent = 80): #given to use 80% of the dataset as training\n TrainingLen = int(math.ceil(len(rawTraining)*(TrainingPercent*0.01))) #calculate the length of target training set\n t = rawTraining[:TrainingLen] # loading the elements till the training length it has only one column\n #print(str(TrainingPercent) + \"% Training Target Generated..\")\n return t \n\n\ndef GenerateValData(rawData, ValPercent, TrainingCount): #\n valSize = int(math.ceil(len(rawData[0])*ValPercent*0.01))\n V_End = TrainingCount + valSize\n dataMatrix = rawData[:,TrainingCount+1:V_End]\n #print (str(ValPercent) + \"% Val Data Generated..\") \n return dataMatrix\n\ndef GenerateValTargetVector(rawData, ValPercent, TrainingCount): \n valSize = int(math.ceil(len(rawData)*ValPercent*0.01))\n V_End = TrainingCount + valSize\n t =rawData[TrainingCount+1:V_End]\n #print (str(ValPercent) + \"% Val Target Data Generated..\")\n return t\n\n\n# In[3]:\n\n\n#TrainingTarget = np.array(GenerateTrainingTarget(RawTarget,TrainingPercent))\n#TrainingData = 
GenerateTrainingDataMatrix(RawData,TrainingPercent)\n\n\n# In[4]:\n\n\nRawTarget = GetTargetVector(r'C:\\Users\\aditya vikram\\humandata_t_hd_concat.csv')\nRawData = GenerateRawData(r'C:\\Users\\aditya vikram\\humandata_X_hd_concat.csv',IsSynthetic)\n#RawData = RawData.loc[:, (~RawData.isin([0])).any(axis=0)]\n#RawData[~np.all(r == 0, axis=1)]\n# preparing the data of taining i.e. training data , training target accordingly\nTrainingTarget = np.array(GenerateTrainingTarget(RawTarget,TrainingPercent))\nTrainingData = GenerateTrainingDataMatrix(RawData,TrainingPercent)\nprint(TrainingTarget.shape)\nprint(TrainingData.shape)\n# preparing the validation data \nValDataAct = np.array(GenerateValTargetVector(RawTarget,ValidationPercent, (len(TrainingTarget))))\nValData = GenerateValData(RawData,ValidationPercent, (len(TrainingTarget)))\nprint(ValDataAct.shape)\nprint(ValData.shape)\n#Preparing the test data \nTestDataAct = np.array(GenerateValTargetVector(RawTarget,TestPercent, (len(TrainingTarget)+len(ValDataAct))))\nTestData = GenerateValData(RawData,TestPercent, (len(TrainingTarget)+len(ValDataAct)))\nprint(ValDataAct.shape)\nprint(ValData.shape)\nX=np.transpose(TrainingData)\nX_val=np.transpose(ValData)\nX_test=np.transpose(TestData)\ny=TrainingTarget\ny_val=ValDataAct\ny_test =TestDataAct\nprint(y.shape)\nprint(y_val.shape)\nprint(y_test.shape)\n\n\n# In[ ]:\n\n\n#source intro to data science website, referenced a part of the code \n\n\n# In[5]:\n\n\nclass LogisticRegression:\n def __init__(self, lr=0.01, num_iter=100000, fit_intercept=True, verbose=False):\n self.lr = lr\n self.num_iter = num_iter\n self.fit_intercept = fit_intercept\n \n def __add_intercept(self, X):\n intercept = np.ones((X.shape[0], 1))\n return np.concatenate((intercept, X), axis=1)\n \n def __sigmoid(self, z):\n return 1 / (1 + np.exp(-z))\n def __loss(self, h, y):\n return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()\n \n def fit(self, X, y):\n if self.fit_intercept:\n X = self.__add_intercept(X)\n \n # weights initialization\n self.theta = np.zeros(X.shape[1])\n \n for i in range(self.num_iter):\n z = np.dot(X, self.theta)\n h = self.__sigmoid(z)\n gradient = np.dot(X.T, (h - y)) / y.size\n self.theta -= self.lr * gradient\n \n #if(self.verbose == True and i % 10000 == 0):\n #z = np.dot(X, self.theta)\n #h = self.__sigmoid(z)\n #print(f'loss: {self.__loss(h, y)} \\t')\n \n def predict_prob(self, X):\n if self.fit_intercept:\n X = self.__add_intercept(X)\n \n return self.__sigmoid(np.dot(X, self.theta))\n \n def predict(self, X, threshold):\n return self.predict_prob(X) >= threshold\n\n\n# In[35]:\n\n\nmodel = LogisticRegression(lr=0.1, num_iter=3000)\nget_ipython().run_line_magic('time', 'model.fit(X, y)')\npreds = model.predict(X, 0.5)\n# accuracy\nprint(\"Accuracy HD TRAIN:\",(preds == y).mean())\n\n\n# In[33]:\n\n\nmodel = LogisticRegression(lr=0.1, num_iter=3000)\nget_ipython().run_line_magic('time', 'model.fit(X_val, y_val)')\npreds = model.predict(X_val, 0.5)\n# accuracy\nprint(\"Accuracy HD VAL:\",(preds == y_val).mean())\n\n\n\n# In[34]:\n\n\nmodel = LogisticRegression(lr=0.1, num_iter=3000)\nget_ipython().run_line_magic('time', 'model.fit(X_test, y_test)')\npreds = model.predict(X_test, 0.5)\n# accuracy\nprint(\"Accuracy HD TEST:\",(preds == y_test).mean())\n\n" ]
[ [ "numpy.ones", "numpy.transpose", "numpy.zeros", "numpy.dot", "numpy.exp", "numpy.log", "numpy.delete", "numpy.concatenate" ] ]
ana-simionescu/ddsp
[ "9f37ff66e79cf912c3377ba1beddb220196aa1a3" ]
[ "ddsp/synths_test.py" ]
[ "# Copyright 2020 The DDSP Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Tests for ddsp.synths.\"\"\"\n\nfrom ddsp import core\nfrom ddsp import synths\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\n\nclass AdditiveTest(tf.test.TestCase):\n\n def test_output_shape_is_correct(self):\n synthesizer = synths.Additive(\n n_samples=64000,\n sample_rate=16000,\n scale_fn=None,\n normalize_below_nyquist=True)\n batch_size = 3\n num_frames = 1000\n amp = tf.zeros((batch_size, num_frames, 1), dtype=tf.float32) + 1.0\n harmonic_distribution = tf.zeros(\n (batch_size, num_frames, 16), dtype=tf.float32) + 1.0 / 16\n f0_hz = tf.zeros((batch_size, num_frames, 1), dtype=tf.float32) + 16000\n\n output = synthesizer(amp, harmonic_distribution, f0_hz)\n\n self.assertAllEqual([batch_size, 64000], output.shape.as_list())\n\n\nclass FilteredNoiseTest(tf.test.TestCase):\n\n def test_output_shape_is_correct(self):\n synthesizer = synths.FilteredNoise(n_samples=16000)\n filter_bank_magnitudes = tf.zeros((3, 16000, 100), dtype=tf.float32) + 3.0\n output = synthesizer(filter_bank_magnitudes)\n\n self.assertAllEqual([3, 16000], output.shape.as_list())\n\n\nclass WavetableTest(tf.test.TestCase):\n\n def test_output_shape_is_correct(self):\n synthesizer = synths.Wavetable(\n n_samples=64000,\n sample_rate=16000,\n scale_fn=None)\n batch_size = 3\n num_frames = 1000\n n_wavetable = 1024\n amp = tf.zeros((batch_size, num_frames, 1), dtype=tf.float32) + 1.0\n wavetables = tf.zeros(\n (batch_size, num_frames, n_wavetable), dtype=tf.float32)\n f0_hz = tf.zeros((batch_size, num_frames, 1), dtype=tf.float32) + 440\n\n output = synthesizer(amp, wavetables, f0_hz)\n\n self.assertAllEqual([batch_size, 64000], output.shape.as_list())\n\n\nclass SinusoidalTest(tf.test.TestCase):\n\n def test_output_shape_is_correct(self):\n synthesizer = synths.Sinusoidal(n_samples=32000, sample_rate=16000)\n batch_size = 3\n num_frames = 1000\n n_partials = 10\n amps = tf.zeros((batch_size, num_frames, n_partials),\n dtype=tf.float32)\n freqs = tf.zeros((batch_size, num_frames, n_partials),\n dtype=tf.float32)\n\n output = synthesizer(amps, freqs)\n\n self.assertAllEqual([batch_size, 32000], output.shape.as_list())\n\n def test_frequencies_controls_are_bounded(self):\n depth = 10\n def freq_scale_fn(x):\n return core.frequencies_sigmoid(x, depth=depth, hz_min=0.0, hz_max=8000.0)\n\n synthesizer = synths.Sinusoidal(\n n_samples=32000, sample_rate=16000, freq_scale_fn=freq_scale_fn)\n batch_size = 3\n num_frames = 10\n n_partials = 100\n amps = tf.zeros((batch_size, num_frames, n_partials), dtype=tf.float32)\n freqs = tf.linspace(-100.0, 100.0, n_partials)\n freqs = tf.tile(freqs[tf.newaxis, tf.newaxis, :, tf.newaxis],\n [batch_size, num_frames, 1, depth])\n\n controls = synthesizer.get_controls(amps, freqs)\n freqs = controls['frequencies']\n lt_nyquist = (freqs <= 8000.0)\n gt_zero = (freqs >= 0.0)\n both_conditions = np.logical_and(lt_nyquist, gt_zero)\n\n 
self.assertTrue(np.all(both_conditions))\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "numpy.logical_and", "tensorflow.compat.v2.zeros", "tensorflow.compat.v2.test.main", "tensorflow.compat.v2.linspace", "numpy.all", "tensorflow.compat.v2.tile" ] ]
loramf/mlforhealthlabpub
[ "aa5a42a4814cf69c8223f27c21324ee39d43c404" ]
[ "alg/discriminative-jackknife/utils/parameters.py" ]
[ "\n# Copyright (c) 2020, Ahmed M. Alaa\n# Licensed under the BSD 3-clause license (see LICENSE.txt)\n\n# ---------------------------------------------------------\n# Helper functions and utilities for deep learning models\n# ---------------------------------------------------------\n\n\nfrom __future__ import absolute_import, division, print_function\n\nimport sys\n\nif not sys.warnoptions:\n import warnings\n warnings.simplefilter(\"ignore\")\n\nimport torch\nfrom torch import nn\n\nfrom influence.influence_utils import *\n\ntorch.manual_seed(1) \n\n\nACTIVATION_DICT = {\"ReLU\": torch.nn.ReLU(), \"Hardtanh\": torch.nn.Hardtanh(),\n \"ReLU6\": torch.nn.ReLU6(), \"Sigmoid\": torch.nn.Sigmoid(),\n \"Tanh\": torch.nn.Tanh(), \"ELU\": torch.nn.ELU(),\n \"CELU\": torch.nn.CELU(), \"SELU\": torch.nn.SELU(), \n \"GLU\": torch.nn.GLU(), \"LeakyReLU\": torch.nn.LeakyReLU(),\n \"LogSigmoid\": torch.nn.LogSigmoid(), \"Softplus\": torch.nn.Softplus()}\n\n\ndef build_architecture(base_model):\n\n modules = []\n\n if base_model.dropout_active:\n\n modules.append(torch.nn.Dropout(p=base_model.dropout_prob))\n\n modules.append(torch.nn.Linear(base_model.n_dim, base_model.num_hidden))\n modules.append(ACTIVATION_DICT[base_model.activation])\n\n for u in range(base_model.num_layers - 1):\n\n if base_model.dropout_active:\n\n modules.append(torch.nn.Dropout(p=base_model.dropout_prob))\n\n modules.append(torch.nn.Linear(base_model.num_hidden, base_model.num_hidden))\n modules.append(ACTIVATION_DICT[base_model.activation])\n\n modules.append(torch.nn.Linear(base_model.num_hidden, base_model.output_size))\n\n _architecture = nn.Sequential(*modules)\n\n return _architecture\n\n\ndef get_number_parameters(model):\n\n params_ = []\n\n for param in model.parameters():\n \n params_.append(param)\n \n return stack_torch_tensors(params_).shape[0] " ]
[ [ "torch.nn.LeakyReLU", "torch.nn.Linear", "torch.nn.Dropout", "torch.nn.LogSigmoid", "torch.manual_seed", "torch.nn.CELU", "torch.nn.Tanh", "torch.nn.ReLU6", "torch.nn.Hardtanh", "torch.nn.GLU", "torch.nn.Sequential", "torch.nn.ELU", "torch.nn.Sigmoid", "torch.nn.ReLU", "torch.nn.Softplus", "torch.nn.SELU" ] ]
albamr09/PythonML
[ "9848cf913a7cdb73d2b98a8ab7334c04f421ad87" ]
[ "pyml/supervised/SVM/SVM2.py" ]
[ "import numpy as np\n\n\"\"\"\n\n------------------------------------------------------------------------------------------------------------------------------------------------------\n\n SVM2\n\n------------------------------------------------------------------------------------------------------------------------------------------------------\n\n\n\n\"\"\"\n\nclass SVMV2():\n\n def __init__(self) -> None:\n \"\"\"Empty method\"\"\"\n pass\n\n def svmTrain(X, Y, C, kernelFunction, tol=1e-3, max_passes=5, args=()):\n \"\"\"\n Trains an SVM classifier using a simplified version of the SMO algorithm.\n\n Parameters\n ---------\n X : numpy ndarray\n (m x n) Matrix of training examples. Each row is a training example, and the\n jth column holds the jth feature.\n\n Y : numpy ndarray\n (m, ) A vector (1-D numpy array) containing 1 for positive examples and 0 for negative examples.\n\n C : float\n The standard SVM regularization parameter.\n\n kernelFunction : func\n A function handle which computes the kernel. The function should accept two vectors as\n inputs, and returns a scalar as output.\n\n tol : float, optional\n Tolerance value used for determining equality of floating point numbers.\n\n max_passes : int, optional\n Controls the number of iterations over the dataset (without changes to alpha)\n before the algorithm quits.\n\n args : tuple\n Extra arguments required for the kernel function, such as the sigma parameter for a\n Gaussian kernel.\n\n Returns\n -------\n model :\n The trained SVM model.\n\n Notes\n -----\n This is a simplified version of the SMO algorithm for training SVMs. In practice, if\n you want to train an SVM classifier, we recommend using an optimized package such as:\n\n - LIBSVM (http://www.csie.ntu.edu.tw/~cjlin/libsvm/)\n - SVMLight (http://svmlight.joachims.org/)\n - scikit-learn (http://scikit-learn.org/stable/modules/svm.html) which contains python wrappers\n for the LIBSVM library.\n \"\"\"\n # make sure data is signed int\n Y = Y.astype(int)\n # Dataset size parameters\n m, n = X.shape\n\n passes = 0\n E = np.zeros(m)\n alphas = np.zeros(m)\n b = 0\n\n # Map 0 to -1\n Y[Y == 0] = -1\n\n # Pre-compute the Kernel Matrix since our dataset is small\n # (in practice, optimized SVM packages that handle large datasets\n # gracefully will **not** do this)\n\n # We have implemented the optimized vectorized version of the Kernels here so\n # that the SVM training will run faster\n if kernelFunction.__name__ == 'linearKernel':\n # Vectorized computation for the linear kernel\n # This is equivalent to computing the kernel on every pair of examples\n K = np.dot(X, X.T)\n elif kernelFunction.__name__ == 'gaussianKernel':\n # vectorized RBF Kernel\n # This is equivalent to computing the kernel on every pair of examples\n X2 = np.sum(X**2, axis=1)\n K = X2 + X2[:, None] - 2 * np.dot(X, X.T)\n\n if len(args) > 0:\n K /= 2*args[0]**2\n\n K = np.exp(-K)\n else:\n K = np.zeros((m, m))\n for i in range(m):\n for j in range(i, m):\n K[i, j] = kernelFunction(X[i, :], X[j, :])\n K[j, i] = K[i, j]\n\n while passes < max_passes:\n num_changed_alphas = 0\n for i in range(m):\n E[i] = b + np.sum(alphas * Y * K[:, i]) - Y[i]\n\n if (Y[i]*E[i] < -tol and alphas[i] < C) or (Y[i]*E[i] > tol and alphas[i] > 0):\n # select the alpha_j randomly\n j = np.random.choice(list(range(i)) + list(range(i+1, m)), size=1)[0]\n\n E[j] = b + np.sum(alphas * Y * K[:, j]) - Y[j]\n\n alpha_i_old = alphas[i]\n alpha_j_old = alphas[j]\n\n if Y[i] == Y[j]:\n L = max(0, alphas[j] + alphas[i] - C)\n H = min(C, 
alphas[j] + alphas[i])\n else:\n L = max(0, alphas[j] - alphas[i])\n H = min(C, C + alphas[j] - alphas[i])\n\n if L == H:\n continue\n\n eta = 2 * K[i, j] - K[i, i] - K[j, j]\n\n # objective function positive definite, there will be a minimum along the direction\n # of linear equality constrain, and eta will be greater than zero\n # we are actually computing -eta here (so we skip of eta >= 0)\n if eta >= 0:\n continue\n\n alphas[j] -= Y[j] * (E[i] - E[j])/eta\n alphas[j] = max(L, min(H, alphas[j]))\n\n if abs(alphas[j] - alpha_j_old) < tol:\n alphas[j] = alpha_j_old\n continue\n alphas[i] += Y[i]*Y[j]*(alpha_j_old - alphas[j])\n\n b1 = b - E[i] - Y[i]*(alphas[i] - alpha_i_old) * K[i, j] \\\n - Y[j] * (alphas[j] - alpha_j_old) * K[i, j]\n\n b2 = b - E[j] - Y[i]*(alphas[i] - alpha_i_old) * K[i, j] \\\n - Y[j] * (alphas[j] - alpha_j_old) * K[j, j]\n\n if 0 < alphas[i] < C:\n b = b1\n elif 0 < alphas[j] < C:\n b = b2\n else:\n b = (b1 + b2)/2\n\n num_changed_alphas += 1\n if num_changed_alphas == 0:\n passes += 1\n else:\n passes = 0\n\n idx = alphas > 0\n model = {'X': X[idx, :],\n 'y': Y[idx],\n 'kernelFunction': kernelFunction,\n 'b': b,\n 'args': args,\n 'alphas': alphas[idx],\n 'w': np.dot(alphas * Y, X)}\n return model\n\n\n def svmPredict(model, X):\n \"\"\"\n Returns a vector of predictions using a trained SVM model.\n\n Parameters\n ----------\n model : dict\n The parameters of the trained svm model, as returned by the function svmTrain\n\n X : array_like\n A (m x n) matrix where each example is a row.\n\n Returns\n -------\n pred : array_like\n A (m,) sized vector of predictions {0, 1} values.\n \"\"\"\n # check if we are getting a vector. If so, then assume we only need to do predictions\n # for a single example\n if X.ndim == 1:\n X = X[np.newaxis, :]\n\n m = X.shape[0]\n p = np.zeros(m)\n pred = np.zeros(m)\n\n if model['kernelFunction'].__name__ == 'linearKernel':\n # we can use the weights and bias directly if working with the linear kernel\n p = np.dot(X, model['w']) + model['b']\n elif model['kernelFunction'].__name__ == 'gaussianKernel':\n # vectorized RBF Kernel\n # This is equivalent to computing the kernel on every pair of examples\n X1 = np.sum(X**2, 1)\n X2 = np.sum(model['X']**2, 1)\n K = X2 + X1[:, None] - 2 * np.dot(X, model['X'].T)\n\n if len(model['args']) > 0:\n K /= 2*model['args'][0]**2\n\n K = np.exp(-K)\n p = np.dot(K, model['alphas']*model['y']) + model['b']\n else:\n # other non-linear kernel\n for i in range(m):\n predictions = 0\n for j in range(model['X'].shape[0]):\n predictions += model['alphas'][j] * model['y'][j] \\\n * model['kernelFunction'](X[i, :], model['X'][j, :])\n p[i] = predictions\n\n pred[p >= 0] = 1\n return pred\n\n\n def linearKernel(x1, x2):\n \"\"\"\n Returns a linear kernel between x1 and x2.\n\n Parameters\n ----------\n x1 : numpy ndarray\n A 1-D vector.\n\n x2 : numpy ndarray\n A 1-D vector of same size as x1.\n\n Returns\n -------\n : float\n The scalar amplitude.\n \"\"\"\n return np.dot(x1, x2)\n\n\n def visualizeBoundaryLinear(X, y, model):\n \"\"\"\n Plots a linear decision boundary learned by the SVM.\n\n Parameters\n ----------\n X : array_like\n (m x 2) The training data with two features (to plot in a 2-D plane).\n\n y : array_like\n (m, ) The data labels.\n\n model : dict\n Dictionary of model variables learned by SVM.\n \"\"\"\n w, b = model['w'], model['b']\n xp = np.linspace(min(X[:, 0]), max(X[:, 0]), 100)\n yp = -(w[0] * xp + b)/w[1]\n\n plotData(X, y)\n pyplot.plot(xp, yp, '-b')\n\n\n def visualizeBoundary(X, y, 
model):\n \"\"\"\n Plots a non-linear decision boundary learned by the SVM and overlays the data on it.\n\n Parameters\n ----------\n X : array_like\n (m x 2) The training data with two features (to plot in a 2-D plane).\n\n y : array_like\n (m, ) The data labels.\n\n model : dict\n Dictionary of model variables learned by SVM.\n \"\"\"\n plotData(X, y)\n\n # make classification predictions over a grid of values\n x1plot = np.linspace(min(X[:, 0]), max(X[:, 0]), 100)\n x2plot = np.linspace(min(X[:, 1]), max(X[:, 1]), 100)\n X1, X2 = np.meshgrid(x1plot, x2plot)\n\n vals = np.zeros(X1.shape)\n for i in range(X1.shape[1]):\n this_X = np.stack((X1[:, i], X2[:, i]), axis=1)\n vals[:, i] = svmPredict(model, this_X)\n\n pyplot.contour(X1, X2, vals, colors='y', linewidths=2)\n pyplot.pcolormesh(X1, X2, vals, cmap='YlGnBu', alpha=0.25, edgecolors='None', lw=0)\n pyplot.grid(False)\n" ]
[ [ "numpy.sum", "numpy.zeros", "numpy.exp", "numpy.stack", "numpy.dot", "numpy.meshgrid" ] ]
w-sugar/maskrcnn-benchmark
[ "37d985c2c0b190bf76945b9f7a9530b855e370e5" ]
[ "maskrcnn_benchmark/engine/trainer.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nimport datetime\nimport logging\nimport os\nimport time\n\nimport torch\nimport torch.distributed as dist\nfrom tqdm import tqdm\n\nfrom maskrcnn_benchmark.data import make_data_loader\nfrom maskrcnn_benchmark.utils.comm import get_world_size, synchronize\nfrom maskrcnn_benchmark.utils.metric_logger import MetricLogger\nfrom maskrcnn_benchmark.engine.inference import inference\nfrom maskrcnn_benchmark.utils.visualize import print_dict\n\nfrom apex import amp\n\ndef reduce_loss_dict(loss_dict):\n \"\"\"\n Reduce the loss dictionary from all processes so that process with rank\n 0 has the averaged results. Returns a dict with the same fields as\n loss_dict, after reduction.\n \"\"\"\n world_size = get_world_size()\n if world_size < 2:\n return loss_dict\n with torch.no_grad():\n loss_names = []\n all_losses = []\n for k in sorted(loss_dict.keys()):\n loss_names.append(k)\n all_losses.append(loss_dict[k])\n all_losses = torch.stack(all_losses, dim=0)\n dist.reduce(all_losses, dst=0)\n if dist.get_rank() == 0:\n # only main process gets accumulated, so only divide by\n # world_size in this case\n all_losses /= world_size\n reduced_losses = {k: v for k, v in zip(loss_names, all_losses)}\n return reduced_losses\n\n\ndef do_train(\n cfg,\n model,\n data_loader,\n data_loader_val,\n optimizer,\n scheduler,\n checkpointer,\n device,\n checkpoint_period,\n test_period,\n arguments,\n distributed,\n vis_port\n):\n from visdom import Visdom\n vis = None\n if distributed:\n if dist.get_rank() == 0:\n vis = Visdom(server='http://127.0.0.1', port=vis_port)\n else:\n vis = Visdom(server='http://127.0.0.1', port=vis_port)\n logger = logging.getLogger(\"maskrcnn_benchmark.trainer\")\n logger.info(\"Start training\")\n meters = MetricLogger(delimiter=\" \")\n max_iter = len(data_loader)\n start_iter = arguments[\"iteration\"]\n model.train()\n start_training_time = time.time()\n end = time.time()\n\n iou_types = (\"bbox\",)\n if cfg.MODEL.MASK_ON:\n iou_types = iou_types + (\"segm\",)\n if cfg.MODEL.KEYPOINT_ON:\n iou_types = iou_types + (\"keypoints\",)\n dataset_names = cfg.DATASETS.TEST\n\n for iteration, (images, targets, _) in enumerate(data_loader, start_iter):\n \n if any(len(target) < 1 for target in targets):\n logger.error(f\"Iteration={iteration + 1} || Image Ids used for training {_} || targets Length={[len(target) for target in targets]}\" )\n continue\n data_time = time.time() - end\n iteration = iteration + 1\n arguments[\"iteration\"] = iteration\n\n images = images.to(device)\n targets = [target.to(device) for target in targets]\n\n loss_dict = model(images, targets)\n\n losses = sum(loss for loss in loss_dict.values())\n\n # reduce losses over all GPUs for logging purposes\n loss_dict_reduced = reduce_loss_dict(loss_dict)\n losses_reduced = sum(loss for loss in loss_dict_reduced.values())\n meters.update(loss=losses_reduced, **loss_dict_reduced)\n\n optimizer.zero_grad()\n # Note: If mixed precision is not used, this ends up doing nothing\n # Otherwise apply loss scaling for mixed-precision recipe\n with amp.scale_loss(losses, optimizer) as scaled_losses:\n scaled_losses.backward()\n optimizer.step()\n scheduler.step()\n\n batch_time = time.time() - end\n end = time.time()\n meters.update(time=batch_time, data=data_time)\n\n eta_seconds = meters.time.global_avg * (max_iter - iteration)\n eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))\n\n if iteration % 20 == 0 or iteration == max_iter:\n 
logger.info(\n meters.delimiter.join(\n [\n \"eta: {eta}\",\n \"iter: {iter}\",\n \"{meters}\",\n \"lr: {lr:.6f}\",\n \"max mem: {memory:.0f}\",\n ]\n ).format(\n eta=eta_string,\n iter=iteration,\n meters=str(meters),\n lr=optimizer.param_groups[0][\"lr\"],\n memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0,\n )\n )\n\n # 更新 loss 曲线\n loss_dict_print = loss_dict_reduced\n loss_dict_print['loss'] = losses_reduced\n print_dict(vis, loss_dict_print, iteration, need_plot=True)\n\n if iteration % checkpoint_period == 0:\n checkpointer.save(\"model_{:07d}\".format(iteration), **arguments)\n if data_loader_val is not None and test_period > 0 and iteration % test_period == 0:\n meters_val = MetricLogger(delimiter=\" \")\n synchronize()\n _ = inference( # The result can be used for additional logging, e. g. for TensorBoard\n model,\n # The method changes the segmentation mask format in a data loader,\n # so every time a new data loader is created:\n make_data_loader(cfg, is_train=False, is_distributed=(get_world_size() > 1), is_for_period=True),\n dataset_name=\"[Validation]\",\n iou_types=iou_types,\n box_only=False if cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY,\n device=cfg.MODEL.DEVICE,\n expected_results=cfg.TEST.EXPECTED_RESULTS,\n expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,\n output_folder=None,\n )\n synchronize()\n model.train()\n # with torch.no_grad():\n # # Should be one image for each GPU:\n # for iteration_val, (images_val, targets_val, _) in enumerate(tqdm(data_loader_val)):\n # images_val = images_val.to(device)\n # targets_val = [target.to(device) for target in targets_val]\n # loss_dict = model(images_val, targets_val)\n # losses = sum(loss for loss in loss_dict.values())\n # loss_dict_reduced = reduce_loss_dict(loss_dict)\n # losses_reduced = sum(loss for loss in loss_dict_reduced.values())\n # meters_val.update(loss=losses_reduced, **loss_dict_reduced)\n # synchronize()\n # logger.info(\n # meters_val.delimiter.join(\n # [\n # \"[Validation]: \",\n # \"eta: {eta}\",\n # \"iter: {iter}\",\n # \"{meters}\",\n # \"lr: {lr:.6f}\",\n # \"max mem: {memory:.0f}\",\n # ]\n # ).format(\n # eta=eta_string,\n # iter=iteration,\n # meters=str(meters_val),\n # lr=optimizer.param_groups[0][\"lr\"],\n # memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0,\n # )\n # )\n if iteration == max_iter:\n checkpointer.save(\"model_final\", **arguments)\n\n total_training_time = time.time() - start_training_time\n total_time_str = str(datetime.timedelta(seconds=total_training_time))\n logger.info(\n \"Total training time: {} ({:.4f} s / it)\".format(\n total_time_str, total_training_time / (max_iter)\n )\n )\n" ]
[ [ "torch.stack", "torch.distributed.get_rank", "torch.no_grad", "torch.cuda.max_memory_allocated", "torch.distributed.reduce" ] ]
scuervo91/reservoirpy
[ "a4db620baf3ff66a85c7f61b1919713a8642e6fc" ]
[ "reservoirpy/wellproductivitypy/pi/outflow.py" ]
[ "import numpy as np\nimport pandas as pd \nimport matplotlib.pyplot as plt\nfrom ...pvtpy.black_oil import Pvt,Oil,Water,Gas\nfrom scipy.optimize import root_scalar\nfrom .inflow import OilInflow, GasInflow\nfrom ...utils import intercept_curves\nfrom typing import Union\n\n## Incompressible pressure drop\ndef potential_energy_change(\n z1:Union[int,float]=None, \n z2=None, \n delta_z=None,\n length=None, \n ge=1, \n angle=None, \n inc=None,\n p1=0):\n \"\"\"potential_energy_change [ Δp PE accounts for the pressure change due to the weight of the column of fluid (the hydrostatic head); it\n will be zero for flow in a horizontal pipe.\n\n In this equation, Δz is the difference in elevation between positions 1 and 2, with z increasing upward. θ\n is defined as the angle between horizontal and the direction of flow. Thus, θ is +90° for upward, vertical\n flow, 0° for horizontal flow, and –90° for downward flow in a vertical well (Figure 7-4). For flow in a\n straight pipe of length L with flow direction θ,]\n\n Parameters\n ----------\n z1 : [type], optional\n [description], by default None\n z2 : [type], optional\n [description], by default None\n delta_z : [type], optional\n [description], by default None\n length : [type], optional\n [description], by default None\n ge : int, optional\n [description], by default 1\n angle : [type], optional\n [description], by default None\n inc : [type], optional\n [description], by default None\n p1 : int, optional\n [description], by default 0\n\n Returns\n -------\n [type]\n [description]\n \"\"\"\n\n # Assert height difference types\n if delta_z is None:\n if length is None:\n assert isinstance(z1,(float,int,np.ndarray,np.int64,np.float64)) and isinstance(z2,(float,int,np.ndarray,np.int64,np.float64)), f\"{type(z1)} {type(z2)}\"\n z1 = np.atleast_1d(z1)\n z2 = np.atleast_1d(z2)\n #assert z1.shape == (1,) and z2.shape == (1,)\n delta_z = z1-z2\n\n else:\n assert isinstance(length,(float,int,np.ndarray,np.int64,np.float64)) \n length = np.atleast_1d(length)\n #assert length.shape == (1,)\n\n if angle is None:\n assert isinstance(inc,(float,int,np.ndarray,np.int64,np.float64))\n inc = np.atleast_1d(inc)\n assert inc <= 90 and inc >= -90\n sign = np.sign(inc)\n\n angle = (90 - np.abs(inc)) * sign\n else:\n # Assert angle between -90 and 90\n assert isinstance(angle,(float,int,np.ndarray,np.int64,np.float64))\n angle = np.atleast_1d(angle)\n assert angle <= 90 and angle >= -90 \n\n delta_z = length * np.sin(np.radians(angle))\n\n else:\n assert isinstance(delta_z,(float,int,np.ndarray,np.int64,np.float64))\n delta_z = np.atleast_1d(delta_z)\n #assert delta_z.shape == (1,)\n\n\n #Assert ge be positive\n assert isinstance(ge,(float,int,np.ndarray,np.int64,np.float64)) and ge>0, f\"{ge} {type(ge)} not allowed\"\n\n #Calculate Delta P\n delta_p = 0.433 * ge * delta_z\n\n #Calculate P2\n p2 = p1 + delta_p\n\n return delta_p, p2\n\ndef kinetic_energy_change(d1=None,d2=None, ge=1,rate=None,p1=0):\n \"\"\"\n Δp KE is the pressure drop resulting from a change in the velocity of the fluid between positions 1 and 2.\n It will be zero for an incompressible fluid unless the cross-sectional area of the pipe is different at the\n two positions of interest.\n\n Petroleum Production Systems, Economides. Chapter 7 7.2.3.2. Δp KE, the Pressure Drop Due to Kinetic Energy Change. 
Page 172\n\n \"\"\"\n\n assert isinstance(d1,(float,int,np.ndarray,np.int64,np.float64)) and isinstance(d2,(float,int,np.ndarray,np.int64,np.float64))\n d1 = np.atleast_1d(d1)\n d2 = np.atleast_1d(d2)\n\n\n #Assert Specifi Gravity be positive\n assert isinstance(ge,(float,int,np.ndarray,np.int64,np.float64)) and ge>0\n ge = np.atleast_1d(ge)\n\n\n # Rate in bbl/d\n assert isinstance(rate,(float,int,np.ndarray,np.int64,np.float64)) and rate>=0\n rate = np.atleast_1d(rate) \n\n #Estimate Density in lb/ft3\n rho = 62.4 * ge\n\n #Estimate delta Pressure in psi\n delta_p = 1.53e-8 * np.power(rate,2) * rho * ((1/np.power(d1,4))-(1/np.power(d2,4)))\n\n p2 = p1 + delta_p\n\n return delta_p, p2\n\ndef reynolds_number(rate,rho,d,mu):\n \"\"\"\n Reynolds Number where q is in bbl/d, ρ in lb m /ft 3 , D in in., and μ in cp.\n \"\"\" \n nre = (1.48 * rate * rho) / (d * mu)\n\n return nre\n\ndef frictional_pressure_drop(\n rate=None, \n epsilon=0.001,\n ge=1,\n d=None, \n mu=1, \n length=None):\n\n # Rate in bbl/d\n assert isinstance(rate,(float,int,np.ndarray,np.int64,np.float64)) and rate>=0\n rate = np.atleast_1d(rate) \n\n # pipe relative roughness\n assert isinstance(epsilon,(float,int,np.ndarray,np.int64,np.float64))\n epsilon = np.atleast_1d(epsilon) \n\n #Assert Specifi Gravity be positive\n assert isinstance(ge,(float,int,np.ndarray,np.int64,np.float64)) and ge>0\n ge = np.atleast_1d(ge)\n\n assert isinstance(d,(float,int,np.ndarray,np.int64,np.float64))\n d = np.atleast_1d(d)\n\n assert isinstance(mu,(float,int,np.ndarray,np.int64,np.float64))\n mu = np.atleast_1d(mu)\n\n assert isinstance(length,(float,int,np.ndarray,np.int64,np.float64))\n length = np.atleast_1d(length)\n\n #Estimate Density in lb/ft3\n rho = 62.4 * ge\n\n #Reynolds Number\n nre = reynolds_number(rate,rho,d,mu)\n\n #Friction Factor\n if nre == 0:\n ff = 0\n else:\n ff = np.power((1/(-4*np.log10((epsilon/3.7065)-(5.0452/nre)*np.log10((np.power(epsilon,1.1098)/2.8257)+np.power(7.149/nre,0.8981))))),2)\n\n #Velocity ft/s\n u = (4*rate*5.615)/(np.pi*np.power(d/12,2)*86400)\n\n delta_p = (2 * ff * rho * np.power(u,2) * length)/(32.17 * (d/12) * 144)\n delta_p *= -1\n return delta_p\n\n\n\ndef one_phase_pressure_profile(\n p1=0,\n ge=1,\n epsilon=0.001,\n md=None,\n tvd=None,\n d = None,\n rate = None,\n mu=None,\n backwards=1\n ):\n\n assert isinstance(md,(int,float,list,np.ndarray))\n md = np.atleast_1d(md)\n \n if tvd is None:\n tvd = md\n else:\n assert isinstance(tvd,(int,float,list,np.ndarray))\n tvd = np.atleast_1d(tvd)\n \n assert isinstance(d,(int,float,list,np.ndarray))\n if isinstance(d,(int,float)):\n d = np.full(md.shape,d)\n else:\n d = np.atleast_1d(d)\n \n assert isinstance(rate,(int,float, np.ndarray))\n rate = np.atleast_1d(rate)\n assert isinstance(mu,(int,float, np.ndarray))\n mu = np.atleast_1d(mu)\n assert isinstance(p1,(int,float, np.ndarray))\n p1 = np.atleast_1d(p1)\n assert isinstance(ge,(int,float, np.ndarray))\n ge = np.atleast_1d(ge)\n assert isinstance(epsilon,(int,float, np.ndarray))\n epsilon = np.atleast_1d(epsilon)\n\n assert md.shape[0] == tvd.shape[0] == d.shape[0]\n\n n = md.shape[0]\n\n #Create arrays\n pressure = np.zeros(n)\n ppe = np.zeros(n)\n pke = np.zeros(n)\n pf = np.zeros(n)\n delta_p = np.zeros(n)\n gradient = np.zeros(n)\n\n pressure[0] = p1\n\n for i in range(1,n):\n\n #Potential Energy Change\n ppe[i], _ = potential_energy_change(\n z1=tvd[i-1],\n z2=tvd[i],\n ge= ge,\n )\n\n #Kinetic Energy Change\n pke[i], _ = kinetic_energy_change(\n d1=d[i-1],\n d2=d[i],\n rate=rate,\n ge=ge,\n 
)\n\n #Frictional Pressure drop\n pf[i] = frictional_pressure_drop(\n rate=rate, \n epsilon=epsilon,\n ge=ge,\n d=d[i], \n mu=mu, \n length=np.abs(md[i-1]-md[i])\n ) * backwards\n\n delta_p[i] = ppe[i] + pke[i] + pf[i]\n pressure[i] = pressure[i-1] + delta_p[i]\n gradient[i] = (pressure[i] - pressure[i-1])/np.abs(tvd[i] - tvd[i-1])\n \n # Create dataframe\n pressure_profile = pd.DataFrame({\n 'md':md,\n 'tvd':tvd,\n 'diameter':d,\n 'pressure':pressure,\n 'ppe': ppe,\n 'pke': pke,\n 'pf' : pf,\n 'delta_p': delta_p,\n 'gradient': gradient\n }).set_index('md')\n \n p2 = pressure[-1]\n\n return pressure_profile, p2\n\n\n## Gas Outflow functions\n\ndef gas_pressure_profile_correlation(thp,sg,depth):\n assert isinstance(thp,(float,int,np.ndarray,np.int64,np.float64))\n thp = np.atleast_1d(thp)\n assert thp.ndim == 1\n\n assert isinstance(sg,(float,int,np.ndarray,np.int64,np.float64))\n sg = np.atleast_1d(sg)\n assert sg.shape == (1,)\n\n assert isinstance(depth,(list,float,int,np.ndarray))\n depth = np.atleast_1d(depth)\n assert sg.ndim == 1\n\n pwf = thp*np.exp(3.47e-5*depth)\n\n return pwf\n\n\n\ndef gas_pressure_profile(\n md = None, \n inc = None, \n thp = None, \n rate = None, \n gas_obj = None,\n di=2.99,\n surf_temp=80,\n temp_grad=1,\n epsilon = 0.0006, \n tol = 0.05, \n max_iter=20):\n \"\"\"\n To calculate the pressure drop in a gas well, the compressibility of the fluid must be considered. When\n the fluid is compressible, the fluid density and fluid velocity vary along the pipe, and these variations\n must be included when integrating the mechanical energy balance equation.\n\n Petroleum Production Systems, Economides. Chapter 7 7.3. Single-Phase Flow of a Compressible, Newtonian Fluid. Page 175\n \"\"\"\n # Assert the right types and shapes for input\n assert isinstance(md, (np.ndarray,pd.Series))\n md = np.atleast_1d(md)\n assert md.ndim ==1\n\n assert isinstance(inc, (int,float,np.ndarray,pd.Series))\n if isinstance(inc,np.ndarray):\n assert inc.shape == md.shape\n else:\n inc = np.full(md.shape,inc)\n\n angle = np.radians(90 - inc) \n\n assert isinstance(thp, (int,np.int64,np.float64,float,np.ndarray)), f'{type(thp)} not accepted'\n thp = np.atleast_1d(thp)\n assert thp.shape == (1,)\n\n assert isinstance(gas_obj,Gas) and gas_obj.pvt is not None\n\n assert isinstance(di, (int,float,np.ndarray))\n if isinstance(di,np.ndarray):\n assert di.shape == md.shape\n else:\n di = np.full(md.shape,di)\n\n assert isinstance(rate, (int,float,np.ndarray))\n rate = np.atleast_1d(rate)\n assert rate.shape == (1,)\n\n assert gas_obj.sg is not None\n\n #Create the variables\n\n pressure_profile = np.zeros(md.shape)\n temperature_profile = np.zeros(md.shape)\n pressure_gradient = np.zeros(md.shape)\n pressure_profile[0] = thp\n temperature_profile[0] = surf_temp\n\n interations = np.zeros(md.shape)\n\n if gas_obj.chromatography is not None:\n df_rho = gas_obj.chromatography.get_rhog(p=thp,t=surf_temp, rhog_method='real_gas')\n else:\n df_rho = gas_obj.pvt.interpolate(thp,property='rhog')\n\n grad_guess = df_rho['rhog'].values*(0.433/62.4)\n\n #Loop over depth\n for i in range(1,md.shape[0]):\n err = tol + 0.01\n dz = np.sin(angle[i])*(md[i]-md[i-1])\n gas_sg = gas_obj.sg\n it = 0\n while err>= tol and it <= max_iter:\n p_guess = grad_guess*(md[i]-md[i-1])*np.sin(angle[i]) + pressure_profile[i-1]\n\n #Interpolate pvt\n df_pvt = gas_obj.pvt.interpolate(p_guess)\n\n #Reynolds Number\n #nre = (4*28.97*gas_obj.sg*rate*14.7)/(np.pi*di[i]*df_pvt['mug'].values*10.73*520)\n nre = 
20.09*(gas_sg*rate)/(di[i]*df_pvt['mug'].values)\n\n #Friction Factor\n friction = np.power((1/(-4*np.log10((epsilon/3.7065)-(5.0452/nre)*np.log10((np.power(epsilon,1.1098)/2.8257)+np.power(7.149/nre,0.8981))))),2)\n\n #Temperature\n temperature_profile[i] = dz * (temp_grad/100) + temperature_profile[i-1]\n\n #S\n s = (-0.0375*gas_obj.sg*dz)/(df_pvt['z'].values*(temperature_profile[i]+460))\n\n #Calculate next pressure by parts for easily read\n a = np.exp(-s) * np.power(pressure_profile[i-1],2)\n b = (friction*np.power(df_pvt['z'].values*(temperature_profile[i]+460)*rate,2))/(np.sin(angle[i])*np.power(di[i],5))\n c = 1 - np.exp(-s)\n\n p_new = np.sqrt(a - (2.685e-3*b*c))\n grad_new = (p_new - pressure_profile[i-1])/dz\n\n err = np.abs(grad_guess-grad_new)/grad_new\n grad_guess = grad_new\n it +=1\n \n pressure_gradient[i] = grad_new\n pressure_profile[i] = p_new\n interations[i] = it\n\n df_dict = {\n 'pressure':pressure_profile,\n 'pressure_gradient': pressure_gradient,\n 'temperature': temperature_profile,\n 'iterations': interations\n }\n\n df = pd.DataFrame(df_dict, index = md)\n pwf = pressure_profile[-1]\n\n return df, pwf\n\ndef gas_upward_pressure(\n md = None, \n inc = None, \n pwf = None, \n rate = None, \n gas_obj = None,\n di=2.99,\n surf_temp=80,\n temp_grad=1,\n epsilon = 0.0006, \n tol = 0.05, \n max_iter=20,\n guess=None,\n grad_guess = [0.02,0.05]\n):\n\n if guess is None:\n grad = np.atleast_1d(grad_guess)\n delta_h = np.abs(md[-1] - md[0])\n guess = pwf - grad * delta_h\n else:\n assert isinstance(guess,(list,np.ndarray))\n guess = np.atleast_1d(guess)\n\n def solve(x):\n _,_pwf = gas_pressure_profile(\n md = md, \n inc = inc, \n thp = x, \n rate = rate, \n gas_obj = gas_obj,\n di=di,\n surf_temp=surf_temp,\n temp_grad=temp_grad,\n epsilon = epsilon, \n tol = tol, \n max_iter=max_iter,\n )\n\n return pwf - _pwf\n\n sol = root_scalar(solve, x0=guess[0],x1=guess[1])\n\n return sol.root\n\ndef gas_outflow_curve(\n md = None, \n inc = None, \n thp = None, \n gas_obj = None,\n rate=None,\n min_rate=100,\n max_rate=8000,\n n_rate=20,\n di=2.99,\n surf_temp=80,\n temp_grad=1,\n epsilon = 0.0006, \n tol = 0.05, \n max_iter=20,\n operating_point = None,\n op_n = 30\n ):\n\n # Assert the right types and shapes for input\n assert isinstance(md, (np.ndarray,pd.Series)) and md.ndim ==1\n md = np.atleast_1d(md)\n\n assert isinstance(inc, (int,float,np.ndarray,pd.Series))\n if isinstance(inc,np.ndarray):\n assert inc.shape == md.shape\n else:\n inc = np.full(md.shape,inc)\n\n angle = np.radians(90 - inc) \n\n assert isinstance(thp, (int,float,list,np.ndarray))\n thp = np.atleast_1d(thp)\n assert thp.ndim == 1\n\n assert isinstance(gas_obj,Gas) and gas_obj.pvt is not None\n\n assert isinstance(di, list)\n\n assert isinstance(rate, (int,float,list,np.ndarray,type(None)))\n if rate is None:\n rate = np.linspace(min_rate,max_rate,n_rate)\n else:\n rate = np.atleast_1d(rate)\n assert rate.ndim == 1\n\n assert gas_obj.sg is not None\n\n pwf = np.zeros(rate.shape[0]*thp.shape[0]*len(di))\n thp_arr = np.zeros(pwf.shape)\n di_arr = np.zeros(pwf.shape)\n gas_arr = np.zeros(pwf.shape)\n name_list = []\n i = 0\n for p in thp:\n for d in di:\n for q in rate:\n _,pwf[i] = gas_pressure_profile(\n md = md,\n inc = inc,\n thp = p,\n rate = q,\n gas_obj = gas_obj,\n surf_temp=surf_temp,\n temp_grad=temp_grad,\n di=d\n )\n gas_arr[i] = q\n thp_arr[i] = p\n di_arr[i] = d\n case_name = f'thp-{p}_di-{d}'\n name_list.append(case_name)\n i += 1\n\n #df = pd.DataFrame(pwf,columns=name_list,index=rate)\n 
arr=np.column_stack((pwf,thp_arr,di_arr))\n df = pd.DataFrame(arr,columns=['pwf','thp','di'],index=gas_arr)\n df['case'] = name_list\n df.index.name = 'gas'\n\n op = pd.DataFrame()\n if operating_point is not None:\n inflow = operating_point.df\n\n for case in df['case'].unique():\n df_case = df[df['case']==case]\n\n points, idx = intercept_curves(inflow['q'],inflow['p'],df_case.index,df_case['pwf'], n=op_n)\n\n points_df = pd.DataFrame(points[[-1],:], columns=['q','p'])\n points_df['case'] = case\n points_df['idx'] = idx\n\n op = op.append(points_df)\n \n op = op.merge(df.groupby('case').mean(), left_on='case', right_on='case')\n\n return df, op\n\n### Multiphase Pressure Gradients\n\ndef flow_regime_plot(\n ql=None, \n qg=None,\n d=2.99,\n sg_liquid = 1,\n surface_tension=30,\n ax=None,\n method = 'duns_ros',\n **kwargs\n ):\n \"\"\"\n Plot Flow Regime from Duns and Ros Flow Regime Map\n \n Coordinates extracted from Figure7-10 Duns and Ros Flow Regime Map\n https://apps.automeris.io/wpd/\n\n Petroleum Production Systems, Economides. Chapter 7 7.2.3.2. Δp KE, the Pressure Drop Due to Kinetic Energy Change. Page 84\n\n \"\"\"\n if d is not None:\n assert isinstance(d,(int,float,list,np.ndarray,pd.Series))\n d = np.atleast_1d(d)\n # Estimate Cross section Area [ft2] from diameter [in]\n a = np.power((d*0.5)/12,2)*np.pi\n\n if ql is not None:\n assert isinstance(ql,(int,float,list,np.ndarray,pd.Series))\n ql = np.atleast_1d(ql)\n\n #Liquid velocity. Convert bbl/d to ft3/s then divide area. Result velocity in ft/s\n usl = (ql * 5.616 * (1/86400))/a\n #Calculate the dimensionless numbers for each phase\n nvl = 1.938 * usl * np.power((sg_liquid*62.4)/surface_tension,0.25)\n\n if qg is not None:\n assert isinstance(ql,(int,float,list,np.ndarray,pd.Series))\n qg = np.atleast_1d(qg)\n\n #Gas velocity. Convert ft3/d to ft3/s then divide area. 
Result velocity in ft/s\n usg = (qg * (1/86400))/a\n nvg = 1.938 * usg * np.power((sg_liquid*62.4)/surface_tension,0.25)\n\n if method == 'duns_ros':\n fax= ax or plt.gca()\n region_1_2 = np.array([\n [1.1753722651306362, 0.1082636733874053],\n [1.1913061720030635, 0.16102620275609392],\n [1.3268047497147244, 0.23950266199874834],\n [1.4777148689707504, 0.35154183187529914],\n [1.7604108438655526, 0.5228664844415476],\n [2.1544346900318843, 0.7880462815669913],\n [2.8585141796844757, 1.2358165955824107],\n [3.545745842465605, 1.790084628235539],\n [5.529553425383406, 3.2470894518548166],\n [8.507942799627454, 5.512889788770675],\n [16.68100537200059, 11.566937549363251],\n [29.76351441631322, 20.43359717856943],\n [61.58482110660267, 39.079952122756026],\n [41.11829402435837, 27.703123342457815],\n [79.53985507023424, 48.93900918477497],\n ])\n\n region_2_t = np.array([\n [53.10631887314356, 0.10543589908346815],\n [59.146605445917515, 0.18139306939110614],\n [66.7669293918757, 0.36097012876068046],\n [80.61813527211957, 0.7674630429274295],\n [104.12232560483065, 1.5475873545578884],\n [141.92103954525945, 2.7338936055226313],\n [270.8622850933671, 5.9684569951223105],\n [204.14630347954724, 4.230939172613499],\n [340.53655850163904, 7.674630429274299],\n [503.2159359259993, 12.195704601594414],\n [714.1692874235849, 18.380944176677932],\n [922.3851039358485, 23.324701361610806],\n ])\n\n region_t_3 = np.array([\n [92.23851039358486, 0.10684043121253317],\n [97.34285811778867, 0.15475873545578891],\n [105.53385749880759, 0.24269312356542563],\n [115.96514767613999, 0.41204298882016666],\n [136.30221830031346, 0.7278953843983147],\n [183.29807108324394, 1.2358165955824107],\n [263.6650898730361, 2.271547585601246],\n [364.25331154496416, 4.120429888201667],\n [531.0631887314356, 6.995642156712631],\n [714.1692874235849, 11.264816923358868],\n [947.5632026539927, 18.139306939110632],\n ])\n\n fax.plot(region_1_2[:,0],region_1_2[:,1], color='black',linestyle='--')\n fax.plot(region_2_t[:,0],region_2_t[:,1], color='black',linestyle='--')\n fax.plot(region_t_3[:,0],region_t_3[:,1], color='black',linestyle='--')\n fax.set_ylabel('Nvl')\n fax.set_ylabel('Nvg')\n fax.set_title('Duns and Ros Flow Regime Map')\n fax.set_xlim([0.1,1000])\n fax.set_ylim([0.1,100])\n annot = kwargs.pop('ann',True)\n font = kwargs.pop('fontsize',8)\n\n if annot:\n fax.annotate(\n f\"Region I \\n Bubble Flow or \\n low-velocity slug flow\",\n xy = (0.2,0.15),\n xycoords='data',\n xytext=(0, 0), \n textcoords='offset points',\n bbox={'boxstyle':'round', 'fc':'0.8'},\n fontsize = font\n )\n\n fax.annotate(\n f\"Region II \\n High-velocity Flow or \\n churn flow\",\n xy = (2,0.15),\n xycoords='data',\n xytext=(0, 0), \n textcoords='offset points',\n bbox={'boxstyle':'round', 'fc':'0.8'},\n fontsize = font\n )\n\n fax.annotate(\n f\"Region III \\n Annular Flow Pattern\",\n xy = (300,0.15),\n xycoords='data',\n xytext=(0, 0), \n textcoords='offset points',\n bbox={'boxstyle':'round', 'fc':'0.8'},\n fontsize = font\n )\n \n if ql is not None and qg is not None:\n fax.scatter(nvg,nvl,color='blue',marker = \"^\")\n\n\n if method == 'taitel_dukler':\n fax= ax or plt.gca()\n\n region_E = np.array([\n [14.977474763452001, 0.0022033318988979545],\n [14.977474763452001, 0.006595844345274293],\n [14.977474763452001, 0.04746934676639568],\n [14.777148689707504, 0.9165263295637442],\n [14.977474763452001, 6.87270243904312],\n [14.977474763452001, 15.857064005032758]\n ])\n\n region_A = np.array([\n [0.08858667904100832, 
0.0022372323125884317],\n [0.08858667904100832, 0.005091596044287256],\n [0.0986624843178949, 0.018460289732281962],\n [0.11137395078578621, 0.04142593768347061],\n [0.1326804749714725, 0.08679099331751502],\n [0.1668100537200059, 0.18431459769950134],\n [0.21256187881919958, 0.3275265038954424],\n [0.30575961084169306, 0.695276382058884],\n [0.46415888336127775, 1.2691784682206282],\n [0.7336637748600019, 2.019816384578137],\n [0.9223851039358476, 2.412109197346714]\n ])\n region_B = np.array([\n [0.028585141796844758, 3.4805610999729812],\n [0.0531063188731435, 3.5220947122633963],\n [0.08623280529014943, 3.517016970779084],\n [0.24649769667586238, 3.2292570594299215],\n [0.8978760230238888, 2.4455928433916867],\n [2.0971883035581533, 1.7556200043179786],\n [5.239601353002639, 4.20919831000811],\n [10.412232560483055, 7.572933314656229],\n [14.579502008614657, 10.657087726496014],\n ])\n region_D = np.array([\n [0.26366508987303583, 0.44861391200434203],\n [0.30575961084169306, 0.4018483957905594],\n [0.4398198780581129, 0.2288467215238852],\n [0.5032159359259996, 0.16920697751727592],\n [0.5835551032264551, 0.11058672774921392],\n [0.6676692939187563, 0.05647578739286295],\n [0.6951927961775606, 0.03743162248826758],\n [0.7536903980898542, 0.02284801683862376],\n [0.7639077845044221, 0.015565548854263186],\n [0.7436096708208817, 0.011357807043115235],\n [0.7847599703514607, 0.006933286608265855],\n [0.7536903980898542, 0.0027304200384003397],\n [0.7436096708208817, 0.002162999360197944],\n ])\n\n fax.plot(region_A[:,0],region_A[:,1], color='black',linestyle='--')\n fax.plot(region_B[:,0],region_B[:,1], color='black',linestyle='--')\n fax.plot(region_D[:,0],region_D[:,1], color='black',linestyle='--')\n fax.plot(region_E[:,0],region_E[:,1], color='black',linestyle='--')\n fax.set_ylabel('Usg [m/s]')\n fax.set_ylabel('Usl [m/s]')\n fax.set_title('Taitel-Dukler flow regime map')\n fax.set_xlim([0.01,100])\n fax.set_ylim([0.001,10])\n if ql is not None and qg is not None:\n fax.scatter(usg*0.3048,usl*0.3048,color='blue',marker = \"^\")\n\n\n fax.set_yscale('log')\n fax.set_xscale('log')\n\ndef hb_correlation(\n pressure=None, #Pressure [psi]\n temperature=None, #Temperature [F]\n liquid_rate=None, # Liquid Flow [bbl/d]\n gas_rate=None, # gas flow [kscfd]\n ten_liquid=None, #Surface tension dyne/cm2\n rho_liquid=None, # density lb/ft3\n rho_gas=None, # density lb/ft3\n mu_liquid=None, # Viscosity [cp]\n mu_gas=None, # Viscosity [cp]\n z=1, # Gas compressibility Factor\n di=None, # Diameter,\n epsilon = 0.0006,\n):\n\n \"\"\"\n The modified Hagedorn and Brown method (mH-B) is an empirical two-phase flow correlation based\n on the original work of Hagedorn and Brown (1965). The heart of the Hagedorn-Brown method is a\n correlation for liquid holdup; the modifications of the original method include using the no-slip holdup\n when the original empirical correlation predicts a liquid holdup value less than the no-slip holdup and\n the use of the Griffith correlation (Griffith and Wallis, 1961) for the bubble flow regime.\n\n Petroleum Production Systems, Economides. Chapter 7 7.4.3.1. 
The Modified Hagedorn and Brown Method Page 187\n\n \"\"\"\n #Check types and converto to np.ndarray\n assert isinstance(pressure,(int,float,np.ndarray,np.float64,np.int64))\n pressure = np.atleast_1d(pressure)\n\n assert isinstance(temperature,(int,float,np.ndarray,np.float64,np.int64))\n temperature = np.atleast_1d(temperature)\n\n assert isinstance(liquid_rate,(int,float,np.ndarray,np.float64,np.int64))\n liquid_rate = np.atleast_1d(liquid_rate)\n\n assert isinstance(gas_rate,(int,float,np.ndarray,np.float64,np.int64))\n gas_rate = np.atleast_1d(gas_rate)\n\n assert isinstance(ten_liquid,(int,float,np.ndarray,np.float64,np.int64))\n ten_liquid = np.atleast_1d(ten_liquid)\n\n assert isinstance(rho_liquid,(int,float,np.ndarray,np.float64,np.int64))\n rho_liquid = np.atleast_1d(rho_liquid)\n\n assert isinstance(rho_gas,(int,float,np.ndarray,np.float64,np.int64))\n rho_gas = np.atleast_1d(rho_gas)\n\n assert isinstance(mu_liquid,(int,float,np.ndarray,np.float64,np.int64))\n mu_liquid = np.atleast_1d(mu_liquid)\n\n assert isinstance(mu_gas,(int,float,np.ndarray,np.float64,np.int64))\n mu_gas = np.atleast_1d(mu_gas)\n\n assert isinstance(z,(int,float,np.ndarray,np.float64,np.int64))\n z = np.atleast_1d(z)\n\n assert isinstance(di,(int,float,np.ndarray,np.float64,np.int64))\n di = np.atleast_1d(di)\n\n assert isinstance(epsilon,(int,float,np.ndarray,np.float64,np.int64))\n epsilon = np.atleast_1d(epsilon)\n\n griffith = False\n\n area = np.power((di*0.5)/12,2)*np.pi\n usl = (liquid_rate * 5.615)/(area * 86400)\n usg = (4*gas_rate*1000*z*(460+temperature)*14.7)/(86400*pressure*520*np.pi*np.power(di/12,2)) \n \n \n #Mixure Velocity\n um = usl + usg \n lambda_g = usg / um \n lambda_l = 1 - lambda_g\n #Check if Buble flow exist\n lb = 1.071 - 0.2218 * (np.power(um,2)/(di/12))\n \n if lb < 0.13:\n lb = 0.13\n\n if lb > lambda_g:\n yl=1-0.5*(1+(um/0.8)-np.sqrt(np.power(1+(um/0.8),2)-4*(usg/0.8)))\n griffith=True\n else:\n #Calculate Dimensionless numbers\n nvl= 1.938*usl*np.power(rho_liquid/ten_liquid,0.25) #Liquid Velocity Number\n nvg=1.938*usg*np.power(rho_liquid/ten_liquid,0.25) #Gas Velocity Number\n nd=120.872*(di/12)*np.power(rho_liquid/ten_liquid,0.5) #Pipe Diameter Number\n nl=0.15726*mu_liquid*np.power(1/(rho_liquid * np.power(ten_liquid,3)),0.25) \n\n #cnl=(0.0019+0.0322*nl-0.6642*np.power(nl,2)+4.9951*np.power(nl,3))/(1+10.0147*nl-33.8696*np.power(nl,2)+277.2817*np.power(nl,3)) # original\n cnl=(0.0019+0.0505*nl-0.0929*np.power(nl,2)+0.061*np.power(nl,3)) #pengtools\n\n # H\n h = (nvl/np.power(nvg,0.575)) * np.power(pressure/14.7,0.1) * (cnl/nd)\n\n #yi/phi ratio\n yl_ratio = np.power(((0.0047+1123.32*h-729489.64*np.power(h,2))/(1+1097.1566*h-722153.97*np.power(h,2))),0.5)\n\n #B\n b = nvg * np.power(nl,0.38)/np.power(nd,2.14)\n\n #Psi calculated by equation from pengtools\n # https://wiki.pengtools.com/index.php?title=Hagedorn_and_Brown_correlation\n if b > 0.055:\n psi = 2.5714*b + 1.5962\n elif b > 0.025:\n psi = -533.33*np.power(b,2) + 58.524*b + 0.1171\n else:\n psi = 27170*np.power(b,3) - 317.52 * np.power(b,2) + 0.5472*b + 0.9999\n\n # Psi calculated from Economides\n #psi=(1.0886+69.9473*b-2334.3497*np.power(b,2)+12896.683*np.power(b,3))/(1+53.4401*b-1517.9369*np.power(b,2)+8419.8115*np.power(b,3))\n\n #yl\n yl = yl_ratio * psi\n\n if yl < lambda_l:\n yl = lambda_l\n\n # Mass flow in lb/d\n mass_flow = area * (usl * rho_liquid + usg * rho_gas) * 86400 \n\n #Reynolds Number\n nre = (2.2e-2 * mass_flow) / ((di/2) * np.power(mu_liquid,yl) * np.power(mu_gas,1-yl))\n\n #Friction 
Factor\n ff = np.power((1/(-4*np.log10((epsilon/3.7065)-(5.0452/nre)*np.log10((np.power(epsilon,1.1098)/2.8257)+np.power(7.149/nre,0.8981))))),2)\n\n #Average density\n rho_avg = yl*rho_liquid + (1-yl)*rho_gas\n\n if griffith:\n pressure_gradient = (1/144)*(rho_avg+((ff*np.power(mass_flow,2))/(7.413e10*np.power(di/12,5)*rho_avg*np.power(yl,2))))\n else:\n pressure_gradient = (1/144)*(rho_avg+((ff*np.power(mass_flow,2))/(7.413e10*np.power(di/12,5)*rho_avg)))\n\n return pressure_gradient\n\ndef gray_correlation(\n pressure=None, #Pressure [psi]\n temperature=None, #Temperature [F]\n liquid_rate=None, # Liquid Flow [bbl/d]\n gas_rate=None, # gas flow [kscfd]\n ten_liquid=None, #Surface tension dyne/cm2\n rho_liquid=None, # density lb/ft3\n rho_gas=None, # density lb/ft3\n mu_liquid=None, # Viscosity [cp]\n mu_gas=None, # Viscosity [cp]\n z=1, # Gas compressibility Factor\n di=None, # Diameter,\n epsilon = 0.0006,\n):\n #Check types and converto to np.ndarray\n assert isinstance(pressure,(int,float,np.ndarray,np.float64,np.int64))\n pressure = np.atleast_1d(pressure)\n\n assert isinstance(temperature,(int,float,np.ndarray,np.float64,np.int64))\n temperature = np.atleast_1d(temperature)\n\n assert isinstance(liquid_rate,(int,float,np.ndarray,np.float64,np.int64))\n liquid_rate = np.atleast_1d(liquid_rate)\n\n assert isinstance(gas_rate,(int,float,np.ndarray,np.float64,np.int64))\n gas_rate = np.atleast_1d(gas_rate)\n\n assert isinstance(ten_liquid,(int,float,np.ndarray,np.float64,np.int64))\n ten_liquid = np.atleast_1d(ten_liquid)\n\n assert isinstance(rho_liquid,(int,float,np.ndarray,np.float64,np.int64))\n rho_liquid = np.atleast_1d(rho_liquid)\n\n assert isinstance(rho_gas,(int,float,np.ndarray,np.float64,np.int64))\n rho_gas = np.atleast_1d(rho_gas)\n\n assert isinstance(mu_liquid,(int,float,np.ndarray,np.float64,np.int64))\n mu_liquid = np.atleast_1d(mu_liquid)\n\n assert isinstance(mu_gas,(int,float,np.ndarray,np.float64,np.int64))\n mu_gas = np.atleast_1d(mu_gas)\n\n assert isinstance(z,(int,float,np.ndarray,np.float64,np.int64))\n z = np.atleast_1d(z)\n\n assert isinstance(di,(int,float,np.ndarray,np.float64,np.int64))\n di = np.atleast_1d(di)\n\n assert isinstance(epsilon,(int,float,np.ndarray,np.float64,np.int64))\n epsilon = np.atleast_1d(epsilon)\n\n area = np.power((di*0.5)/12,2)*np.pi\n usl = (liquid_rate * 5.615)/(area * 86400)\n usg = (4*gas_rate*1000*z*(460+temperature)*14.7)/(86400*pressure*520*np.pi*np.power(di/12,2)) \n\n #Total velocity\n um = usl + usg\n\n #Lambda liquid\n lambda_l = usl / um\n \n # Rho m\n rho_m = lambda_l*rho_liquid + (1 - lambda_l) * rho_gas \n\n #Calculate N\n n1 = (np.power(rho_m,2)*np.power(um,4))/(32.172*6.85e-5*ten_liquid*(rho_liquid-rho_gas))\n n2 = (32.172 * np.power(di/12,2)*(rho_liquid-rho_gas))/(ten_liquid*6.85e-5)\n \n #N3\n rv = usl / usg\n n3 = 0.0814 * (1 - 0.0554 * np.log(1+((730*rv)/(rv + 1))))\n\n #Liquid Holdup\n fl = -2.314 * np.power(n1*(1+(205/n2)),n3)\n yl = 1 - (1-lambda_l)*(1 - np.exp(fl))\n\n #Rho avg\n rho_avg = yl*rho_liquid + (1-yl)*rho_gas\n\n #potential energy\n ppe = rho_avg / 144 \n\n # Absolute Roughness\n k = epsilon * (di/12)\n\n ko = (0.285*ten_liquid)/(rho_m * np.power(um,2))\n\n if rv >= 0.007:\n ke = ko\n else:\n ke = k + rv*((ko - k)/0.007) \n\n epsilon_relative = ke / (di/12)\n\n #Friction Factor\n nre = np.power(10,7)\n ff = np.power((1/(-4*np.log10((epsilon_relative/3.7065)-(5.0452/nre)*np.log10((np.power(epsilon_relative,1.1098)/2.8257)+np.power(7.149/nre,0.8981))))),2)\n\n\n #ppf\n ppf = 
(2*ff*rho_m*np.power(um,2))/(32.172 * (di/12) * 144)\n\n pressure_gradient = ppe + ppf\n\n return pressure_gradient\n\n\n\ndef two_phase_pressure_profile(\n depth = None,\n thp = None,\n liquid_rate = None,\n oil_rate = None,\n gas_rate = None,\n glr = None,\n gor = None,\n bsw = None,\n oil_obj = None,\n gas_obj = None,\n water_obj = None, \n epsilon=0.0006, \n surface_temperature=80, \n temperature_gradient=1, \n di=2.99, \n tol=0.02,\n max_iter = 20,\n method = 'hagedorn_brown',\n min_glr = 10\n):\n\n # Assert the right types and shapes for input\n assert isinstance(depth, (np.ndarray,pd.Series,list))\n depth = np.atleast_1d(depth)\n assert depth.ndim == 1\n\n assert isinstance(thp, (int,np.int64,np.float64,float,np.ndarray)), f'{type(thp)} not accepted'\n thp = np.atleast_1d(thp)\n assert thp.shape == (1,)\n\n if oil_rate is not None:\n assert isinstance(oil_rate, (int,np.int64,np.float64,float,np.ndarray)), f'{type(thp)} not accepted'\n oil_rate = np.atleast_1d(oil_rate)\n assert oil_rate.shape == (1,)\n\n if liquid_rate is not None:\n assert isinstance(liquid_rate, (int,np.int64,np.float64,float,np.ndarray)), f'{type(thp)} not accepted'\n liquid_rate = np.atleast_1d(liquid_rate)\n assert liquid_rate.shape == (1,)\n\n assert any([oil_rate is not None,liquid_rate is not None])\n\n if gas_rate is not None:\n assert isinstance(gas_rate, (int,np.int64,np.float64,float,np.ndarray)), f'{type(thp)} not accepted'\n gas_rate = np.atleast_1d(gas_rate)\n assert gas_rate.shape == (1,)\n\n if gor is not None:\n assert isinstance(gor, (int,np.int64,np.float64,float,np.ndarray)), f'{type(thp)} not accepted'\n gor = np.atleast_1d(gor)\n assert gor.shape == (1,)\n\n if glr is not None:\n assert isinstance(glr, (int,np.int64,np.float64,float,np.ndarray)), f'{type(thp)} not accepted'\n glr = np.atleast_1d(glr)\n assert glr.shape == (1,)\n\n assert any([gas_rate is not None,gor is not None,glr is not None])\n\n assert isinstance(bsw, (int,np.int64,np.float64,float,np.ndarray)), f'{type(thp)} not accepted'\n bsw = np.atleast_1d(bsw)\n assert bsw.shape == (1,)\n\n assert isinstance(gas_obj,Gas) and gas_obj.pvt is not None\n assert isinstance(oil_obj,Oil) and oil_obj.pvt is not None\n assert isinstance(water_obj,Water) and water_obj.pvt is not None\n\n if isinstance(di,(np.ndarray,pd.Series,list)):\n di = np.atleast_1d(di)\n assert di.shape == depth.shape\n elif isinstance(di,(int,float)):\n di = np.full(depth.shape,di)\n\n assert isinstance(epsilon, (int,np.int64,np.float64,float,np.ndarray)), f'{type(thp)} not accepted'\n epsilon = np.atleast_1d(epsilon)\n assert epsilon.shape == (1,)\n\n assert isinstance(surface_temperature,(int,float,np.ndarray))\n surface_temperature = np.atleast_1d(surface_temperature)\n\n assert isinstance(temperature_gradient,(int,float,np.ndarray))\n temperature_gradient = np.atleast_1d(temperature_gradient)\n\n #Start\n if liquid_rate is None:\n liquid_rate = oil_rate / (1-bsw)\n else:\n oil_rate = liquid_rate*(1-bsw)\n\n water_rate = liquid_rate * bsw \n\n if gas_rate is None:\n if gor is None:\n gas_rate = glr * liquid_rate * 1e-3\n else:\n gas_rate = gor * oil_rate * 1e-3\n\n\n pressure_profile = np.zeros(depth.shape)\n pressure_profile[0] = thp\n pressure_gradient = np.zeros(depth.shape)\n iterations = np.zeros(depth.shape)\n free_gas_rate = np.zeros(depth.shape)\n glr = np.zeros(depth.shape)\n temperature_profile = np.abs(depth[0] - depth) * (temperature_gradient/100) + surface_temperature\n\n #Initials Densities\n rho_oil_i = oil_obj.pvt.interpolate(thp,property = 
'rhoo').iloc[0,0]\n rho_water_i = water_obj.pvt.interpolate(thp,property = 'rhow').iloc[0,0]\n rho_l = rho_oil_i * (1-bsw) + rho_water_i * bsw \n\n pressure_gradient[0] = rho_l * (0.433/62.4)\n for i in range(1,depth.shape[0]):\n err = tol + 0.01\n it = 0\n grad_guess = pressure_gradient[i-1]\n\n while err>= tol and it <= max_iter:\n p_guess = grad_guess * np.abs(depth[i] - depth[i-1]) + pressure_profile[i-1]\n \n #Interpolate pvt\n gas_pvt_guess = gas_obj.pvt.interpolate(p_guess)\n oil_pvt_guess = oil_obj.pvt.interpolate(p_guess)\n water_pvt_guess = water_obj.pvt.interpolate(p_guess)\n\n ten_liquid = oil_pvt_guess['tension'].iloc[0] * (1-bsw) + water_pvt_guess['tension'].iloc[0] * bsw\n rho_liquid = oil_pvt_guess['rhoo'].iloc[0] * (1-bsw) + water_pvt_guess['rhow'].iloc[0] * bsw\n mu_liquid = oil_pvt_guess['muo'].iloc[0] * (1-bsw) + water_pvt_guess['muw'].iloc[0] * bsw\n rho_gas = (28.97 * gas_obj.sg * p_guess)/(gas_pvt_guess['z'].iloc[0]*10.73*(temperature_profile[i]+460))\n mu_gas = gas_pvt_guess['mug'].iloc[0]\n z = gas_pvt_guess['z'].iloc[0]\n free_gas = gas_rate - (oil_pvt_guess['rs'].iloc[0]*oil_rate*1e-3)\n free_gas = 0 if free_gas < 0 else free_gas\n \n glr_ratio = free_gas*1e3 / liquid_rate\n if glr_ratio > 10:\n if method == 'hagedorn_brown':\n grad_new = hb_correlation(\n pressure=p_guess,\n temperature=temperature_profile[i],\n liquid_rate = liquid_rate,\n gas_rate = free_gas,\n ten_liquid = ten_liquid,\n rho_liquid = rho_liquid,\n rho_gas = rho_gas,\n mu_liquid = mu_liquid,\n mu_gas = mu_gas,\n z = z,\n di = di[i],\n epsilon = epsilon,\n )\n #elif method == 'beggs_brill':\n # grad_new = bb_correlation()\n elif method == 'gray':\n grad_new = gray_correlation(\n pressure=p_guess, #Pressure [psi]\n temperature=temperature_profile[i], #Temperature [F]\n liquid_rate=liquid_rate, # Liquid Flow [bbl/d]\n gas_rate=free_gas, # gas flow [kscfd]\n ten_liquid=ten_liquid, #Surface tension dyne/cm2\n rho_liquid=rho_liquid, # density lb/ft3\n rho_gas=rho_gas, # density lb/ft3\n mu_liquid=mu_liquid, # Viscosity [cp]\n mu_gas=mu_gas, # Viscosity [cp]\n z=z, # Gas compressibility Factor\n di=di[i], # Diameter,\n epsilon = epsilon\n )\n else:\n df, _ = one_phase_pressure_profile(\n p1=p_guess,\n ge=rho_liquid /62.4,\n epsilon=epsilon,\n md=[depth[i], depth[i-1]],\n tvd=[depth[i], depth[i-1]],\n d = [di[i], di[i]],\n rate = liquid_rate,\n mu=mu_liquid\n )\n\n grad_new = df['gradient'].iloc[-1]\n \n err = abs(grad_guess-grad_new)/grad_new\n grad_guess = grad_new\n it += 1\n\n pressure_gradient[i] = grad_new \n pressure_profile[i] = p_guess\n free_gas_rate[i] = free_gas\n glr[i] = glr_ratio\n iterations[i] = it\n\n df_dict = {\n 'pressure':pressure_profile,\n 'pressure_gradient': pressure_gradient,\n 'free_gas_rate': free_gas_rate,\n 'temperature': temperature_profile,\n 'iterations': iterations,\n 'grl': glr\n }\n\n df = pd.DataFrame(df_dict, index = depth)\n pwf = pressure_profile[-1]\n\n return df, pwf\n\ndef two_phase_upward_pressure(\n depth = None,\n pwf = None,\n liquid_rate = None,\n oil_rate = None,\n gas_rate = None,\n glr = None,\n gor = None,\n bsw = None,\n oil_obj = None,\n gas_obj = None,\n water_obj = None, \n epsilon=0.0006, \n surface_temperature=80, \n temperature_gradient=1, \n di=2.99, \n tol=0.02,\n max_iter = 20,\n method = 'hagedorn_brown',\n guess=None,\n grad_guess = [0.41,0.38]\n):\n\n if guess is None:\n grad = np.atleast_1d(grad_guess)\n delta_h = np.abs(depth[-1] - depth[0])\n guess = pwf - grad * delta_h\n else:\n assert isinstance(guess,(list,np.ndarray))\n guess = 
np.atleast_1d(guess)\n\n def solve(x):\n _,_pwf = two_phase_pressure_profile(\n depth = depth,\n thp = x,\n liquid_rate = liquid_rate,\n oil_rate = oil_rate,\n gas_rate = gas_rate,\n glr = glr,\n gor = gor,\n bsw = bsw,\n oil_obj = oil_obj,\n gas_obj = gas_obj,\n water_obj = water_obj, \n epsilon=epsilon, \n surface_temperature=surface_temperature,\n temperature_gradient=temperature_gradient, \n di=di, \n tol=tol,\n max_iter = max_iter,\n method = method\n )\n\n return pwf - _pwf\n\n sol = root_scalar(solve, x0=guess[0],x1=guess[1])\n\n return sol.root\n\n\ndef two_phase_outflow_curve(\n depth = None,\n thp = None,\n liquid_rate = None,\n oil_rate = None,\n gas_rate = None,\n glr = None,\n gor = None,\n bsw = None,\n oil_obj = None,\n gas_obj = None,\n water_obj = None, \n epsilon=0.0006, \n surface_temperature=80, \n temperature_gradient=1, \n di=2.99, \n tol=0.02,\n max_iter = 20,\n method = 'hagedorn_brown',\n use_gas = False,\n operating_point = None,\n op_n = 30\n):\n\n # Assert the right types and shapes for input\n assert isinstance(depth, (np.ndarray,pd.Series,list))\n depth = np.atleast_1d(depth)\n assert depth.ndim == 1\n\n assert isinstance(thp, (int,np.int64,np.float64,float,np.ndarray,list)), f'{type(thp)} not accepted'\n thp = np.atleast_1d(thp)\n assert thp.ndim == 1\n\n if oil_rate is not None:\n assert isinstance(oil_rate, (int,np.int64,np.float64,float,np.ndarray,list)), f'{type(thp)} not accepted'\n oil_rate = np.atleast_1d(oil_rate)\n assert oil_rate.ndim == 1\n\n if liquid_rate is not None:\n assert isinstance(liquid_rate, (int,np.int64,np.float64,float,np.ndarray,list)), f'{type(thp)} not accepted'\n liquid_rate = np.atleast_1d(liquid_rate)\n assert liquid_rate.ndim == 1\n\n assert any([oil_rate is not None,liquid_rate is not None])\n\n if gas_rate is not None:\n assert isinstance(gas_rate, (int,np.int64,np.float64,float,np.ndarray,list)), f'{type(thp)} not accepted'\n gas_rate = np.atleast_1d(gas_rate)\n assert gas_rate.ndim == 1\n\n if gor is not None:\n assert isinstance(gor, (int,np.int64,np.float64,float,np.ndarray,list)), f'{type(thp)} not accepted'\n gor = np.atleast_1d(gor)\n assert gor.ndim == 1\n\n if glr is not None:\n assert isinstance(glr, (int,np.int64,np.float64,float,np.ndarray,list)), f'{type(thp)} not accepted'\n glr = np.atleast_1d(glr)\n assert glr.ndim == 1\n\n assert any([gas_rate is not None,gor is not None,glr is not None])\n\n assert isinstance(bsw, (int,np.int64,np.float64,float,np.ndarray,list)), f'{type(thp)} not accepted'\n bsw = np.atleast_1d(bsw)\n assert bsw.ndim == 1\n\n assert isinstance(gas_obj,Gas) and gas_obj.pvt is not None\n assert isinstance(oil_obj,Oil) and oil_obj.pvt is not None\n assert isinstance(water_obj,Water) and water_obj.pvt is not None\n\n if isinstance(di,(np.ndarray,list)):\n di = np.atleast_2d(di)\n assert di.shape[0] == depth.shape[0]\n elif isinstance(di,(int,float)):\n di = np.full((depth.shape[0],1),di)\n\n assert isinstance(epsilon, (int,np.int64,np.float64,float,np.ndarray)), f'{type(thp)} not accepted'\n epsilon = np.atleast_1d(epsilon)\n assert epsilon.shape == (1,)\n\n assert isinstance(surface_temperature,(int,float,np.ndarray))\n surface_temperature = np.atleast_1d(surface_temperature)\n\n assert isinstance(temperature_gradient,(int,float,np.ndarray))\n temperature_gradient = np.atleast_1d(temperature_gradient)\n\n if operating_point is not None:\n if use_gas:\n assert isinstance(operating_point,GasInflow)\n else:\n assert isinstance(operating_point,OilInflow)\n\n #Start\n if liquid_rate is None:\n 
liquid_rate = np.zeros(len(oil_rate)*len(bsw))\n c = 0\n for o in oil_rate:\n for b in bsw:\n liquid_rate[c] = o / (1-b)\n c += 1\n else:\n oil_rate = np.zeros(len(liquid_rate)*len(bsw))\n c = 0\n for l in liquid_rate:\n for b in bsw:\n oil_rate[c] = l * (1 - b)\n c += 1\n\n if gas_rate is None:\n assert use_gas == 'False'\n if gor is None:\n gas_arr = glr\n gas_name = 'glr'\n else:\n gas_arr = gor\n gas_name = 'gor'\n else:\n gas_arr = gas_rate \n gas_name = 'gas_rate'\n\n #Estimate number of columns for 2d matrix\n number_columns = len(bsw)*len(liquid_rate)*len(thp)*di.shape[1] if use_gas else len(bsw)*len(gas_arr)*len(thp)*di.shape[1]\n\n #Create matrix for results\n #pwf = np.zeros((len(gas_arr),number_columns)) if use_gas else np.zeros((len(liquid_rate),number_columns))\n pwf = np.zeros(len(gas_arr)*number_columns) if use_gas else np.zeros(len(liquid_rate)*number_columns)\n bsw_arr = np.zeros(pwf.shape)\n liquid_arr = np.zeros(pwf.shape)\n gas_ = np.zeros(pwf.shape)\n thp_arr = np.zeros(pwf.shape)\n di_arr = np.zeros(pwf.shape)\n\n name_list = []\n i= 0\n\n c = 0\n for b in bsw:\n for l in liquid_rate:\n for pi in thp:\n for d in range(di.shape[1]):\n for g in gas_arr:\n _,pwf[i] = two_phase_pressure_profile(\n depth = depth,\n thp = pi,\n liquid_rate = l,\n oil_rate = None,\n gas_rate = g if gas_rate is not None else None,\n glr = g if glr is not None else None,\n gor = g if gor is not None else None,\n bsw = b,\n oil_obj = oil_obj,\n gas_obj = gas_obj,\n water_obj = water_obj, \n epsilon=epsilon, \n surface_temperature=surface_temperature,\n temperature_gradient=temperature_gradient, \n di=di[:,d], \n tol=tol,\n max_iter = max_iter,\n method = method\n )\n bsw_arr[i] = b\n gas_[i] = g\n liquid_arr[i] = l\n thp_arr[i] = pi\n di_arr[i] = di[:,d].mean()\n i += 1\n c += 1 \n case_name = f\"bsw_{b} liquid_{l} thp_{pi} di_{np.round(di[:,d].mean(),decimals=2)}\"\n name_list.append(case_name)\n\n if use_gas:\n arr=np.column_stack((pwf,bsw_arr,liquid_arr,thp_arr,di_arr))\n df = pd.DataFrame(arr,columns=['pwf','bsw','liquid','thp','di'],index=gas_)\n df.index.name = 'gas'\n else:\n arr=np.column_stack((pwf,bsw_arr,gas_,thp_arr,di_arr))\n df = pd.DataFrame(arr,columns=['pwf','bsw','liquid','thp','di'],index=liquid_arr)\n df.index.name = 'liquid'\n df['case'] = name_list\n\n op = pd.DataFrame()\n if operating_point is not None:\n inflow = operating_point.df\n\n for case in df['case'].unique():\n df_case = df[df['case']==case]\n\n points, idx = intercept_curves(inflow['q'],inflow['p'],df_case.index,df_case['pwf'], n=op_n)\n\n points_df = pd.DataFrame(points[[-1],:], columns=['q','p'])\n points_df['case'] = case\n points_df['idx'] = idx\n\n op = op.append(points_df)\n \n op = op.merge(df.groupby('case').mean(), left_on='case', right_on='case')\n\n return df, op\n \n\n\n" ]
[ [ "numpy.sqrt", "numpy.atleast_2d", "numpy.sign", "numpy.zeros", "matplotlib.pyplot.gca", "pandas.DataFrame", "numpy.abs", "numpy.column_stack", "numpy.exp", "numpy.atleast_1d", "numpy.power", "numpy.log", "numpy.array", "numpy.sin", "numpy.full", "numpy.linspace", "numpy.radians", "scipy.optimize.root_scalar" ] ]
pyrateml/agent
[ "84235db931d6e4ef956962961c619994898ebdd5" ]
[ "utilities/curriculum/InitialStateDistribution.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = 'cnheider'\nfrom collections import namedtuple\n\nimport numpy as np\n\n\nclass InitStateDistribution(object):\n StateDist = namedtuple('StateDist', ('state', 'prob'))\n\n def __init__(self):\n self.state_tuples = []\n\n def add(self, state, prob):\n self.state_tuples.append(self.StateDist(state, prob))\n\n def sample(self):\n sds = self.StateDist(*zip(*self.state_tuples))\n return np.random.choice(sds.state, p=sds.prob)\n" ]
[ [ "numpy.random.choice" ] ]
conorfalvey/Python-MultilayerExtraction
[ "68cfe9a82c45d52f36c5588e2bce83a5fc8400bb" ]
[ "python/test_init.py" ]
[ "# Testing Setup of Multilayer Extraction\nimport networkx as nx\nimport pandas as pd\nimport numpy as np\nimport math\nimport itertools as it\nfrom . import adjacency_to_edgelist\nfrom . import expectation_CM\nfrom . import initialization\nfrom . import score\nimport matplotlib.pyplot as plt\n\n# Gen default testing graph\ng1 = nx.planted_partition_graph(5, 25, 0.5, 0.05)\ngraph = nx.generators.complete_graph(9)\n# Gen adjacency matrix for complete graph\nadjacency = [[[0, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 0, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 0, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 0, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 0, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 0, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 0, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 0]]]\nprint(adjacency)\n\n# Gen edgelist from adjacency matrix\nedgelist = adjacency_to_edgelist.adjacency_to_edgelist(adjacency)\n\nprint(edgelist)\n\n# Gen Expectation.CM from edgelist\nexpectation_CM = expectation_CM.expectation_CM(edgelist)\n\nnx.draw(expectation_CM[0])\nplt.show()\n\n# Gen initialization outputs\ninitial = initialization.initialization(graph, 0.05, 1, 9)\n\nprint(initial)\n\n# Gen score\n# test_score = score.score(adjacency, initial['vertex_set'], initial['layer_set'], 9)\n\nn = 9\nvertex_set = initial['vertex_set']\nlayer_set = initial['layer_set']\nadjacency_score = expectation_CM\nsuper_mod = None\nif len(layer_set) < 1 or len(vertex_set) < 1:\n print(0)\nif len(layer_set) == 1:\n super_mod = adjacency_score[layer_set[0][0]]\nif len(layer_set) > 1:\n super_mod = nx.empty_graph(n)\n for i in range(0, layer_set):\n super_mod = nx.union(super_mod, adjacency_score[i])\n\nsuper_mod_subgraph = super_mod.subgraph(map(int, vertex_set[0]))\n\nedge_weight_tuples = nx.get_edge_attributes(super_mod_subgraph, 'weight')\nedge_weights = pd.DataFrame({'edge': list(edge_weight_tuples.keys()), 'weight': list(edge_weight_tuples.values())})\nfor _, weights in edge_weights.iterrows():\n if math.isnan(weights['weight']):\n weights['weight'] = 0\n \nmodularity_score = np.sum(edge_weights['weight'])\nmodularity_score = [0 for i in modularity_score if i < 0]\n\ntot_mod = np.sum(modularity_score)\nobs_score = (tot_mod ** 2) / ((n ** 2 * it.combinations(range(0, len(vertex_set)), 2)) * (len(layer_set)))\n\nprint(obs_score)\n\nprint(score.score(edgelist, vertex_set, layer_set, n))\n" ]
[ [ "numpy.sum", "matplotlib.pyplot.show" ] ]
allanwright/media-classifier-core
[ "7d86c0bc4a9361d36da0f6eaf62f2faa257c2339" ]
[ "src/mccore/prediction.py" ]
[ "'''Helper methods for making classification predictions.\n\n'''\n\nimport numpy as np\n\ndef get_class(proba, labels):\n '''Gets the class label from the specified class probability estimates.\n\n Args:\n proba (array like): The estimated class probability estimates.\n labels (dictionary): The label dictionary.\n\n Returns:\n class (object): The class label and associated probability estimate.\n '''\n label_id = np.argmax(proba)\n return {\n \"label\": {\n \"id\": int(label_id),\n \"name\": labels[str(label_id)]\n },\n \"probability\": float(np.max(proba))\n }\n" ]
[ [ "numpy.max", "numpy.argmax" ] ]
rkalahasty/medicaltorch
[ "34ea15075a57271940d26684c34767a8a9e8fb58" ]
[ "medicaltorch/metrics.py" ]
[ "from collections import defaultdict\n\nfrom scipy import spatial\nimport numpy as np\n\n\nclass MetricManager(object):\n def __init__(self, metric_fns):\n self.metric_fns = metric_fns\n self.result_dict = defaultdict(float)\n self.num_samples = 0 \n \n def __call__(self, prediction, ground_truth):\n self.num_samples += len(prediction)\n for metric_fn in self.metric_fns:\n for p, gt in zip(prediction, ground_truth):\n res = metric_fn(p, gt)\n dict_key = metric_fn.__name__\n self.result_dict[dict_key] += res\n \n def get_results(self):\n res_dict = {}\n for key, val in self.result_dict.items():\n res_dict[key] = val / self.num_samples\n return res_dict\n \n def reset(self):\n self.num_samples = 0\n self.result_dict = defaultdict(float)\n \n\ndef numeric_score(prediction, groundtruth):\n \"\"\"Computation of statistical numerical scores:\n\n * FP = False Positives\n * FN = False Negatives\n * TP = True Positives\n * TN = True Negatives\n\n return: tuple (FP, FN, TP, TN)\n \"\"\"\n FP = np.float32(np.sum((prediction == 1) & (groundtruth == 0)))\n FN = np.float32(np.sum((prediction == 0) & (groundtruth == 1)))\n TP = np.float32(np.sum((prediction == 1) & (groundtruth == 1)))\n TN = np.float32(np.sum((prediction == 0) & (groundtruth == 0)))\n return FP, FN, TP, TN\n\n\ndef dice_score(prediction, groundtruth):\n pflat = prediction.flatten()\n gflat = groundtruth.flatten()\n d = (1 - spatial.distance.dice(pflat, gflat)) * 100.0\n if np.isnan(d):\n return 0.0\n return d\n\n\ndef jaccard_score(prediction, groundtruth):\n pflat = prediction.flatten()\n gflat = groundtruth.flatten()\n return (1 - spatial.distance.jaccard(pflat, gflat)) * 100.0\n\n\ndef hausdorff_score(prediction, groundtruth):\n return spatial.distance.directed_hausdorff(prediction, groundtruth)[0]\n\n\ndef precision_score(prediction, groundtruth):\n # PPV\n FP, FN, TP, TN = numeric_score(prediction, groundtruth)\n if (TP + FP) <= 0.0:\n return 0.0\n\n precision = np.divide(TP, TP + FP)\n return precision * 100.0\n\n\ndef recall_score(prediction, groundtruth):\n # TPR, sensitivity\n FP, FN, TP, TN = numeric_score(prediction, groundtruth)\n if (TP + FN) <= 0.0:\n return 0.0\n TPR = np.divide(TP, TP + FN)\n return TPR * 100.0\n\n\ndef specificity_score(prediction, groundtruth):\n FP, FN, TP, TN = numeric_score(prediction, groundtruth)\n if (TN + FP) <= 0.0:\n return 0.0\n TNR = np.divide(TN, TN + FP)\n return TNR * 100.0\n\n\ndef intersection_over_union(prediction, groundtruth):\n FP, FN, TP, TN = numeric_score(prediction, groundtruth)\n if (TP + FP + FN) <= 0.0:\n return 0.0\n return TP / (TP + FP + FN) * 100.0\n\n\ndef accuracy_score(prediction, groundtruth):\n FP, FN, TP, TN = numeric_score(prediction, groundtruth)\n N = FP + FN + TP + TN\n accuracy = np.divide(TP + TN, N)\n return accuracy * 100.0\n" ]
[ [ "numpy.sum", "numpy.divide", "scipy.spatial.distance.directed_hausdorff", "scipy.spatial.distance.dice", "numpy.isnan", "scipy.spatial.distance.jaccard" ] ]
dertilo/espnet
[ "4d2414b3d56154ab8c6ded0eb0a3f076e073344b" ]
[ "tools/check_install.py" ]
[ "#!/usr/bin/env python3\n\n\"\"\"Script to check whether the installation is done correctly.\"\"\"\n\n# Copyright 2018 Nagoya University (Tomoki Hayashi)\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\nimport argparse\nimport importlib\nimport logging\nimport sys\nimport traceback\n\nfrom distutils.version import LooseVersion\n\n\n# NOTE: add the libraries which are not included in setup.py\nMANUALLY_INSTALLED_LIBRARIES = [\n (\"espnet\", None),\n (\"kaldiio\", None),\n (\"matplotlib\", None),\n (\"chainer\", (\"6.0.0\")),\n (\"chainer_ctc\", None),\n (\"warprnnt_pytorch\", (\"0.1\")),\n]\n\n# NOTE: list all torch versions which are compatible with espnet\nCOMPATIBLE_TORCH_VERSIONS = (\n \"0.4.1\",\n \"1.0.0\",\n \"1.0.1\",\n \"1.0.1.post2\",\n \"1.1.0\",\n \"1.2.0\",\n \"1.3.0\",\n \"1.3.1\",\n \"1.4.0\",\n \"1.5.0\",\n \"1.5.1\",\n \"1.6.0\",\n)\n\n\ndef main(args):\n \"\"\"Check the installation.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--no-cuda\",\n action=\"store_true\",\n default=False,\n help=\"Disable cuda-related tests\",\n )\n parser.add_argument(\n \"--no-cupy\",\n action=\"store_true\",\n default=False,\n help=\"Disable cupy test\",\n )\n args = parser.parse_args(args)\n\n logging.basicConfig(level=logging.INFO, format=\"%(levelname)s: %(message)s\")\n logging.info(f\"python version = {sys.version}\")\n\n library_list = []\n if args.no_cuda:\n args.no_cupy = True\n\n if not args.no_cupy:\n library_list.append((\"cupy\", (\"6.0.0\")))\n\n # check torch installation at first\n try:\n import torch\n\n logging.info(f\"pytorch version = {torch.__version__}\")\n if torch.__version__ not in COMPATIBLE_TORCH_VERSIONS:\n logging.warning(f\"{torch.__version__} is not tested. please be careful.\")\n except ImportError:\n logging.warning(\"torch is not installed.\")\n logging.warning(\"please try to setup again and then re-run this script.\")\n sys.exit(1)\n\n # warpctc can be installed only for pytorch < 1.4\n if LooseVersion(torch.__version__) < LooseVersion(\"1.4.0\"):\n library_list.append((\"warpctc_pytorch\", (\"0.1.1\", \"0.1.2\", \"0.1.3\")))\n\n library_list.extend(MANUALLY_INSTALLED_LIBRARIES)\n\n # check library availableness\n logging.info(\"library availableness check start.\")\n logging.info(\"# libraries to be checked = %d\" % len(library_list))\n is_correct_installed_list = []\n for idx, (name, version) in enumerate(library_list):\n try:\n importlib.import_module(name)\n logging.info(\"--> %s is installed.\" % name)\n is_correct_installed_list.append(True)\n except ImportError:\n logging.warning(\"--> %s is not installed.\\n###### Raw Error ######\\n%s#######################\" % (name, traceback.format_exc()))\n is_correct_installed_list.append(False)\n logging.info(\"library availableness check done.\")\n logging.info(\n \"%d / %d libraries are correctly installed.\"\n % (sum(is_correct_installed_list), len(library_list))\n )\n\n if len(library_list) != sum(is_correct_installed_list):\n logging.warning(\"please try to setup again and then re-run this script.\")\n sys.exit(1)\n\n # check library version\n num_version_specified = sum(\n [True if v is not None else False for n, v in library_list]\n )\n logging.info(\"library version check start.\")\n logging.info(\"# libraries to be checked = %d\" % num_version_specified)\n is_correct_version_list = []\n for idx, (name, version) in enumerate(library_list):\n if version is not None:\n # Note: temp. 
fix for warprnnt_pytorch\n # not found version with importlib\n if name == \"warprnnt_pytorch\":\n import pkg_resources\n\n vers = pkg_resources.get_distribution(name).version\n else:\n vers = importlib.import_module(name).__version__\n if vers is not None:\n is_correct = vers in version\n if is_correct:\n logging.info(\"--> %s version is matched (%s).\" % (name, vers))\n is_correct_version_list.append(True)\n else:\n logging.warning(\n \"--> %s version is incorrect (%s is not in %s).\"\n % (name, vers, str(version))\n )\n is_correct_version_list.append(False)\n else:\n logging.info(\n \"--> %s has no version info, but version is specified.\" % name\n )\n logging.info(\"--> maybe it is better to reinstall the latest version.\")\n is_correct_version_list.append(False)\n logging.info(\"library version check done.\")\n logging.info(\n \"%d / %d libraries are correct version.\"\n % (sum(is_correct_version_list), num_version_specified)\n )\n\n if sum(is_correct_version_list) != num_version_specified:\n logging.info(\"please try to setup again and then re-run this script.\")\n sys.exit(1)\n\n # check cuda availableness\n if args.no_cuda:\n logging.info(\"cuda availableness check skipped.\")\n else:\n logging.info(\"cuda availableness check start.\")\n import chainer\n import torch\n\n try:\n assert torch.cuda.is_available()\n logging.info(\"--> cuda is available in torch.\")\n except AssertionError:\n logging.warning(\"--> it seems that cuda is not available in torch.\")\n try:\n assert torch.backends.cudnn.is_available()\n logging.info(\"--> cudnn is available in torch.\")\n except AssertionError:\n logging.warning(\"--> it seems that cudnn is not available in torch.\")\n try:\n assert chainer.backends.cuda.available\n logging.info(\"--> cuda is available in chainer.\")\n except AssertionError:\n logging.warning(\"--> it seems that cuda is not available in chainer.\")\n try:\n assert chainer.backends.cuda.cudnn_enabled\n logging.info(\"--> cudnn is available in chainer.\")\n except AssertionError:\n logging.warning(\"--> it seems that cudnn is not available in chainer.\")\n try:\n from cupy.cuda import nccl # NOQA\n\n logging.info(\"--> nccl is installed.\")\n except ImportError:\n logging.warning(\n \"--> it seems that nccl is not installed. multi-gpu is not enabled.\"\n )\n logging.warning(\n \"--> if you want to use multi-gpu, please install it and then re-setup.\"\n )\n try:\n assert torch.cuda.device_count() > 1\n logging.info(\n f\"--> multi-gpu is available (#gpus={torch.cuda.device_count()}).\"\n )\n except AssertionError:\n logging.warning(\"--> it seems that only single gpu is available.\")\n logging.warning(\"--> maybe your machine has only one gpu.\")\n logging.info(\"cuda availableness check done.\")\n\n logging.info(\"installation check is done.\")\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n" ]
[ [ "torch.cuda.is_available", "torch.backends.cudnn.is_available", "torch.cuda.device_count" ] ]
HubBucket-Team/lingvo
[ "fb929def2f27cf73a6ee1b1eaa8bee982bd92987" ]
[ "lingvo/core/base_model_test.py" ]
[ "# Lint as: python2, python3\n# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for base_model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport lingvo.compat as tf\nfrom lingvo.core import base_decoder\nfrom lingvo.core import base_input_generator\nfrom lingvo.core import base_layer\nfrom lingvo.core import base_model\nfrom lingvo.core import base_model_params\nfrom lingvo.core import hyperparams\nfrom lingvo.core import layers\nfrom lingvo.core import learner\nfrom lingvo.core import py_utils\nfrom lingvo.core import task_scheduler\nfrom lingvo.core import test_utils\nimport numpy as np\nimport six\nfrom six.moves import range\n\n\nFLAGS = tf.flags.FLAGS\n\n_NUMPY_RANDOM_SEED = 9885784\n\n\nclass BaseTaskTest(test_utils.TestCase):\n\n def testStatsCounter(self):\n with self.session() as sess:\n foo = base_model.StatsCounter('foo')\n val = foo.Value()\n params = base_layer.BaseLayer.Params()\n inc = foo.IncBy(params, 100)\n\n tf.global_variables_initializer().run()\n self.assertAllEqual(0, val.eval())\n self.assertAllEqual(100, sess.run(inc))\n self.assertAllEqual(100, val.eval())\n self.assertAllEqual([100, 200], sess.run([val, inc]))\n self.assertAllEqual([200, 300], sess.run([val, inc]))\n\n @classmethod\n def TestParams(cls):\n p = base_model.BaseTask.Params()\n p.name = 'base_mdl'\n p.encoder = base_layer.BaseLayer.Params()\n p.encoder.name = 'encoder'\n p.decoder = base_decoder.BaseDecoder.Params()\n p.decoder.name = 'decoder'\n return p\n\n def testInit(self):\n p = self.TestParams()\n p.input = base_input_generator.BaseSequenceInputGenerator.Params()\n _ = p.Instantiate()\n\n def testScaleGradients(self):\n p = self.TestParams()\n p.input = base_input_generator.BaseSequenceInputGenerator.Params()\n task = p.Instantiate()\n task.CreateVariable(\n 'a',\n py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0)))\n var_a = task.theta.a\n var_grads = py_utils.NestedMap(a=(var_a, tf.ones_like(var_a)))\n scaled_grads_map = task.learners[0].ScaleGradients(var_grads)\n\n FLAGS.enable_check_numerics = False\n with self.session():\n tf.global_variables_initializer().run()\n self.assertFalse(scaled_grads_map.has_nan_or_inf.eval())\n self.assertEqual(1.0, scaled_grads_map.grad_scale.eval())\n # The final gradient must be finite.\n self.assertFalse(tf.is_nan(scaled_grads_map.final_var_grads.a[1]).eval())\n self.assertTrue(\n tf.is_finite(scaled_grads_map.final_var_grads.a[1]).eval())\n\n def testScaleGradientsInf(self):\n FLAGS.enable_check_numerics = False\n p = self.TestParams()\n p.input = base_input_generator.BaseSequenceInputGenerator.Params()\n task = p.Instantiate()\n task.CreateVariable(\n 'a',\n py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0)))\n var_a = task.theta.a\n # Infinite gradient.\n var_grads = 
py_utils.NestedMap(a=(var_a, tf.log(0.)))\n scaled_grads_map = task.learners[0].ScaleGradients(var_grads)\n\n with self.session():\n tf.global_variables_initializer().run()\n self.assertTrue(scaled_grads_map.has_nan_or_inf.eval())\n self.assertEqual(0., scaled_grads_map.grad_scale.eval())\n # The final gradient must be finite.\n self.assertFalse(tf.is_nan(scaled_grads_map.final_var_grads.a[1]).eval())\n self.assertTrue(\n tf.is_finite(scaled_grads_map.final_var_grads.a[1]).eval())\n\n def testScaleGradientsNaN(self):\n FLAGS.enable_check_numerics = False\n p = self.TestParams()\n p.input = base_input_generator.BaseSequenceInputGenerator.Params()\n task = p.Instantiate()\n task.CreateVariable(\n 'a',\n py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0)))\n var_a = task.theta.a\n # Make a NaN gradient.\n var_grads = py_utils.NestedMap(a=(var_a, 0. * tf.log(0.)))\n scaled_grads_map = task.learners[0].ScaleGradients(var_grads)\n\n with self.session():\n tf.global_variables_initializer().run()\n self.assertTrue(scaled_grads_map.has_nan_or_inf.eval())\n self.assertEqual(0., scaled_grads_map.grad_scale.eval())\n # The final gradient must be finite.\n self.assertFalse(tf.is_nan(scaled_grads_map.final_var_grads.a[1]).eval())\n self.assertTrue(\n tf.is_finite(scaled_grads_map.final_var_grads.a[1]).eval())\n\n def testScaleGradientsCheckNumerics(self):\n \"\"\"ScaleGradients when enable_check_numerics=True.\"\"\"\n FLAGS.enable_check_numerics = True\n p = self.TestParams()\n p.input = base_input_generator.BaseSequenceInputGenerator.Params()\n task = p.Instantiate()\n task.CreateVariable(\n 'a',\n py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0)))\n var_a = task.theta.a\n # Make a NaN gradient.\n var_grads = py_utils.NestedMap(a=(var_a, 0. 
* tf.log(0.)))\n scaled_grads_map = task.learners[0].ScaleGradients(var_grads)\n\n with self.session():\n tf.global_variables_initializer().run()\n with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,\n 'is not finite'):\n self.assertTrue(scaled_grads_map.has_nan_or_inf.eval())\n self.assertEqual(0., scaled_grads_map.grad_scale.eval())\n # The final gradient must be finite.\n self.assertFalse(\n tf.is_nan(scaled_grads_map.final_var_grads.a[1]).eval())\n self.assertTrue(\n tf.is_finite(scaled_grads_map.final_var_grads.a[1]).eval())\n\n def testScaleGradientsError(self):\n p = self.TestParams()\n p.input = base_input_generator.BaseSequenceInputGenerator.Params()\n p.train.clip_gradient_single_norm_to_value = 1.0\n p.train.clip_gradient_norm_to_value = 1.0\n task = p.Instantiate()\n task.CreateVariable(\n 'a',\n py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0)))\n var_a = task.theta.a\n var_grads = py_utils.NestedMap(a=(var_a, tf.ones_like(var_a)))\n self.assertRaises(ValueError, task.learners[0].ScaleGradients, var_grads)\n\n def testScaleGradientsSingleTensorNorm(self):\n p = self.TestParams()\n p.input = base_input_generator.BaseSequenceInputGenerator.Params()\n p.train.clip_gradient_single_norm_to_value = 1.0\n p.train.clip_gradient_norm_to_value = None\n task = p.Instantiate()\n task.CreateVariable(\n 'a',\n py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0)))\n task.CreateVariable(\n 'b',\n py_utils.WeightParams(shape=[], init=py_utils.WeightInit.Constant(0)))\n\n var_a = task.theta.a\n var_b = task.theta.b\n var_grads = py_utils.NestedMap(\n a=(var_a, tf.ones_like(var_a) * 10.0),\n b=(var_b, tf.ones_like(var_b) * 0.5))\n scaled_grads_map = task.learners[0].ScaleGradients(var_grads)\n\n FLAGS.enable_check_numerics = False\n with self.session():\n tf.global_variables_initializer().run()\n\n # Each variable is clipped indipendently to grad scale of 1.\n self.assertAllClose(scaled_grads_map.final_var_grads.a[1].eval(), 1.0)\n self.assertAllClose(scaled_grads_map.final_var_grads.b[1].eval(), 0.5)\n\n\nclass TeacherTask(base_model.BaseTask):\n\n @base_layer.initializer\n def __init__(self, params):\n super(TeacherTask, self).__init__(params)\n p = self.params\n with tf.variable_scope(p.name):\n self.CreateVariable('x',\n py_utils.WeightParams(\n shape=[], init=py_utils.WeightInit.Constant(0)))\n\n def ComputePredictions(self, theta, input_batch):\n return theta.x\n\n\nclass StudentTask(base_model.BaseTask):\n\n @base_layer.initializer\n def __init__(self, params):\n super(StudentTask, self).__init__(params)\n p = self.params\n with tf.variable_scope(p.name):\n self.CreateVariable('x',\n py_utils.WeightParams(\n shape=[], init=py_utils.WeightInit.Uniform()))\n\n def ComputePredictions(self, theta, input_batch):\n return theta.x\n\n\nclass TestInputGenerator(base_input_generator.BaseSequenceInputGenerator):\n\n def __init__(self, params):\n super(TestInputGenerator, self).__init__(params)\n self._input_batch_size = tf.constant(1)\n\n def InputBatch(self):\n return 0\n\n\nclass DistillationTestTask(base_model.DistillationTask):\n\n @classmethod\n def Params(cls):\n p = super(DistillationTestTask, cls).Params()\n p.name = 'distillation_test'\n p.teacher = TeacherTask.Params()\n p.student = StudentTask.Params()\n p.input = TestInputGenerator.Params()\n p.train.learning_rate = 1e3\n p.teacher.train = None\n p.teacher.eval = None\n p.student.train = None\n p.student.eval = None\n return p\n\n @base_layer.initializer\n def __init__(self, params):\n 
super(DistillationTestTask, self).__init__(params)\n\n def ComputeLoss(self, theta, predictions, input_batch):\n return {'loss': (predictions.teacher - predictions.student, 1)}, {}\n\n\nclass DistillationTaskTest(test_utils.TestCase):\n\n def testFProp(self):\n p = DistillationTestTask.Params()\n task = p.Instantiate()\n self.assertFalse(task.params.is_eval)\n self.assertFalse(task.teacher.params.is_eval)\n self.assertIsNotNone(task.teacher.params.input)\n self.assertFalse(task.student.params.is_eval)\n self.assertIsNotNone(task.student.params.input)\n metrics = task.FPropDefaultTheta()[0]\n self.assertItemsEqual(['loss', 'num_samples_in_batch'],\n list(metrics.keys()))\n task.BProp()\n # Expected side effects of BProp().\n self.assertIsNotNone(task.train_op)\n self.assertIsNotNone(task.total_examples)\n\n with self.session() as sess:\n tf.global_variables_initializer().run()\n\n variables = {}\n values_before_training = {}\n values_after_training = {}\n for child in ('teacher', 'student'):\n variables[child] = {\n k: v\n for k, v in getattr(task, child).vars.FlattenItems()\n }\n values_before_training[child] = sess.run(variables[child])\n\n # Train for a few steps.\n for _ in range(10):\n sess.run(task.train_op)\n\n for child in ('teacher', 'student'):\n values_after_training[child] = sess.run(variables[child])\n for k, v in six.iteritems(values_after_training[child]):\n print('Comparing variable %s' % k)\n if child == 'teacher':\n # Teacher vars should not change after training.\n self.assertAllEqual(values_before_training[child][k], v)\n else:\n # Student vars should change after training.\n self.assertNotAlmostEqual(values_before_training[child][k], v)\n\n\nclass SingleTaskModelTest(test_utils.TestCase):\n\n def testInit(self):\n p = base_model.SingleTaskModel.Params()\n p.task = BaseTaskTest.TestParams()\n p.task.train.learner = (learner.Learner.Params().Set(name='loss'))\n p.task.input = base_input_generator.BaseSequenceInputGenerator.Params()\n model = p.Instantiate()\n self.assertEqual(model.params.name, model.GetTask().params.name)\n self.assertEqual(model.params.task, model.GetTask().params)\n self.assertEqual(len(model.tasks), 1)\n self.assertEqual(model.tasks[0], model.GetTask())\n self.assertEqual(model.tasks[0], model.SampleTask(None))\n\n def testExponentialMovingAverage(self):\n p = base_model.SingleTaskModel.Params()\n p.task = BaseTaskTest.TestParams()\n p.task.input = base_input_generator.BaseSequenceInputGenerator.Params()\n p.train.ema_decay = 0.9\n model = p.Instantiate()\n model._task.CreateChild('a',\n layers.BatchNormLayer.Params().Set(name='a', dim=1))\n model._task._train_op = tf.no_op()\n model._task.ApplyExponentialMovingAverage(model.ema)\n with tf.variable_scope('', reuse=True):\n beta = tf.get_variable('a/beta/var')\n mean = tf.get_variable('a/moving_mean/var')\n self.assertIsNotNone(model.ema.average(beta))\n self.assertIsNone(model.ema.average(mean))\n\n\nclass MultiTaskModelTest(test_utils.TestCase):\n\n def testInit(self):\n p = base_model.MultiTaskModel.Params()\n p.name = 'MultiTaskModel'\n p0 = BaseTaskTest.TestParams()\n p0.train.learner = (learner.Learner.Params().Set(name='loss'))\n p1 = BaseTaskTest.TestParams()\n p1.train.learner = (learner.Learner.Params().Set(name='loss'))\n\n p.input = base_model_params.MultiTaskModelParams().Train()\n p.input.Define('a',\n base_input_generator.BaseSequenceInputGenerator.Params(), '')\n p.input.Define('b',\n base_input_generator.BaseSequenceInputGenerator.Params(), '')\n\n p.task_params = 
hyperparams.Params()\n p.task_params.Define('a', p0, '')\n p.task_params.Define('b', p1, '')\n\n p.task_probs = hyperparams.Params()\n p.task_probs.Define('a', 0.5, '')\n p.task_probs.Define('b', 0.5, '')\n\n model = p.Instantiate()\n self.assertEqual(len(model.tasks), 2)\n self.assertEqual(set(model.task_names), {'a', 'b'})\n self.assertEqual(set(model.tasks), {model.GetTask('a'), model.GetTask('b')})\n self.assertEqual(model.params.task_params.a, model.GetTask('a').params)\n self.assertEqual(model.params.task_params.b, model.GetTask('b').params)\n\n def _setUpTestSampleTask(self):\n np.random.seed(_NUMPY_RANDOM_SEED)\n\n # define and initialize tasks, model and params\n p = base_model.MultiTaskModel.Params()\n p.name = 'MultiTaskModel'\n p0 = BaseTaskTest.TestParams()\n p1 = BaseTaskTest.TestParams()\n\n p.input = base_model_params.MultiTaskModelParams().Train()\n p.input.Define('a',\n base_input_generator.BaseSequenceInputGenerator.Params(), '')\n p.input.Define('b',\n base_input_generator.BaseSequenceInputGenerator.Params(), '')\n\n p.task_params = hyperparams.Params()\n p.task_params.Define('a', p0, '')\n p.task_params.Define('b', p1, '')\n\n return p\n\n def _testSampleTaskHelper(self, p):\n model = p.Instantiate()\n\n task_to_id = {model.children['a']: 'a', model.children['b']: 'b'}\n task_counts = {'a': 0, 'b': 0}\n\n # initialize tensorflow graph and global step\n with self.session() as sess:\n tf.global_variables_initializer().run()\n global_step = sess.run(model.global_step)\n for _ in range(100):\n task = model.SampleTask(global_step)\n task_counts[task_to_id[task]] += 1\n\n self.assertEqual(task_counts['a'], 83)\n self.assertEqual(task_counts['b'], 17)\n\n def testSampleTaskSpecifiedWithoutScheduler(self):\n \"\"\"Expected distribution: 'a': 0.8 , 'b': 0.2.\"\"\"\n p = self._setUpTestSampleTask()\n\n p.task_probs = hyperparams.Params()\n p.task_probs.Define('a', 0.8, '')\n p.task_probs.Define('b', 0.2, '')\n\n self._testSampleTaskHelper(p)\n\n def testSampleTask(self):\n \"\"\"Expected distribution: 'a': 0.8 , 'b': 0.2.\"\"\"\n p = self._setUpTestSampleTask()\n\n p.task_schedule = task_scheduler.ConstantScheduler.Params()\n p.task_schedule.task_probs = [('a', 0.8), ('b', 0.2)]\n\n self._testSampleTaskHelper(p)\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "numpy.random.seed" ] ]
MilesCranmer/bifrost
[ "951dd4a449850d22cfd74f4db13ecf806fe5cc30" ]
[ "python/bifrost/dtype.py" ]
[ "\n# Copyright (c) 2016, The Bifrost Authors. All rights reserved.\n# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of The Bifrost Authors nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY\n# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY\n# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\n\ni: signed integer\nu: unsigned integer\nf: floating point\nci: complex signed integer\ncu: complex unsigned integer\ncf: complex floating pointer\n\ni4: 4-bit signed integer\nf16: 16-bit floating point\nci4: 4+4-bit complex signed integer\ncf32: 32+32-bit complex floating point\n\n\"\"\"\n\nfrom libbifrost import _bf\n\nimport numpy as np\n\ndef split_name_nbit(dtype_str):\n\t\"\"\"Splits a dtype string into (name, nbit)\"\"\"\n\tfor i,char in enumerate(dtype_str):\n\t\tif char.isdigit():\n\t\t\tbreak\n\tname = dtype_str[:i]\n\tnbit = int(dtype_str[i:])\n\treturn name, nbit\n\n# Custom dtypes to represent additional complex types\nci8 = np.dtype([('re',np.int8), ('im',np.int8)])\nci16 = np.dtype([('re',np.int16), ('im',np.int16)])\nci32 = np.dtype([('re',np.int32), ('im',np.int32)])\ncf16 = np.dtype([('re',np.float16), ('im',np.float16)])\ndef to_complex64(q):\n\treal_type = q.dtype['re']\n\treturn q.view(real_type).astype(np.float32).view(np.complex64)\ndef from_complex64(f, dtype):\n\treal_type = dtype['re']\n\treturn f.view(np.float32).astype(real_type).view(dtype)\n\ndef numpy2bifrost(dtype):\n\tif dtype == np.int8: return _bf.BF_DTYPE_I8\n\telif dtype == np.int16: return _bf.BF_DTYPE_I16\n\telif dtype == np.int32: return _bf.BF_DTYPE_I32\n\telif dtype == np.uint8: return _bf.BF_DTYPE_U8\n\telif dtype == np.uint16: return _bf.BF_DTYPE_U16\n\telif dtype == np.uint32: return _bf.BF_DTYPE_U32\n\telif dtype == np.float16: return _bf.BF_DTYPE_F16\n\telif dtype == np.float32: return _bf.BF_DTYPE_F32\n\telif dtype == np.float64: return _bf.BF_DTYPE_F64\n\telif dtype == np.float128: return _bf.BF_DTYPE_F128\n\telif dtype == ci8: return _bf.BF_DTYPE_CI8\n\telif dtype == ci16: return _bf.BF_DTYPE_CI16\n\telif dtype == ci32: return _bf.BF_DTYPE_CI32\n\telif dtype == cf16: return _bf.BF_DTYPE_CF16\n\telif dtype == np.complex64: return _bf.BF_DTYPE_CF32\n\telif dtype == np.complex128: return 
_bf.BF_DTYPE_CF64\n\telif dtype == np.complex256: return _bf.BF_DTYPE_CF128\n\telse: raise ValueError(\"Unsupported dtype: \" + str(dtype))\n\ndef name_nbit2numpy(name, nbit):\n\tif name == 'i':\n\t\tif nbit == 8: return np.int8\n\t\telif nbit == 16: return np.int16\n\t\telif nbit == 32: return np.int32\n\t\telif nbit == 64: return np.int64\n\t\telse: raise TypeError(\"Invalid signed integer type size: %i\" % nbit)\n\telif name == 'u':\n\t\tif nbit == 8: return np.uint8\n\t\telif nbit == 16: return np.uint16\n\t\telif nbit == 32: return np.uint32\n\t\telif nbit == 64: return np.uint64\n\t\telse: raise TypeError(\"Invalid unsigned integer type size: %i\" % nbit)\n\telif name == 'f':\n\t\tif nbit == 16: return np.float16\n\t\telif nbit == 32: return np.float32\n\t\telif nbit == 64: return np.float64\n\t\telif nbit == 128: return np.float128\n\t\telse: raise TypeError(\"Invalid floating-point type size: %i\" % nbit)\n\telif name == 'ci':\n\t\tif nbit == 8: return ci8\n\t\telif nbit == 16: return ci16\n\t\telif nbit == 32: return ci32\n\t\telse: raise TypeError(\"Invalid complex integer type size: %i\" % nbit) # match the other branches instead of silently returning None\n\t#elif name in set(['ci', 'cu']):\n\t\t## Note: This gives integer types in place of proper complex types\n\t\t#return name_nbit2numpy(name[1:], nbit*2)\n\telif name == 'cf':\n\t\tif nbit == 16: return cf16\n\t\telif nbit == 32: return np.complex64\n\t\telif nbit == 64: return np.complex128\n\t\telif nbit == 128: return np.complex256\n\t\telse: raise TypeError(\"Invalid complex floating-point type size: %i\" % nbit)\n\telse:\n\t\traise TypeError(\"Invalid type name: \" + name)\ndef string2numpy(dtype_str):\n\treturn name_nbit2numpy(*split_name_nbit(dtype_str))\n\ndef numpy2string(dtype):\n\tif dtype == np.int8: return 'i8'\n\telif dtype == np.int16: return 'i16'\n\telif dtype == np.int32: return 'i32'\n\telif dtype == np.int64: return 'i64'\n\telif dtype == np.uint8: return 'u8'\n\telif dtype == np.uint16: return 'u16'\n\telif dtype == np.uint32: return 'u32'\n\telif dtype == np.uint64: return 'u64'\n\telif dtype == np.float16: return 'f16'\n\telif dtype == np.float32: return 'f32'\n\telif dtype == np.float64: return 'f64'\n\telif dtype == np.float128: return 'f128'\n\telif dtype == np.complex64: return 'cf32'\n\telif dtype == np.complex128: return 'cf64'\n\telif dtype == np.complex256: return 'cf128'\n\telse: raise TypeError(\"Unsupported dtype: \" + str(dtype))\n" ]
[ [ "numpy.dtype" ] ]
acctouhou/Prediction_of_battery
[ "c7b1f4ccb11ddf416d1026c0a528ff2ef15eb842" ]
[ "1_Predicting/predict.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 26 00:06:46 2019\n\n@author: Acc\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nfrom sklearn import preprocessing\nimport tensorflow as tf\nfrom tensorflow.keras import backend as K\nfrom tqdm import tqdm\n\n\ndata_dir='dataset'\nmodel_dir='pretrained'\n\ndef norm(data):\n a= preprocessing.StandardScaler().fit(data)\n d=a.transform(data)\n m=a.mean_\n s=a.scale_\n return m,s\ndef mish(x):\n return x * K.tanh(K.softplus(x)) \n\ndef mae(x,y):\n return np.abs(x-y).mean()\ndef feature_selector(model,x,norm):\n normalized_data=(np.transpose(x,(0,2,1))-norm[0])/norm[1]\n return model.predict(normalized_data,batch_size=128)\ndef concat_data(x1,x2,x3):\n normalized_data=(np.array(x3)-summary_norm[0])/summary_norm[1]\n return np.hstack((x1,x2,normalized_data))\ndef re_norm(cell_feature):\n log1=[]\n log2=[]\n for i in range(len(cell_feature)):\n len_=len(cell_feature['%d'%(i)])-100\n for k in range(len_):\n for j in range(0,50,1): \n log1.append(np.float32(k))\n log2.append(np.float32(eol_data[i]-k))\n log1=np.float32(norm(np.array(log1).reshape(-1,1)))\n log2=np.float32(norm(np.array(log2).reshape(-1,1)))\n return log1,log2\ndef process2predict(cell_feature):\n x_in=[]\n y_in=[]\n for i in range(len(cell_feature)):\n col1=[]\n col2=[]\n len_=len(cell_feature['%d'%(i)])-100 \n for k in range(len_):\n for j in range(0,50,1): \n temp=cell_feature['%d'%(i)][k:(j+k+1)]\n col1.append(np.float32(np.pad(temp, ((0,50-j-1),(0,0)), 'edge')))\n col2.append(np.float32(((eol_data[i]-k))-rul_norm[0])/rul_norm[1])\n col2.append((np.float32(k)-s_norm[0])/s_norm[1])\n x_in.append(col1)\n y_in.append(col2)\n return x_in,y_in\n\n\neol_data = np.load('%s/battery_EoL.npy'%(data_dir),allow_pickle='TRUE')\nbattery_id = np.load('%s/index_battery.npy'%(data_dir),allow_pickle='TRUE')\ncharge_data=np.load('%s/charge_data.npy'%(data_dir),allow_pickle='TRUE').tolist()\ndischarge_data=np.load('%s/discharge_data.npy'%(data_dir),allow_pickle='TRUE').tolist()\nsummary_data=np.load('%s/summary_data.npy'%(data_dir),allow_pickle='TRUE').tolist()\ncharge_norm=np.load('%s/charge_norm.npy'%(data_dir),allow_pickle='TRUE').tolist()\ndischarge_norm=np.load('%s/discharge_norm.npy'%(data_dir),allow_pickle='TRUE').tolist()\nsummary_norm=np.load('%s/summary_norm.npy'%(data_dir),allow_pickle='TRUE').tolist()\nfeature_selector_ch=tf.keras.models.load_model('%s/feature_selector_ch.h5'%(model_dir), compile=False)\nfeature_selector_dis=tf.keras.models.load_model('%s/feature_selector_dis.h5'%(model_dir), compile=False,custom_objects={'mish':mish})\npredictor=tf.keras.models.load_model('%s/predictor.h5'%(model_dir), compile=False,custom_objects={'mish':mish})\nindex=np.load('%s/index_battery.npy'%(data_dir))\n\ncell_feature={}\n\n\n\nfor i in tqdm(range(len(charge_data))):\n charge_feature=feature_selector(feature_selector_ch,\n charge_data[i],charge_norm)\n discharge_feature=feature_selector(feature_selector_dis,\n discharge_data[i],discharge_norm)\n cell_feature['%d'%(i)]=concat_data(charge_feature,discharge_feature,\n summary_data[i]) \ns_norm,rul_norm=re_norm(cell_feature)\nx_in,y_in=process2predict(cell_feature,s_norm,rul_norm)\ntf.keras.backend.clear_session()\nin_x1,in_x2=[x_in[i] for i in index[17:]],[x_in[j] for j in index[:17]]\nin_x2=np.vstack(in_x2).reshape(-1,50,12)\nin_x1=np.vstack(in_x1).reshape(-1,50,12)\nin_y1,in_y2=[y_in[i] for i in index[17:]],[y_in[j] for j in 
index[:17]]\nin_y2=np.vstack(in_y2).reshape(-1,2)\nin_y1=np.vstack(in_y1).reshape(-1,2)\n\npredict_renorm=np.stack((rul_norm,s_norm)).reshape(2,2)\n\np1=predictor.predict(in_x1,batch_size=256)*predict_renorm[:,1]+predict_renorm[:,0]\np2=predictor.predict(in_x2,batch_size=256)*predict_renorm[:,1]+predict_renorm[:,0]\n\nans1=in_y1*predict_renorm[:,1]+predict_renorm[:,0]\nans2=in_y2*predict_renorm[:,1]+predict_renorm[:,0]\n\nprint('training_RUL_mae:%.3f'%(mae(p1[:,0],ans1[:,0])))\nprint('training_S_mae:%.3f'%(mae(p1[:,1],ans1[:,1])))\nprint('testing_RUL_mae:%.3f'%(mae(p2[:,0],ans2[:,0])))\nprint('testing_S_rmae:%.3f'%(mae(p2[:,1],ans2[:,1])))\n" ]
[ [ "numpy.vstack", "numpy.load", "numpy.transpose", "tensorflow.keras.models.load_model", "tensorflow.keras.backend.clear_session", "numpy.abs", "numpy.float32", "tensorflow.keras.backend.softplus", "numpy.hstack", "sklearn.preprocessing.StandardScaler", "numpy.stack", "numpy.pad", "numpy.array" ] ]
Ostyk/unet-plus-plus
[ "924edd8b90856650da2f040fa2ae2db6fcda18b1" ]
[ "train.py" ]
[ "import argparse\nimport os\nfrom collections import OrderedDict\nfrom glob import glob\n\nimport pandas as pd\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport torch.optim as optim\nimport yaml\nfrom albumentations.augmentations import transforms\nfrom albumentations.core.composition import Compose, OneOf\nfrom sklearn.model_selection import train_test_split\nfrom torch.optim import lr_scheduler\nfrom tqdm import tqdm\n\nimport archs\nimport losses\nfrom dataset import Dataset\nfrom metrics import iou_score\nfrom utils import AverageMeter, str2bool\n\nARCH_NAMES = archs.__all__\nLOSS_NAMES = losses.__all__\nLOSS_NAMES.append('BCEWithLogitsLoss')\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--name', default=None,\n help='model name: (default: arch+timestamp)')\n parser.add_argument('--epochs', default=100, type=int, metavar='N',\n help='number of total epochs to run')\n parser.add_argument('-b', '--batch_size', default=16, type=int,\n metavar='N', help='mini-batch size (default: 16)')\n \n # model\n parser.add_argument('--arch', '-a', metavar='ARCH', default='NestedUNet',\n choices=ARCH_NAMES,\n help='model architecture: ' +\n ' | '.join(ARCH_NAMES) +\n ' (default: NestedUNet)')\n parser.add_argument('--deep_supervision', default=False, type=str2bool)\n parser.add_argument('--input_channels', default=3, type=int,\n help='input channels')\n parser.add_argument('--num_classes', default=1, type=int,\n help='number of classes')\n parser.add_argument('--input_w', default=384, type=int,\n help='image width')\n parser.add_argument('--input_h', default=384, type=int,\n help='image height')\n \n # loss\n parser.add_argument('--loss', default='BCEDiceLoss',\n choices=LOSS_NAMES,\n help='loss: ' +\n ' | '.join(LOSS_NAMES) +\n ' (default: BCEDiceLoss)')\n \n # dataset\n parser.add_argument('--dataset', default='dsb2018_96',\n help='dataset name')\n parser.add_argument('--img_ext', default='.png',\n help='image file extension')\n parser.add_argument('--mask_ext', default='.png',\n help='mask file extension')\n\n # optimizer\n parser.add_argument('--optimizer', default='SGD',\n choices=['Adam', 'SGD'],\n help='loss: ' +\n ' | '.join(['Adam', 'SGD']) +\n ' (default: Adam)')\n parser.add_argument('--lr', '--learning_rate', default=1e-3, type=float,\n metavar='LR', help='initial learning rate')\n parser.add_argument('--momentum', default=0.9, type=float,\n help='momentum')\n parser.add_argument('--weight_decay', default=1e-4, type=float,\n help='weight decay')\n parser.add_argument('--nesterov', default=False, type=str2bool,\n help='nesterov')\n\n # scheduler\n parser.add_argument('--scheduler', default='CosineAnnealingLR',\n choices=['CosineAnnealingLR', 'ReduceLROnPlateau', 'MultiStepLR', 'ConstantLR'])\n parser.add_argument('--min_lr', default=1e-5, type=float,\n help='minimum learning rate')\n parser.add_argument('--factor', default=0.1, type=float)\n parser.add_argument('--patience', default=2, type=int)\n parser.add_argument('--milestones', default='1,2', type=str)\n parser.add_argument('--gamma', default=2/3, type=float)\n parser.add_argument('--early_stopping', default=-1, type=int,\n metavar='N', help='early stopping (default: -1)')\n \n parser.add_argument('--num_workers', default=4, type=int)\n\n config = parser.parse_args()\n\n return config\n\n\ndef train(config, train_loader, model, criterion, optimizer):\n avg_meters = {'loss': AverageMeter(),\n 'iou': AverageMeter()}\n\n model.train()\n\n pbar = 
tqdm(total=len(train_loader))\n for input, target, _ in train_loader:\n input = input.cuda()\n target = target.cuda()\n\n # compute output\n if config['deep_supervision']:\n outputs = model(input)\n loss = 0\n for output in outputs:\n loss += criterion(output, target)\n loss /= len(outputs)\n iou = iou_score(outputs[-1], target)\n else:\n output = model(input)\n loss = criterion(output, target)\n iou = iou_score(output, target)\n\n # compute gradient and do optimizing step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n avg_meters['loss'].update(loss.item(), input.size(0))\n avg_meters['iou'].update(iou, input.size(0))\n\n postfix = OrderedDict([\n ('loss', avg_meters['loss'].avg),\n ('iou', avg_meters['iou'].avg),\n ])\n pbar.set_postfix(postfix)\n pbar.update(1)\n pbar.close()\n\n return OrderedDict([('loss', avg_meters['loss'].avg),\n ('iou', avg_meters['iou'].avg)])\n\n\ndef validate(config, val_loader, model, criterion):\n avg_meters = {'loss': AverageMeter(),\n 'iou': AverageMeter()}\n\n # switch to evaluate mode\n model.eval()\n\n with torch.no_grad():\n pbar = tqdm(total=len(val_loader))\n for input, target, _ in val_loader:\n input = input.cuda()\n target = target.cuda()\n\n # compute output\n if config['deep_supervision']:\n outputs = model(input)\n loss = 0\n for output in outputs:\n loss += criterion(output, target)\n loss /= len(outputs)\n iou = iou_score(outputs[-1], target)\n else:\n output = model(input)\n loss = criterion(output, target)\n iou = iou_score(output, target)\n\n avg_meters['loss'].update(loss.item(), input.size(0))\n avg_meters['iou'].update(iou, input.size(0))\n\n postfix = OrderedDict([\n ('loss', avg_meters['loss'].avg),\n ('iou', avg_meters['iou'].avg),\n ])\n pbar.set_postfix(postfix)\n pbar.update(1)\n pbar.close()\n\n return OrderedDict([('loss', avg_meters['loss'].avg),\n ('iou', avg_meters['iou'].avg)])\n\n\ndef main():\n config = vars(parse_args())\n\n if config['name'] is None:\n if config['deep_supervision']:\n config['name'] = '%s_%s_wDS' % (config['dataset'], config['arch'])\n else:\n config['name'] = '%s_%s_woDS' % (config['dataset'], config['arch'])\n os.makedirs('models/%s' % config['name'], exist_ok=True)\n\n print('-' * 20)\n for key in config:\n print('%s: %s' % (key, config[key]))\n print('-' * 20)\n\n with open('models/%s/config.yml' % config['name'], 'w') as f:\n yaml.dump(config, f)\n\n # define loss function (criterion)\n if config['loss'] == 'BCEWithLogitsLoss':\n criterion = nn.BCEWithLogitsLoss().cuda()\n else:\n criterion = losses.__dict__[config['loss']]().cuda()\n\n cudnn.benchmark = True\n\n # create model\n print(\"=> creating model %s\" % config['arch'])\n model = archs.__dict__[config['arch']](config['num_classes'],\n config['input_channels'],\n config['deep_supervision'])\n\n model = model.cuda()\n\n params = filter(lambda p: p.requires_grad, model.parameters())\n if config['optimizer'] == 'Adam':\n optimizer = optim.Adam(\n params, lr=config['lr'], weight_decay=config['weight_decay'])\n elif config['optimizer'] == 'SGD':\n optimizer = optim.SGD(params, lr=config['lr'], momentum=config['momentum'],\n nesterov=config['nesterov'], weight_decay=config['weight_decay'])\n else:\n raise NotImplementedError\n\n if config['scheduler'] == 'CosineAnnealingLR':\n scheduler = lr_scheduler.CosineAnnealingLR(\n optimizer, T_max=config['epochs'], eta_min=config['min_lr'])\n elif config['scheduler'] == 'ReduceLROnPlateau':\n scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, factor=config['factor'], 
patience=config['patience'],\n verbose=1, min_lr=config['min_lr'])\n elif config['scheduler'] == 'MultiStepLR':\n scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[int(e) for e in config['milestones'].split(',')], gamma=config['gamma'])\n elif config['scheduler'] == 'ConstantLR':\n scheduler = None\n else:\n raise NotImplementedError\n\n # Data loading code\n# img_ids = glob(os.path.join('inputs', config['dataset'], 'images', '*' + config['img_ext']))\n# img_ids = [os.path.splitext(os.path.basename(p))[0] for p in img_ids]\n\n# train_img_ids, val_img_ids = train_test_split(img_ids, test_size=0.2, random_state=41)\n\n train_transform = Compose([\n transforms.RandomRotate90(),\n transforms.Flip(),\n OneOf([\n transforms.HueSaturationValue(),\n transforms.RandomBrightness(),\n transforms.RandomContrast(),\n ], p=1),\n transforms.Resize(config['input_h'], config['input_w']),\n transforms.Normalize(),\n ])\n\n val_transform = Compose([\n transforms.Resize(config['input_h'], config['input_w']),\n transforms.Normalize(),\n ])\n\n# train_dataset = Dataset(\n# img_ids=train_img_ids,\n# img_dir=os.path.join('inputs', config['dataset'], 'images'),\n# mask_dir=os.path.join('inputs', config['dataset'], 'masks'),\n# img_ext=config['img_ext'],\n# mask_ext=config['mask_ext'],\n# num_classes=config['num_classes'],\n# transform=train_transform)\n# val_dataset = Dataset(\n# img_ids=val_img_ids,\n# img_dir=os.path.join('inputs', config['dataset'], 'images'),\n# mask_dir=os.path.join('inputs', config['dataset'], 'masks'),\n# img_ext=config['img_ext'],\n# mask_ext=config['mask_ext'],\n# num_classes=config['num_classes'],\n# transform=val_transform)\n\n train_dataset = Dataset(\n root = 'inputs/'+config['dataset'],\n subset = 'train',\n num_classes=1,\n transform=train_transform)\n \n val_dataset = Dataset(\n root = 'inputs/'+config['dataset'],\n subset = 'val',\n num_classes=1,\n transform=val_transform)\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset,\n batch_size=config['batch_size'],\n shuffle=True,\n num_workers=config['num_workers'],\n drop_last=True)\n val_loader = torch.utils.data.DataLoader(\n val_dataset,\n batch_size=config['batch_size'],\n shuffle=False,\n num_workers=config['num_workers'],\n drop_last=False)\n\n log = OrderedDict([\n ('epoch', []),\n ('lr', []),\n ('loss', []),\n ('iou', []),\n ('val_loss', []),\n ('val_iou', []),\n ])\n\n best_iou = 0\n trigger = 0\n for epoch in range(config['epochs']):\n print('Epoch [%d/%d]' % (epoch, config['epochs']))\n\n # train for one epoch\n train_log = train(config, train_loader, model, criterion, optimizer)\n # evaluate on validation set\n val_log = validate(config, val_loader, model, criterion)\n\n if config['scheduler'] == 'CosineAnnealingLR':\n scheduler.step()\n elif config['scheduler'] == 'ReduceLROnPlateau':\n scheduler.step(val_log['loss'])\n\n print('loss %.4f - iou %.4f - val_loss %.4f - val_iou %.4f'\n % (train_log['loss'], train_log['iou'], val_log['loss'], val_log['iou']))\n\n log['epoch'].append(epoch)\n log['lr'].append(config['lr'])\n log['loss'].append(train_log['loss'])\n log['iou'].append(train_log['iou'])\n log['val_loss'].append(val_log['loss'])\n log['val_iou'].append(val_log['iou'])\n\n pd.DataFrame(log).to_csv('models/%s/log.csv' %\n config['name'], index=False)\n\n trigger += 1\n\n if val_log['iou'] > best_iou:\n torch.save(model.state_dict(), 'models/%s/model.pth' %\n config['name'])\n best_iou = val_log['iou']\n print(\"=> saved best model\")\n trigger = 0\n\n # early stopping\n if 
config['early_stopping'] >= 0 and trigger >= config['early_stopping']:\n print(\"=> early stopping\")\n break\n\n torch.cuda.empty_cache()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.optim.lr_scheduler.CosineAnnealingLR", "torch.utils.data.DataLoader", "torch.cuda.empty_cache", "torch.optim.SGD", "torch.no_grad", "pandas.DataFrame", "torch.optim.Adam", "torch.nn.BCEWithLogitsLoss", "torch.optim.lr_scheduler.ReduceLROnPlateau" ] ]
rogersheu/AllLeague-NBA-Predictions
[ "3675277e283ed48b4f0ab6a87b6403e8c29d287e" ]
[ "scripts/daily_database_update.py" ]
[ "import sqlite3\nfrom os import listdir\n\nimport pandas as pd\n\nfrom transfer_data import pick_path\n\n\ndef database_pipeline(path):\n connection = sqlite3.connect(\"./baseData/allPlayerStats.db\")\n\n cursor = connection.cursor()\n\n # See this for various ways to import CSV into sqlite using Python. Pandas used here because files are not prohibitively large.\n # https://stackoverflow.com/questions/2887878/importing-a-csv-file-into-a-sqlite3-database-table-using-python\n\n print(\"SQL scripts starting...\")\n # Drop old tables, might not be necessary since we're dropping them\n sql_file = open(\"./scripts/SQL/drop_old_tables.sql\")\n try:\n sql_as_string = sql_file.read()\n cursor.executescript(sql_as_string)\n sql_file.close()\n except Exception:\n pass\n\n # Decide whether to have user pick path or just set it automatically...\n for fileName in listdir(path):\n if fileName.endswith('.csv'): # Avoid any accidents\n df = pd.read_csv(f'{path}/{fileName}')\n df.to_sql(\n f'{fileName.replace(\".csv\",\"\").split(\"_\")[0]}', connection, if_exists='replace', index=False)\n try:\n date = f'{fileName.replace(\".csv\",\"\").split(\"_\")[1]}'\n except Exception:\n pass\n\n # Make changes to tables\n sql_file = open(\"./scripts/SQL/prep_tables_for_extraction.sql\")\n try:\n sql_as_string = sql_file.read()\n cursor.executescript(sql_as_string)\n except Exception:\n pass\n\n sql_file.close()\n\n # Extract this season's qualified players\n sql_file = open(\"./scripts/SQL/players2022_dbeaver.sql\")\n df_output = pd.read_sql_query(sql_file.read(), connection)\n sql_file.close()\n # sql_as_string = sql_file.read()\n # cursor.executescript(sql_as_string)\n print(df_output)\n df_output.to_csv(f'{path}/stats_{date}.csv', index=False)\n\n print(\"SQL scripts complete.\")\n\n\ndef main():\n data_path = pick_path()\n database_pipeline(data_path)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "pandas.read_csv" ] ]
tonyshao5/Tensorflow-up
[ "f8f8fce9436c40cad298f6211db2be3a18480bad" ]
[ "tflib/data/disk_image.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport multiprocessing\n\nimport tensorflow as tf\nfrom tflib.data.dataset import batch_dataset, Dataset\n\n\n_N_CPU = multiprocessing.cpu_count()\n\n\ndef disk_image_batch_dataset(img_paths, batch_size, labels=None, prefetch_batch=_N_CPU + 1, drop_remainder=True, filter=None,\n map_func=None, num_threads=_N_CPU, shuffle=True, buffer_size=4096, repeat=-1):\n \"\"\"Disk image batch dataset.\n\n This function is suitable for jpg and png files\n\n img_paths: string list or 1-D tensor, each of which is an iamge path\n labels: label list/tuple_of_list or tensor/tuple_of_tensor, each of which is a corresponding label\n \"\"\"\n if labels is None:\n dataset = tf.data.Dataset.from_tensor_slices(img_paths)\n elif isinstance(labels, tuple):\n dataset = tf.data.Dataset.from_tensor_slices((img_paths,) + tuple(labels))\n else:\n dataset = tf.data.Dataset.from_tensor_slices((img_paths, labels))\n\n def parse_func(path, *label):\n img = tf.read_file(path)\n img = tf.image.decode_png(img, 3)\n return (img,) + label\n\n if map_func:\n def map_func_(*args):\n return map_func(*parse_func(*args))\n else:\n map_func_ = parse_func\n\n # dataset = dataset.map(parse_func, num_parallel_calls=num_threads) is slower\n\n dataset = batch_dataset(dataset, batch_size, prefetch_batch, drop_remainder, filter,\n map_func_, num_threads, shuffle, buffer_size, repeat)\n\n return dataset\n\n\nclass DiskImageData(Dataset):\n \"\"\"DiskImageData.\n\n This function is suitable for jpg and png files\n\n img_paths: string list or 1-D tensor, each of which is an iamge path\n labels: label list or tensor, each of which is a corresponding label\n \"\"\"\n\n def __init__(self, img_paths, batch_size, labels=None, prefetch_batch=_N_CPU + 1, drop_remainder=True, filter=None,\n map_func=None, num_threads=_N_CPU, shuffle=True, buffer_size=4096, repeat=-1, sess=None):\n super(DiskImageData, self).__init__()\n dataset = disk_image_batch_dataset(img_paths, batch_size, labels, prefetch_batch, drop_remainder, filter,\n map_func, num_threads, shuffle, buffer_size, repeat)\n self._bulid(dataset, sess)\n" ]
[ [ "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.image.decode_png", "tensorflow.read_file" ] ]
machanic/TangentAttack
[ "17c1a8e93f9bbd03e209e8650631af744a0ff6b8" ]
[ "adversarial_defense/model/feature_defense_model.py" ]
[ "import glob\nimport os\nimport pretrainedmodels\nimport torch\nfrom torch import nn\nfrom torchvision import models as torch_models\nimport cifar_models as models\nfrom adversarial_defense.model.denoise_resnet import DenoiseResNet50, DenoiseResNet101, DenoiseResNet152\nfrom adversarial_defense.model.pcl_resnet import PrototypeConformityLossResNet\nfrom cifar_models_myself import Conv3, DenseNet121, DenseNet169, DenseNet201, GoogLeNet, MobileNet, MobileNetV2, \\\n ResNet18, \\\n ResNet34, ResNet50, ResNet101, ResNet152, PNASNetA, PNASNetB, EfficientNetB0, DPN26, DPN92, ResNeXt29_2x64d, \\\n ResNeXt29_4x64d, ResNeXt29_8x64d, ResNeXt29_32x4d, SENet18, ShuffleNetG2, ShuffleNetG3, vgg11, vgg13, vgg16, vgg19, \\\n PreActResNet18, PreActResNet34, PreActResNet50, PreActResNet101, PreActResNet152, wideresnet28, wideresnet34, \\\n wideresnet40, carlinet, wideresnet28drop, wideresnet34drop, wideresnet40drop\nfrom cifar_models_myself.miscellaneous import Identity\nfrom config import pretrained_cifar_model_conf, IN_CHANNELS, IMAGE_SIZE, CLASS_NUM, PROJECT_PATH\nfrom cifar_models_myself.efficient_densenet import EfficientDenseNet\nfrom cifar_models_myself.ghostnet import ghost_net\nfrom tiny_imagenet_models.densenet import densenet161, densenet121, densenet169, densenet201\nfrom tiny_imagenet_models.resnext import resnext101_32x4d, resnext101_64x4d\nimport torchvision.models as vision_models\nfrom tiny_imagenet_models.inception import inception_v3\nfrom tiny_imagenet_models.wrn import tiny_imagenet_wrn\n\n\nclass FeatureDefenseModel(nn.Module):\n \"\"\"\n A StandardModel object wraps a cnn model.\n This model always accept standard image: in [0, 1] range, RGB order, un-normalized, NCHW format\n \"\"\"\n def __init__(self, dataset, arch, no_grad=True):\n super(FeatureDefenseModel, self).__init__()\n # init cnn model\n self.in_channels = IN_CHANNELS[dataset]\n self.dataset = dataset\n if \"denoise\" in arch.lower():\n # CIFAR-100@ResNet50_with_denoise_NonLocal_Filter_3.pth.tar\n trained_model_path = \"{root}/train_pytorch_model/adversarial_train/feature_denoise/{dataset}@{arch}_NonLocal_Filter_3.pth.tar\".format(root=PROJECT_PATH, dataset=dataset, arch=arch)\n assert os.path.exists(trained_model_path), \"{} does not exist!\".format(trained_model_path)\n elif dataset.startswith(\"CIFAR\"):\n trained_model_path = \"{root}/train_pytorch_model/real_image_model/{dataset}-pretrained/{arch}/checkpoint.pth.tar\".format(root=PROJECT_PATH, dataset=dataset, arch=arch)\n assert os.path.exists(trained_model_path), \"{} does not exist!\".format(trained_model_path)\n elif dataset == \"TinyImageNet\":\n arch = arch.replace(\"resnet-\", \"resnet\")\n trained_model_path = \"{root}/train_pytorch_model/real_image_model/{dataset}@{arch}@*.pth.tar\".format(root=PROJECT_PATH, dataset=dataset, arch=arch)\n trained_model_path_list = list(glob.glob(trained_model_path))\n assert len(trained_model_path_list)>0, \"{} does not exist!\".format(trained_model_path)\n trained_model_path = trained_model_path_list[0]\n else:\n trained_model_path = \"{root}/train_pytorch_model/real_image_model/{dataset}-pretrained/checkpoints/{arch}*.pth\".format(\n root=PROJECT_PATH, dataset=dataset, arch=arch)\n trained_model_path_ls = list(glob.glob(trained_model_path))\n assert trained_model_path_ls, \"{} does not exist!\".format(trained_model_path)\n trained_model_path = trained_model_path_ls[0]\n\n self.cnn = self.make_model(dataset, arch, self.in_channels, CLASS_NUM[dataset], trained_model_path=trained_model_path)\n # init cnn model 
meta-information\n self.mean = torch.FloatTensor(self.cnn.mean).view(1, self.in_channels, 1, 1).cuda()\n self.mean.requires_grad =True\n\n self.std = torch.FloatTensor(self.cnn.std).view(1, self.in_channels, 1, 1).cuda()\n self.std.requires_grad = True\n\n self.input_space = self.cnn.input_space # 'RGB' or 'GBR'\n self.input_range = self.cnn.input_range # [0, 1] or [0, 255]\n self.input_size = self.cnn.input_size\n self.no_grad = no_grad\n self.arch = arch\n\n @staticmethod\n def check_arch(arch, dataset):\n if dataset == \"ImageNet\":\n return arch in pretrainedmodels.__dict__\n elif dataset == \"TinyImageNet\":\n trained_model_path = \"{root}/train_pytorch_model/real_image_model/{dataset}@{arch}@*.pth.tar\".format(\n root=PROJECT_PATH, dataset=dataset, arch=arch)\n trained_model_path_list = list(glob.glob(trained_model_path))\n return len(trained_model_path_list) > 0\n else:\n trained_model_path = \"{root}/train_pytorch_model/real_image_model/{dataset}-pretrained/{arch}*\".format(\n root=PROJECT_PATH, dataset=dataset, arch=arch)\n trained_model_path = glob.glob(trained_model_path)\n if len(trained_model_path) > 0:\n return os.path.exists(trained_model_path[0] + \"/checkpoint.pth.tar\")\n else:\n return False\n\n\n def forward(self, x):\n # assign dropout probability\n # if hasattr(self, 'drop'):\n # self.cnn.drop = self.drop\n # channel order\n if self.input_space == 'BGR':\n x = x[:, [2, 1, 0], :, :] # pytorch does not support negative stride index (::-1) yet\n # input range\n if max(self.input_range) == 255:\n x = x * 255\n # normalization\n x = (x - self.mean.type(x.dtype).to(x.device)) / self.std.type(x.dtype).to(x.device)\n if self.no_grad:\n with torch.no_grad():\n if \"pcl\" in self.arch:\n feats128, feats256, feats1024, x = self.cnn(x)\n else:\n x = self.cnn(x)\n else:\n if \"pcl\" in self.arch:\n feats128, feats256, feats1024, x = self.cnn(x)\n else:\n x = self.cnn(x)\n\n x = x.view(x.size(0), -1)\n if \"pcl\" in self.arch:\n return feats128, feats256, feats1024, x\n return x\n\n def load_weight_from_pth_checkpoint(self, model, fname):\n raw_state_dict = torch.load(fname, map_location='cpu')\n if \"state_dict\" in raw_state_dict:\n raw_state_dict = raw_state_dict[\"state_dict\"]\n state_dict = dict()\n for key, val in raw_state_dict.items():\n new_key = key.replace('module.', '')\n state_dict[new_key] = val\n model.load_state_dict(state_dict)\n\n\n\n def construct_cifar_model(self, arch, dataset, num_classes):\n if \"denoise\" not in arch.lower():\n conf = pretrained_cifar_model_conf[dataset][arch]\n arch = arch.split(\"-\")[0]\n if arch.startswith('resnext'):\n model = models.__dict__[arch](\n cardinality=conf[\"cardinality\"],\n num_classes=num_classes,\n depth=conf[\"depth\"],\n widen_factor=conf[\"widen_factor\"],\n dropRate=conf[\"drop\"],\n )\n elif arch.startswith('densenet'):\n model = models.__dict__[arch](\n num_classes=num_classes,\n depth=conf[\"depth\"],\n growthRate=conf[\"growthRate\"],\n compressionRate=conf[\"compressionRate\"],\n dropRate=conf[\"drop\"],\n )\n elif arch.startswith('wrn'):\n model = models.__dict__[arch](\n num_classes=num_classes,\n depth=conf[\"depth\"],\n widen_factor=conf[\"widen_factor\"],\n dropRate=conf[\"drop\"],\n )\n elif arch.endswith('resnet') and \"pcl_\" not in arch and \"denoise\" not in arch:\n model = models.__dict__[arch](\n num_classes=num_classes,\n depth=conf[\"depth\"],\n block_name=conf[\"block_name\"],\n )\n elif \"pcl_resnet\" in arch:\n model = PrototypeConformityLossResNet(in_channels=IN_CHANNELS[dataset], 
depth=conf[\"depth\"], num_classes=CLASS_NUM[dataset])\n elif arch == \"DenoiseResNet50\":\n model = DenoiseResNet50(in_channels=IN_CHANNELS[dataset], num_classes=CLASS_NUM[dataset], whether_denoising=True)\n elif arch == \"DenoiseResNet101\":\n model = DenoiseResNet101(in_channels=IN_CHANNELS[dataset], num_classes=CLASS_NUM[dataset], whether_denoising=True)\n elif arch == \"DenoiseResNet152\":\n model = DenoiseResNet152(in_channels=IN_CHANNELS[dataset], num_classes=CLASS_NUM[dataset], whether_denoising=True)\n else:\n model = models.__dict__[arch](num_classes=num_classes)\n return model\n\n def make_model(self, dataset, arch, in_channel, num_classes, trained_model_path=None):\n \"\"\"\n Make model, and load pre-trained weights.\n :param dataset: cifar10 or imagenet\n :param arch: arch name, e.g., alexnet_bn\n :return: model (in cpu and training mode)\n \"\"\"\n if dataset in ['CIFAR-10',\"CIFAR-100\", \"MNIST\",\"FashionMNIST\"]:\n assert trained_model_path is not None and os.path.exists(trained_model_path), \"Pretrained weight model file {} does not exist!\".format(trained_model_path)\n if arch == 'gdas':\n model = models.gdas(in_channel, num_classes)\n model.mean = [125.3 / 255, 123.0 / 255, 113.9 / 255]\n model.std = [63.0 / 255, 62.1 / 255, 66.7 / 255]\n model.input_space = 'RGB'\n model.input_range = [0, 1]\n model.input_size = [in_channel, IMAGE_SIZE[dataset][0], IMAGE_SIZE[dataset][1]]\n elif arch == 'pyramidnet272':\n model = models.pyramidnet272(in_channel, num_classes)\n model.mean = [0.49139968, 0.48215841, 0.44653091]\n model.std = [0.24703223, 0.24348513, 0.26158784]\n model.input_space = 'RGB'\n model.input_range = [0, 1]\n model.input_size = [in_channel, IMAGE_SIZE[dataset][0], IMAGE_SIZE[dataset][1]]\n else:\n model = self.construct_cifar_model(arch, dataset, num_classes) #\n model.mean = [0.4914, 0.4822, 0.4465]\n model.std = [0.2023, 0.1994, 0.2010]\n model.input_space = 'RGB'\n model.input_range = [0, 1]\n model.input_size = [in_channel, IMAGE_SIZE[dataset][0], IMAGE_SIZE[dataset][1]]\n # self.load_weight_from_pth_checkpoint(model, trained_model_path)\n elif dataset == \"TinyImageNet\":\n model = MetaLearnerModelBuilder.construct_tiny_imagenet_model(arch, dataset)\n model.input_space = 'RGB'\n model.input_range = [0, 1]\n model.mean = [0.4914, 0.4822, 0.4465] # if \"defense_resnet\" not in arch and \"denoise\" not in arch: [0,0,0] . 
[1,1,1]\n model.std = [0.2023, 0.1994, 0.2010]\n model.input_size = [in_channel,IMAGE_SIZE[dataset][0], IMAGE_SIZE[dataset][1]]\n # model.load_state_dict(torch.load(trained_model_path, map_location=lambda storage, location: storage)[\"state_dict\"])\n elif dataset == 'ImageNet':\n os.environ[\"TORCH_HOME\"] = \"{}/train_pytorch_model/real_image_model/ImageNet-pretrained\".format(PROJECT_PATH)\n model = pretrainedmodels.__dict__[arch](num_classes=1000, pretrained=\"imagenet\")\n return model\n\n\nclass MetaLearnerModelBuilder(object):\n @staticmethod\n def construct_cifar_model(arch, dataset):\n if arch == \"conv3\":\n network = Conv3(IN_CHANNELS[dataset], IMAGE_SIZE[dataset], CLASS_NUM[dataset])\n elif arch == \"densenet121\":\n network = DenseNet121(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"densenet169\":\n network = DenseNet169(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"densenet201\":\n network = DenseNet201(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"googlenet\":\n network = GoogLeNet(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"mobilenet\":\n network = MobileNet(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"mobilenet_v2\":\n network = MobileNetV2(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"ghost_net\":\n network = ghost_net(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"resnet18\":\n network = ResNet18(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"resnet34\":\n network = ResNet34(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"resnet50\":\n network = ResNet50(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"resnet101\":\n network = ResNet101(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"resnet152\":\n network = ResNet152(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"pnasnetA\":\n network = PNASNetA(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"pnasnetB\":\n network = PNASNetB(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"efficientnet\":\n network = EfficientNetB0(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"dpn26\":\n network = DPN26(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"dpn92\":\n network = DPN92(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"resnext29_2\":\n network = ResNeXt29_2x64d(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"resnext29_4\":\n network = ResNeXt29_4x64d(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"resnext29_8\":\n network = ResNeXt29_8x64d(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"resnext29_32\":\n network = ResNeXt29_32x4d(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"senet18\":\n network = SENet18(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"shufflenet_G2\":\n network = ShuffleNetG2(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"shufflenet_G3\":\n network = ShuffleNetG3(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"vgg11\":\n network = vgg11(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"vgg13\":\n network = vgg13(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"vgg16\":\n network = vgg16(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"vgg19\":\n network = vgg19(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"preactresnet18\":\n network = PreActResNet18(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"preactresnet34\":\n network = PreActResNet34(IN_CHANNELS[dataset], 
CLASS_NUM[dataset])\n elif arch == \"preactresnet50\":\n network = PreActResNet50(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"preactresnet101\":\n network = PreActResNet101(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"preactresnet152\":\n network = PreActResNet152(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"wideresnet28\":\n network = wideresnet28(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"wideresnet28drop\":\n network = wideresnet28drop(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"wideresnet34\":\n network = wideresnet34(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"wideresnet34drop\":\n network = wideresnet34drop(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"wideresnet40\":\n network = wideresnet40(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"wideresnet40drop\":\n network = wideresnet40drop(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == \"carlinet\":\n network = carlinet(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch == 'efficient_densenet':\n depth = 40\n block_config = [(depth - 4) // 6 for _ in range(3)]\n network = EfficientDenseNet(IN_CHANNELS[dataset], block_config=block_config,\n num_classes=CLASS_NUM[dataset], small_inputs=dataset != \"ImageNet\", efficient=False)\n return network\n\n @staticmethod\n def construct_imagenet_model(arch, dataset):\n os.environ[\"TORCH_HOME\"] = \"{}/train_pytorch_model/real_image_model/ImageNet-pretrained\".format(PROJECT_PATH)\n if arch == 'efficient_densenet':\n depth = 40\n block_config = [(depth - 4) // 6 for _ in range(3)]\n return EfficientDenseNet(IN_CHANNELS[dataset],block_config=block_config, num_classes=CLASS_NUM[dataset], small_inputs=False, efficient=False)\n elif arch == \"ghost_net\":\n network = ghost_net(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n return network\n model = vision_models.__dict__[arch](pretrained=False)\n return model\n\n @staticmethod\n def construct_tiny_imagenet_model(arch, dataset):\n if not arch.startswith(\"densenet\") and not arch.startswith(\"resnext\") and arch in torch_models.__dict__:\n network = torch_models.__dict__[arch](pretrained=False)\n num_classes = CLASS_NUM[dataset]\n if arch.startswith(\"resnet\"):\n num_ftrs = network.fc.in_features\n network.fc = nn.Linear(num_ftrs, num_classes)\n elif arch.startswith(\"densenet\"):\n if arch == \"densenet161\":\n network = densenet161(pretrained=False)\n elif arch == \"densenet121\":\n network = densenet121(pretrained=False)\n elif arch == \"densenet169\":\n network = densenet169(pretrained=False)\n elif arch == \"densenet201\":\n network = densenet201(pretrained=False)\n elif arch == \"resnext32_4\":\n network = resnext101_32x4d(pretrained=None)\n elif arch == \"resnext64_4\":\n network = resnext101_64x4d(pretrained=None)\n elif arch == \"ghost_net\":\n network = ghost_net(IN_CHANNELS[dataset], CLASS_NUM[dataset])\n elif arch.startswith(\"inception\"):\n network = inception_v3(pretrained=False)\n elif arch == \"WRN-28-10-drop\":\n network = tiny_imagenet_wrn(in_channels=IN_CHANNELS[dataset],depth=28,num_classes=CLASS_NUM[dataset],widen_factor=10, dropRate=0.3)\n elif arch == \"WRN-40-10-drop\":\n network = tiny_imagenet_wrn(in_channels=IN_CHANNELS[dataset], depth=40, num_classes=CLASS_NUM[dataset],\n widen_factor=10, dropRate=0.3)\n elif arch.startswith(\"vgg\"):\n network.avgpool = Identity()\n network.classifier[0] = nn.Linear(512 * 2 * 2, 4096) # 64 /2**5 = 2\n network.classifier[-1] = nn.Linear(4096, num_classes)\n elif 
\"pcl_resnet\" in arch:\n network = PrototypeConformityLossResNet(in_channels=IN_CHANNELS[dataset], depth=pretrained_cifar_model_conf[dataset][arch][\"depth\"], num_classes=CLASS_NUM[dataset])\n elif arch == \"DenoiseResNet50\":\n network = DenoiseResNet50(in_channels=IN_CHANNELS[dataset], num_classes=CLASS_NUM[dataset], whether_denoising=True)\n elif arch == \"DenoiseResNet101\":\n network = DenoiseResNet101(in_channels=IN_CHANNELS[dataset], num_classes=CLASS_NUM[dataset], whether_denoising=True)\n elif arch == \"DenoiseResNet152\":\n network = DenoiseResNet152(in_channels=IN_CHANNELS[dataset], num_classes=CLASS_NUM[dataset], whether_denoising=True)\n return network\n\n\n\n" ]
[ [ "torch.FloatTensor", "torch.no_grad", "torch.nn.Linear", "torch.load" ] ]
miksu/edward2
[ "973acdb23701f320ebaee8a56fc44d4414acfa4e" ]
[ "edward2/tensorflow/initializers.py" ]
[ "# coding=utf-8\n# Copyright 2019 The Edward2 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Initializers.\n\nThis module extends `tf.keras.initializers` with the notion of \"trainable\ninitializers\", where initializers to weights and biases in `tf.keras.layers` may\nthemselves carry parameters. For example, consider a weight initializer which\nreturns a variational distribution: this is reified as an `ed.RandomVariable`\nparameterized by `tf.Variables`.\n\nOne subtlety is how `tf.keras.constraints` are used on the parameters of\ntrainable initializers. Typically, Keras constraints are used with projected\ngradient descent, where one performs unconstrained optimization and then applies\na projection (the constraint) after each gradient update. To stay in line with\nprobabilistic literature, trainable initializers apply constraints on the\n`tf.Variables` themselves (i.e., a constrained parameterization) and do not\napply projections during optimization.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\n\nfrom edward2.tensorflow import constraints\nfrom edward2.tensorflow import generated_random_variables\nfrom edward2.tensorflow import regularizers\nimport six\nimport tensorflow.compat.v1 as tf1\nimport tensorflow.compat.v2 as tf\nimport tensorflow_probability as tfp\n\n\n# From `tensorflow/python/ops/init_ops.py`\ndef _compute_fans(shape):\n \"\"\"Computes the number of input and output units for a weight shape.\n\n Args:\n shape: Integer shape tuple or TF tensor shape.\n\n Returns:\n A tuple of scalars (fan_in, fan_out).\n \"\"\"\n if len(shape) < 1: # Just to avoid errors for constants.\n fan_in = fan_out = 1\n elif len(shape) == 1:\n fan_in = fan_out = shape[0]\n elif len(shape) == 2:\n fan_in = shape[0]\n fan_out = shape[1]\n else:\n # Assuming convolution kernels (2D, 3D, or more).\n # kernel shape: (..., input_depth, depth)\n receptive_field_size = 1.\n for dim in shape[:-2]:\n receptive_field_size *= dim\n fan_in = shape[-2] * receptive_field_size\n fan_out = shape[-1] * receptive_field_size\n if isinstance(fan_in, tf1.Dimension):\n fan_in = fan_in.value\n if isinstance(fan_out, tf1.Dimension):\n fan_out = fan_out.value\n return fan_in, fan_out\n\n\nclass ScaledNormalStdDev(tf.keras.initializers.VarianceScaling):\n \"\"\"Initializer capable of adapting its scale to the shape of weights tensors.\n\n This initializes the standard deviation parameter of a Trainable Normal\n distribution with a scale based on the shape of the weights tensor.\n Additionally, A small amount of noise will be added to break weigh symmetry.\n\n With `distribution=\"truncated_normal\" or \"untruncated_normal\"`, the standard\n deviation (after truncation, if used) is `stddev = sqrt(scale / n)`, where n\n is:\n - number of input units in the weight tensor, if mode = \"fan_in\"\n - number of output units, if mode = \"fan_out\"\n - average of the numbers of input and output units, if mode = \"fan_avg\"\n 
\"\"\"\n\n def __init__(self,\n scale=1.0,\n mode='fan_in',\n distribution='untruncated_normal',\n seed=None):\n \"\"\"Constructs the initializer.\n\n Args:\n scale: Scaling factor (positive float).\n mode: One of \"fan_in\", \"fan_out\", \"fan_avg\".\n distribution: Random distribution to use. One of \"truncated_normal\", or\n \"untruncated_normal\".\n seed: A Python integer. Used to create random seeds. See\n `tf.set_random_seed`\n for behavior.\n\n Raises:\n ValueError: In case of an invalid value for the \"scale\", mode\" or\n \"distribution\" arguments.\n \"\"\"\n distribution = distribution.lower()\n if distribution not in {'truncated_normal', 'untruncated_normal'}:\n raise ValueError('Invalid `distribution` argument:', distribution)\n super(ScaledNormalStdDev, self).__init__(scale=scale, mode=mode,\n distribution=distribution,\n seed=seed)\n\n def __call__(self, shape, dtype=None):\n if dtype is None:\n dtype = self.dtype\n scale = self.scale\n scale_shape = shape\n fan_in, fan_out = _compute_fans(scale_shape)\n if self.mode == 'fan_in':\n scale /= max(1., fan_in)\n elif self.mode == 'fan_out':\n scale /= max(1., fan_out)\n else:\n scale /= max(1., (fan_in + fan_out) / 2.)\n if self.distribution == 'truncated_normal':\n # constant from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)\n stddev = math.sqrt(scale) / .87962566103423978\n else: # self.distribution == 'untruncated_normal':\n stddev = math.sqrt(scale)\n return tf.random.truncated_normal(shape, mean=stddev, stddev=stddev*0.1,\n dtype=dtype, seed=self.seed)\n\n\nclass TrainableHalfCauchy(tf.keras.layers.Layer):\n \"\"\"Half-Cauchy distribution initializer with trainable parameters.\"\"\"\n\n def __init__(self,\n loc_initializer=tf.keras.initializers.TruncatedNormal(\n stddev=1e-5),\n scale_initializer=tf.keras.initializers.TruncatedNormal(\n mean=-3., stddev=0.1),\n loc_regularizer=None,\n scale_regularizer=None,\n loc_constraint=None,\n scale_constraint='softplus',\n seed=None,\n **kwargs):\n \"\"\"Constructs the initializer.\"\"\"\n super(TrainableHalfCauchy, self).__init__(**kwargs)\n self.loc_initializer = get(loc_initializer)\n self.scale_initializer = get(scale_initializer)\n self.loc_regularizer = regularizers.get(loc_regularizer)\n self.scale_regularizer = regularizers.get(scale_regularizer)\n self.loc_constraint = constraints.get(loc_constraint)\n self.scale_constraint = constraints.get(scale_constraint)\n self.seed = seed\n\n def build(self, shape, dtype=None):\n if dtype is None:\n dtype = self.dtype\n\n self.loc = self.add_weight(\n 'loc',\n shape=shape,\n initializer=self.loc_initializer,\n regularizer=self.loc_regularizer,\n constraint=None,\n dtype=dtype,\n trainable=True)\n self.scale = self.add_weight(\n 'scale',\n shape=shape,\n initializer=self.scale_initializer,\n regularizer=self.scale_regularizer,\n constraint=None,\n dtype=dtype,\n trainable=True)\n self.built = True\n\n def __call__(self, shape, dtype=None):\n if not self.built:\n self.build(shape, dtype)\n loc = self.loc\n if self.loc_constraint:\n loc = self.loc_constraint(loc)\n scale = self.scale\n if self.scale_constraint:\n scale = self.scale_constraint(scale)\n return generated_random_variables.Independent(\n generated_random_variables.HalfCauchy(loc=loc,\n scale=scale).distribution,\n reinterpreted_batch_ndims=len(shape))\n\n def get_config(self):\n return {\n 'loc_initializer':\n serialize(self.loc_initializer),\n 'scale_initializer':\n serialize(self.scale_initializer),\n 'loc_regularizer':\n 
regularizers.serialize(self.loc_regularizer),\n 'scale_regularizer':\n regularizers.serialize(self.scale_regularizer),\n 'loc_constraint':\n constraints.serialize(self.loc_constraint),\n 'scale_constraint':\n constraints.serialize(self.scale_constraint),\n 'seed': self.seed,\n }\n\n\nclass TrainableNormal(tf.keras.layers.Layer):\n \"\"\"Random normal op as an initializer with trainable mean and stddev.\"\"\"\n\n def __init__(self,\n mean_initializer=tf.keras.initializers.TruncatedNormal(\n stddev=1e-5),\n stddev_initializer=tf.keras.initializers.TruncatedNormal(\n mean=-3., stddev=0.1),\n mean_regularizer=None,\n stddev_regularizer=None,\n mean_constraint=None,\n stddev_constraint='softplus',\n seed=None,\n **kwargs):\n \"\"\"Constructs the initializer.\"\"\"\n super(TrainableNormal, self).__init__(**kwargs)\n self.mean_initializer = get(mean_initializer)\n self.stddev_initializer = get(stddev_initializer)\n self.mean_regularizer = regularizers.get(mean_regularizer)\n self.stddev_regularizer = regularizers.get(stddev_regularizer)\n self.mean_constraint = constraints.get(mean_constraint)\n self.stddev_constraint = constraints.get(stddev_constraint)\n self.seed = seed\n\n def build(self, shape, dtype=None):\n if dtype is None:\n dtype = self.dtype\n\n self.mean = self.add_weight(\n 'mean',\n shape=shape,\n initializer=self.mean_initializer,\n regularizer=self.mean_regularizer,\n constraint=None,\n dtype=dtype,\n trainable=True)\n self.stddev = self.add_weight(\n 'stddev',\n shape=shape,\n initializer=self.stddev_initializer,\n regularizer=self.stddev_regularizer,\n constraint=None,\n dtype=dtype,\n trainable=True)\n self.built = True\n\n def __call__(self, shape, dtype=None):\n if not self.built:\n self.build(shape, dtype)\n mean = self.mean\n if self.mean_constraint:\n mean = self.mean_constraint(mean)\n stddev = self.stddev\n if self.stddev_constraint:\n stddev = self.stddev_constraint(stddev)\n return generated_random_variables.Independent(\n generated_random_variables.Normal(loc=mean, scale=stddev).distribution,\n reinterpreted_batch_ndims=len(shape))\n\n def get_config(self):\n return {\n 'mean_initializer':\n serialize(self.mean_initializer),\n 'stddev_initializer':\n serialize(self.stddev_initializer),\n 'mean_regularizer':\n regularizers.serialize(self.mean_regularizer),\n 'stddev_regularizer':\n regularizers.serialize(self.stddev_regularizer),\n 'mean_constraint':\n constraints.serialize(self.mean_constraint),\n 'stddev_constraint':\n constraints.serialize(self.stddev_constraint),\n 'seed': self.seed,\n }\n\n\nclass TrainableHeNormal(TrainableNormal):\n \"\"\"Trainable normal initialized per He et al. 2015, given a ReLU nonlinearity.\n\n The distribution is initialized to a Normal scaled by `sqrt(2 / fan_in)`,\n where `fan_in` is the number of input units. A ReLU nonlinearity is assumed\n for this initialization scheme.\n\n References:\n He K, Zhang X, Ren S, Sun J. Delving deep into rectifiers: Surpassing\n human-level performance on imagenet classification. In Proceedings of the\n IEEE international conference on computer vision 2015 (pp. 
1026-1034).\n https://arxiv.org/abs/1502.01852\n \"\"\"\n\n def __init__(self, seed=None):\n super(TrainableHeNormal, self).__init__(\n mean_initializer=tf.keras.initializers.he_normal(seed),\n seed=seed)\n\n def get_config(self):\n return {\n 'seed': self.seed,\n }\n\n\nclass TrainableGlorotNormal(TrainableNormal):\n \"\"\"Trainable normal initialized per Glorot and Bengio, 2010.\n\n The distribution is initialized to a Normal scaled by `sqrt(2 / fan_in +\n fan_out)`, where `fan_in` is the number of input units and `fan_out` is the\n number of output units.\n\n References:\n Glorot X, Bengio Y. Understanding the difficulty of training deep\n feedforward neural networks. In Proceedings of the thirteenth international\n conference on artificial intelligence and statistics 2010 Mar 31 (pp.\n 249-256). http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf\n \"\"\"\n\n def __init__(self, seed=None):\n super(TrainableGlorotNormal, self).__init__(\n mean_initializer=tf.keras.initializers.GlorotNormal(seed),\n seed=seed)\n\n def get_config(self):\n return {\n 'seed': self.seed,\n }\n\n\nclass RandomSign(tf.keras.initializers.Initializer):\n \"\"\"Initializer that generates tensors initialized to +/- 1.\n\n Attributes:\n probs: probability of +1.\n dtype: tensorflow dtype.\n seed: A Python integer. Used to create random seeds. See\n `tf.set_random_seed`\n \"\"\"\n\n def __init__(self, probs=1.0, seed=None, dtype=tf.float32):\n self.probs = probs\n self.seed = seed\n self.dtype = dtype\n\n def __call__(self, shape, dtype=None, partition_info=None):\n if dtype is None:\n dtype = self.dtype\n bernoulli = tfp.distributions.Bernoulli(probs=self.probs,\n dtype=dtype)\n return 2. * bernoulli.sample(shape, self.seed) - 1.\n\n def get_config(self):\n return {\n 'dtype': self.dtype.name,\n 'seed': self.seed,\n 'probs': self.probs\n }\n\n\n# Compatibility aliases, following tf.keras\n\n# pylint: disable=invalid-name\nscaled_normal_std_dev = ScaledNormalStdDev\ntrainable_half_cauchy = TrainableHalfCauchy\ntrainable_normal = TrainableNormal\ntrainable_he_normal = TrainableHeNormal\ntrainable_glorot_normal = TrainableGlorotNormal\nrandom_sign = RandomSign\n# pylint: enable=invalid-name\n\n# Utility functions, following tf.keras\n\n\ndef serialize(initializer):\n return tf.keras.utils.serialize_keras_object(initializer)\n\n\ndef deserialize(config, custom_objects=None):\n return tf.keras.utils.deserialize_keras_object(\n config,\n module_objects=globals(),\n custom_objects=custom_objects,\n printable_module_name='initializers')\n\n\ndef get(identifier, value=None):\n \"\"\"Getter for loading from strings; falls back to Keras as needed.\"\"\"\n if value is None:\n value = identifier\n if identifier is None:\n return None\n elif isinstance(identifier, dict):\n try:\n return deserialize(identifier)\n except ValueError:\n pass\n elif isinstance(identifier, six.string_types):\n config = {'class_name': str(identifier), 'config': {}}\n try:\n return deserialize(config)\n except ValueError:\n pass\n elif callable(identifier):\n return identifier\n return tf.keras.initializers.get(value)\n" ]
[ [ "tensorflow.compat.v2.keras.initializers.he_normal", "tensorflow.compat.v2.keras.utils.serialize_keras_object", "tensorflow.compat.v2.keras.initializers.get", "tensorflow.compat.v2.keras.initializers.GlorotNormal", "tensorflow.compat.v2.random.truncated_normal", "tensorflow.compat.v2.keras.initializers.TruncatedNormal" ] ]
3778/icd-prediction-mimic
[ "fb8dfc3140e6cf690690b04eddc735f4f20612cf" ]
[ "MIMIC_train_baselines.py" ]
[ "# Copyright 2020, 37.78 Tecnologia Ltda.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n## Train Logistic Regression and Constant models\n\nimport argparse\nimport tensorflow as tf\n\nfrom constants import DATA_DIR, SAVE_DIR\nimport datasets\nimport feature_extraction as fx\nimport model_functions as fun\nimport utils\n\ndef main(args):\n\n save_path = SAVE_DIR + args.MODEL_NAME\n\n # Clear session\n tf.keras.backend.clear_session()\n\n # Load data and embeddings\n mimic = datasets.MIMIC_Dataset()\n mimic.load_preprocessed()\n mimic.split()\n\n # Get model class\n model = utils.get_model(args)\n\n if args.MODEL_NAME == 'lr':\n tfidf = fx.TFIDF(args)\n tfidf.fit(mimic)\n tfidf.transform(mimic)\n\n # Instantiate callback\n f1_callback = fun.f1_callback_save(model, validation_data=(tfidf.x_val, mimic.y_val),\n best_name= save_path)\n\n callbacks = [f1_callback] \n\n \n # Fit\n model.fit(tfidf.x_train, mimic.y_train, validation_data=(tfidf.x_val, mimic.y_val), callbacks=callbacks)\n\n\n # Save model state after last epoch\n if args.save_last_epoch:\n model.save_model(f'{save_path}ep{args.epochs}')\n\n # Restore weights from the best epoch based on F1 val with optimized threshold\n model = utils.get_model(args, load_path = save_path)\n\n # Predict\n y_pred_train = model.predict(tfidf.x_train)\n y_pred_val = model.predict(tfidf.x_val)\n y_pred_test = model.predict(tfidf.x_test)\n\n\n exp = fun.Experiments(y_true = [mimic.y_train, mimic.y_val, mimic.y_test],\n y_pred = [y_pred_train, y_pred_val, y_pred_test])\n\n # Compute best threshold\n exp.sweep_thresholds(subset=[0,1,0])\n\n print(f'''\n Metrics @ {exp.sweep_results['best_threshold']}''')\n # Compute metrics @ best threshold\n exp.metrics(threshold=exp.sweep_results['best_threshold']) \n\n\n elif args.MODEL_NAME == 'cte':\n\n # model.fit(mimic.y_train, most_occ_train=mimic.all_icds_train)\n model.fit(most_occ_train=mimic.all_icds_train) \n\n # Predict\n y_pred_train = model.predict(mimic.x_train, mlb=mimic.mlb)\n y_pred_val = model.predict(mimic.x_val, mlb=mimic.mlb)\n y_pred_test = model.predict(mimic.x_test, mlb=mimic.mlb)\n\n exp = fun.Experiments(y_true = [mimic.y_train, mimic.y_val, mimic.y_test],\n y_pred = [y_pred_train, y_pred_val, y_pred_test])\n\n print(f\"\"\"\n Metrics @ {args.k}\"\"\")\n # Compute metrics @ k\n exp.metrics(k=args.k) \n\n\n\ndef arg_parser():\n\n parser = argparse.ArgumentParser(description='Train model for MIMIC-III dataset and compute metrics.')\n parser.add_argument('-model', type=str, dest='MODEL_NAME', choices=['lr', 'cte'], default = 'lr',help='Model for training.')\n parser.add_argument('-epochs', type=int, dest='epochs', default=10, help='Number of epochs.')\n parser.add_argument('-tfidf_maxfeatures', type=int, dest='max_features', default=20000, help='Max features for TF-IDF.')\n parser.add_argument('-batch_size', type=int, dest='batch_size', default=32, help='Batch Size.')\n parser.add_argument('-lr', type=float, dest='lr', default=0, help='Learning Rate. 
0 for article optimized value.')\n    parser.add_argument('-k', type=int, dest='k', default=15, help='Fixed k-size of predictions for Constant Model.')\n    # argparse's type=bool is a trap (bool('False') is True), so a store_true flag is used instead.\n    parser.add_argument('-save_last_epoch', dest='save_last_epoch', action='store_true', help='Also save model state at last epoch (in addition to best epoch).')\n    parser.add_argument('--verbose', type=int, dest='verbose', default=2, help='Verbosity level during training.')\n\n    return parser.parse_args()\n\n\nif __name__ == '__main__':\n\n    args = arg_parser()\n\n    main(args)" ]
[ [ "tensorflow.keras.backend.clear_session" ] ]
rosydavis/rdavis_msee_project_csun2017
[ "d23159d19b5b3ea47ddd4a0f9684477346560fc2" ]
[ "move_images.py" ]
[ "# File: move_images.py\n# Author: Rosy Davis, [email protected]\n# Last modified: 2017 Nov. 28\n#\n# A utility script to copy DWT images from a folder that keeps them placed by file name\n# (as is true of the source MP3s in the FMA dataset) to folders that split them by dataset\n# split (test, train, val) and genre (folk, hip-hop, et cetera). \n#\n# Note that this does not move the source files, but instead copies them. Wavelet image\n# files are small, and this ensures that the source images remain in place so they can be\n# reused. For example, for the FMA dataset, which has three differently-sized subsets, any\n# training image in the \"small\" dataset will also appear as a training image in the\n# \"large\" dataset. By copying instead of moving, the source image will remain at the path\n# equivalent to the path for the source audio, and can be reused if it is desirable to \n# work with both the small and the large datasets.\n\n# Parse passed-in arguments:\nimport argparse\n\n# File system utilities:\nimport os\nimport shutil\n\n# Used for error checking:\nimport numpy as np\n\n# FMA dataset utilities\nimport fma.utils as fma_utils # Utilities provided for loading and manipulating the\n\t\t\t\t\t\t\t\t\t # Free Music Archive dataset.\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"input_dir\", \n\t\t\t\t\thelp = \"Directory of images currently stored at FMA-style paths.\")\nparser.add_argument(\"output_dir\", \n\t\t\t\t\thelp = \"Directory of images to be saved in a by-class hierarchy.\")\nparser.add_argument(\"-z\", \"--size\", \n\t\t\t\t\thelp = \"Specify the dataset size to use\",\n\t\t\t\t\tchoices = [\"small\", \"medium\", \"large\"])\nparser.add_argument(\"-s\", \"--split\", \n\t\t\t\t\thelp = \"Specify the split to use\",\n\t\t\t\t\tchoices = [\"training\", \"validation\", \"test\"])\nparser.add_argument(\"-w\", \"--wavelet\", \n\t\t\t\t\thelp = \"Specify the wavelet type to use\",\n\t\t\t\t\tchoices = [\"dwt\", \"cwt\"])\n\n# By default, generate training data for small dataset:\nrequested_subset = \"small\"\nrequested_split = \"training\"\nrequested_wavelet = \"dwt\"\n\n# Override as necessary from arguments:\nargs = parser.parse_args()\ninput_dir = os.path.join(args.input_dir, '')\noutput_dir = os.path.join(args.output_dir, '')\nif args.size:\n\trequested_subset = args.size\nif args.split:\n\trequested_split = args.split\nif args.wavelet:\n\trequested_wavelet = args.wavelet\n\nif requested_split == \"training\":\n\trequested_split_path = \"train\"\nelif requested_split == \"validation\":\n\trequested_split_path = \"validation\"\nelif requested_split == \"test\":\n\trequested_split_path = \"test\"\n\n\n\n\n\n# Load the metadata files\ntracks = fma_utils.load(input_dir + 'tracks.csv')\nfeatures = fma_utils.load(input_dir + 'features.csv')\n\n# Make sure everything in features is in tracks and vice versa\nnp.testing.assert_array_equal(features.index, tracks.index)\n\n# Use the specified data subset:\nsubset = tracks['set', 'subset'] <= requested_subset\nsplit = tracks['set', 'split'] == requested_split\nrel_track_ids = tracks.loc[subset & split].index\n\ny_values = tracks.loc[subset & split, ('track', 'genre_top')]\nunique_genres = y_values.unique().categories\n\n\n\n\n\n\n# Copy files:\nfor track_id in rel_track_ids:\n\ttry:\n\t\ty_str = y_values.loc[track_id].lower()\n\texcept:\n\t\t# print(\"Skipping {}; bad genre...\".format(track_id))\n\t\tcontinue\n\t\n\ttrackstr = \"{:06d}\".format(track_id)\n\n\ttry:\n\t\tcurr_path = os.path.join(input_dir, 
\n\t\t\t\t\t\tos.path.join(requested_wavelet,\n\t\t\t\t\t\t\tos.path.join(\"noframe\",\n\t\t\t\t\t\t\t\tos.path.join(trackstr[0:3],\n\t\t\t\t\t\t\t\t\t\t\t \"{}_small.png\".format(trackstr)))))\n\t\tassert(os.path.isfile(curr_path))\n\texcept:\n\t\t# print(\"Skipping {}; file '{}' not found...\".format(track_id, curr_path))\n\t\tcontinue\n\t# print(curr_path) \n\t\n\tnew_path = os.path.join(output_dir, \n\t\t\t\t\tos.path.join(\"byclass\",\n\t\t\t\t\t\t os.path.join(requested_subset,\n\t\t\t\t\t\t\t os.path.join(requested_wavelet,\n\t\t\t\t\t\t\t\t os.path.join(requested_split_path, \n\t\t\t\t\t\t\t\t\t os.path.join(y_str, \"{}.png\".format(trackstr)))))))\n\t# print(new_path) \n\t\n\tdirectory = os.path.dirname(new_path)\n\tif not os.path.exists(directory):\n\t\tos.makedirs(directory)\n\n\tshutil.copyfile(curr_path, new_path)\n\t\t" ]
[ [ "numpy.testing.assert_array_equal" ] ]
ari-s/XpyY
[ "384500b8112a4475f2df3e736f324ab8724f66c4" ]
[ "inputfilter/csv.py" ]
[ "import numpy,csv\n\ndef csv(infile,delimiter=','):\n '''reads csv with arbitrary delimiter, returns numpy array of strings'''\n with open(infile) as f:\n rv = [ l.strip().split(delimiter) for l in f\n if l.strip() # no empty lines\n and not l.startswith('#') # no comments\n ]\n width = max(map(len,rv)) # make array rectangular\n for l in rv:\n for i in range(len(l),width):\n l.append('')\n return numpy.array(rv).transpose()\n\ndef tsv(infile):\n return csv(infile,'\\t')\n" ]
[ [ "numpy.array" ] ]
jaideepmurkute/Active-Learning---Supervised-Machine-Learning-With-Minimal-Data
[ "ba3f4e471b0a01d87848f5153f2d9f79c0eff6b1" ]
[ "mnist_fashion_lc.py" ]
[ "import sys\r\nimport os\r\n\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.linear_model import SGDClassifier\r\nfrom sklearn.model_selection import train_test_split\r\n#For sample ranking function from https://github.com/davefernig/alp\r\nfrom active_learning.active_learning import ActiveLearner\r\nfrom keras.datasets import fashion_mnist\r\n\r\nfrom collections import Counter\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom copy import deepcopy\r\n\r\n\r\ndef main():\r\n classifier_random=[LogisticRegression(solver='lbfgs',multi_class='multinomial',max_iter=1000) for i in range(10)]\r\n classifier_active = [LogisticRegression(solver='lbfgs',multi_class='multinomial',max_iter=1000) for i in range(10)]\r\n \r\n k = 0\r\n active_results = {'least_confident':[]}\r\n passive_results = []\r\n\r\n (X_train_set, y_train_set), (X_test_set, y_test_set) = fashion_mnist.load_data()\r\n\r\n x_train, x_test, y_train, y_test = train_test_split(X_train_set,y_train_set)\r\n\r\n x_train = x_train.reshape(x_train.shape[0],x_train.shape[1]*x_train.shape[2])\r\n\r\n x_test = x_test.reshape(x_test.shape[0],x_test.shape[1]*x_test.shape[2])\r\n \r\n X_labeled, X_unlabeled, y_labeled, y_oracle = train_test_split(x_train,y_train,test_size = 0.99)\r\n \r\n for model in classifier_random:\r\n model.classes_ = np.arange(10)\r\n model.fit(X_labeled, y_labeled)\r\n \r\n for model in classifier_active:\r\n model.classes_ = np.arange(10)\r\n model.fit(X_labeled, y_labeled)\r\n \r\n X_labeled_rand = deepcopy(X_labeled)\r\n y_labeled_rand = deepcopy(y_labeled)\r\n X_labeled_active = deepcopy(X_labeled)\r\n y_labeled_active = deepcopy(y_labeled)\r\n \r\n batch_size = 32\r\n \r\n new_sample_size = [32]*20\r\n\r\n seen_examples_count = 32\r\n for new_sample_size in new_sample_size:\r\n seen_examples_count = seen_examples_count + new_sample_size\r\n num_samples.append(new_sample_size)\r\n \r\n random_queries = np.random.choice(X_unlabeled.shape[0], new_sample_size, replace=False)\r\n \r\n X_labeled_rand = np.concatenate((X_labeled_rand, X_unlabeled[random_queries, :]))\r\n y_labeled_rand = np.concatenate((y_labeled_rand, y_oracle[random_queries]))\r\n \r\n predictions = []\r\n for model in classifier_random:\r\n model.fit(X_labeled_rand, y_labeled_rand)\r\n predictions.append(model.predict(X_test))\r\n\r\n prediction_stack = np.stack(predictions)\r\n commitee_decision = np.apply_along_axis(\\\r\n lambda x: Counter(x).most_common()[0][0],\\\r\n 0, prediction_stack)\r\n matches = np.sum(commitee_decision == y_test)\r\n average_accuracy = matches / np.shape(X_test)[0]\r\n passive_results.append(average_accuracy)\r\n\r\n \r\n al_obj = ActiveLearner(strategy='least_confident')\r\n for model in classifier_active:\r\n model.classes_ = np.arange(10)\r\n indexes = al_obj.rank(classifier_active, X_unlabeled, new_sample_size)\r\n \r\n X_labeled_active = np.concatenate((X_labeled_active, X_unlabeled[indexes, :]))\r\n y_labeled_active = np.concatenate((y_labeled_active, y_oracle[indexes]))\r\n\r\n predictions = []\r\n \r\n for model in classifier_active:\r\n model.fit(X_labeled_active, y_labeled_active)\r\n curr_pred = model.predict(X_test)\r\n predictions.append(curr_pred)\r\n \r\n prediction_stack = np.stack(predictions)\r\n commitee_decision = np.apply_along_axis(\\\r\n lambda x: Counter(x).most_common()[0][0],\\\r\n 0, prediction_stack)\r\n matches = np.sum(commitee_decision == y_test)\r\n average_accuracy = matches / 
np.shape(X_test)[0]\r\n active_results['least_confident'].append(average_accuracy)\r\n \r\n k = k + 1\r\n\r\n np.savetxt('./misc/random_model_accuracy.txt', passive_results)\r\n np.savetxt('./misc/active_model_accuracy.txt', active_results['least_confident'])\r\n \r\n\r\nif __name__ == '__main__':\r\n main()\r\n \r\n" ]
[ [ "numpy.sum", "numpy.savetxt", "numpy.random.choice", "numpy.arange", "numpy.shape", "sklearn.linear_model.LogisticRegression", "numpy.stack", "numpy.concatenate", "sklearn.model_selection.train_test_split" ] ]
jiafeng5513/BinocularNet
[ "c26262cef69f99f9db832ec5610cc03bf50aed88" ]
[ "comparisons/SfmLeaner_pytorch/kitti_eval/depth_evaluation_utils.py" ]
[ "# Mostly based on the code written by Clement Godard:\n# https://github.com/mrharicot/monodepth/blob/master/utils/evaluation_utils.py\nimport numpy as np\nfrom collections import Counter\nfrom path import Path\nfrom scipy.misc import imread\nfrom tqdm import tqdm\nimport datetime\n\n\nclass test_framework_KITTI(object):\n def __init__(self, root, test_files, seq_length=3, min_depth=1e-3, max_depth=100, step=1, use_gps=True):\n self.root = root\n self.min_depth, self.max_depth = min_depth, max_depth\n self.use_gps = use_gps\n self.calib_dirs, self.gt_files, self.img_files, self.displacements, self.cams = read_scene_data(self.root,\n test_files,\n seq_length,\n step,\n self.use_gps)\n\n def __getitem__(self, i):\n tgt = imread(self.img_files[i][0]).astype(np.float32)\n depth = generate_depth_map(self.calib_dirs[i], self.gt_files[i], tgt.shape[:2], self.cams[i])\n return {'tgt': tgt,\n 'ref': [imread(img).astype(np.float32) for img in self.img_files[i][1]],\n 'path':self.img_files[i][0],\n 'gt_depth': depth,\n 'displacements': np.array(self.displacements[i]),\n 'mask': generate_mask(depth, self.min_depth, self.max_depth)\n }\n\n def __len__(self):\n return len(self.img_files)\n\n\n###############################################################################\n# EIGEN\n\ndef getXYZ(lat, lon, alt):\n \"\"\"Helper method to compute a R(3) pose vector from an OXTS packet.\n Unlike KITTI official devkit, we use sinusoidal projection (https://en.wikipedia.org/wiki/Sinusoidal_projection)\n instead of mercator as it is much simpler.\n Initially Mercator was used because it renders nicely for Odometry vizualisation, but we don't need that here.\n In order to avoid problems for potential other runs closer to the pole in the future,\n we stick to sinusoidal which keeps the distances cleaner than mercator (and that's the only thing we want here)\n See https://github.com/utiasSTARS/pykitti/issues/24\n \"\"\"\n er = 6378137. # earth radius (approx.) in meters\n scale = np.cos(lat * np.pi / 180.)\n tx = scale * lon * np.pi * er / 180.\n ty = er * lat * np.pi / 180.\n tz = alt\n t = np.array([tx, ty, tz])\n return t\n\n\ndef get_displacements_from_GPS(root, date, scene, indices, tgt_index, precision_warning_threshold=2):\n \"\"\"gets displacement magntidues between middle frame and other frames, this is, to a scaling factor\n the mean output PoseNet should have for translation. Since the scaling is the same factor for depth maps and\n for translations, it will be used to determine how much predicted depth should be multiplied to.\"\"\"\n\n first_pose = None\n displacements = []\n oxts_root = root/date/scene/'oxts'\n if len(indices) == 0:\n return 0\n reordered_indices = [indices[tgt_index]] + [*indices[:tgt_index]] + [*indices[tgt_index + 1:]]\n already_warned = False\n for index in reordered_indices:\n oxts_data = np.genfromtxt(oxts_root/'DataFlow'/'{:010d}.txt'.format(index))\n\n if not already_warned:\n position_precision = oxts_data[23]\n if position_precision > precision_warning_threshold:\n print(\"Warning for scene {} frame {} : bad position precision from oxts ({:.2f}m). 
\"\n \"You might want to get displacements from speed\".format(scene, index, position_precision))\n already_warned = True\n\n lat, lon, alt = oxts_data[:3]\n pose = getXYZ(lat, lon, alt)\n if first_pose is None:\n first_pose = pose\n else:\n displacements.append(np.linalg.norm(pose - first_pose))\n return displacements\n\n\ndef get_displacements_from_speed(root, date, scene, indices, tgt_index):\n \"\"\"get displacement magnitudes by integrating over speed values.\n Might be a good alternative if the GPS is not good enough\"\"\"\n if len(indices) == 0:\n return []\n oxts_root = root/date/scene/'oxts'\n with open(oxts_root/'timestamps.txt') as f:\n timestamps = np.array([datetime.datetime.strptime(ts[:-3], \"%Y-%m-%d %H:%M:%S.%f\").timestamp() for ts in f.read().splitlines()])\n speeds = np.zeros((len(indices), 3))\n for i, index in enumerate(indices):\n oxts_data = np.genfromtxt(oxts_root/'DataFlow'/'{:010d}.txt'.format(index))\n speeds[i] = oxts_data[[6,7,10]]\n displacements = np.zeros((len(indices), 3))\n # Perform the integration operation, using trapezoidal method\n for i0, (i1, i2) in enumerate(zip(indices, indices[1:])):\n displacements[i0 + 1] = displacements[i0] + 0.5*(speeds[i0] + speeds[i0 + 1]) * (timestamps[i1] - timestamps[i2])\n # Set the origin of displacements at tgt_index\n displacements -= displacements[tgt_index]\n # Finally, get the displacement magnitude relative to tgt and discard the middle value (which is supposed to be 0)\n displacements_mag = np.linalg.norm(displacements, axis=1)\n return np.concatenate([displacements_mag[:tgt_index], displacements_mag[tgt_index + 1:]])\n\n\ndef read_scene_data(data_root, test_list, seq_length=3, step=1, use_gps=True):\n data_root = Path(data_root)\n gt_files = []\n calib_dirs = []\n im_files = []\n cams = []\n displacements = []\n demi_length = (seq_length - 1) // 2\n shift_range = step * np.arange(-demi_length, demi_length + 1)\n\n print('getting test metadata ... 
')\n for sample in tqdm(test_list):\n tgt_img_path = data_root/sample\n date, scene, cam_id, _, index = sample[:-4].split('/')\n\n scene_length = len(tgt_img_path.parent.files('*.png'))\n\n ref_indices = shift_range + np.clip(int(index), step*demi_length, scene_length - step*demi_length - 1)\n\n ref_imgs_path = [tgt_img_path.dirname()/'{:010d}.png'.format(i) for i in ref_indices]\n vel_path = data_root/date/scene/'velodyne_points'/'DataFlow'/'{}.bin'.format(index[:10])\n\n if tgt_img_path.isfile():\n gt_files.append(vel_path)\n calib_dirs.append(data_root/date)\n im_files.append([tgt_img_path,ref_imgs_path])\n cams.append(int(cam_id[-2:]))\n\n args = (data_root, date, scene, ref_indices, demi_length)\n if use_gps:\n displacements.append(get_displacements_from_GPS(*args))\n else:\n displacements.append(get_displacements_from_speed(*args))\n else:\n print('{} missing'.format(tgt_img_path))\n\n return calib_dirs, gt_files, im_files, displacements, cams\n\n\ndef load_velodyne_points(file_name):\n # adapted from https://github.com/hunse/kitti\n points = np.fromfile(file_name, dtype=np.float32).reshape(-1, 4)\n points[:,3] = 1\n return points\n\n\ndef read_calib_file(path):\n # taken from https://github.com/hunse/kitti\n float_chars = set(\"0123456789.e+- \")\n data = {}\n with open(path, 'r') as f:\n for line in f.readlines():\n key, value = line.split(':', 1)\n value = value.strip()\n data[key] = value\n if float_chars.issuperset(value):\n # try to cast to float array\n try:\n data[key] = np.array(list(map(float, value.split(' '))))\n except ValueError:\n # casting error: DataFlow[key] already eq. value, so pass\n pass\n\n return data\n\n\ndef sub2ind(matrixSize, rowSub, colSub):\n m, n = matrixSize\n return rowSub * (n-1) + colSub - 1\n\n\ndef generate_depth_map(calib_dir, velo_file_name, im_shape, cam=2):\n # load calibration files\n cam2cam = read_calib_file(calib_dir/'calib_cam_to_cam.txt')\n velo2cam = read_calib_file(calib_dir/'calib_velo_to_cam.txt')\n velo2cam = np.hstack((velo2cam['R'].reshape(3,3), velo2cam['T'][..., np.newaxis]))\n velo2cam = np.vstack((velo2cam, np.array([0, 0, 0, 1.0])))\n\n # compute projection matrix velodyne->image plane\n R_cam2rect = np.eye(4)\n R_cam2rect[:3,:3] = cam2cam['R_rect_00'].reshape(3,3)\n P_rect = cam2cam['P_rect_0'+str(cam)].reshape(3,4)\n P_velo2im = np.dot(np.dot(P_rect, R_cam2rect), velo2cam)\n\n # load velodyne points and remove all behind image plane (approximation)\n # each row of the velodyne DataFlow is forward, left, up, reflectance\n velo = load_velodyne_points(velo_file_name)\n velo = velo[velo[:, 0] >= 0, :]\n\n # project the points to the camera\n velo_pts_im = np.dot(P_velo2im, velo.T).T\n velo_pts_im[:, :2] = velo_pts_im[:,:2] / velo_pts_im[:,-1:]\n\n # check if in bounds\n # use minus 1 to get the exact same value as KITTI matlab code\n velo_pts_im[:, 0] = np.round(velo_pts_im[:,0]) - 1\n velo_pts_im[:, 1] = np.round(velo_pts_im[:,1]) - 1\n val_inds = (velo_pts_im[:, 0] >= 0) & (velo_pts_im[:, 1] >= 0)\n val_inds = val_inds & (velo_pts_im[:,0] < im_shape[1]) & (velo_pts_im[:,1] < im_shape[0])\n velo_pts_im = velo_pts_im[val_inds, :]\n\n # project to image\n depth = np.zeros((im_shape))\n depth[velo_pts_im[:, 1].astype(np.int), velo_pts_im[:, 0].astype(np.int)] = velo_pts_im[:, 2]\n\n # find the duplicate points and choose the closest depth\n inds = sub2ind(depth.shape, velo_pts_im[:, 1], velo_pts_im[:, 0])\n dupe_inds = [item for item, count in Counter(inds).items() if count > 1]\n for dd in dupe_inds:\n pts = np.where(inds == 
dd)[0]\n        x_loc = int(velo_pts_im[pts[0], 0])\n        y_loc = int(velo_pts_im[pts[0], 1])\n        depth[y_loc, x_loc] = velo_pts_im[pts, 2].min()\n    depth[depth < 0] = 0\n    return depth\n\n\ndef generate_mask(gt_depth, min_depth, max_depth):\n    mask = np.logical_and(gt_depth > min_depth,\n                          gt_depth < max_depth)\n    # crop used by Garg ECCV16 to reproduce Eigen NIPS14 results\n    # if used on gt_size 370x1224 produces a crop of [-218, -3, 44, 1180]\n    gt_height, gt_width = gt_depth.shape\n    crop = np.array([0.40810811 * gt_height, 0.99189189 * gt_height,\n                     0.03594771 * gt_width, 0.96405229 * gt_width]).astype(np.int32)\n\n    crop_mask = np.zeros(mask.shape)\n    crop_mask[crop[0]:crop[1], crop[2]:crop[3]] = 1\n    mask = np.logical_and(mask, crop_mask)\n    return mask\n" ]
[ [ "numpy.eye", "numpy.fromfile", "numpy.zeros", "numpy.dot", "numpy.round", "numpy.logical_and", "numpy.cos", "scipy.misc.imread", "numpy.arange", "numpy.where", "numpy.array", "numpy.concatenate", "numpy.linalg.norm" ] ]
DFNaiff/Dissertation
[ "8db72a0e588042a582053625ec58cde6a661f2a9" ]
[ "tests_dissertation/source1d/test1a_mcmc.py" ]
[ "# -*- coding: utf-8 -*-\nimport sys\nsys.path.insert(0,\"../../src2\")\nimport math\nimport functools\nimport time\n\nimport torch\nimport numpy as np\nfrom scipy.special import gamma\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport emcee\n\nfrom source_1d_likelihood_fn import compute_log_likelihood_2\n\nnp.random.seed(100)\ntorch.manual_seed(100)\n#%%\ndef logit_t(x,a=0,b=1):\n return torch.log(((x-a)/(b-a))/(1.0-(x-a)/(b-a)))\ndef sigmoid(x,a=0,b=1):\n return (b-a)*1.0/(1.0+np.exp(-x)) + a\ndef dsigmoid(x,a=0,b=1):\n return (b-a)*np.exp(x)/((1+np.exp(x))**2)\ndef exp(x):\n return np.exp(x)\ndef dexp(x):\n return np.exp(x)\n\ndef unwarped_logjoint_np(x0,Ts,q0,rho):\n ll = compute_log_likelihood_2(x0,Ts,q0,rho)\n ll += -np.log(1+(q0/10.0)**2)\n ll += -np.log(1+(rho/0.1)**2)\n return ll\n\ndef logjoint_np(x):\n x0,Ts,q0,rho = x[0],x[1],x[2],x[3]\n ll = unwarped_logjoint_np(sigmoid(x0),sigmoid(Ts,b=0.4),\n exp(q0),exp(rho)) + \\\n np.log(dsigmoid(x0)) + np.log(dsigmoid(Ts,b=0.4)) + \\\n np.log(dexp(q0)) + np.log(dexp(rho))\n return ll\n\ncounter=0\ndef logjoint_emcee(x):\n global counter\n counter += 1\n print(counter)\n return logjoint_np(x)\n\n#%%\nndim, nwalkers = 4, 10\np0 = [np.random.rand(ndim) for i in range(nwalkers)]\n\nsampler = emcee.EnsembleSampler(nwalkers, ndim, logjoint_emcee)\nsampler.run_mcmc(p0, 10000)\nnp.savez(\"testheat_1a_emcee\",sampler=sampler)\n#%%\n" ]
[ [ "numpy.savez", "torch.manual_seed", "numpy.random.seed", "numpy.exp", "torch.log", "numpy.log", "numpy.random.rand" ] ]
Rensvandeschoot/automated-systematic-review
[ "fe06a570a806e1f14d3de5186511a04edf851cf3" ]
[ "asreview/models/embedding.py" ]
[ "import gzip\nimport io\nfrom multiprocessing import Process, Queue, cpu_count\nfrom pathlib import Path\nfrom urllib.request import urlopen\n\nimport numpy as np\n\nfrom asreview.utils import get_data_home\n\n\nEMBEDDING_EN = {\n \"url\": \"https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.en.300.vec.gz\", # noqa\n \"name\": 'fasttext.cc.en.300.vec'\n}\n\n\ndef _embedding_reader(filename, input_queue, block_size=1000):\n \"\"\" Process that reads the word embeddings from a file.\n\n Parameters\n ----------\n filename: str\n File of trained embedding vectors.\n input_queue: Queue\n Queue to store jobs in.\n block_size: int\n Number of lines for each job.\n \"\"\"\n\n with open(filename, 'r', encoding='utf-8', newline='\\n') as f:\n # Throw away the first line, since we don't care about the dimensions.\n f.readline()\n\n i_line = 0\n buffer = []\n # Read the embedding file line by line.\n for line in f:\n i_line += 1\n buffer.append(line)\n # If the buffer is full, write it to the queue.\n if i_line == block_size:\n input_queue.put(buffer)\n i_line = 0\n buffer = []\n if i_line > 0:\n input_queue.put(buffer)\n\n # Put the string \"DONE\" in the queue, to ensure that the\n # worker processes finish.\n\n input_queue.put(\"DONE\")\n\n\ndef _embedding_worker(input_queue, output_queue, emb_vec_dim, word_index=None):\n \"\"\" Process that reads the word embeddings from a file.\n\n Parameters\n ----------\n input_queue: Queue\n Queue in which the jobs are submitted.\n output_queue: Queue\n Queue to store the embedding in dictionary form.\n emb_vec_dim: int\n Dimension of each embedding vector.\n word_index: dict\n Dictionary of the sample embedding.\n \"\"\"\n\n badInput = False\n badValues = {}\n while True:\n embedding = {}\n buffer = input_queue.get()\n if buffer == \"DONE\":\n break\n\n for line in buffer:\n line = line.rstrip()\n values = line.split(' ')\n\n if len(values) != emb_vec_dim + 1:\n if not badInput:\n print(\"Error: bad input in embedding vector.\")\n badInput = True\n badValues = values\n break\n else:\n word = values[0]\n if word_index is not None and word not in word_index:\n continue\n coefs = values[1:emb_vec_dim + 1]\n\n # store the results\n embedding[word] = np.asarray(coefs, dtype=np.float32)\n output_queue.put(embedding)\n\n # We removed the \"DONE\" from the input queue, so put it back in for\n # the other processes.\n input_queue.put(\"DONE\")\n\n # Store the results in the output queue\n if badInput:\n output_queue.put({\"ErrorBadInputValues\": badValues})\n output_queue.put(\"DONE\")\n\n\ndef _embedding_aggregator(output_queue, n_worker):\n \"\"\" Process that aggregates the results of the workers.\n This should be the main/original process.\n\n Parameters\n ----------\n output_queue: Queue\n This queue is the output queue of the workers.\n n_worker: int\n The number of worker processes.\n\n Returns\n -------\n Aggregated embedding dictionary.\n \"\"\"\n\n embedding = {}\n\n num_done = 0\n while num_done < n_worker:\n new_embedding = output_queue.get()\n if new_embedding == \"DONE\":\n num_done += 1\n else:\n embedding.update(new_embedding)\n\n return embedding\n\n\ndef download_embedding(url=EMBEDDING_EN['url'], name=EMBEDDING_EN['name'],\n data_home=None, verbose=1):\n \"\"\"Download word embedding file.\n\n Download word embedding file, unzip the file and save to the\n file system.\n\n Parameters\n ----------\n url: str\n The URL of the gzipped word embedding file\n name: str\n The filename of the embedding file.\n data_home: str\n The location of the ASR 
datasets. Default `asreview.utils.get_data_home()`\n verbose: int\n The verbosity. Default 1.\n\n \"\"\"\n\n if data_home is None:\n data_home = get_data_home()\n\n out_fp = Path(data_home, name)\n\n if verbose:\n print(f'download {url}')\n\n r = urlopen(url)\n compressed_file = io.BytesIO(r.read())\n\n if verbose:\n print(f'save to {out_fp}')\n\n decompressed_file = gzip.GzipFile(fileobj=compressed_file)\n\n with open(out_fp, 'wb') as out_file:\n for line in decompressed_file:\n out_file.write(line)\n\n\ndef load_embedding(fp, word_index=None, n_jobs=None, verbose=1):\n \"\"\"Load embedding matrix from file.\n\n The embedding matrix needs to be stored in the\n FastText format.\n\n Parameters\n ----------\n fp: str\n File path of the trained embedding vectors.\n word_index: dict\n Sample word embeddings.\n n_jobs: int\n Number of processes to parse the embedding (+1 process for reading).\n verbose: int\n The verbosity. Default 1.\n\n\n Returns\n -------\n dict:\n The embedding weights stored in a dict with the word as key and\n the weights as values.\n \"\"\"\n\n # Maximum number of jobs in the queue.\n queue_size = 500\n\n # Set the number of reader processes to use.\n if n_jobs is None:\n n_jobs = 1\n elif n_jobs == -1:\n n_jobs = cpu_count()-1\n\n input_queue = Queue(queue_size)\n output_queue = Queue()\n\n with open(fp, 'r', encoding='utf-8', newline='\\n') as f:\n n_words, emb_vec_dim = list(map(int, f.readline().split(' ')))\n\n if verbose == 1:\n print(f\"Reading {n_words} vectors with {emb_vec_dim} dimensions.\")\n\n worker_procs = []\n p = Process(target=_embedding_reader, args=(fp, input_queue),\n daemon=True)\n worker_procs.append(p)\n for _ in range(n_jobs):\n p = Process(\n target=_embedding_worker,\n args=(input_queue, output_queue, emb_vec_dim, word_index),\n daemon=True)\n worker_procs.append(p)\n\n # Start workers.\n for proc in worker_procs:\n proc.start()\n embedding = _embedding_aggregator(output_queue, n_jobs)\n\n # Merge dictionaries of workers\n\n # Join workers\n for proc in worker_procs:\n proc.join()\n\n if \"ErrorBadInputValues\" in embedding:\n badValues = embedding[\"ErrorBadInputValues\"]\n raise ValueError(f\"Check embedding matrix, bad format: {badValues}\")\n\n if verbose == 1:\n print(f\"Found {len(embedding)} word vectors.\")\n\n return embedding\n\n\ndef sample_embedding(embedding, word_index, verbose=1):\n \"\"\"Sample embedding matrix\n\n Parameters\n ----------\n embedding: dict\n A dictionary with the words and embedding vectors.\n word_index: dict\n A word_index like the output of Keras Tokenizer.word_index.\n verbose: int\n The verbosity. Default 1.\n\n Returns\n -------\n (np.ndarray, list):\n The embedding weights strored in a two dimensional\n numpy array and a list with the corresponding words.\n \"\"\"\n\n n_words, emb_vec_dim = len(word_index), len(next(iter(embedding.values())))\n\n if verbose == 1:\n print(f\"Creating matrix with {n_words} vectors \"\n f\"with dimension {emb_vec_dim}.\")\n\n # n+1 because 0 is preserved in the tokenizing process.\n embedding_matrix = np.zeros((n_words + 1, emb_vec_dim))\n\n for word, i in word_index.items():\n coefs = embedding.get(word)\n if coefs is not None:\n embedding_matrix[i] = coefs\n if verbose == 1:\n print('Shape of embedding matrix: ', embedding_matrix.shape)\n\n return embedding_matrix\n" ]
[ [ "numpy.asarray", "numpy.zeros" ] ]
joyjeni/-Learn-Artificial-Intelligence-with-TensorFlow
[ "8ae05456241a3ead3dcb83dd315797380d7acacf" ]
[ "section3/snippets.py" ]
[ "import tensorflow as tf\n\n# ===============================================\n# Previously was snippets.py of: 3_2_RNNs\n# ===============================================\n\n# i = input_gate, j = new_input, f = forget_gate, o = output_gate\n# Get 4 copies of feeding [inputs, m_prev] through the \"Sigma\" diagram.\n# Note that each copy has its own distinct set of weights.\nlstm_matrix = self._linear1([inputs, m_prev])\ni, j, f, o = tf.split(\n value=lstm_matrix, num_or_size_splits=4, axis=1)\n# Feed each of the gates through a sigmoid.\ni = sigmoid(i)\nf = sigmoid(f + self._forget_bias)\no = sigmoid(o)\n\nc = f * c_prev + i * self._activation(j)\nm = o * self._activation(c)\n\nnew_state = LSTMStateTuple(c, m)\nreturn m, new_state\n\n# ===============================================\n# RNN illustration\n# ===============================================\n\nhidden_size = 32\n\n\ndef rnn_step(x, h_prev):\n # Project inputs to each have dimension hidden_size.\n combined_inputs = tf.layers.Dense(hidden_size)(tf.concat([x, h_prev], axis=1))\n # Compute the next hidden state.\n h = tf.tanh(combined_inputs)\n return h\n\n\n# ===============================================\n# Bidirectional RNNs\n# ===============================================\noutputs_tuple, final_state_tuple = tf.nn.bidirectional_dynamic_rnn(\n cell_fw=tf.nn.rnn_cell.LSTMCell(128),\n cell_bw=tf.nn.rnn_cell.LSTMCell(128),\n inputs=inputs,\n dtype=tf.float32)\n# Concatenate the forward and backward outputs.\n# Shape: (batch_size, max_seq_len, 2 * state_size)\noutputs = tf.concat(outputs_tuple, -1)\n\n# ===============================================\n# Stacked RNNs\n# ===============================================\n\n\ndef lstm_cell():\n return tf.nn.rnn_cell.LSTMCell(128)\n\n\ncell = tf.nn.rnn_cell.MultiRNNCell([\n lstm_cell() for _ in range(2)])\noutputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)\n\n" ]
[ [ "tensorflow.tanh", "tensorflow.nn.rnn_cell.LSTMCell", "tensorflow.nn.dynamic_rnn", "tensorflow.concat", "tensorflow.layers.Dense", "tensorflow.split" ] ]
colinzuo/MLAPP_Solution
[ "6d4bab23455169310547462fe2fc2cb71a915ef0" ]
[ "practice/toolbox/knn.py" ]
[ "import numpy as np\r\n\r\nfrom toolbox.sqDistance import *\r\nfrom toolbox.oneOfK import *\r\n\r\n\r\nclass KnnModel():\r\n def fit(self, X, y, K, C=None):\r\n self.X = X\r\n self.y = y\r\n self.K = K\r\n if C is not None:\r\n self.C = C\r\n else:\r\n self.C = np.size(np.unique(y))\r\n\r\n def predict(self, Xtest):\r\n yhat, yprob = knnClassify(self.X, self.y, Xtest, self.K, self.C)\r\n return yhat, yprob\r\n\r\n\r\ndef knnClassify(Xtrain, ytrain, Xtest, K, C):\r\n Ntrain = Xtrain.shape[0]\r\n Nclasses = C\r\n if K > Ntrain:\r\n print(\"reducing K = %d to Ntrain = %d\", K, Ntrain-1)\r\n K = Ntrain - 1\r\n dst = sqDistance(Xtest, Xtrain)\r\n ypred = np.zeros(Xtest.shape[0])\r\n if K == 1:\r\n closest = np.argmin(dst, axis=1)\r\n ypred = ytrain[closest]\r\n ypredProb, _ = oneOfK(ypred, Nclasses)\r\n else:\r\n closest = np.argsort(dst, axis=1)\r\n ypredProb = np.zeros((Xtest.shape[0], Nclasses))\r\n for i in range(Xtest.shape[0]):\r\n labels = ytrain[closest[i, 0:K]]\r\n hist, bin_edges = np.histogram(labels, bins=np.arange(1, Nclasses+2), density=True)\r\n ypredProb[i, :] = hist\r\n max = np.argmax(ypredProb, axis=1)\r\n ypred = max + 1\r\n ypred = ypred[:, np.newaxis]\r\n return ypred, ypredProb\r\n\r\n\r\nif __name__ == '__main__':\r\n Xtrain = np.array([[1, 2], [11, 12], [21, 22], [3, 4], [13, 14], [23, 24]])\r\n Xtest = np.array([[2, 3], [12, 13], [22, 23]])\r\n ytrain = np.array([1, 2, 3, 1, 2, 3])\r\n ypred, ypredProb = knnClassify(Xtrain, ytrain, Xtest, 1, C=3)\r\n print(\"Done\")" ]
[ [ "numpy.zeros", "numpy.argmin", "numpy.argsort", "numpy.argmax", "numpy.arange", "numpy.array", "numpy.unique" ] ]
cwaluga/singularities_dolfin
[ "dd379f71f384717a63906fd701df542a1603b03b" ]
[ "src/extrapolate.py" ]
[ "#! /usr/bin/env python\n\n\"\"\"\nExtrapolation of correction parameters.\n\"\"\"\n\n__author__ = \"Christian Waluga ([email protected])\"\n__copyright__ = \"Copyright (c) 2013 %s\" % __author__\n\nfrom dolfin import *\nfrom correction import *\nfrom meshtools import *\nfrom singular import *\nimport math\n\ndef extrapolate_gamma_least_squares(h, g, angle):\n\n from scipy.optimize import leastsq\n p = 2.0 - 2.0*pi/angle\n fitfunc = lambda c, x: c[0] + c[1]*x**p\n errfunc = lambda c, x, y: (y - fitfunc(c, x))/x\n cinit = [g[-1] , 0.0, 0.0]\n c = leastsq(errfunc, cinit, args = (h, g), full_output = 1)\n return c[0][0], lambda x: fitfunc(c[0], x)\n\n\ndef extrapolate_gamma_romberg(h, g, angle):\n\n import numpy as np\n\n N = len(h)-1\n T = np.zeros((N,N))\n \n # compute Aitken-Neville tableau\n p = 2.0 - 2.0*pi/angle\n for i in range(0, N):\n T[i,0] = (h[i]**p * g[i+1] - h[i+1]**p * g[i])/(h[i]**p - h[i+1]**p)\n for i in range(1, N):\n for k in range(1, i+1):\n T[i,k] = T[i,k-1] + (T[i,k-1] - T[i-1,k-1])/((h[i-k]/h[i])**(p+1) - 1.0)\n\n return T[N-1,N-1], T\n\n\ndef extrapolate_gamma_richardson(h, g, angle):\n\n p = 2.0 - 2.0*pi/angle\n return g[-2] + (g[-1] - g[-2])/(1.0-(h[-1]/h[-2])**p)\n\n\ndef extrapolate_gamma(corner, angle, corner_mesh, func, method, maxit, \\\n refine_method, extrapolation, start_at, maxlevel, initial_gamma):\n\n if corner_mesh.size(2) is 0: return 0.0\n \n if refine_method == 'bulirsch':\n \n # refine meshes according to Bulirsch-series (1, 1/2, 1/3, 1/4, 1/6, 1/8, ...)\n meshes = [corner_mesh,refine(corner_mesh),refine3(corner_mesh)]\n \n for i in xrange(3, maxlevel):\n meshes.append(refine(meshes[-2]))\n\n elif refine_method == 'midpoint':\n\n # refine meshes by simple subdivision (1, 1/2, 1/4, 1/8, 1/16, ...)\n meshes = [corner_mesh]\n\n for i in xrange(1, maxlevel):\n meshes.append(refine(meshes[-1]))\n\n mesh = meshes[0]\n min_angle = find_min_angle(mesh, corner)\n\n # compute gammas using one-level algorithm\n if initial_gamma is None:\n initial_gamma = evaluate_fit(corner_mesh.size(2), angle, func == math.sin)\n g = compute_gammas(meshes, angle, min_angle, corner, initial_gamma = initial_gamma, \\\n maxit = maxit, func = func, method = method)\n \n import numpy as np\n \n h = [mesh.hmin() for mesh in meshes]\n\n x = np.asarray(h)\n y = np.asarray(g)\n\n if extrapolation == 'none':\n gamma_asymptotic = g[-1] # just use value computed on the highest level\n\n elif extrapolation == 'least-squares': # extrapolate by a least-squares fit\n gamma_asymptotic, fitfunc = extrapolate_gamma_least_squares(x[start_at:], y[start_at:], angle)\n \n elif extrapolation == 'romberg':\n gamma_asymptotic, tableau = extrapolate_gamma_romberg(x[start_at:], y[start_at:], angle)\n\n elif extrapolation == 'richardson':\n gamma_asymptotic = extrapolate_gamma_richardson(x[start_at:], y[start_at:], angle)\n\n # plot gamma\n if False: # just for debugging\n gammaa, fitfunc = extrapolate_gamma_least_squares(x[start_at:], y[start_at:], angle)\n\n import pylab\n fig = pylab.figure()\n plt, = pylab.semilogx(1./x, y, 'k*')\n xx = np.linspace(h[-1], h[0], 100)\n yy = fitfunc(xx)\n pylab.ylim((min(g)-0.05,max(g)+0.05))\n plt, = pylab.semilogx(1./xx, yy, 'r-')\n plt, = pylab.semilogx(1./xx, gamma_asymptotic*np.ones((len(xx),1)), 'b-')\n plt, = pylab.semilogx(1./xx, initial_gamma*np.ones((len(xx),1)), 'g-')\n pylab.savefig('output/gamma-{0}-{1}.pdf'.format(corner[0],corner[1]))\n\n return gamma_asymptotic, (h,g)\n\n\ndef extrapolate_gammas(corners, angles, corner_meshes, method = 
'one-level-exact', maxit = 20, \\\n refine_method = 'bulirsch', extrapolation = 'least-squares', start_at = 3, \\\n maxlevel = 10, funcs = None, initial_gamma = None):\n\n g_asympt, data = [], []\n \n if funcs is None: # set all corners to Dirichlet by default\n funcs = [ math.sin for c in corners ]\n\n # for each corner, compute gamma\n for i in range(len(corners)):\n\n corner = corners[i]\n angle = angles[i]\n corner_mesh = corner_meshes[i]\n\n if method == 'fit':\n g, d = evaluate_fit(corner_mesh.size(2), angle, funcs[i] == math.sin), None\n else:\n g, d = extrapolate_gamma(corner, angle, corner_mesh, method = method, maxit = maxit, \\\n refine_method = refine_method, extrapolation = extrapolation, \\\n start_at = start_at, maxlevel = maxlevel, func = funcs[i], \\\n initial_gamma = initial_gamma)\n \n g_asympt.append(g)\n data.append(d)\n\n return g_asympt, data\n" ]
[ [ "numpy.asarray", "numpy.linspace", "scipy.optimize.leastsq", "numpy.zeros" ] ]
ananyashreyjain/astropy
[ "a8b8d4c4d2dcc9be28385600f56066cef92a38ad" ]
[ "astropy/utils/iers/tests/test_iers.py" ]
[ "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport os\nimport urllib.request\n\nimport pytest\nimport numpy as np\n\nfrom ....tests.helper import assert_quantity_allclose, catch_warnings\nfrom .. import iers\nfrom .... import units as u\nfrom ....table import QTable\nfrom ....time import Time, TimeDelta\nfrom ....utils.exceptions import AstropyWarning\n\nFILE_NOT_FOUND_ERROR = getattr(__builtins__, 'FileNotFoundError', OSError)\n\ntry:\n iers.IERS_A.open('finals2000A.all') # check if IERS_A is available\nexcept OSError:\n HAS_IERS_A = False\nelse:\n HAS_IERS_A = True\n\nIERS_A_EXCERPT = os.path.join(os.path.dirname(__file__), 'iers_a_excerpt')\n\n\nclass TestBasic():\n \"\"\"Basic tests that IERS_B returns correct values\"\"\"\n\n def test_simple(self):\n iers.IERS.close()\n assert iers.IERS.iers_table is None\n iers_tab = iers.IERS.open()\n assert iers.IERS.iers_table is not None\n assert isinstance(iers.IERS.iers_table, QTable)\n assert iers_tab['UT1_UTC'].unit is u.second\n assert iers_tab['PM_x'].unit is u.arcsecond\n assert iers_tab['PM_y'].unit is u.arcsecond\n jd1 = np.array([2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5])\n jd2 = np.array([0.49999421, 0.99997685, 0.99998843, 0., 0.5])\n ut1_utc = iers_tab.ut1_utc(jd1, jd2)\n assert isinstance(ut1_utc, u.Quantity)\n assert ut1_utc.unit is u.second\n # IERS files change at the 0.1 ms level; see gh-6981\n assert_quantity_allclose(ut1_utc, [-0.5868211, -0.5868184, -0.5868184,\n 0.4131816, 0.41328895] * u.s,\n atol=0.1*u.ms)\n # should be future-proof; surely we've moved to another planet by then\n with pytest.raises(IndexError):\n ut1_utc2, status2 = iers_tab.ut1_utc(1e11, 0.)\n # also check it returns the right status\n ut1_utc2, status2 = iers_tab.ut1_utc(jd1, jd2, return_status=True)\n assert np.all(status2 == iers.FROM_IERS_B)\n ut1_utc4, status4 = iers_tab.ut1_utc(1e11, 0., return_status=True)\n assert status4 == iers.TIME_BEYOND_IERS_RANGE\n\n # check it works via Time too\n t = Time(jd1, jd2, format='jd', scale='utc')\n ut1_utc3 = iers_tab.ut1_utc(t)\n assert_quantity_allclose(ut1_utc3, [-0.5868211, -0.5868184, -0.5868184,\n 0.4131816, 0.41328895] * u.s,\n atol=0.1*u.ms)\n\n # Table behaves properly as a table (e.g. can be sliced)\n assert len(iers_tab[:2]) == 2\n\n def test_open_filename(self):\n iers.IERS.close()\n iers.IERS.open(iers.IERS_B_FILE)\n assert iers.IERS.iers_table is not None\n assert isinstance(iers.IERS.iers_table, QTable)\n iers.IERS.close()\n with pytest.raises(FILE_NOT_FOUND_ERROR):\n iers.IERS.open('surely this does not exist')\n\n def test_open_network_url(self):\n iers.IERS_A.close()\n iers.IERS_A.open(\"file:\" + urllib.request.pathname2url(IERS_A_EXCERPT))\n assert iers.IERS_A.iers_table is not None\n assert isinstance(iers.IERS_A.iers_table, QTable)\n iers.IERS_A.close()\n\n\nclass TestIERS_AExcerpt():\n def test_simple(self):\n # Test the IERS A reader. 
It is also a regression tests that ensures\n # values do not get overridden by IERS B; see #4933.\n iers_tab = iers.IERS_A.open(IERS_A_EXCERPT)\n\n assert iers_tab['UT1_UTC'].unit is u.second\n assert 'P' in iers_tab['UT1Flag']\n assert 'I' in iers_tab['UT1Flag']\n assert 'B' in iers_tab['UT1Flag']\n assert np.all((iers_tab['UT1Flag'] == 'I') |\n (iers_tab['UT1Flag'] == 'P') |\n (iers_tab['UT1Flag'] == 'B'))\n\n assert iers_tab['dX_2000A'].unit is u.marcsec\n assert iers_tab['dY_2000A'].unit is u.marcsec\n assert 'P' in iers_tab['NutFlag']\n assert 'I' in iers_tab['NutFlag']\n assert 'B' in iers_tab['NutFlag']\n assert np.all((iers_tab['NutFlag'] == 'P') |\n (iers_tab['NutFlag'] == 'I') |\n (iers_tab['NutFlag'] == 'B'))\n\n assert iers_tab['PM_x'].unit is u.arcsecond\n assert iers_tab['PM_y'].unit is u.arcsecond\n assert 'P' in iers_tab['PolPMFlag']\n assert 'I' in iers_tab['PolPMFlag']\n assert 'B' in iers_tab['PolPMFlag']\n assert np.all((iers_tab['PolPMFlag'] == 'P') |\n (iers_tab['PolPMFlag'] == 'I') |\n (iers_tab['PolPMFlag'] == 'B'))\n\n t = Time([57053., 57054., 57055.], format='mjd')\n ut1_utc, status = iers_tab.ut1_utc(t, return_status=True)\n assert status[0] == iers.FROM_IERS_B\n assert np.all(status[1:] == iers.FROM_IERS_A)\n # These values are *exactly* as given in the table, so they should\n # match to double precision accuracy.\n assert_quantity_allclose(ut1_utc,\n [-0.4916557, -0.4925323, -0.4934373] * u.s,\n atol=0.1*u.ms)\n\n\n dcip_x,dcip_y, status = iers_tab.dcip_xy(t, return_status=True)\n assert status[0] == iers.FROM_IERS_B\n assert np.all(status[1:] == iers.FROM_IERS_A)\n # These values are *exactly* as given in the table, so they should\n # match to double precision accuracy.\n print(dcip_x)\n print(dcip_y)\n assert_quantity_allclose(dcip_x,\n [-0.086, -0.093, -0.087] * u.marcsec,\n atol=1.*u.narcsec)\n assert_quantity_allclose(dcip_y,\n [0.094, 0.081, 0.072] * u.marcsec,\n atol=1*u.narcsec)\n\n pm_x, pm_y, status = iers_tab.pm_xy(t, return_status=True)\n assert status[0] == iers.FROM_IERS_B\n assert np.all(status[1:] == iers.FROM_IERS_A)\n assert_quantity_allclose(pm_x,\n [0.003734, 0.004581, 0.004623] * u.arcsec,\n atol=0.1*u.marcsec)\n assert_quantity_allclose(pm_y,\n [0.310824, 0.313150, 0.315517] * u.arcsec,\n atol=0.1*u.marcsec)\n\n # Table behaves properly as a table (e.g. 
can be sliced)\n assert len(iers_tab[:2]) == 2\n\n\[email protected](str('not HAS_IERS_A'))\nclass TestIERS_A():\n\n def test_simple(self):\n \"\"\"Test that open() by default reads a 'finals2000A.all' file.\"\"\"\n # Ensure we remove any cached table (gh-5131).\n iers.IERS_A.close()\n iers_tab = iers.IERS_A.open()\n jd1 = np.array([2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5])\n jd2 = np.array([0.49999421, 0.99997685, 0.99998843, 0., 0.5])\n ut1_utc, status = iers_tab.ut1_utc(jd1, jd2, return_status=True)\n assert np.all(status == iers.FROM_IERS_B)\n assert_quantity_allclose(ut1_utc, [-0.5868211, -0.5868184, -0.5868184,\n 0.4131816, 0.41328895] * u.s,\n atol=0.1*u.ms)\n ut1_utc2, status2 = iers_tab.ut1_utc(1e11, 0., return_status=True)\n assert status2 == iers.TIME_BEYOND_IERS_RANGE\n\n tnow = Time.now()\n\n ut1_utc3, status3 = iers_tab.ut1_utc(tnow, return_status=True)\n assert status3 == iers.FROM_IERS_A_PREDICTION\n assert ut1_utc3 != 0.\n\n\nclass TestIERS_Auto():\n\n def setup_class(self):\n \"\"\"Set up useful data for the tests.\n \"\"\"\n self.N = 40\n self.ame = 30.0\n self.iers_a_file_1 = os.path.join(os.path.dirname(__file__), 'finals2000A-2016-02-30-test')\n self.iers_a_file_2 = os.path.join(os.path.dirname(__file__), 'finals2000A-2016-04-30-test')\n self.iers_a_url_1 = os.path.normpath('file://' + os.path.abspath(self.iers_a_file_1))\n self.iers_a_url_2 = os.path.normpath('file://' + os.path.abspath(self.iers_a_file_2))\n self.t = Time.now() + TimeDelta(10, format='jd') * np.arange(self.N)\n\n def teardown_method(self, method):\n \"\"\"Run this after every test.\n \"\"\"\n iers.IERS_Auto.close()\n\n def test_interpolate_error_formatting(self):\n \"\"\"Regression test: make sure the error message in\n IERS_Auto._check_interpolate_indices() is formatted correctly.\n \"\"\"\n with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):\n with iers.conf.set_temp('auto_max_age', self.ame):\n with pytest.raises(ValueError) as err:\n iers_table = iers.IERS_Auto.open()\n delta = iers_table.ut1_utc(self.t.jd1, self.t.jd2)\n assert str(err.value) == iers.INTERPOLATE_ERROR.format(self.ame)\n\n def test_auto_max_age_none(self):\n \"\"\"Make sure that iers.INTERPOLATE_ERROR's advice about setting\n auto_max_age = None actually works.\n \"\"\"\n with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):\n with iers.conf.set_temp('auto_max_age', None):\n iers_table = iers.IERS_Auto.open()\n delta = iers_table.ut1_utc(self.t.jd1, self.t.jd2)\n assert isinstance(delta, np.ndarray)\n assert delta.shape == (self.N,)\n assert_quantity_allclose(delta, np.array([-0.2246227]*self.N)*u.s)\n\n def test_auto_max_age_minimum(self):\n \"\"\"Check that the minimum auto_max_age is enforced.\n \"\"\"\n with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):\n with iers.conf.set_temp('auto_max_age', 5.0):\n with pytest.raises(ValueError) as err:\n iers_table = iers.IERS_Auto.open()\n delta = iers_table.ut1_utc(self.t.jd1, self.t.jd2)\n assert str(err.value) == 'IERS auto_max_age configuration value must be larger than 10 days'\n\n @pytest.mark.remote_data\n def test_no_auto_download(self):\n with iers.conf.set_temp('auto_download', False):\n t = iers.IERS_Auto.open()\n assert type(t) is iers.IERS_B\n\n @pytest.mark.remote_data\n def test_simple(self):\n\n with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):\n\n dat = iers.IERS_Auto.open()\n assert dat['MJD'][0] == 57359.0 * u.d\n assert dat['MJD'][-1] == 57539.0 * u.d\n\n # Pretend we are accessing at a time 7 days after start of 
predictive data\n predictive_mjd = dat.meta['predictive_mjd']\n dat._time_now = Time(predictive_mjd, format='mjd') + 7 * u.d\n\n # Look at times before and after the test file begins. 0.1292905 is\n # the IERS-B value from MJD=57359. The value in\n # finals2000A-2016-02-30-test has been replaced at this point.\n assert np.allclose(dat.ut1_utc(Time(50000, format='mjd').jd).value, 0.1292905)\n assert np.allclose(dat.ut1_utc(Time(60000, format='mjd').jd).value, -0.2246227)\n\n # Now pretend we are accessing at time 60 days after start of predictive data.\n # There will be a warning when downloading the file doesn't give new data\n # and an exception when extrapolating into the future with insufficient data.\n dat._time_now = Time(predictive_mjd, format='mjd') + 60 * u.d\n assert np.allclose(dat.ut1_utc(Time(50000, format='mjd').jd).value, 0.1292905)\n with catch_warnings(iers.IERSStaleWarning) as warns:\n with pytest.raises(ValueError) as err:\n dat.ut1_utc(Time(60000, format='mjd').jd)\n assert 'interpolating from IERS_Auto using predictive values' in str(err)\n assert len(warns) == 1\n assert 'IERS_Auto predictive values are older' in str(warns[0].message)\n\n # Warning only if we are getting return status\n with catch_warnings(iers.IERSStaleWarning) as warns:\n dat.ut1_utc(Time(60000, format='mjd').jd, return_status=True)\n assert len(warns) == 1\n assert 'IERS_Auto predictive values are older' in str(warns[0].message)\n\n # Now set auto_max_age = None which says that we don't care how old the\n # available IERS-A file is. There should be no warnings or exceptions.\n with iers.conf.set_temp('auto_max_age', None):\n with catch_warnings(iers.IERSStaleWarning) as warns:\n dat.ut1_utc(Time(60000, format='mjd').jd)\n assert not warns\n\n # Now point to a later file with same values but MJD increased by\n # 60 days and see that things work. dat._time_now is still the same value\n # as before, i.e. right around the start of predictive values for the new file.\n # (In other words this is like downloading the latest file online right now).\n with iers.conf.set_temp('iers_auto_url', self.iers_a_url_2):\n\n # Look at times before and after the test file begins. This forces a new download.\n assert np.allclose(dat.ut1_utc(Time(50000, format='mjd').jd).value, 0.1292905)\n assert np.allclose(dat.ut1_utc(Time(60000, format='mjd').jd).value, -0.3)\n\n # Now the time range should be different.\n assert dat['MJD'][0] == 57359.0 * u.d\n assert dat['MJD'][-1] == (57539.0 + 60) * u.d\n" ]
[ [ "numpy.array", "numpy.all", "numpy.arange" ] ]
huangyuyao/bevutils
[ "24e5c4954b17ed58e27697447ab667c65f59b7e0" ]
[ "bevutils/layers/perspective_transformer.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\nfrom ..functional import epipolar as E\n\nclass PerspectiveTransformerLayer(nn.Module):\n\n def __init__(self, bv_size, pv_size, intrinsics, translate_z = -10.0, rotation_order='xyz', device='cuda:0', dtype=torch.float32):\n '''\n `translate_z` is a hyperparameter to be chose in range (-Inf, 1.0), the perspective view will be roughly scaled (1-translate_z) times.\n '''\n super(PerspectiveTransformerLayer, self).__init__()\n self.dtype = dtype\n self.dev = torch.device(device) if device else None\n self.rot_order = rotation_order\n self.bv_size, self.pv_size = bv_size, pv_size\n self.register_buffer('intrinsics', self._prepare_intrinsics(intrinsics))\n self.register_buffer('inv_intrinsics', torch.inverse(self.intrinsics))\n self.register_buffer('n', torch.tensor([[0], [0], [1]], device=self.dev, dtype=self.dtype))\n self.register_buffer('tz', torch.tensor([translate_z], device=self.dev, dtype=self.dtype))\n self.register_buffer('bv_grid', self._prepare_coord_grid(*bv_size))\n bv_pivot, pv_pivot = self._prepare_pivots(bv_size, pv_size, self.inv_intrinsics)\n self.register_buffer('bv_pivot', bv_pivot)\n self.register_buffer('pv_pivot', pv_pivot)\n\n def _prepare_intrinsics(self, intrinsics):\n if isinstance(intrinsics, list) or isinstance(intrinsics, np.array):\n intrinsics = torch.tensor(intrinsics, requires_grad=False, device=self.dev, dtype=self.dtype)\n assert isinstance(intrinsics, torch.Tensor)\n assert intrinsics.shape == (3, 3)\n return intrinsics\n \n def _prepare_pivots(self, bv_size, pv_size, inv_intrinsics):\n bv_pivot = torch.tensor([[bv_size[1]/2.0], [bv_size[0]], [1.0]], device=self.dev, dtype=self.dtype)\n pv_pivot = torch.tensor([[pv_size[1]/2.0], [pv_size[0]], [1.0]], device=self.dev, dtype=self.dtype)\n bv_pivot = inv_intrinsics @ bv_pivot\n pv_pivot = inv_intrinsics @ pv_pivot\n return bv_pivot, pv_pivot\n\n def _prepare_coord_grid(self, H, W):\n xgrid = torch.arange(W, requires_grad=False, device=self.dev, dtype=self.dtype).repeat(H, 1).view((H, W, 1, 1))\n ygrid = torch.arange(H, requires_grad=False, device=self.dev, dtype=self.dtype).unsqueeze_(1).repeat(1, W).view(H, W, 1, 1)\n grid = torch.cat((xgrid, ygrid, torch.ones_like(xgrid, device=self.dev, dtype=self.dtype)), dim=-2)\n return grid\n\n def forward(self, pv, rx=0.0, ry=0.0, rz=0.0):\n '''\n REFERENCES:\n - Homography: refers to https://en.wikipedia.org/wiki/Homography_(computer_vision)\n - Bilinear Interpolation: refers to https://medium.com/@shanlins/spatial-transformer-networks-stn-and-its-implementation-2638d58d41f8\n '''\n B, C, Hp, Wp, Hb, Wb = *pv.shape, *self.bv_size\n # get constrained homography\n R = E.torch.make_rotation_matrix(rx, ry, rz, self.rot_order, device=pv.device, dtype=self.dtype)\n H = E.torch.make_constrained_homography(R, self.tz, self.intrinsics, self.inv_intrinsics, self.bv_pivot, self.pv_pivot)\n # get coordinates on perspective view for each grid: `pv_coord` with shape (B, Hb, Wb, 2, 1)\n bv_grid = self.bv_grid.expand(B, Hb, Wb, 3, 1)\n pv_coord = torch.matmul(H[:, None, None, :, :], bv_grid)\n pv_coord = pv_coord[:, :, :, 0:2, :] / pv_coord[:, :, :, 2:3, :]\n # gather pixels acoording to `pv_coord`\n x = pv_coord[:,None,:,:,0,0] # with shape (B, 1, Hb, Wb)\n y = pv_coord[:,None,:,:,1,0]\n mask = (~((x >= 0) & (x < Wp) & (y >= 0) & (y < Hp))).expand(B, C, Hb, Wb)\n x0 = x.clamp_(0, Wp-2).to(torch.long)\n y0 = y.clamp_(0, Hp-2).to(torch.long)\n offset_00 = y0 * Wp + x0\n offset_01 = offset_00 
+ 1\n offset_10 = offset_00 + Wp\n offset_11 = offset_10 + 1\n pv = pv.view(B, C, Hp*Wp) # with shape (B, C, Hp*Wp)\n pvmap = [\n torch.gather(pv, -1, offset_00.expand(B, C, Hb, Wb).view(B, C, Hb*Wb)),\n torch.gather(pv, -1, offset_01.expand(B, C, Hb, Wb).view(B, C, Hb*Wb)),\n torch.gather(pv, -1, offset_10.expand(B, C, Hb, Wb).view(B, C, Hb*Wb)),\n torch.gather(pv, -1, offset_11.expand(B, C, Hb, Wb).view(B, C, Hb*Wb))] # pv maps: with shape (B, C, Hb*Wb)\n # combine pv pixels\n x0, x1, y0, y1 = (x - x0.to(self.dtype)), ((x0+1).to(self.dtype) - x), (y - y0.to(self.dtype)), ((y0+1).to(self.dtype) - y)\n weights = [(x1 * y1), (x0 * y1), (x1 * y0), (x0 * y0)] # weight : with shape (B, 1, Hb, Wb)\n bvmap = sum([w.expand(B, C, Hb, Wb) * p.view(B, C, Hb, Wb) for w, p in zip(weights, pvmap)]) # bvmap with shape (B, C, Hb, Wb)\n #__import__('pdb').set_trace()\n bvmap[mask] = 0.0\n return bvmap\n" ]
[ [ "torch.ones_like", "torch.inverse", "torch.tensor", "torch.arange", "torch.device", "torch.matmul" ] ]
danielballan/suitcase-tiff
[ "eb401cd4f2f1bd637ec23c10472e0579f0cefc66" ]
[ "suitcase/tiff/tests.py" ]
[ "from . import export\nimport numpy\nfrom numpy.testing import assert_array_equal\nimport pytest\nimport tifffile\n\nexpected = numpy.ones((10, 10))\n\n\[email protected]('stack_images', [True, False])\ndef test_export(tmp_path, example_data, stack_images):\n ''' runs a test using the plan that is passed through to it\n\n ..note::\n\n Due to the `events_data` `pytest.fixture` this will run multiple tests\n each with a range of detectors and a range of event_types. see\n `suitcase.utils.conftest` for more info\n\n '''\n\n collector = example_data()\n artifacts = export(collector, tmp_path, file_prefix='',\n stack_images=stack_images)\n\n for filename in artifacts['stream_data']:\n actual = tifffile.imread(str(filename))\n if len(actual.shape) == 3:\n for img in actual:\n assert_array_equal(img, expected)\n else:\n assert_array_equal(actual, expected)\n" ]
[ [ "numpy.ones", "numpy.testing.assert_array_equal" ] ]
yjf18340/webots
[ "7c35a359848bafe81fe0229ac2ed587528f4c73e" ]
[ "projects/samples/robotbenchmark/visual_tracking/controllers/visual_tracking/visual_tracking.py" ]
[ "\"\"\"Sample Webots controller for the visual tracking benchmark.\"\"\"\n\nfrom controller import Robot, Node\nimport base64\nimport os\nimport sys\nimport tempfile\n\ntry:\n import numpy as np\nexcept ImportError:\n sys.exit(\"Warning: 'numpy' module not found. Please check the Python modules installation instructions \" +\n \"at 'https://www.cyberbotics.com/doc/guide/using-python'.\")\ntry:\n import cv2\nexcept ImportError:\n sys.exit(\"Warning: 'cv2' module not found. Please check the Python modules installation instructions \" +\n \"at 'https://www.cyberbotics.com/doc/guide/using-python'.\")\n\n\ndef cleanup():\n \"\"\"Remove device image files.\"\"\"\n # Ignore errors if file doesn't exist.\n try:\n os.remove(deviceImagePath + '/display.jpg')\n except OSError:\n pass\n try:\n os.remove(deviceImagePath + '/camera.jpg')\n except OSError:\n pass\n\n\ndef sendDeviceImage(robot, device):\n \"\"\"Send the rendering device image to the robot window.\"\"\"\n if device.getNodeType() == Node.DISPLAY:\n deviceName = 'display'\n fileName = deviceName + '.jpg'\n device.imageSave(None, deviceImagePath + '/' + fileName)\n elif device.getNodeType() == Node.CAMERA:\n deviceName = 'camera'\n fileName = deviceName + '.jpg'\n device.saveImage(deviceImagePath + '/' + fileName, 80)\n else:\n return\n with open(deviceImagePath + '/' + fileName, 'rb') as f:\n fileString = f.read()\n fileString64 = base64.b64encode(fileString).decode()\n robot.wwiSendText(\"image[\" + deviceName + \"]:data:image/jpeg;base64,\" + fileString64)\n f.close()\n\n\n# Set path to store temporary device images\ndeviceImagePath = os.getcwd()\ntry:\n imageFile = open(deviceImagePath + \"/image.jpg\", 'w')\n imageFile.close()\nexcept IOError:\n deviceImagePath = tempfile.gettempdir()\n\n# Get pointer to the robot.\nrobot = Robot()\n\n# Set the controller time step based on the current world's time step.\ntimestep = int(robot.getBasicTimeStep() * 4)\n\n# Get camera motors.\npanHeadMotor = robot.getMotor('PRM:/r1/c1/c2-Joint2:12')\ntiltHeadMotor = robot.getMotor('PRM:/r1/c1/c2/c3-Joint2:13')\n# Other camera motor not used in this controller.\n# tiltNeckMotor = robot.getMotor('PRM:/r1/c1-Joint2:11')\n\n# Initialize motors in order to use velocity control instead of position control.\npanHeadMotor.setPosition(float('+inf'))\ntiltHeadMotor.setPosition(float('+inf'))\n# Set initial motors velocity.\npanHeadMotor.setVelocity(0.0)\ntiltHeadMotor.setVelocity(0.0)\n\n# Get and enable the camera device.\ncamera = robot.getCamera('PRM:/r1/c1/c2/c3/i1-FbkImageSensor:F1')\ncamera.enable(timestep)\nwidth = camera.getWidth()\nheight = camera.getHeight()\n\n# Get the display device.\n# The display can be used to visually show the tracked position.\ndisplay = robot.getDisplay('display')\n# Show camera image in the display background.\ndisplay.attachCamera(camera)\ndisplay.setColor(0xFF0000)\n\n# Variables needed to draw the target on the display.\ntargetPoint = []\ntargetRadius = 0\n\n# Main loop: perform a simulation step until the simulation is over.\nwhile robot.step(timestep) != -1:\n # Remove previously detected blob info from the display if needed.\n if targetPoint:\n # Erase the previous drawing by setting the pixels alpha value to 0 (transparent).\n display.setAlpha(0.0)\n radius = targetRadius\n if radius < 5:\n # Minimum red dot size.\n radius = 5\n size = 2 * radius + 1\n display.fillRectangle(targetPoint[0] - radius,\n targetPoint[1] - radius, size, size)\n\n # Send the camera image to the robot window.\n # sendDeviceImage(robot, 
camera)\n\n # Get camera image.\n rawString = camera.getImage()\n\n # Create mask for yellow pixels based on the camera image.\n index = 0\n maskRGB = np.zeros([height, width], np.uint8)\n for j in range(0, height):\n for i in range(0, width):\n # Camera image pixel format\n if sys.version_info.major > 2: # Python 3 code\n b = rawString[index]\n g = rawString[index + 1]\n r = rawString[index + 2]\n else: # Python 2.7 code\n b = ord(rawString[index])\n g = ord(rawString[index + 1])\n r = ord(rawString[index + 2])\n index += 4\n # Yellow color threshold.\n if b < 50 and g > 180 and r > 180:\n maskRGB[j][i] = True\n\n # Find blobs contours in the mask.\n contours = cv2.findContours(maskRGB.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]\n\n # Only proceed if at least one blob is found.\n if not contours:\n continue\n\n # Choose the largest blob.\n blob = max(contours, key=cv2.contourArea)\n\n # Compute the minimum enclosing circle and centroid of the blob.\n ((x, y), radius) = cv2.minEnclosingCircle(blob)\n targetPoint = [int(x), int(y)]\n targetRadius = int(radius)\n\n # Show detected blob in the display: draw the circle and centroid.\n display.setAlpha(1.0)\n if targetRadius > 0:\n display.setColor(0x00FFFF)\n display.drawOval(targetPoint[0], targetPoint[1], targetRadius, targetRadius)\n display.setColor(0xFF0000)\n display.fillOval(int(targetPoint[0]), int(targetPoint[1]), 5, 5)\n # Send the display image to the robot window.\n sendDeviceImage(robot, display)\n\n # Move the head and camera in order to center the target object.\n # Compute distance in pixels between the target point and the center.\n dx = targetPoint[0] - width / 2\n dy = targetPoint[1] - height / 2\n # The speed factor 1.5 has been chosen empirically.\n panHeadMotor.setVelocity(-1.5 * dx / width)\n tiltHeadMotor.setVelocity(-1.5 * dy / height)\n\n# Cleanup code.\ncleanup()\n" ]
[ [ "numpy.zeros" ] ]
tpedelose/apls
[ "5afcadb1e75e5b2f0c0e0c8be4419251f61f23e7" ]
[ "apls/apls_utils.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 6 14:05:30 2019\n\n@author: avanetten\n\"\"\"\n\nimport osmnx_funcs\nimport numpy as np\nfrom osgeo import gdal, ogr, osr\nimport scipy.spatial\nimport geopandas as gpd\nimport rasterio as rio\nimport affine as af\nimport shapely\nimport time\nimport os\nimport sys\nimport cv2\nimport skimage\nimport subprocess\nimport matplotlib.pyplot as plt\nfrom math import sqrt, radians, cos, sin, asin\n# import logging\n\n# add apls path and import apls_tools\npath_apls_src = os.path.dirname(os.path.realpath(__file__))\nsys.path.append(path_apls_src)\n\n\n###############################################################################\ndef pixelToGeoCoord(xPix, yPix, inputRaster, sourceSR='', geomTransform='',\n targetSR=''):\n '''From spacenet geotools'''\n # If you want to gauruntee lon lat output, specify TargetSR otherwise, geocoords will be in image geo reference\n # targetSR = osr.SpatialReference()\n # targetSR.ImportFromEPSG(4326)\n # Transform can be performed at the polygon level instead of pixel level\n\n if targetSR == '':\n performReprojection = False\n targetSR = osr.SpatialReference()\n targetSR.ImportFromEPSG(4326)\n else:\n performReprojection = True\n\n if geomTransform == '':\n srcRaster = gdal.Open(inputRaster)\n geomTransform = srcRaster.GetGeoTransform()\n\n source_sr = osr.SpatialReference()\n source_sr.ImportFromWkt(srcRaster.GetProjectionRef())\n\n geom = ogr.Geometry(ogr.wkbPoint)\n xOrigin = geomTransform[0]\n yOrigin = geomTransform[3]\n pixelWidth = geomTransform[1]\n pixelHeight = geomTransform[5]\n\n xCoord = (xPix * pixelWidth) + xOrigin\n yCoord = (yPix * pixelHeight) + yOrigin\n geom.AddPoint(xCoord, yCoord)\n\n if performReprojection:\n if sourceSR == '':\n srcRaster = gdal.Open(inputRaster)\n sourceSR = osr.SpatialReference()\n sourceSR.ImportFromWkt(srcRaster.GetProjectionRef())\n coord_trans = osr.CoordinateTransformation(sourceSR, targetSR)\n geom.Transform(coord_trans)\n\n return (geom.GetX(), geom.GetY())\n\n\n###############################################################################\ndef nodes_near_point(x, y, kdtree, kd_idx_dic, x_coord='x', y_coord='y',\n n_neighbors=-1,\n radius_m=150,\n verbose=False):\n \"\"\"\n Get nodes near the given point.\n\n Notes\n -----\n if n_neighbors < 0, query based on distance,\n else just return n nearest neighbors\n\n Arguments\n ---------\n x : float\n x coordinate of point\n y: float\n y coordinate of point\n kdtree : scipy.spatial.kdtree\n kdtree of nondes in graph\n kd_idx_dic : dict\n Dictionary mapping kdtree entry to node name\n x_coord : str\n Name of x_coordinate, can be 'x' or 'lon'. Defaults to ``'x'``.\n y_coord : str\n Name of y_coordinate, can be 'y' or 'lat'. Defaults to ``'y'``.\n n_neighbors : int\n Neareast number of neighbors to return. 
If < 0, ignore.\n Defaults to ``-1``.\n radius_meters : float\n Radius to search for nearest neighbors\n Returns\n -------\n kd_idx_dic, kdtree, arr : tuple\n kd_idx_dic maps kdtree entry to node name\n kdree is the actual kdtree\n arr is the numpy array of node positions\n \"\"\"\n\n point = [x, y]\n\n # query kd tree for nodes of interest\n if n_neighbors > 0:\n node_names, idxs_refine, dists_m_refine = _query_kd_nearest(\n kdtree, kd_idx_dic, point, n_neighbors=n_neighbors)\n else:\n node_names, idxs_refine, dists_m_refine = _query_kd_ball(\n kdtree, kd_idx_dic, point, radius_m)\n\n if verbose:\n print((\"subgraph node_names:\", node_names))\n\n # get subgraph\n # G_sub = G_.subgraph(node_names)\n\n return node_names, dists_m_refine # G_sub\n\n\n###############################################################################\ndef _nodes_near_origin(G_, node, kdtree, kd_idx_dic,\n x_coord='x', y_coord='y', radius_m=150, verbose=False):\n '''Get nodes a given radius from the desired node. G_ should be the \n maximally simplified graph'''\n\n # get node coordinates\n n_props = G_.node[node]\n x0, y0 = n_props[x_coord], n_props[y_coord]\n point = [x0, y0]\n\n # query kd tree for nodes of interest\n node_names, idxs_refine, dists_m_refine = _query_kd_ball(\n kdtree, kd_idx_dic, point, radius_m)\n if verbose:\n print((\"subgraph node_names:\", node_names))\n\n # get subgraph\n # G_sub = G_.subgraph(node_names)\n\n return node_names, dists_m_refine # G_sub\n\n\n###############################################################################\ndef G_to_kdtree(G_, x_coord='x', y_coord='y', verbose=False):\n \"\"\"\n Create kd tree from node positions.\n\n Notes\n -----\n (x, y) = (lon, lat)\n kd_idx_dic maps kdtree entry to node name:\n kd_idx_dic[i] = n (n in G.nodes())\n x_coord can be in utm (meters), or longitude\n\n Arguments\n ---------\n G_ : networkx graph\n Input networkx graph, with nodes assumed to have a dictioary of\n properties that includes position\n x_coord : str\n Name of x_coordinate, can be 'x' or 'lon'. Defaults to ``'x'``.\n y_coord : str\n Name of y_coordinate, can be 'y' or 'lat'. 
Defaults to ``'y'``.\n\n Returns\n -------\n kd_idx_dic, kdtree, arr : tuple\n kd_idx_dic maps kdtree entry to node name\n kdree is the actual kdtree\n arr is the numpy array of node positions\n \"\"\"\n\n nrows = len(G_.nodes())\n ncols = 2\n kd_idx_dic = {}\n arr = np.zeros((nrows, ncols))\n # populate node array\n t1 = time.time()\n for i, n in enumerate(G_.nodes()):\n n_props = G_.nodes[n]\n if x_coord == 'lon':\n lat, lon = n_props['lat'], n_props['lon']\n x, y = lon, lat\n else:\n x, y = n_props[x_coord], n_props[y_coord]\n\n arr[i] = [x, y]\n kd_idx_dic[i] = n\n\n # now create kdtree from numpy array\n kdtree = scipy.spatial.KDTree(arr)\n if verbose:\n print(\"Time to create k-d tree:\", time.time() - t1, \"seconds\")\n return kd_idx_dic, kdtree, arr\n\n\n###############################################################################\ndef _query_kd_nearest(kdtree, kd_idx_dic, point, n_neighbors=10,\n distance_upper_bound=10000, keep_point=True):\n '''\n Query the kd-tree for neighbors\n Return nearest node names, distances, nearest node indexes\n If not keep_point, remove the origin point from the list\n '''\n\n dists_m, idxs = kdtree.query(point, k=n_neighbors,\n distance_upper_bound=distance_upper_bound)\n\n idxs_refine = list(np.asarray(idxs))\n # print(\"apls_utils.query_kd_neareast - idxs_refilne:\", idxs_refine)\n # print(\"apls_utils.query_kd_neareast - dists_m_refilne:\", dists_m)\n dists_m_refine = list(dists_m)\n node_names = [kd_idx_dic[i] for i in idxs_refine]\n\n return node_names, idxs_refine, dists_m_refine\n\n\n###############################################################################\ndef _query_kd_ball(kdtree, kd_idx_dic, point, r_meters, keep_point=True):\n '''\n Query the kd-tree for neighbors within a distance r of the point\n Return nearest node names, distances, nearest node indexes\n if not keep_point, remove the origin point from the list\n '''\n\n dists_m, idxs = kdtree.query(point, k=500, distance_upper_bound=r_meters)\n # keep only points within distance and greaater than 0?\n if not keep_point:\n f0 = np.where((dists_m <= r_meters) & (dists_m > 0))\n else:\n f0 = np.where((dists_m <= r_meters))\n idxs_refine = list(np.asarray(idxs)[f0])\n dists_m_refine = list(dists_m[f0])\n node_names = [kd_idx_dic[i] for i in idxs_refine]\n\n return node_names, idxs_refine, dists_m_refine\n\n\n###############################################################################\ndef _get_graph_extent(G_):\n '''min and max x and y'''\n xall = [G_.node[n]['x'] for n in G_.nodes()]\n yall = [G_.node[n]['y'] for n in G_.nodes()]\n xmin, xmax = np.min(xall), np.max(xall)\n ymin, ymax = np.min(yall), np.max(yall)\n dx, dy = xmax-xmin, ymax-ymin\n return xmin, xmax, ymin, ymax, dx, dy\n\n\n###############################################################################\ndef _latlon2pixel(lat, lon, input_raster='', targetsr='', geom_transform=''):\n '''\n Convert latitude, longitude coords to pixexl coords.\n From spacenet geotools\n '''\n\n sourcesr = osr.SpatialReference()\n sourcesr.ImportFromEPSG(4326)\n\n geom = ogr.Geometry(ogr.wkbPoint)\n # geom.AddPoint(lon, lat)\n geom.AddPoint(lat, lon)\n\n if targetsr == '':\n src_raster = gdal.Open(input_raster)\n targetsr = osr.SpatialReference()\n targetsr.ImportFromWkt(src_raster.GetProjectionRef())\n coord_trans = osr.CoordinateTransformation(sourcesr, targetsr)\n if geom_transform == '':\n src_raster = gdal.Open(input_raster)\n transform = src_raster.GetGeoTransform()\n else:\n transform = geom_transform\n\n x_origin = 
transform[0]\n # print(x_origin)\n y_origin = transform[3]\n # print(y_origin)\n pixel_width = transform[1]\n # print(pixel_width)\n pixel_height = transform[5]\n # print(pixel_height)\n geom.Transform(coord_trans)\n # print(geom.GetPoint())\n x_pix = (geom.GetPoint()[0] - x_origin) / pixel_width\n y_pix = (geom.GetPoint()[1] - y_origin) / pixel_height\n\n return (x_pix, y_pix)\n\n\n###############################################################################\ndef _wmp2pixel(x, y, input_raster='', targetsr='', geom_transform=''):\n '''\n Convert wmp coords to pixexl coords.\n '''\n\n sourcesr = osr.SpatialReference()\n sourcesr.ImportFromEPSG(3857)\n\n geom = ogr.Geometry(ogr.wkbPoint)\n geom.AddPoint(x, y)\n\n if targetsr == '':\n src_raster = gdal.Open(input_raster)\n targetsr = osr.SpatialReference()\n targetsr.ImportFromWkt(src_raster.GetProjectionRef())\n coord_trans = osr.CoordinateTransformation(sourcesr, targetsr)\n if geom_transform == '':\n src_raster = gdal.Open(input_raster)\n transform = src_raster.GetGeoTransform()\n else:\n transform = geom_transform\n\n x_origin = transform[0]\n # print(x_origin)\n y_origin = transform[3]\n # print(y_origin)\n pixel_width = transform[1]\n # print(pixel_width)\n pixel_height = transform[5]\n # print(pixel_height)\n geom.Transform(coord_trans)\n # print(geom.GetPoint())\n x_pix = (geom.GetPoint()[0] - x_origin) / pixel_width\n y_pix = (geom.GetPoint()[1] - y_origin) / pixel_height\n\n return (x_pix, y_pix)\n\n\n###############################################################################\ndef _set_pix_coords(G_, im_test_file=''):\n '''Get pixel coords. Update G_ and get control_points, and graph_coords'''\n\n if len(G_.nodes()) == 0:\n return G_, [], []\n\n control_points, cp_x, cp_y = [], [], []\n for n in G_.nodes():\n u_x, u_y = G_.nodes[n]['x'], G_.nodes[n]['y']\n control_points.append([n, u_x, u_y])\n lat, lon = G_.nodes[n]['lat'], G_.nodes[n]['lon']\n if len(im_test_file) > 0:\n pix_x, pix_y = _latlon2pixel(lat, lon, input_raster=im_test_file)\n else:\n print(\"set_pix_coords(): oops, no image file\")\n pix_x, pix_y = 0, 0\n # update G_\n G_.nodes[n]['pix_col'] = pix_x\n G_.nodes[n]['pix_row'] = pix_y\n G_.nodes[n]['x_pix'] = pix_x\n G_.nodes[n]['y_pix'] = pix_y\n # add to arrays\n cp_x.append(pix_x)\n cp_y.append(pix_y)\n # get line segements in pixel coords\n seg_endpoints = []\n for (u, v) in G_.edges():\n ux, uy = G_.nodes[u]['pix_col'], G_.nodes[u]['pix_row']\n vx, vy = G_.nodes[v]['pix_col'], G_.nodes[v]['pix_row']\n seg_endpoints.append([(ux, uy), (vx, vy)])\n gt_graph_coords = (cp_x, cp_y, seg_endpoints)\n\n return G_, control_points, gt_graph_coords\n\n\n###############################################################################\ndef convertTo8Bit(rasterImageName, outputRaster,\n outputPixType='Byte',\n outputFormat='GTiff',\n rescale_type='rescale',\n percentiles=[2, 98]):\n '''\n This does a relatively poor job of converting to 8bit, as opening in qgis\n the images look very different.\n rescale_type = [clip, rescale]\n if resceale, each band is rescaled to its own min and max\n if clip, scaling is done sctricly between 0 65535\n '''\n\n srcRaster = gdal.Open(rasterImageName)\n nbands = srcRaster.RasterCount\n if nbands == 3:\n cmd = ['gdal_translate', '-ot', outputPixType, '-of', outputFormat,\n '-co', '\"PHOTOMETRIC=rgb\"']\n else:\n cmd = ['gdal_translate', '-ot', outputPixType, '-of', outputFormat]\n\n for bandId in range(srcRaster.RasterCount):\n bandId = bandId+1\n band = srcRaster.GetRasterBand(bandId)\n if 
rescale_type == 'rescale':\n bmin = band.GetMinimum()\n bmax = band.GetMaximum()\n # if not exist minimum and maximum values\n if bmin is None or bmax is None:\n (bmin, bmax) = band.ComputeRasterMinMax(1)\n # else, rescale\n band_arr_tmp = band.ReadAsArray()\n bmin = np.percentile(band_arr_tmp.flatten(), percentiles[0])\n bmax = np.percentile(band_arr_tmp.flatten(), percentiles[1])\n\n else:\n bmin, bmax = 0, 65535\n\n cmd.append('-scale_{}'.format(bandId))\n cmd.append('{}'.format(bmin))\n cmd.append('{}'.format(bmax))\n cmd.append('{}'.format(0))\n cmd.append('{}'.format(255))\n\n cmd.append(rasterImageName)\n cmd.append(outputRaster)\n print(cmd)\n subprocess.call(cmd)\n\n return\n\n\n###############################################################################\n# def edit_node_props(props, new):\n# pass\n\n\n###############################################################################\ndef create_buffer_geopandas(inGDF, buffer_distance_meters=2,\n buffer_cap_style=1, dissolve_by='class',\n projectToUTM=True, verbose=False):\n \"\"\"\n Create a buffer around the lines of the geojson\n\n Arguments\n ---------\n inGDF : geodataframe\n Geodataframe from a SpaceNet geojson.\n buffer_distance_meters : float\n Width of buffer around geojson lines. Formally, this is the distance\n to each geometric object. Optional. Defaults to ``2``.\n buffer_cap_style : int\n Cap_style of buffer, see: (https://shapely.readthedocs.io/en/stable/manual.html#constructive-methods)\n Defaults to ``1`` (round).\n dissolve_by : str\n Method for differentiating rows in geodataframe, and creating unique\n mask values. Defaults to ``'class'``.\n projectToUTM : bool\n Switch to project gdf to UTM coordinates. Defaults to ``True``.\n verbose : bool\n Switch to print relevant values. 
Defaults to ``False``.\n\n Returns\n -------\n gdf_buffer : geopandas dataframe\n Dataframe created from geojson\n\n \"\"\"\n\n # inGDF = gpd.read_file(geoJsonFileName)\n if len(inGDF) == 0:\n return []\n\n # if we want a geojson instead of gdf for input\n # try:\n # inGDF = gpd.read_file(geoJsonFileName)\n # except:\n # return []\n\n # Transform gdf Roadlines into UTM so that Buffer makes sense\n if projectToUTM:\n tmpGDF = osmnx_funcs.project_gdf(inGDF, inGDF.crs)\n else:\n tmpGDF = inGDF\n\n if verbose:\n print(\"inGDF.columns:\", tmpGDF.columns)\n gdf_utm_buffer = tmpGDF.copy()\n\n # perform Buffer to produce polygons from Line Segments\n gdf_utm_buffer['geometry'] = tmpGDF.buffer(buffer_distance_meters,\n cap_style=buffer_cap_style)\n\n gdf_utm_dissolve = gdf_utm_buffer.dissolve(by=dissolve_by)\n gdf_utm_dissolve.crs = gdf_utm_buffer.crs\n if projectToUTM:\n gdf_buffer = gdf_utm_dissolve.to_crs(inGDF.crs)\n else:\n gdf_buffer = gdf_utm_dissolve\n if verbose:\n print(\"gdf_buffer['geometry'].values[0]:\",\n gdf_buffer['geometry'].values[0])\n\n # add the dissolve_by column back into final gdf, since it's now the index\n gdf_buffer[dissolve_by] = gdf_buffer.index.values\n\n return gdf_buffer\n\n\n###############################################################################\ndef _get_road_buffer(geoJson, im_vis_file, output_raster,\n buffer_meters=2, burnValue=1,\n # max_mask_val=1,\n buffer_cap_style=6,\n useSpacenetLabels=False,\n plot_file='', figsize=(11, 3), fontsize=6,\n dpi=800, show_plot=False,\n valid_road_types=set([]), verbose=False):\n '''\n Wrapper around create_buffer_geopandas(), with plots\n Get buffer around roads defined by geojson and image files\n valid_road_types serves as a filter of valid types (no filter if len==0)\n https://wiki.openstreetmap.org/wiki/Key:highway\n valid_road_types = set(['motorway', 'trunk', 'primary', 'secondary',\n 'tertiary',\n 'motorway_link', 'trunk_link', 'primary_link',\n 'secondary_link', 'tertiary_link',\n 'unclassified', 'residential', 'service' ])\n '''\n\n # get buffer\n\n # filter out roads of the wrong type\n try:\n inGDF_raw = gpd.read_file(geoJson)\n except:\n mask_gray = np.zeros(cv2.imread(im_vis_file, 0).shape)\n cv2.imwrite(output_raster, mask_gray)\n return [], []\n\n if useSpacenetLabels:\n inGDF = inGDF_raw\n # use try/except to handle empty label files\n try:\n inGDF['type'] = inGDF['road_type'].values\n inGDF['class'] = 'highway'\n inGDF['highway'] = 'highway'\n except:\n pass\n\n else:\n # filter out roads of the wrong type\n if (len(valid_road_types) > 0) and (len(inGDF_raw) > 0):\n if 'highway' in inGDF_raw.columns:\n inGDF = inGDF_raw[inGDF_raw['highway'].isin(valid_road_types)]\n # set type tag\n inGDF['type'] = inGDF['highway'].values\n inGDF['class'] = 'highway'\n else:\n inGDF = inGDF_raw[inGDF_raw['type'].isin(valid_road_types)]\n # set highway tag\n inGDF['highway'] = inGDF['type'].values\n\n if verbose:\n print(\"gdf.type:\", inGDF['type'])\n if len(inGDF) != len(inGDF_raw):\n print(\"len(inGDF), len(inGDF_raw)\",\n len(inGDF), len(inGDF_raw))\n print(\"gdf['type']:\", inGDF['type'])\n else:\n inGDF = inGDF_raw\n try:\n inGDF['type'] = inGDF['highway'].values\n inGDF['class'] = 'highway'\n except:\n pass\n\n gdf_buffer = create_buffer_geopandas(inGDF,\n buffer_distance_meters=buffer_meters,\n buffer_cap_style=buffer_cap_style,\n dissolve_by='class',\n projectToUTM=True)\n\n # make sure gdf is not null\n if len(gdf_buffer) == 0:\n mask_gray = np.zeros(cv2.imread(im_vis_file, 0).shape)\n 
cv2.imwrite(output_raster, mask_gray)\n # create label image\n else:\n gdf_to_array(gdf_buffer, im_vis_file, output_raster,\n burnValue=burnValue)\n # load mask\n mask_gray = cv2.imread(output_raster, 0)\n # mask_gray = np.clip(mask_gray, 0, max_mask_val)\n\n if plot_file:\n\n fig, (ax0, ax1, ax2, ax3) = plt.subplots(1, 4, figsize=figsize)\n\n # road lines\n try:\n gdfRoadLines = gpd.read_file(geoJson)\n gdfRoadLines.plot(ax=ax0, marker='o', color='red')\n except:\n ax0.imshow(mask_gray)\n ax0.axis('off')\n ax0.set_aspect('equal')\n ax0.set_title('Unfiltered Roads from GeoJson', fontsize=fontsize)\n\n # first show raw image\n im_vis = cv2.imread(im_vis_file, 1)\n img_mpl = cv2.cvtColor(im_vis, cv2.COLOR_BGR2RGB)\n ax1.imshow(img_mpl)\n ax1.axis('off')\n ax1.set_title('Raw Image', fontsize=fontsize)\n\n # plot mask\n ax2.imshow(mask_gray)\n ax2.axis('off')\n ax2.set_title('Roads Mask (' + str(np.round(buffer_meters))\n + ' meter buffer)', fontsize=fontsize)\n\n # plot combined\n ax3.imshow(img_mpl)\n # overlay mask\n # set zeros to nan\n z = mask_gray.astype(float)\n z[z == 0] = np.nan\n # change palette to orange\n palette = plt.cm.gray\n palette.set_over('orange', 1.0)\n ax3.imshow(z, cmap=palette, alpha=0.4,\n norm=matplotlib.colors.Normalize(vmin=0.5, vmax=0.9, clip=False))\n ax3.set_title('Raw Image + Buffered Roads', fontsize=fontsize)\n ax3.axis('off')\n\n #plt.axes().set_aspect('equal', 'datalim')\n\n # plt.tight_layout()\n plt.savefig(plot_file, dpi=dpi)\n if not show_plot:\n plt.close()\n\n return mask_gray, gdf_buffer\n\n\n##############################################################################\ndef gdf_to_array(gdf, im_file, output_raster, burnValue=150,\n mask_burn_val_key='', compress=True, NoData_value=0,\n verbose=False):\n \"\"\"\n Create buffer around geojson for desired geojson feature, save as mask\n\n Notes\n -----\n https://gis.stackexchange.com/questions/260736/how-to-burn-a-different-value-for-each-polygon-in-a-json-file-using-gdal-rasteri/260737\n\n\n Arguments\n ---------\n image_path : gdf\n Input geojson\n im_file : str\n Path to image file corresponding to gdf.\n output_raster : str\n Output path of saved mask (should end in .tif).\n burnValue : int\n Value to burn to mask. Superceded by mask_burn_val_key.\n Defaults to ``150``.\n mask_burn_val_key : str\n Column name in gdf to use for mask burning. Supercedes burnValue.\n Defaults to ``''`` (in which case burnValue is used).\n compress : bool\n Switch to compress output raster. Defaults to ``True``.\n NoData_value : int\n Value to assign array if no data exists. If this value is <0\n (e.g. -9999), a null value will show in the image. Defaults to ``0``.\n verbose : bool\n Switch to print relevant values. 
Defaults to ``False``.\n\n Returns\n -------\n None\n \"\"\"\n\n gdata = gdal.Open(im_file)\n\n # set target info\n if compress:\n target_ds = gdal.GetDriverByName('GTiff').Create(output_raster,\n gdata.RasterXSize,\n gdata.RasterYSize, 1,\n gdal.GDT_Byte,\n ['COMPRESS=LZW'])\n else:\n target_ds = gdal.GetDriverByName('GTiff').Create(output_raster,\n gdata.RasterXSize,\n gdata.RasterYSize, 1,\n gdal.GDT_Byte)\n\n target_ds.SetGeoTransform(gdata.GetGeoTransform())\n if verbose:\n print(\"gdata.GetGeoTransform():\", gdata.GetGeoTransform())\n\n # set raster info\n raster_srs = osr.SpatialReference()\n raster_srs.ImportFromWkt(gdata.GetProjectionRef())\n target_ds.SetProjection(raster_srs.ExportToWkt())\n if verbose:\n print(\"target_ds:\", target_ds)\n\n band = target_ds.GetRasterBand(1)\n band.SetNoDataValue(NoData_value)\n\n outdriver = ogr.GetDriverByName('MEMORY')\n outDataSource = outdriver.CreateDataSource('memData')\n tmp = outdriver.Open('memData', 1)\n outLayer = outDataSource.CreateLayer(\"states_extent\", raster_srs,\n geom_type=ogr.wkbMultiPolygon)\n # burn\n burnField = \"burn\"\n idField = ogr.FieldDefn(burnField, ogr.OFTInteger)\n outLayer.CreateField(idField)\n featureDefn = outLayer.GetLayerDefn()\n for j, geomShape in enumerate(gdf['geometry'].values):\n if verbose:\n print(j, \"geomshape:\", geomShape)\n outFeature = ogr.Feature(featureDefn)\n outFeature.SetGeometry(ogr.CreateGeometryFromWkt(geomShape.wkt))\n if len(mask_burn_val_key) > 0:\n burnVal = int(gdf[mask_burn_val_key].values[j])\n if verbose:\n print(\"burnVal:\", burnVal)\n else:\n burnVal = burnValue\n outFeature.SetField(burnField, burnVal)\n outLayer.CreateFeature(outFeature)\n # if verbose:\n # print (\"outFeature:\", outFeature)\n outFeature = 0\n\n if len(mask_burn_val_key) > 0:\n gdal.RasterizeLayer(target_ds, [1], outLayer,\n options=[\"ATTRIBUTE=%s\" % burnField])\n else:\n gdal.RasterizeLayer(target_ds, [1], outLayer, burn_values=[burnVal])\n\n outLayer = 0\n outDatSource = 0\n tmp = 0\n return\n\n\n###############################################################################\ndef geojson_to_arr(image_path, geojson_path, mask_path_out_gray,\n buffer_distance_meters=2, buffer_cap_style=1,\n dissolve_by='speed_mph', mask_burn_val_key='burnValue',\n min_burn_val=0, max_burn_val=255,\n verbose=False):\n \"\"\"\n Create buffer around geojson for desired geojson feature, save as mask\n\n Arguments\n ---------\n image_path : str\n Path to input image corresponding to the geojson file.\n geojson_path : str\n Path to geojson file.\n mask_path_out_gray : str\n Output path of saved mask (should end in .tif).\n buffer_distance_meters : float\n Width of buffer around geojson lines. Formally, this is the distance\n to each geometric object. Optional. Defaults to ``2``.\n buffer_cap_style : int\n Cap_style of buffer, see: (https://shapely.readthedocs.io/en/stable/manual.html#constructive-methods)\n Defaults to ``1`` (round).\n dissolve_by : str\n Method for differentiating rows in geodataframe, and creating unique\n mask values. Defaults to ``'speed_m/s'``.\n mask_burn_value : str\n Column to name burn value in geodataframe. Defaults to ``'burnValue'``.\n min_burn_val : int\n Minimum value to burn to mask. Rescale all values linearly with this\n minimum value. If <= 0, ignore. Defaultst to ``0``.\n max_burn_val : int\n Maximum value to burn to mask. Rescale all values linearly with this\n maxiumum value. If <= 0, ignore. Defaultst to ``256``.\n verbose : bool\n Switch to print relevant values. 
Defaults to ``False``.\n\n Returns\n -------\n gdf_buffer : geopandas dataframe\n Dataframe created from geojson\n \"\"\"\n\n # get gdf_buffer\n try:\n inGDF = gpd.read_file(geojson_path)\n except TypeError:\n print(\"Empty mask for path:\", geojson_path)\n # create emty mask\n h, w = cv2.imread(image_path, 0).shape[:2]\n mask_gray = np.zeros((h, w)).astype(np.uint8)\n skimage.io.imsave(mask_path_out_gray, mask_gray)\n # cv2.imwrite(mask_path_out, mask_gray)\n return []\n\n gdf_buffer = create_buffer_geopandas(\n inGDF, buffer_distance_meters=buffer_distance_meters,\n buffer_cap_style=buffer_cap_style, dissolve_by=dissolve_by,\n projectToUTM=False, verbose=verbose)\n\n if verbose:\n print(\"gdf_buffer.columns:\", gdf_buffer.columns)\n print(\"gdf_buffer:\", gdf_buffer)\n\n # set burn values\n burn_vals_raw = gdf_buffer[dissolve_by].values.astype(float)\n if verbose:\n print(\"burn_vals_raw:\", burn_vals_raw)\n if (max_burn_val > 0) and (min_burn_val >= 0):\n scale_mult = (max_burn_val - min_burn_val) / np.max(burn_vals_raw)\n # scale_mult = max_burn_val / np.max(burn_vals_raw)\n burn_vals = min_burn_val + scale_mult * burn_vals_raw\n else:\n burn_vals = burn_vals_raw\n if verbose:\n print(\"np.unique burn_vals:\", np.sort(np.unique(burn_vals)))\n gdf_buffer[mask_burn_val_key] = burn_vals\n\n # create mask\n gdf_to_array(gdf_buffer, image_path, mask_path_out_gray,\n mask_burn_val_key=mask_burn_val_key,\n verbose=verbose)\n\n return gdf_buffer\n\n\n###############################################################################\ndef _create_speed_arr(image_path, geojson_path, mask_path_out_gray,\n bin_conversion_func, mask_burn_val_key='burnValue',\n buffer_distance_meters=2, buffer_cap_style=1,\n dissolve_by='speed_m/s', bin_conversion_key='speed_mph',\n verbose=False):\n '''\n Similar to create_arr_from_geojson()\n Create buffer around geojson for speeds, use bin_conversion_func to\n assign values to the mask\n '''\n\n # get gdf_buffer\n try:\n inGDF = gpd.read_file(geojson_path)\n except:\n print(\"Empty mask for path:\", geojson_path)\n # create emty mask\n h, w = cv2.imread(image_path, 0).shape[:2]\n mask_gray = np.zeros((h, w)).astype(np.uint8)\n skimage.io.imsave(mask_path_out_gray, mask_gray)\n # cv2.imwrite(mask_path_out, mask_gray)\n return []\n\n gdf_buffer = create_buffer_geopandas(\n inGDF, buffer_distance_meters=buffer_distance_meters,\n buffer_cap_style=buffer_cap_style, dissolve_by=dissolve_by,\n projectToUTM=True, verbose=verbose)\n\n # set burn values\n speed_arr = gdf_buffer[bin_conversion_key].values\n burnVals = [bin_conversion_func(s) for s in speed_arr]\n gdf_buffer[mask_burn_val_key] = burnVals\n\n # create mask\n gdf_to_array(gdf_buffer, image_path, mask_path_out_gray,\n mask_burn_val_key=mask_burn_val_key, verbose=verbose)\n\n return gdf_buffer\n\n\n###############################################################################\ndef create_speed_gdf_v0(image_path, geojson_path, mask_path_out_gray,\n bin_conversion_func, mask_burn_val_key='burnValue',\n buffer_distance_meters=2, buffer_cap_style=1,\n dissolve_by='speed_m/s', bin_conversion_key='speed_mph',\n verbose=False):\n '''\n Create buffer around geojson for speeds, use bin_conversion_func to\n assign values to the mask\n '''\n\n # get gdf_buffer\n try:\n inGDF = gpd.read_file(geojson_path)\n except:\n print(\"Empty mask for path:\", geojson_path)\n # create emty mask\n h, w = cv2.imread(image_path, 0).shape[:2]\n mask_gray = np.zeros((h, w)).astype(np.uint8)\n skimage.io.imsave(mask_path_out_gray, mask_gray)\n 
# cv2.imwrite(mask_path_out, mask_gray)\n return []\n\n # project\n projGDF = osmnx_funcs.project_gdf(inGDF)\n if verbose:\n print(\"inGDF.columns:\", inGDF.columns)\n\n gdf_utm_buffer = projGDF.copy()\n # perform Buffer to produce polygons from Line Segments\n gdf_utm_buffer['geometry'] = gdf_utm_buffer.buffer(buffer_distance_meters,\n buffer_cap_style)\n gdf_utm_dissolve = gdf_utm_buffer.dissolve(by=dissolve_by)\n gdf_utm_dissolve.crs = gdf_utm_buffer.crs\n gdf_buffer = gdf_utm_dissolve.to_crs(inGDF.crs)\n if verbose:\n print(\"gdf_buffer['geometry'].values[0]:\",\n gdf_buffer['geometry'].values[0])\n\n # set burn values\n speed_arr = gdf_buffer[bin_conversion_key].values\n burnVals = [bin_conversion_func(s) for s in speed_arr]\n gdf_buffer[mask_burn_val_key] = burnVals\n\n # create mask\n gdf_to_array(gdf_buffer, image_path, mask_path_out_gray,\n mask_burn_val_key=mask_burn_val_key, verbose=verbose)\n\n return gdf_buffer\n\n\n###############################################################################\ndef convert_array_to_multichannel(in_arr, n_channels=7, burnValue=255,\n append_total_band=False, verbose=False):\n '''Take input array with multiple values, and make each value a unique\n channel. Assume a zero value is background, while value of 1 is the \n first channel, 2 the second channel, etc.'''\n\n h, w = in_arr.shape[:2]\n # scikit image wants it in this format by default\n out_arr = np.zeros((n_channels, h, w), dtype=np.uint8)\n #out_arr = np.zeros((h,w,n_channels), dtype=np.uint8)\n\n for band in range(n_channels):\n val = band + 1\n band_out = np.zeros((h, w), dtype=np.uint8)\n if verbose:\n print(\"band:\", band)\n band_arr_bool = np.where(in_arr == val)\n band_out[band_arr_bool] = burnValue\n out_arr[band, :, :] = band_out\n #out_arr[:,:,band] = band_out\n\n if append_total_band:\n tot_band = np.zeros((h, w), dtype=np.uint8)\n band_arr_bool = np.where(in_arr > 0)\n tot_band[band_arr_bool] = burnValue\n tot_band = tot_band.reshape(1, h, w)\n out_arr = np.concatenate((out_arr, tot_band), axis=0).astype(np.uint8)\n\n if verbose:\n print(\"out_arr.shape:\", out_arr.shape)\n return out_arr\n\n\n# Helper Functions\n###############################################################################\ndef CreateMultiBandGeoTiff(OutPath, Array):\n '''\n Author: Jake Shermeyer\n Array has shape:\n Channels, Y, X?\n '''\n driver = gdal.GetDriverByName('GTiff')\n DataSet = driver.Create(OutPath, Array.shape[2], Array.shape[1],\n Array.shape[0], gdal.GDT_Byte,\n ['COMPRESS=LZW'])\n for i, image in enumerate(Array, 1):\n DataSet.GetRasterBand(i).WriteArray(image)\n del DataSet\n\n return OutPath\n\n\n###############################################################################\ndef geomGeo2geomPixel(geom, affineObject=[], input_raster='',\n gdal_geomTransform=[]):\n '''spacenet utilities v3 geotools.py'''\n # This function transforms a shapely geometry in geospatial coordinates into pixel coordinates\n # geom must be shapely geometry\n # affineObject = rasterio.open(input_raster).affine\n # gdal_geomTransform = gdal.Open(input_raster).GetGeoTransform()\n # input_raster is path to raster to gather georectifcation information\n if not affineObject:\n if input_raster != '':\n affineObject = rio.open(input_raster).transform\n elif gdal_geomTransform != []:\n affineObject = af.Affine.from_gdal(gdal_geomTransform)\n else:\n return geom\n\n affineObjectInv = ~affineObject\n\n geomTransform = shapely.affinity.affine_transform(geom,\n [affineObjectInv.a,\n affineObjectInv.b,\n 
affineObjectInv.d,\n                                                       affineObjectInv.e,\n                                                       affineObjectInv.xoff,\n                                                       affineObjectInv.yoff]\n                                                      )\n\n    return geomTransform\n\n\n###############################################################################\ndef geomPixel2geomGeo(geom, affineObject=[], input_raster='', gdal_geomTransform=[]):\n    '''spacenet utilities v3 geotools.py'''\n    # This function transforms a shapely geometry in pixel coordinates into geospatial coordinates\n    # geom must be shapely geometry\n    # affineObject = rasterio.open(input_raster).affine\n    # gdal_geomTransform = gdal.Open(input_raster).GetGeoTransform()\n    # input_raster is path to raster to gather georectification information\n    if not affineObject:\n        if input_raster != '':\n            affineObject = rio.open(input_raster).transform\n        elif gdal_geomTransform != []:\n            affineObject = af.Affine.from_gdal(gdal_geomTransform)\n        else:\n            return geom\n\n    geomTransform = shapely.affinity.affine_transform(geom,\n                                                      [affineObject.a,\n                                                       affineObject.b,\n                                                       affineObject.d,\n                                                       affineObject.e,\n                                                       affineObject.xoff,\n                                                       affineObject.yoff]\n                                                      )\n\n    return geomTransform\n\n\n###############################################################################\n# Haversine formula example in Python\n# Author: Wayne Dyck\n# def distance_haversine(lat1, lon1, lat2, lon2, earth_radius_km=6371):\n#     #lat1, lon1 = origin\n#     #lat2, lon2 = destination\n#\n#     dlat = math.radians(lat2-lat1)\n#     dlon = math.radians(lon2-lon1)\n#     a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(lat1)) \\\n#         * math.cos(math.radians(lat2)) * math.sin(dlon/2) * math.sin(dlon/2)\n#     c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))\n#     d = earth_radius_km * c\n#\n#     return d\n\n\n###############################################################################\ndef _haversine(lon1, lat1, lon2, lat2):\n    \"\"\"\n    Calculate the great circle distance between two points in m\n    on the earth (specified in decimal degrees)\n    http://stackoverflow.com/questions/15736995/how-can-i-\n    quickly-estimate-the-distance-between-two-latitude-longitude-points\n    \"\"\"\n    # convert decimal degrees to radians\n    lon1, lat1, lon2, lat2 = list(map(radians, [lon1, lat1, lon2, lat2]))\n    # haversine formula\n    dlon = lon2 - lon1\n    dlat = lat2 - lat1\n    a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n    c = 2 * asin(sqrt(a))\n    km = 6367 * c\n    m = 1000. 
* km\n    return m\n\n###############################################################################\n\n\ndef get_gsd(im_test_file):\n    '''return gsd in meters'''\n    srcImage = gdal.Open(im_test_file)\n    geoTrans = srcImage.GetGeoTransform()\n    ulX = geoTrans[0]\n    ulY = geoTrans[3]\n    # xDist = geoTrans[1]\n    yDist = geoTrans[5]\n    # rtnX = geoTrans[2]\n    # rtnY = geoTrans[4]\n\n    # get haversine distance\n    # dx = _haversine(ulX, ulY, ulX+xDist, ulY)  #haversine(lon1, lat1, lon2, lat2)\n    # haversine(lon1, lat1, lon2, lat2)\n    dy = _haversine(ulX, ulY, ulX, ulY+yDist)\n\n    return dy  # dx\n\n\n###############################################################################\ndef get_extent(srcFileImage):\n    gdata = gdal.Open(srcFileImage)\n    geo = gdata.GetGeoTransform()\n    # data = gdata.ReadAsArray()\n\n    xres = geo[1]\n    yres = geo[5]\n    # xmin = geo[0]\n    # xmax = geo[0] + (xres * gdata.RasterXSize)\n    # ymin = geo[3] + (yres * gdata.RasterYSize)\n    # ymax = geo[3]\n    xmin = geo[0] + xres * 0.5\n    xmax = geo[0] + (xres * gdata.RasterXSize) - xres * 0.5\n    ymin = geo[3] + (yres * gdata.RasterYSize) + yres * 0.5\n    ymax = geo[3] - yres * 0.5\n\n    return xmin, ymin, xmax, ymax\n\n\n###############################################################################\ndef get_pixel_dist_from_meters(im_test_file, len_meters):\n    '''For the input image, we want a buffer or other distance in meters,\n    this function determines the pixel distance by calculating the GSD'''\n    gsd = get_gsd(im_test_file)\n    pix_width = max(1, np.rint(len_meters/gsd))\n\n    return gsd, pix_width\n\n\n###############################################################################\ndef get_unique(seq, idfun=None):\n    '''https://www.peterbe.com/plog/uniqifiers-benchmark'''\n    # order preserving\n    if idfun is None:\n        def idfun(x): return x\n    seen = {}\n    result = []\n    for item in seq:\n        marker = idfun(item)\n        # in old Python versions:\n        # if seen.has_key(marker)\n        # but in new ones:\n        if marker in seen:\n            continue\n        seen[marker] = 1\n        result.append(item)\n    return result\n\n\n###############################################################################\ndef _get_node_positions(G_, x_coord='x', y_coord='y'):\n    '''Get position array for all nodes'''\n    nrows = len(G_.nodes())\n    ncols = 2\n    arr = np.zeros((nrows, ncols))\n    # populate node array\n    for i, n in enumerate(G_.nodes()):\n        n_props = G_.nodes[n]\n        x, y = n_props[x_coord], n_props[y_coord]\n        arr[i] = [x, y]\n    return arr\n" ]
[ [ "numpy.rint", "numpy.zeros", "matplotlib.pyplot.savefig", "numpy.concatenate", "numpy.asarray", "matplotlib.pyplot.subplots", "numpy.max", "numpy.min", "matplotlib.pyplot.close", "numpy.round", "numpy.where", "numpy.unique" ] ]
allydunham/sequence_unet
[ "e0d3d6b73ad79c596130ed6e1a58b41a4ad7e299" ]
[ "models/classifier/regularisation.py" ]
[ "\"\"\"\nExperiment testing various regularisations on the Sequence UNET model\n\"\"\"\nimport os\nimport sys\n\nimport utils\nfrom tensorflow.keras import optimizers\n\nfrom proteinnetpy.data import ProteinNetDataset, ProteinNetMap\nfrom proteinnetpy.data import make_length_filter\n\nimport metrics\nimport pn_maps\nfrom seq_unet import sequence_unet\n\ndef load_data(validation=False):\n \"\"\"\n Input data for PSSM top model\n \"\"\"\n if validation:\n pn_path = 'data/proteinnet/casp12/validation'\n else:\n pn_path = 'data/proteinnet/casp12/training_95'\n\n filter_func = make_length_filter(min_length=32, max_length=2000)\n data = ProteinNetDataset(path=pn_path, preload=False, filter_func=filter_func)\n func = pn_maps.SequenceUNETMapFunction(num_layers=6, threshold=0.01)\n return ProteinNetMap(data, func=func, static=True, filter_errors=True)\n\ndef main():\n \"\"\"Main script\"\"\"\n root = 'models/classifier/regularisation'\n if not os.path.isdir(root):\n os.mkdir(root)\n\n # dropout, kernel, batch\n regularisation = (\n (0, None, False),\n (0.05, None, False),\n (0.1, None, False),\n (0, \"l2\", False),\n (0, None, True),\n (0.05, \"l2\", False),\n (0.05, None, True),\n (0.05, \"l2\", True),\n (0, \"l2\", True),\n )\n\n for dropout, kernel, batch in regularisation:\n model_dir = f\"{root}/d{dropout}_{kernel}_{batch}\"\n\n if os.path.isdir(model_dir):\n print(f\"Model {model_dir} already exists, skipping\", file=sys.stderr)\n continue\n\n model = sequence_unet(filters=32, kernel_size=5, num_layers=6, dropout=dropout,\n kernel_regulariser=kernel, batch_normalisation=batch)\n\n optimiser = optimizers.Adam(lr=0.01, epsilon=0.01)\n loss = metrics.masked_binary_crossentropy\n acc = metrics.masked_accuracy\n model.compile(optimizer=optimiser, loss=loss, metrics=[acc])\n\n # Create sample train script\n command = utils.model_bsub(f\"reg_d{dropout}_{kernel}_{batch}\", model_dir,\n ram=10000, epochs=150, validation_epochs=1,\n checkpoint=None, big_job=True, save_format='tf')\n\n # Use this to setup a model directory for the experiment(s)\n utils.make_experiment_dir(model, model_dir, load_data, command, save_format='tf')\n\nif __name__ == \"__main__\":\n # No argparse as these scripts serve as the config for experiments\n main()\n" ]
[ [ "tensorflow.keras.optimizers.Adam" ] ]
eragasa/pypospack
[ "21cdecaf3b05c87acc532d992be2c04d85bfbc22" ]
[ "tests/pyposmat/visualization/Pyposmat3DScatterWithProjections/dev__contours.py" ]
[ "from mpl_toolkits.mplot3d import axes3d\nimport matplotlib.pyplot as plt\nimport numpy as np\nplt.clf()\nfig = plt.figure(1)\nax = fig.gca(projection='3d')\nX, Y, Z = axes3d.get_test_data(0.05)\nax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3)\ncset = ax.contourf(X, Y, Z, zdir='z', offset=-100,\n                   levels=np.linspace(-100, 100, 1200), cmap=plt.cm.jet)\ncset = ax.contourf(X, Y, Z, zdir='x', offset=-40, cmap=plt.cm.jet)\ncset = ax.contourf(X, Y, Z, zdir='y', offset=40, cmap=plt.cm.jet)\nax.set_xlabel('X')\nax.set_xlim(-40, 40)\nax.set_ylabel('Y')\nax.set_ylim(-40, 40)\nax.set_zlabel('Z')\nax.set_zlim(-100, 100)\n\nfig.savefig('withcontours.eps')\n\n" ]
[ [ "matplotlib.pyplot.figure", "numpy.linspace", "matplotlib.pyplot.clf" ] ]
eugene-yang/libact
[ "d86b7b850560138defb7be51986bfafd3d45f81b" ]
[ "libact/query_strategies/multiclass/hierarchical_sampling.py" ]
[ "\"\"\" Hierarchical Sampling for Active Learning (HS)\n\nThis module contains a class that implements Hierarchical Sampling for Active\nLearning (HS).\n\n\"\"\"\nfrom __future__ import division\n\nimport numpy as np\nfrom sklearn.cluster import AgglomerativeClustering\n\nfrom libact.base.interfaces import QueryStrategy\nfrom libact.utils import inherit_docstring_from, seed_random_state, zip\n\nNO_NODE = -1\nNO_LABEL = -1\n\n\nclass HierarchicalSampling(QueryStrategy):\n\n    \"\"\"Hierarchical Sampling for Active Learning (HS)\n\n    HS is an active learning scheme that exploits cluster structure in data.\n    The original C++ implementation by the authors can be found at:\n    http://www.cs.columbia.edu/~djhsu/code/HS.tar.gz\n\n    Parameters\n    ----------\n    classes: list\n        List of distinct classes in data.\n\n    active_selecting: {True, False}, optional (default=True)\n        False (random selecting): sample weight of a pruning is its number of\n        unseen leaves.\n        True (active selecting): sample weight of a pruning is its weighted\n        error bound.\n\n    subsample_qs: {:py:class:`libact.base.interfaces.query_strategies`, None}, optional (default=None)\n        Subsample query strategy used to sample a node in the selected pruning.\n        RandomSampling is used if None.\n\n    random_state : {int, np.random.RandomState instance, None}, optional (default=None)\n        If int or None, random_state is passed as parameter to generate\n        np.random.RandomState instance. if np.random.RandomState instance,\n        random_state is the random number generator.\n\n    Attributes\n    ----------\n    m : int\n        number of nodes\n\n    classes: list\n        List of distinct classes in data.\n\n    n : int\n        number of leaf nodes\n\n    num_class : int\n        number of classes\n\n    parent : np.array instance, shape = (m)\n        parent indices\n\n    left_child : np.array instance, shape = (m)\n        left child indices\n\n    right_child : np.array instance, shape = (m)\n        right child indices\n\n    size : np.array instance, shape = (m)\n        number of leaves in subtree\n\n    depth : np.array instance, shape = (m)\n        maximum depth in subtree\n\n    count : np.array instance, shape = (m, num_class)\n        node class label counts\n\n    total : np.array instance, shape = (m)\n        total node class labels seen (total[i] = Sum_j count[i][j])\n\n    lower_bound : np.array instance, shape = (m, num_class)\n        lower bounds on true node class label counts\n\n    upper_bound : np.array instance, shape = (m, num_class)\n        upper bounds on true node class label counts\n\n    admissible: np.array instance, shape = (m, num_class)\n        flag indicating if (node,label) is admissible\n\n    best_label: np.array instance, shape = (m)\n        best admissible label\n\n    random_state\\_ : np.random.RandomState instance\n        The random number generator being used.\n\n\n    Examples\n    --------\n    Here is an example of declaring a HierarchicalSampling query_strategy\n    object:\n\n    .. code-block:: python\n\n       from libact.query_strategies import UncertaintySampling\n       from libact.query_strategies.multiclass import HierarchicalSampling\n\n       sub_qs = UncertaintySampling(\n           dataset, method='sm', model=SVM(decision_function_shape='ovr'))\n\n       qs = HierarchicalSampling(\n           dataset, # Dataset object\n           dataset.get_num_of_labels(),\n           active_selecting=True,\n           subsample_qs=sub_qs\n       )\n\n\n    References\n    ----------\n\n    .. [1] Sanjoy Dasgupta and Daniel Hsu. 
\"Hierarchical sampling for active\n       learning.\" ICML 2008.\n    \"\"\"\n\n    def __init__(self, dataset, classes, active_selecting=True,\n            subsample_qs=None, random_state=None):\n        super(HierarchicalSampling, self).__init__(dataset)\n        X = np.array(next(zip(*self.dataset.get_entries())))\n        cluster = AgglomerativeClustering()\n        cluster.fit(X)\n        childrens = cluster.children_\n\n        if subsample_qs is not None:\n            if not isinstance(subsample_qs, QueryStrategy):\n                raise TypeError(\"subsample_qs has to be a QueryStrategy\")\n            self.sub_qs = subsample_qs\n        else:\n            self.sub_qs = None\n\n        self.active_selecting = active_selecting\n        self.random_state_ = seed_random_state(random_state)\n        self.n = len(childrens) + 1\n        self.m = self.n * 2 - 1\n        self.num_class = len(classes)\n        self.classes = list(classes)\n        self.class_id = dict(zip(self.classes, range(self.num_class)))\n\n        self.parent = np.full(self.m, NO_NODE, dtype=int)\n        self.size = np.zeros(self.m, dtype=int)\n        self.depth = np.zeros(self.m, dtype=int)\n        for i, (left_child, right_child) in enumerate(childrens):\n            parent = i + self.n\n            self.parent[left_child] = parent\n            self.parent[right_child] = parent\n        self.left_child = np.concatenate([np.full(self.n, NO_NODE), childrens[:,0]]).astype(int)\n        self.right_child = np.concatenate([np.full(self.n, NO_NODE), childrens[:,1]]).astype(int)\n\n        for i in range(self.n):\n            node = i\n            cur_depth = 0\n            while node != NO_NODE:\n                assert node >= 0 and node < self.m\n                self.size[node] += 1\n                self.depth[node] = max(self.depth[node], cur_depth)\n                cur_depth += 1\n                node = self.parent[node]\n\n        self.count = np.zeros((self.m, self.num_class), dtype=int)\n        self.total = np.zeros(self.m, dtype=int)\n        self.upper_bound = np.ones((self.m, self.num_class), dtype=float)\n        self.lower_bound = np.zeros((self.m, self.num_class), dtype=float)\n        self.admissible = np.zeros((self.m, self.num_class), dtype=bool)\n        self.best_label = np.full(self.m, NO_LABEL, dtype=int)\n        self.split = np.zeros(self.m, dtype=bool)\n        self.cost = self.size.copy()\n\n        self.prunings = [self.m-1]\n\n        for i, entry in enumerate(self.dataset.data):\n            if entry[1] is not None:\n                self.update(i, entry[1])\n\n    @inherit_docstring_from(QueryStrategy)\n    def update(self, entry_id, label):\n        if label not in self.class_id:\n            raise ValueError(\n                'Unknown class of entry %d: %s, expected: %s' %\n                (entry_id, label, list(self.class_id.keys()))\n            )\n        class_id = self.class_id[label]\n        root_pruning = self._find_root_pruning(entry_id)\n        self._update(entry_id, class_id, root_pruning)\n        self._prune_node(root_pruning)\n\n    @inherit_docstring_from(QueryStrategy)\n    def make_query(self):\n        pruning = self._select_pruning()\n        if self.sub_qs is None:\n            ask_id = int(self._sample_node(pruning))\n        else:\n            _, scores = self.sub_qs.make_query(return_score=True)\n            leaves = set(self._find_leaves(pruning))\n            leaf_scores = [(score, node) for node, score in scores if node in leaves]\n            ask_id = max(leaf_scores)[1]\n        return ask_id\n\n    def report_entry_label(self, entry_id):\n        \"\"\"\n        Return the best label of the asked entry.\n\n        Parameters\n        ----------\n        entry_id : int\n            The index of the sample to ask.\n\n        Returns\n        -------\n        label: object\n            The best label of the given sample.\n        \"\"\"\n\n        pruning = self._find_root_pruning(entry_id)\n        return self.classes[self._best_label(pruning)]\n\n    def report_all_label(self):\n        \"\"\"\n        Return the best labels of all entries.\n\n        Parameters\n        ----------\n\n        Returns\n        -------\n        labels: list of object, shape=(m)\n            The best label of all samples.\n        \"\"\"\n\n        labels = 
np.empty(len(self.dataset), dtype=int)\n        for pruning in self.prunings:\n            best_label = self._best_label(pruning)\n            leaves = self._find_leaves(pruning)\n            labels[leaves] = best_label\n        return labels\n\n    def _best_label(self, pruning):\n        if self.best_label[pruning] != NO_LABEL:\n            return self.best_label[pruning]\n        if self.parent[pruning] != NO_NODE:\n            return self.best_label[self.parent[pruning]]\n        return 0 # default label is 0 if no admissible label for root\n\n    def _find_root_pruning(self, entry_id):\n        node = entry_id\n        while node != NO_NODE and node not in self.prunings:\n            node = self.parent[node]\n        return node\n\n    def _find_leaves(self, node):\n        if node == NO_NODE:\n            return []\n        if self.size[node] == 1:\n            return [node]\n        return (self._find_leaves(self.left_child[node]) +\n                self._find_leaves(self.right_child[node]))\n\n    def _select_pruning(self):\n        if self.active_selecting:\n            sample_weight = []\n            for pruning in self.prunings:\n                best_label = self.best_label[pruning]\n                if best_label == NO_LABEL:\n                    w = self.size[pruning]\n                else:\n                    w = self.size[pruning] - self.lower_bound[pruning][best_label]\n                sample_weight.append(w)\n        else:\n            sample_weight = self.size[self.prunings] - self.total[self.prunings]\n        sample_weight = sample_weight / sum(sample_weight)\n        return self.random_state_.choice(self.prunings, p=sample_weight)\n\n    def _sample_node(self, node):\n        num_unseen_leaves = self.size[node] - self.total[node]\n        if num_unseen_leaves == 0:\n            return NO_NODE\n        if self.size[node] == 1:\n            return node\n        assert self.left_child[node] != NO_NODE and self.right_child[node] != NO_NODE\n        p_left = (self.size[self.left_child[node]] - self.total[self.left_child[node]]) / num_unseen_leaves\n        if self.random_state_.rand() < p_left:\n            return self._sample_node(self.left_child[node])\n        else:\n            return self._sample_node(self.right_child[node])\n\n    def _update(self, entry_id, label, root_pruning):\n        node = entry_id\n        while node != NO_NODE:\n            self.count[node, label] += 1\n            self.total[node] += 1\n            assert self.total[node] <= self.size[node]\n\n            for l in range(self.num_class):\n                frac = self.count[node, l] / self.total[node]\n                delta = self._get_delta(frac, node)\n                mean = frac * self.size[node]\n                err = delta * self.size[node]\n                self.lower_bound[node][l] = max(self.count[node][l], mean - err)\n                self.upper_bound[node][l] = min(self.size[node] - (self.total[node] - self.count[node, l]), mean + err)\n\n            max_count = 0\n            for l in range(self.num_class):\n                self.admissible[node, l] = True\n                for k in range(self.num_class):\n                    if l != k and self.lower_bound[node, l] <= 2 * self.upper_bound[node, k] - self.size[node]:\n                        self.admissible[node, l] = False\n                if self.admissible[node, l] and self.count[node, l] > max_count:\n                    max_count = self.count[node, l]\n                    self.best_label[node] = l\n\n            if self.best_label[node] != NO_LABEL:\n                basic_cost = self.size[node] - self.lower_bound[node][self.best_label[node]]\n            else:\n                basic_cost = self.size[node]\n\n            if self.size[node] == 1:\n                self.cost[node] = basic_cost\n            else:\n                split_cost = self.cost[self.left_child[node]] + self.cost[self.right_child[node]]\n                if split_cost < basic_cost and self.best_label[node] != NO_LABEL:\n                    self.cost[node] = split_cost\n                    self.split[node] = True\n                else:\n                    self.cost[node] = basic_cost\n\n            if node != root_pruning:\n                node = self.parent[node]\n            else:\n                break\n\n    def _prune_node(self, root_pruning):\n        self.prunings.remove(root_pruning)\n        node_set = [root_pruning]\n        while len(node_set) > 0:\n            node = node_set.pop()\n            if self.split[node]:\n                node_set.append(self.left_child[node])\n                
node_set.append(self.right_child[node])\n else:\n self.prunings.append(node)\n\n def _get_delta(self, frac, node):\n fs_corr = 1.0 - self.total[node] / self.size[node]\n return fs_corr / self.total[node] + \\\n np.sqrt(fs_corr * frac * (1. - frac) / self.total[node])\n" ]
[ [ "numpy.ones", "numpy.zeros", "numpy.sqrt", "numpy.full", "sklearn.cluster.AgglomerativeClustering" ] ]
saltastroops/imephu
[ "0c302a73d01fe3ad018e7adf4b91e0beaecc6709" ]
[ "tests/conftest.py" ]
[ "\"\"\"pytest configuration.\"\"\"\nimport io\nimport pathlib\nimport time\nfrom unittest import mock\n\nimport numpy as np\nimport pytest\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\nfrom typer.testing import CliRunner\n\nimport imephu\nfrom imephu.annotation.general import TextAnnotation\nfrom imephu.cli import app\nfrom imephu.salt.finder_chart import FinderChart\n\nrunner = CliRunner()\n\n\[email protected](autouse=True)\ndef no_http_requests(monkeypatch):\n \"\"\"Prevent any real HTTP requests.\n\n Taken (with wording slightly adapted) from\n https://blog.jerrycodes.com/no-http-requests/.\n \"\"\"\n\n def urlopen_mock(self, method, url, *args, **kwargs):\n raise RuntimeError(\n f\"The test was about to make a {method} request to \"\n f\"{self.scheme}://{self.host}{url}\"\n )\n\n monkeypatch.setattr(\n \"urllib3.connectionpool.HTTPConnectionPool.urlopen\", urlopen_mock\n )\n\n\[email protected]()\ndef check_finder(file_regression):\n \"\"\"\n Return a function for checking finder charts.\n\n The finder chart is saved as a png, and the png is compared against a previously\n saved version. If no version exists already, the file is saved and the test fails.\n The saved file should be put under version control.\n\n If the saved png and the previously saved version differ, the test fails.\n\n In case you need to update the saved files, run ``pytest`` with the\n ``--force-regen`` flag.\n\n Parameters\n ----------\n file_regression: file regression fixture\n The file regression fixture from the pytest-regressions plugin.\n\n Returns\n -------\n function\n The function for checking a finder chart.\n \"\"\"\n\n def _check_finder(finder_chart):\n np.random.seed(0)\n try:\n contents = io.BytesIO()\n finder_chart.save(contents, format=\"png\")\n file_regression.check(contents.getvalue(), binary=True, extension=\".png\")\n finally:\n np.random.seed()\n\n return _check_finder\n\n\[email protected]()\ndef check_cli(fits_file, tmp_path_factory, file_regression):\n \"\"\"\n Return a function for checking the command line interface.\n\n Parameters\n ----------\n tmp_path_factory: fixture for creating a temporary directory\n Temporary directory.\n file_regression: fixture for regression checking\n Fixture for file regression checking.\n\n Returns\n -------\n function\n Function for checking the command line interface.\n \"\"\"\n\n def _check_cli(\n instrument_yaml,\n fits_source_yaml=\"fits-source:\\n image-survey: POSS2/UKSTU Red\",\n ):\n configuration = f\"\"\"\\\n{fits_source_yaml}\ntelescope: SALT\npi-family-name: Doe\nproposal-code: 2022-1-SCI-042\nposition-angle: 30d\ntarget:\n name: Magrathea\n ra: 0h 40m 00s\n dec: -60d\n magnitude-range:\n bandpass: V\n minimum: 17\n maximum: 17.3\n{instrument_yaml}\n\"\"\"\n np.random.seed(0)\n try:\n tmp = tmp_path_factory.mktemp(f\"finder-chart-{time.time_ns()}\")\n config = tmp / \"config.yaml\"\n config.write_text(configuration)\n output = tmp / \"finder_chart.png\"\n with mock.patch.object(\n imephu.cli, \"load_fits\", autospec=True\n ) as mock_load_fits:\n fits = fits_file.read_bytes()\n mock_load_fits.return_value = io.BytesIO(fits)\n runner.invoke(app, [\"--config\", config, \"--out\", output])\n finder_chart = output.read_bytes()\n file_regression.check(finder_chart, binary=True, extension=\".png\")\n finally:\n np.random.seed()\n\n return _check_cli\n\n\[email protected]()\ndef fits_file():\n \"\"\"\n Return the path of an example FITS file.\n\n The FITS file whose path is returned shows a 10 arcsecond by 10 arcsecond 
sky area\n centered on the right ascension 10 degrees and the declination -60 degrees.\n\n Returns\n -------\n `pathlib.Path`\n The path to the example FITS file.\n \"\"\"\n return pathlib.Path(__file__).parent / \"data\" / \"ra10_dec-60.fits\"\n\n\[email protected]()\ndef fits_file2():\n \"\"\"\n Return the path of an example FITS file.\n\n The FITS file whose path is returned shows a 10 arcsecond by 10 arcsecond sky area\n centered on the right ascension 9.75 degrees and the declination -60 degrees.\n\n Returns\n -------\n `pathlib.Path`\n The path to the example FITS file.\n \"\"\"\n return pathlib.Path(__file__).parent / \"data\" / \"ra9.75_dec-60.fits\"\n\n\[email protected]()\ndef fits_center():\n \"\"\"Return the sky coordinates for the center of the example FITS file.\"\"\"\n return SkyCoord(ra=10 * u.deg, dec=-60 * u.deg)\n\n\[email protected]()\ndef fits_center2():\n \"\"\"Return the sky coordinates for the center of the example FITS file.\"\"\"\n return SkyCoord(ra=9.75 * u.deg, dec=-60 * u.deg)\n\n\[email protected]()\ndef mos_mask_xml():\n \"\"\"Return a function for generating XML describing a MOS mask.\"\"\"\n\n def _mask_xml(center, position_angle, reference_stars, slits):\n xml = f\"\"\"\\\n<?xml version=\"1.0\" ?>\n<slitmask>\n<header>\n<parameter name=\"VERSION\" value=\"1.1\" />\n<parameter name=\"PROPOSALCODE\" value=\"INDEF\" />\n<parameter name=\"MASKNUM\" value=\"0\" />\n<parameter name=\"PI\" value=\"INDEF\" />\n<parameter name=\"CREATOR\" value=\"Someone\" />\n<parameter name=\"ROTANGLE\" value=\"{position_angle.to_value(u.deg)}\" />\n<parameter name=\"CENTERRA\" value=\"{center.ra.to_value(u.deg)}\" />\n<parameter name=\"CENTERDEC\" value=\"{center.dec.to_value(u.deg)}\" />\n<parameter name=\"EQUINOX\" value=\"2000.0\" />\n<parameter name=\"NSMODE\" value=\"0\" />\n<parameter name=\"COOSYS\" value=\"RADEC\" />\n<parameter name=\"VALIDATED\" value=\"FALSE\" />\n<parameter name=\"SPECLENGTH\" value=\"12400\" />\n<parameter name=\"SPECOFFSET\" value=\"0\" />\n<parameter name=\"SPECPOLSPLIT\" value=\"0\" />\n<parameter name=\"SPECHEIGHT\" value=\"0\" />\n</header>\n\"\"\"\n id = 1\n for star in reference_stars:\n xml += f\"\"\"\n <refstar\n id=\"{id}\"\n xce=\"{star.ra.to_value(u.deg)}\"\n yce=\"{star.dec.to_value(u.deg)}\"\n radius=\"0.5\" mag=\"0.0\"\n />\"\"\"\n id += 1\n\n for slit in slits:\n xml += f\"\"\"\n <slit\n id=\"{id}\"\n xce=\"{slit.center.ra.to_value(u.deg)}\"\n yce=\"{slit.center.dec.to_value(u.deg)}\"\n width=\"{slit.width.to_value(u.arcsec)}\"\n length=\"{slit.height.to_value(u.arcsec)}\"\n tilt=\"{slit.tilt.to_value(u.deg)}\"\n priority=\"1.0\"\n mag=\"0.0\"\n />\"\"\"\n\n xml += \"</slitmask>\"\n\n return xml\n\n return _mask_xml\n\n\[email protected]()\ndef mock_from_survey(fits_file, fits_file2):\n \"\"\"Return a fixture for mocking getting a finder chart from an image survey.\n\n This fixture mocks the ``from_survey`` method of the\n `~imephu.finder_chart.FinderChart` class. The mock method always returns a finder\n chart with the FITS image of the `fits_file` fixture when called the first time and\n a finder chart with the FITS image of the `fits_file2` fixture when called the\n second time.\n\n .. 
warning::\n\n       The mock function ignores any arguments - you always get the same finder chart.\n       In particular this implies that you should always use the `fits_center` fixture\n       for the center of the FITS image when calling the function for the first time,\n       and the `fits_center2` fixture when calling it for the second time.\n    \"\"\"\n    with mock.patch.object(FinderChart, \"from_survey\", autospec=True) as mock_load_fits:\n        mock_load_fits.side_effect = [\n            FinderChart(open(fits_file, \"rb\")),\n            FinderChart(open(fits_file2, \"rb\")),\n        ]\n        yield mock_load_fits\n\n\[email protected]()\ndef legend():\n    \"\"\"Return a fixture for adding a legend to a finder chart.\"\"\"\n\n    def _legend(text, wcs):\n        return TextAnnotation(\n            SkyCoord(ra=\"00h40m36s\", dec=\"-59d55m30s\"),\n            text,\n            wcs=wcs,\n            color=\"blue\",\n            horizontalalignment=\"left\",\n        )\n\n    return _legend\n" ]
[ [ "numpy.random.seed" ] ]
dieterv77/statsmodels
[ "844381797a475a01c05a4e162592a5a6e3a48032" ]
[ "statsmodels/tsa/vector_ar/tests/example_svar.py" ]
[ "import numpy as np\nimport statsmodels.api as sm\nimport pandas as pd\n\nfrom statsmodels.tsa.vector_ar.svar_model import SVAR\n\nmdatagen = sm.datasets.macrodata.load().data\nmdata = mdatagen[['realgdp','realcons','realinv']]\nnames = mdata.dtype.names\nstart = pd.datetime(1959, 3, 31)\nend = pd.datetime(2009, 9, 30)\n#qtr = pd.DatetimeIndex(start=start, end=end, freq=pd.datetools.BQuarterEnd())\nqtr = pd.DatetimeIndex(start=start, end=end, freq='BQ-MAR')\ndata = pd.DataFrame(mdata, index=qtr)\ndata = (np.log(data)).diff().dropna()\n\n#define structural inputs\nA = np.asarray([[1, 0, 0],['E', 1, 0],['E', 'E', 1]])\nB = np.asarray([['E', 0, 0], [0, 'E', 0], [0, 0, 'E']])\nA_guess = np.asarray([0.5, 0.25, -0.38])\nB_guess = np.asarray([0.5, 0.1, 0.05])\nmymodel = SVAR(data, svar_type='AB', A=A, B=B, freq='Q')\n# use the guesses defined above as starting values for the estimation\nres = mymodel.fit(A_guess=A_guess, B_guess=B_guess, maxlags=3, maxiter=10000,\n                  maxfun=10000, solver='bfgs')\nres.irf(periods=30).plot(impulse='realgdp', plot_stderr=True,\n                         stderr_type='mc', repl=100)\n" ]
[ [ "pandas.DatetimeIndex", "pandas.datetime", "pandas.DataFrame", "numpy.asarray", "numpy.log" ] ]
JiuShiNewBee/mypyfesom2
[ "d84adad116888f83b89813e1a86ce8a233171138" ]
[ "pyfesom2/fesom_plot_tools.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# This file is part of pyfesom2\n# Original code by Dmitry Sidorenko, 2013\n#\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ntry:\n    from mpl_toolkits.basemap import Basemap\nexcept KeyError:\n    # dirty hack to avoid KeyError: 'PROJ_LIB' problem with basemap\n    import conda\n    import os\n\n    conda_file_dir = conda.__file__\n    conda_dir = conda_file_dir.split(\"lib\")[0]\n    proj_lib = os.path.join(os.path.join(conda_dir, \"share\"), \"proj\")\n    os.environ[\"PROJ_LIB\"] = proj_lib\n\n    from mpl_toolkits.basemap import Basemap\nfrom matplotlib.colors import LinearSegmentedColormap\nfrom .regriding import fesom2regular\nfrom netCDF4 import Dataset, MFDataset, num2date\nimport matplotlib as mpl\n\n# mpl.use('Qt5Agg')\n# %matplotlib inline\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeature\nfrom cmocean import cm as cmo\nfrom matplotlib import cm\nimport sys, os\n\n# sys.path.append(os.path.join(os.path.dirname(__file__), \"../\"))\n# import pyfesom2 as pf\nfrom cartopy.util import add_cyclic_point\n\n# from scipy.interpolate import griddata\n# import scipy.spatial.qhull as qhull\n# from scipy.interpolate import LinearNDInterpolator, CloughTocher2DInterpolator\n\n# import xarray as xr\nimport shapely.vectorized\nimport joblib\nfrom .transect import *\nimport matplotlib\nfrom .ut import mask_ne\n\n\ndef ftriplot(\n    mesh,\n    data2,\n    contours,\n    cmap=[],\n    oce=\"global\",\n    do_cbar=True,\n    mlabels=[0, 0, 0, 0],\n    plabels=[0, 0, 0, 0],\n    extend=\"both\",\n    data_on_elem=0,\n    blat=45,\n):\n    if cmap == []:\n        cmap = plt.cm.jet\n    if oce == \"global\":\n        data2 = np.copy(data2)\n\n        elem2 = mesh.elem[mesh.no_cyclic_elem, :]\n\n        if data_on_elem == 0:\n            d = data2[elem2].mean(axis=1)\n        else:\n            data2 = data2[mesh.no_cyclic_elem]\n            d = data2\n\n        k = [i for (i, val) in enumerate(d) if not np.isnan(val)]\n        elem2 = elem2[k, :]\n\n        if data_on_elem == 1:\n            data2 = data2[k]\n\n        print(\"ftriplot, number of dummy points:\", len(d) - len(k))\n        map = Basemap(projection=\"robin\", lon_0=0)\n        x, y = map(mesh.x2, mesh.y2)\n        map.drawmapboundary(fill_color=\"0.9\")\n        map.drawcoastlines()\n        map.drawparallels(np.arange(-90, 90, 30), labels=plabels)  # [1,0,0,0]\n        map.drawmeridians(\n            np.arange(map.lonmin, map.lonmax + 30, 60), labels=mlabels\n        )  # [0,0,0,1]\n        # data2[data2>900]=np.nan\n        eps = (contours.max() - contours.min()) / 50.0\n        data2[data2 <= contours.min()] = contours.min() + eps\n        data2[data2 >= contours.max()] = contours.max() - eps\n        if data_on_elem:\n            im = plt.tripcolor(x, y, elem2, facecolors=data2, cmap=cmap)\n        else:\n            im = plt.tricontourf(\n                x, y, elem2, data2, levels=contours, cmap=cmap, extend=extend\n            )\n        if do_cbar:\n            cbar = map.colorbar(im, \"bottom\", size=\"5%\", pad=\"2%\")\n\n    # \t\tn=642155-1\n    # \t\tn=83089-1\n    # \t\tplt.plot(x[n-1], y[n-1], markersize=10, marker='o')\n    elif oce == \"np\":\n        data2 = np.copy(data2)\n        elem2 = mesh.elem  # [mesh.no_cyclic_elem,:]\n        d = data2[elem2].mean(axis=1)\n        k = [i for (i, val) in enumerate(d) if not np.isnan(val)]\n        elem2 = elem2[k, :]\n        print(\"ftriplot, number of dummy points:\", len(d) - len(k))\n        map = Basemap(projection=\"nplaea\", boundinglat=blat, lon_0=0, resolution=\"l\")\n        x, y = map(mesh.x2, mesh.y2)\n        map.drawcoastlines()\n        map.drawparallels(np.arange(-80.0, 81.0, 20.0), labels=plabels)\n        map.drawmeridians(np.arange(-180.0, 181.0, 20.0), labels=mlabels)  # [0,1,0,0]\n        map.drawmapboundary(fill_color=\"0.9\")\n        
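# shade continents and lakes in light gray before drawing the field\n        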
map.fillcontinents(color=\".7\", lake_color=\".7\")\n # data2[data2>900]=np.nan\n eps = (contours.max() - contours.min()) / 100.0\n data2[data2 <= contours.min()] = contours.min() + eps\n data2[data2 >= contours.max()] = contours.max() - eps\n im = plt.tricontourf(\n x, y, elem2, data2, levels=contours, cmap=cmap, extend=extend\n )\n if do_cbar:\n cbar = map.colorbar(im, \"bottom\", size=\"5%\", pad=\"2%\")\n elif oce == \"sp\":\n data2 = np.copy(data2)\n elem2 = mesh.elem # [mesh.no_cyclic_elem,:]\n d = data2[elem2].mean(axis=1)\n k = [i for (i, val) in enumerate(d) if not np.isnan(val)]\n elem2 = elem2[k, :]\n print(\"ftriplot, number of dummy points:\", len(d) - len(k))\n #map = Basemap(projection=\"splaea\", boundinglat=-20, lon_0=180, resolution=\"l\")\n map = Basemap(projection='spstere',boundinglat=blat,lon_0=0,resolution='l')\n x, y = map(mesh.x2, mesh.y2)\n map.drawcoastlines()\n map.drawparallels(np.arange(-80.0, 81.0, 20.0), labels=plabels)\n map.drawmeridians(np.arange(-180.0, 181.0, 20.0), labels=mlabels)\n map.drawmapboundary(fill_color=\"0.9\")\n map.fillcontinents(color=\".7\", lake_color=\".7\")\n # data2[data2>900]=np.nan\n eps = (contours.max() - contours.min()) / 100.0\n data2[data2 <= contours.min()] = contours.min() + eps\n data2[data2 >= contours.max()] = contours.max() - eps\n im = plt.tricontourf(\n #x, y, elem2, data2, levels=contours, cmap=cmap, extend=extend\n x, y, data2, levels=contours, cmap=cmap, extend=extend\n )\n if do_cbar:\n cbar = map.colorbar(im, \"bottom\", size=\"5%\", pad=\"2%\")\n return (im, map, cbar if (do_cbar) else False)\n\n\ndef wplot_xy(xx, yy, zz, contours, cmap=[], do_cbar=True, oce=\"global\"):\n import numpy as np\n import matplotlib.pyplot as plt\n from mpl_toolkits.basemap import Basemap\n from matplotlib.colors import LinearSegmentedColormap\n\n if cmap == []:\n cmap = plt.cm.jet\n eps = (contours.max() - contours.min()) / 100.0\n zz[zz <= contours.min()] = contours.min() + eps\n zz[zz >= contours.max()] = contours.max() - eps\n\n if oce == \"global\":\n\n map = Basemap(projection=\"robin\", lon_0=0, llcrnrlon=-180.0, urcrnrlon=180.0)\n xxx, yyy = map(xx, yy)\n\n map.drawmapboundary(fill_color=\"0.9\")\n map.drawcoastlines()\n map.fillcontinents(color=\".7\", lake_color=\".7\")\n map.drawparallels(np.arange(-90, 90, 45), labels=[1, 0, 0, 0])\n map.drawmeridians([-120.0, 0.0, 120.0], labels=[0, 0, 0, 1])\n im = plt.contourf(xxx, yyy, zz, levels=contours, cmap=cmap, extend=\"both\")\n if do_cbar:\n cbar = map.colorbar(im, \"bottom\", size=\"5%\", pad=\"2%\")\n return (im, map, cbar)\n else:\n return (im, map)\n elif oce == \"np\":\n map = Basemap(projection=\"nplaea\", boundinglat=45, lon_0=0, resolution=\"l\")\n xxx, yyy = map(xx, yy)\n\n map.drawmapboundary(fill_color=\"0.9\")\n map.drawcoastlines()\n map.fillcontinents(color=\".7\", lake_color=\".7\")\n map.drawparallels(np.arange(-80.0, 81.0, 20.0), labels=[0, 0, 0, 0])\n map.drawmeridians(\n np.arange(-180.0, 181.0, 20.0), labels=[0, 0, 0, 0]\n ) # [0,1,0,0]\n im = plt.contourf(xxx, yyy, zz, levels=contours, cmap=cmap, extend=\"both\")\n if do_cbar:\n cbar = map.colorbar(im, \"bottom\", size=\"5%\", pad=\"2%\")\n return (im, map, cbar)\n else:\n return (im, map)\n elif oce == \"sp\":\n map = Basemap(projection=\"splaea\", boundinglat=-20, lon_0=180, resolution=\"l\")\n xxx, yyy = map(xx, yy)\n\n map.drawmapboundary(fill_color=\"0.9\")\n map.drawcoastlines()\n map.fillcontinents(color=\".7\", lake_color=\".7\")\n map.drawparallels(np.arange(-80.0, 81.0, 20.0), labels=[0, 0, 0, 
0])\n        map.drawmeridians(np.arange(-180.0, 181.0, 20.0), labels=[0, 0, 0, 0])\n        im = plt.contourf(xxx, yyy, zz, levels=contours, cmap=cmap, extend=\"both\")\n        if do_cbar:\n            cbar = map.colorbar(im, \"bottom\", size=\"5%\", pad=\"2%\")\n            return (im, map, cbar)\n        else:\n            return (im, map)\n\n\ndef wplot_yz(y, z, v, contours, cmap=[]):\n    import numpy as np\n    import matplotlib.pyplot as plt\n    from matplotlib.colors import LinearSegmentedColormap\n\n    if cmap == []:\n        cmap = plt.cm.jet\n\n    im = plt.contourf(y, z, v, levels=contours, cmap=cmap, extend=\"both\")\n    cbar = plt.colorbar(orientation=\"horizontal\")\n    plt.grid()\n    return (im, cbar)\n\n\ndef movingaverage(interval, window_size):\n    import numpy as np\n\n    window = np.ones(int(window_size)) / float(window_size)\n    ret = list(interval)\n    for i in range(window_size):\n        ret = ret + [ret[-1]]\n    ret = np.convolve(np.array(ret), window, \"valid\")\n    return ret\n\n\ndef plot(\n    mesh,\n    data,\n    cmap=None,\n    influence=80000,\n    box=[-180, 180, -89, 90],\n    res=[360, 180],\n    interp=\"nn\",\n    mapproj=\"merc\",\n    levels=None,\n    ptype=\"cf\",\n    units=None,\n    figsize=(10, 10),\n    rowscol=(1, 1),\n    titles=None,\n    distances_path=None,\n    inds_path=None,\n    qhull_path=None,\n    basepath=None,\n):\n    \"\"\"\n    Plots interpolated 2d field on the map.\n\n    Parameters\n    ----------\n    mesh: mesh object\n        FESOM2 mesh object\n    data: np.array or list of np.arrays\n        FESOM 2 data on nodes (for u,v,u_ice and v_ice one has to first interpolate from elements to nodes).\n        Can be either one np.ndarray or list of np.ndarrays.\n    cmap: str\n        Name of the colormap from cmocean package or from the standard matplotlib set.\n        By default `Spectral_r` will be used.\n    influence: float\n        Radius of influence for interpolation, in meters.\n    box: list\n        Map boundaries in -180 180 -90 90 format that will be used for interpolation (default [-180 180 -89 90]).\n    res: list\n        Number of points along each axis that will be used for interpolation (for lon and lat),\n        default [360, 180].\n    interp: str\n        Interpolation method. Options are 'nn' (nearest neighbor), 'idist' (inverse distance), \"linear\" and \"cubic\".\n    mapproj: str\n        Map projection. Options are Mercator (merc), Plate Carree (pc),\n        North Polar Stereo (np), South Polar Stereo (sp), Robinson (rob)\n    levels: list\n        Levels for contour plot in format min max numberOfLevels.\n        If not provided min/max values from data will be used with 40 levels.\n    ptype: str\n        Plot type. Options are contourf (\'cf\') and pcolormesh (\'pcm\')\n    units: str\n        Units for color bar.\n    figsize: tuple\n        figure size in inches\n    rowscol: tuple\n        number of rows and columns.\n    titles: str or list\n        Title of the plot (if string) or subplots (if list of strings)\n    distances_path : string\n        Path to the file with distances. If not provided and dumpfile=True, it will be created.\n    inds_path : string\n        Path to the file with inds. If not provided and dumpfile=True, it will be created.\n    qhull_path : str\n        Path to the file with qhull (needed for linear and cubic interpolations). If not provided and dumpfile=True, it will be created.\n    basepath: str\n        path where to store additional interpolation files. 
If None (default),\n        the path of the mesh will be used.\n    \"\"\"\n    if not isinstance(data, list):\n        data = [data]\n    if titles:\n        if not isinstance(titles, list):\n            titles = [titles]\n        if len(titles) != len(data):\n            raise ValueError(\n                \"The number of titles does not match the number of data fields, please adjust titles (or set to None)\")\n\n    if (rowscol[0] * rowscol[1]) < len(data):\n        raise ValueError(\n            \"Number of rows*columns is smaller than number of data fields, please adjust rowscol.\")\n\n    if cmap:\n        if isinstance(cmap, (matplotlib.colors.Colormap)):\n            colormap = cmap\n        elif cmap in cmo.cmapnames:\n            colormap = cmo.cmap_d[cmap]\n        elif cmap in plt.cm.datad:\n            colormap = plt.get_cmap(cmap)\n        else:\n            raise ValueError(\n                \"Got unrecognised name for the colormap `{}`. Colormaps should be from the standard matplotlib set or from the cmocean package.\".format(\n                    cmap\n                )\n            )\n    else:\n        colormap = plt.get_cmap(\"Spectral_r\")\n\n    radius_of_influence = influence\n\n    left, right, down, up = box\n    lonNumber, latNumber = res\n\n    # flf = Dataset(ifile)\n    lonreg = np.linspace(left, right, lonNumber)\n    latreg = np.linspace(down, up, latNumber)\n    lonreg2, latreg2 = np.meshgrid(lonreg, latreg)\n\n    interpolated = []\n    for datainstance in data:\n\n        if interp == \"nn\":\n            ofesom = fesom2regular(\n                datainstance,\n                mesh,\n                lonreg2,\n                latreg2,\n                distances_path=distances_path,\n                inds_path=inds_path,\n                radius_of_influence=radius_of_influence,\n                basepath=basepath,\n            )\n            interpolated.append(ofesom)\n        elif interp == \"idist\":\n            ofesom = fesom2regular(\n                datainstance,\n                mesh,\n                lonreg2,\n                latreg2,\n                distances_path=distances_path,\n                inds_path=inds_path,\n                radius_of_influence=radius_of_influence,\n                how=\"idist\",\n                k=5,\n                basepath=basepath,\n            )\n            interpolated.append(ofesom)\n        elif interp == \"linear\":\n            ofesom = fesom2regular(\n                datainstance,\n                mesh,\n                lonreg2,\n                latreg2,\n                how=\"linear\",\n                qhull_path=qhull_path,\n                basepath=basepath,\n            )\n            interpolated.append(ofesom)\n        elif interp == \"cubic\":\n            ofesom = fesom2regular(\n                datainstance, mesh, lonreg2, latreg2, basepath=basepath, how=\"cubic\"\n            )\n            interpolated.append(ofesom)\n\n    # nearth = cfeature.NaturalEarthFeature(\"physical\", \"ocean\", \"50m\")\n    # main_geom = [contour for contour in nearth.geometries()][0]\n\n    # mask = shapely.vectorized.contains(main_geom, lonreg2, latreg2)\n    # m2 = np.where(((lonreg2 == -180.0) & (latreg2 > 71.5)), True, mask)\n    # m2 = np.where(\n    #     ((lonreg2 == -180.0) & (latreg2 < 70.95) & (latreg2 > 68.96)), True, m2\n    # )\n    # m2 = np.where(((lonreg2 == -180.0) & (latreg2 < 65.33)), True, m2)\n\n    m2 = mask_ne(lonreg2, latreg2)\n\n    # m2 = np.where(((lonreg2 == 180.)&(latreg2>71.5)), True, m2)\n    # m2 = np.where(((lonreg2 == 180.)&(latreg2<70.95)&(latreg2>68.96)), True, m2)\n    # m2 = np.where(((lonreg2 == 180.)&(latreg2<65.33)), True, m2)\n\n    for i, interpolated_instance in enumerate(interpolated):\n        interpolated[i] = np.ma.masked_where(m2, interpolated[i])\n        interpolated[i] = np.ma.masked_equal(interpolated[i], 0)\n\n    if mapproj == \"merc\":\n        fig, ax = plt.subplots(\n            rowscol[0],\n            rowscol[1],\n            subplot_kw=dict(projection=ccrs.Mercator()),\n            constrained_layout=True,\n            figsize=figsize,\n        )\n    elif mapproj == \"pc\":\n        fig, ax = plt.subplots(\n            rowscol[0],\n            rowscol[1],\n            subplot_kw=dict(projection=ccrs.PlateCarree()),\n            constrained_layout=True,\n            figsize=figsize,\n        )\n    elif mapproj == \"np\":\n        fig, ax = plt.subplots(\n            rowscol[0],\n            rowscol[1],\n            subplot_kw=dict(projection=ccrs.NorthPolarStereo()),\n            constrained_layout=True,\n            figsize=figsize,\n            
)\n    elif mapproj == \"sp\":\n        fig, ax = plt.subplots(\n            rowscol[0],\n            rowscol[1],\n            subplot_kw=dict(projection=ccrs.SouthPolarStereo()),\n            constrained_layout=True,\n            figsize=figsize,\n        )\n    elif mapproj == \"rob\":\n        fig, ax = plt.subplots(\n            rowscol[0],\n            rowscol[1],\n            subplot_kw=dict(projection=ccrs.Robinson()),\n            constrained_layout=True,\n            figsize=figsize,\n        )\n    if isinstance(ax, np.ndarray):\n        ax = ax.flatten()\n    else:\n        ax = [ax]\n\n    for ind, data_int in enumerate(interpolated):\n        ax[ind].set_extent([left, right, down, up], crs=ccrs.PlateCarree())\n        if levels:\n            mmin, mmax, nnum = levels\n            nnum = int(nnum)\n        else:\n            mmin = np.nanmin(data_int)\n            mmax = np.nanmax(data_int)\n            nnum = 40\n        data_levels = np.linspace(mmin, mmax, nnum)\n        if ptype == \"cf\":\n            data_int_cyc, lon_cyc = add_cyclic_point(data_int, coord=lonreg)\n            image = ax[ind].contourf(\n                lon_cyc,\n                latreg,\n                data_int_cyc,\n                levels=data_levels,\n                transform=ccrs.PlateCarree(),\n                cmap=colormap,\n                extend=\"both\",\n            )\n        elif ptype == \"pcm\":\n            data_int_cyc, lon_cyc = add_cyclic_point(data_int, coord=lonreg)\n            image = ax[ind].pcolormesh(\n                lon_cyc,\n                latreg,\n                data_int_cyc,\n                vmin=mmin,\n                vmax=mmax,\n                transform=ccrs.PlateCarree(),\n                cmap=colormap,\n            )\n        else:\n            raise ValueError(\"Unknown plot type {}\".format(ptype))\n\n        # ax.coastlines(resolution = '50m',lw=0.5)\n        ax[ind].add_feature(\n            cfeature.GSHHSFeature(levels=[1], scale=\"low\", facecolor=\"lightgray\")\n        )\n        if titles:\n            ax[ind].set_title(titles.pop(0), size=20)\n\n    for delind in range(ind + 1, len(ax)):\n        fig.delaxes(ax[delind])\n\n    cb = fig.colorbar(image, orientation=\"horizontal\", ax=ax, pad=0.01, shrink=0.9)\n\n    cb.ax.tick_params(labelsize=15)\n\n    if units:\n        cb.set_label(units, size=20)\n    else:\n        pass\n\n    return ax\n\n\ndef plot_transect_map(\n    lon_start, lat_start, lon_end, lat_end, mesh, npoints=30, view=\"w\", stock_img=False\n):\n    # plt.figure(figsize=(10,10))\n    lonlat = transect_get_lonlat(\n        lon_start, lat_start, lon_end, lat_end, npoints=npoints\n    )\n    nodes = transect_get_nodes(lonlat, mesh)\n    # dist = transect_get_distance(lonlat)\n\n    if view == \"w\":\n        ax = plt.subplot(111, projection=ccrs.Mercator(central_longitude=0))\n        ax.set_extent([180, -180, -80, 90], crs=ccrs.PlateCarree())\n    elif view == \"np\":\n        ax = plt.subplot(111, projection=ccrs.NorthPolarStereo(central_longitude=0))\n        ax.set_extent([180, -180, 60, 90], crs=ccrs.PlateCarree())\n    elif view == \"sp\":\n        ax = plt.subplot(111, projection=ccrs.SouthPolarStereo(central_longitude=0))\n        ax.set_extent([180, -180, -90, -50], crs=ccrs.PlateCarree())\n    else:\n        raise ValueError(\n            'The \"{}\" is not recognized as a valid view option.'.format(view)\n        )\n\n    ax.scatter(lonlat[:, 0], lonlat[:, 1], s=30, c=\"b\", transform=ccrs.PlateCarree())\n    ax.scatter(\n        mesh.x2[nodes], mesh.y2[nodes], s=30, c=\"r\", transform=ccrs.PlateCarree()\n    )\n    if stock_img == True:\n        ax.stock_img()\n    ax.coastlines(resolution=\"50m\")\n    return ax\n\n\ndef plot_transect(\n    data3d,\n    mesh,\n    lon_start,\n    lat_start,\n    lon_end,\n    lat_end,\n    npoints=30,\n    maxdepth=1000,\n    label=\"$^{\\circ}$C\",\n    title=\"\",\n    levels=None,\n    cmap=cm.Spectral_r,\n    ax=None,\n    dist=None,\n    nodes=None,\n    ncols=2,\n    figsize=None,\n    transect_data=[],\n    max_distance=1e6,\n):\n\n    depth_index = ind_for_depth(maxdepth, mesh)\n    if not isinstance(data3d, list):\n        if ax is None:\n            ax = plt.gca()\n            oneplot = True\n        else:\n            oneplot = False\n        if (type(dist) is np.ndarray) and (type(nodes) is np.ndarray):\n            if not (type(transect_data) is np.ma.core.MaskedArray):\n                lonlat = 
transect_get_lonlat(\n                    lon_start, lat_start, lon_end, lat_end, npoints=npoints\n                )\n                mask2d = transect_get_mask(nodes, mesh, lonlat, max_distance)\n                transect_data = transect_get_data(data3d, nodes, mask2d)\n        else:\n            lonlat = transect_get_lonlat(\n                lon_start, lat_start, lon_end, lat_end, npoints=npoints\n            )\n            nodes = transect_get_nodes(lonlat, mesh)\n            dist = transect_get_distance(lonlat)\n            # profile = transect_get_profile(nodes, mesh)\n            if not (type(transect_data) is np.ma.core.MaskedArray):\n                mask2d = transect_get_mask(nodes, mesh, lonlat, max_distance)\n                transect_data = transect_get_data(data3d, nodes, mask2d)\n\n        image = ax.contourf(\n            dist,\n            np.abs(mesh.zlev[:depth_index]),\n            transect_data[:, :depth_index].T,\n            levels=levels,\n            cmap=cmap,\n            extend=\"both\",\n        )\n        ax.invert_yaxis()\n        ax.set_title(title)\n        ax.set_xlabel(\"km\")\n        ax.set_ylabel(\"m\")\n\n        if oneplot:\n            cb = plt.colorbar(image)\n            cb.set_label(label)\n\n        return image\n    else:\n        ncols = float(ncols)\n        nplots = len(data3d)\n        nrows = math.ceil(nplots / ncols)\n        ncols = int(ncols)\n        nrows = int(nrows)\n        nplot = 1\n\n        if not figsize:\n            figsize = (8 * ncols, 2 * nrows * ncols)\n        fig, ax = plt.subplots(nrows, ncols, figsize=figsize)\n        ax = ax.flatten()\n        for ind, data in enumerate(data3d):\n            if (type(dist) is np.ndarray) and (type(nodes) is np.ndarray):\n                transect_data = transect_get_data(data, nodes)\n            else:\n                lonlat = transect_get_lonlat(\n                    lon_start, lat_start, lon_end, lat_end, npoints=npoints\n                )\n                nodes = transect_get_nodes(lonlat, mesh)\n                dist = transect_get_distance(lonlat)\n                # profile = transect_get_profile(nodes, mesh)\n                mask2d = transect_get_mask(nodes, mesh, lonlat, max_distance)\n                transect_data = transect_get_data(data, nodes, mask2d)\n\n            image = ax[ind].contourf(\n                dist,\n                np.abs(mesh.zlev[:depth_index]),\n                transect_data[:, :depth_index].T,\n                levels=levels,\n                cmap=cmap,\n                extend=\"both\",\n            )\n            ax[ind].invert_yaxis()\n            if not isinstance(title, list):\n                ax[ind].set_title(title)\n            else:\n                ax[ind].set_title(title[ind])\n            ax[ind].set_xlabel(\"km\")\n            ax[ind].set_ylabel(\"m\")\n\n            cb = fig.colorbar(image, orientation=\"horizontal\", ax=ax[ind], pad=0.11)\n            cb.set_label(label)\n        for delind in range(ind + 1, len(ax)):\n\n            fig.delaxes(ax[delind])\n\n        fig.tight_layout()\n" ]
[ [ "numpy.copy", "numpy.meshgrid", "matplotlib.pyplot.tricontourf", "numpy.ma.masked_where", "matplotlib.pyplot.gca", "numpy.abs", "matplotlib.pyplot.tripcolor", "matplotlib.pyplot.get_cmap", "numpy.isnan", "numpy.linspace", "numpy.ma.masked_equal", "matplotlib.pyplot.subplots", "numpy.arange", "matplotlib.pyplot.colorbar", "numpy.nanmax", "matplotlib.pyplot.grid", "numpy.nanmin", "numpy.array", "matplotlib.pyplot.contourf" ] ]
googleinterns/gail-dyn
[ "31c93b12d068dede0dbe69547f0b2e500374f260" ]
[ "third_party/a2c_ppo_acktr/baselines/results_plotter.py" ]
[ "# The MIT License\n#\n# Copyright (c) 2017 OpenAI (http://openai.com)\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\nimport numpy as np\nimport matplotlib\n\nmatplotlib.use('TkAgg') # Can change to 'Agg' for non-interactive mode\n\nimport matplotlib.pyplot as plt\n\nplt.rcParams['svg.fonttype'] = 'none'\n\nfrom third_party.a2c_ppo_acktr.baselines.common import plot_util\n\nX_TIMESTEPS = 'timesteps'\nX_EPISODES = 'episodes'\nX_WALLTIME = 'walltime_hrs'\nY_REWARD = 'reward'\nY_TIMESTEPS = 'timesteps'\nPOSSIBLE_X_AXES = [X_TIMESTEPS, X_EPISODES, X_WALLTIME]\nEPISODES_WINDOW = 100\nCOLORS = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'purple', 'pink',\n 'brown', 'orange', 'teal', 'coral', 'lightblue', 'lime', 'lavender', 'turquoise',\n 'darkgreen', 'tan', 'salmon', 'gold', 'darkred', 'darkblue']\n\n\ndef rolling_window(a, window):\n shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)\n strides = a.strides + (a.strides[-1],)\n return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)\n\n\ndef window_func(x, y, window, func):\n yw = rolling_window(y, window)\n yw_func = func(yw, axis=-1)\n return x[window - 1:], yw_func\n\n\ndef ts2xy(ts, xaxis, yaxis):\n if xaxis == X_TIMESTEPS:\n x = np.cumsum(ts.l.values)\n elif xaxis == X_EPISODES:\n x = np.arange(len(ts))\n elif xaxis == X_WALLTIME:\n x = ts.t.values / 3600.\n else:\n raise NotImplementedError\n if yaxis == Y_REWARD:\n y = ts.r.values\n elif yaxis == Y_TIMESTEPS:\n y = ts.l.values\n else:\n raise NotImplementedError\n return x, y\n\n\ndef plot_curves(xy_list, xaxis, yaxis, title):\n fig = plt.figure(figsize=(8, 2))\n maxx = max(xy[0][-1] for xy in xy_list)\n minx = 0\n for (i, (x, y)) in enumerate(xy_list):\n color = COLORS[i % len(COLORS)]\n plt.scatter(x, y, s=2)\n x, y_mean = window_func(x, y, EPISODES_WINDOW, np.mean) # So returns average of last EPISODE_WINDOW episodes\n plt.plot(x, y_mean, color=color)\n plt.xlim(minx, maxx)\n plt.title(title)\n plt.xlabel(xaxis)\n plt.ylabel(yaxis)\n plt.tight_layout()\n fig.canvas.mpl_connect('resize_event', lambda event: plt.tight_layout())\n plt.grid(True)\n\n\ndef split_by_task(taskpath):\n return taskpath['dirname'].split('/')[-1].split('-')[0]\n\n\ndef plot_results(dirs, num_timesteps=10e6, xaxis=X_TIMESTEPS, yaxis=Y_REWARD, title='', split_fn=split_by_task):\n results = plot_util.load_results(dirs)\n plot_util.plot_results(results, xy_fn=lambda r: ts2xy(r['monitor'], xaxis, yaxis), split_fn=split_fn,\n average_group=True, 
resample=int(1e6))\n\n\n# Example usage in jupyter-notebook\n# from third_party.a2c_ppo_acktr.baselines.results_plotter import plot_results\n# %matplotlib inline\n# plot_results(\"./log\")\n# Here ./log is a directory containing the monitor.csv files\n\ndef main():\n    import argparse\n    import os\n    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n    parser.add_argument('--dirs', help='List of log directories', nargs='*', default=['./log'])\n    parser.add_argument('--num_timesteps', type=int, default=int(10e6))\n    parser.add_argument('--xaxis', help='Variable on X-axis', default=X_TIMESTEPS)\n    parser.add_argument('--yaxis', help='Variable on Y-axis', default=Y_REWARD)\n    parser.add_argument('--task_name', help='Title of plot', default='Breakout')\n    args = parser.parse_args()\n    args.dirs = [os.path.abspath(dir) for dir in args.dirs]\n    plot_results(args.dirs, args.num_timesteps, args.xaxis, args.yaxis, args.task_name)\n    plt.show()\n\n\nif __name__ == '__main__':\n    main()\n" ]
[ [ "numpy.cumsum", "matplotlib.pyplot.figure", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlim", "matplotlib.pyplot.title", "numpy.lib.stride_tricks.as_strided", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.use", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.scatter" ] ]
vishalbelsare/PySyft
[ "6b2cb4ca3a54e8bb2e61d549bf7773aa955d7468" ]
[ "packages/syft/tests/syft/core/pointer/garbage_collection/gc_strategies_test.py" ]
[ "# third party\nimport torch\n\n# syft absolute\nimport syft as sy\nfrom syft.core.pointer.garbage_collection import GCBatched\nfrom syft.core.pointer.garbage_collection import GCSimple\nfrom syft.core.pointer.garbage_collection import GarbageCollection\nfrom syft.core.pointer.garbage_collection import gc_get_default_strategy\nfrom syft.core.pointer.garbage_collection import gc_set_default_strategy\n\n\ndef test_gc_simple_strategy() -> None:\n    node = sy.VirtualMachine(name=\"alice\")\n    client = node.get_client()\n\n    x = torch.tensor([1, 2, 3, 4])\n    ptr = x.send(client, pointable=False)\n\n    assert len(node.store) == 1\n\n    del ptr\n\n    assert len(node.store) == 0\n\n\ndef test_gc_batched_strategy_setter() -> None:\n    node = sy.VirtualMachine(name=\"alice\")\n    client = node.get_client()\n    client.gc.gc_strategy = GCBatched(threshold=10)\n\n    x = torch.tensor([1, 2, 3, 4])\n\n    for _ in range(9):\n        x.send(client, pointable=False)\n\n    assert len(node.store) == 9\n\n    x.send(client, pointable=False)\n\n    assert len(node.store) == 0\n\n\ndef test_gc_batched_strategy_gc_constructor() -> None:\n    # don't share a VM with other tests\n    node = sy.VirtualMachine()\n    client = node.get_client()\n    client.gc = GarbageCollection(\"gcbatched\", 5)\n\n    x = torch.tensor([1, 2, 3, 4])\n\n    for _ in range(4):\n        x.send(client, pointable=False)\n\n    assert len(node.store) == 4\n\n    x.send(client, pointable=False)\n\n    assert len(node.store) == 0\n\n\ndef test_gc_change_default_gc_strategy(node: sy.VirtualMachine) -> None:\n    gc_prev_strategy = gc_get_default_strategy()\n    gc_set_default_strategy(\"gcbatched\")\n\n    client = node.get_client()\n\n    res = isinstance(client.gc.gc_strategy, GCBatched)\n\n    # Revert\n    gc_set_default_strategy(gc_prev_strategy)\n    sy.core.pointer.garbage_collection.GC_DEFAULT_STRATEGY = GCSimple\n\n    assert res\n\n\ndef test_gc_batched_delete_at_change() -> None:\n    node = sy.VirtualMachine(name=\"alice\")\n    client = node.get_client()\n\n    # Change the strategy\n    client.gc.gc_strategy = GCBatched()\n\n    x = torch.tensor([1, 2, 3, 4])\n\n    x.send(client, pointable=False)\n    x.send(client, pointable=False)\n    x.send(client, pointable=False)\n\n    assert len(node.store) == 3\n\n    # It should force the GCBatched to delete all the cached to-delete objs\n    client.gc.gc_strategy = GCSimple()\n\n    assert len(node.store) == 0\n" ]
[ [ "torch.tensor" ] ]
millernj/phys202-project
[ "51c56d4bd849a717081c6d686e5abbba225d4334" ]
[ "core.py" ]
[ "import numpy as np\n\nsigmoid = lambda x: 1 / (1 + np.exp(-x))\n\ndef perceptron_sigmoid(weights, inputvect):\n    # append a bias term of 1 to the input before applying the weights\n    return sigmoid(np.dot(np.append(inputvect,[1]), weights))\n\ndef gen_network(size):\n    weights= [np.array([[np.random.randn() for _ in range(size[n-1]+1)]\n                        for _ in range(size[n])]) for n in range(len(size))[1:]]\n    return weights\n\ndef propforward(network, inputvect):\n    outputs = []\n    for layer in network:\n        neural_input = inputvect\n        output = [perceptron_sigmoid(weights, neural_input) for weights in layer]\n        outputs.append(output)\n        inputvect = output\n\n    outputs = np.array(outputs)\n    return [outputs[:-1], outputs[-1]]\n\ndef find_deltas_sigmoid(outputs, targets):\n    return [output*(1-output)*(output-target) for output, target in zip(outputs, targets)]\n\ndef edit_weights(layer, input_list, deltas, learning_rate):\n    for a, inpt in enumerate(input_list):\n        layer-=learning_rate/len(input_list)*np.dot(deltas[a].reshape(len(deltas[a]),1),\n                                                    np.append(inpt,[1]).reshape(1,len(inpt)+1))\ndef backprop(network, inputvect, targets):\n\n    hidden_outputs, outputs = propforward(network, inputvect)\n\n    change_in_outputs = find_deltas_sigmoid(outputs, targets)\n\n    list_deltas = [[] for _ in range(len(network))]\n    list_deltas[-1] = change_in_outputs\n\n    for n in range(len(network))[-1:0:-1]:\n        delta = change_in_outputs\n        change_in_hidden_outputs= [hidden_output*(1-hidden_output)*\n                                   np.dot(delta, np.array([a[i] for a in network[n]]).transpose())\n                                   for i, hidden_output in enumerate(hidden_outputs[n-1])]\n        list_deltas[n-1] = change_in_hidden_outputs\n        change_in_outputs = change_in_hidden_outputs\n\n    return list_deltas\n\ndef stoc_descent(network, input_list, target_list, learning_rate):\n    mega_delta = []\n    hidden_output = [propforward(network, inpt)[0] for inpt in input_list]\n    for inpt, target in zip(input_list, target_list):\n        mega_delta.append(backprop(network, inpt, target))\n\n    inputs=[]\n    inputs.append(input_list)\n    for n in range(len(network)):\n        inputs.append(hidden_output[n])\n    assert len(inputs) == len(network) + 1\n    deltas = []\n\n    for n in range(len(network)):\n        deltas.append([np.array(delta[n]) for delta in mega_delta])\n\n    assert len(deltas)==len(network)\n    for n in range(len(network)):\n        edit_weights(network[n], inputs[n], deltas[n], learning_rate)\n\ndef output_reader(output):\n    assert len(output)==10\n    result=[]\n    for i, t in enumerate(output):\n        if t == max(output) and abs(t-1)<=0.5:\n            result.append(i)\n    if len(result)==1:\n        return result[0]\n    else:\n        return 0\n\ndef target_convert(n):\n    assert n <= 9 and n >= 0\n    n = round(n)\n    result = np.zeros((10,))\n    result[n]=1\n    return result\n\ndef train_network(network, training_inputs, training_targets, training_cycles = 30,\n                  numbers_per_cycle = 1438,batch_size = 15,learning_rate = 1):\n\n    train_data_index = np.linspace(0,numbers_per_cycle, numbers_per_cycle + 1)\n    target_list = [target_convert(n) for n in training_targets[0:numbers_per_cycle]]\n    np.random.seed(1)\n    np.random.shuffle(train_data_index)\n    for _ in range(training_cycles):\n        for n in train_data_index:\n            if n+batch_size <= numbers_per_cycle:\n                training_data = training_inputs[int(n):int(n+batch_size)]\n                target_data = target_list[int(n):int(n+batch_size)]\n            else:\n                training_data = training_inputs[int(n-batch_size):numbers_per_cycle]\n                assert len(training_data)!=0\n                target_data = target_list[int(n-batch_size):numbers_per_cycle]\n            stoc_descent(network, training_data, target_data, 
learning_rate)\n \ndef check_net(network, testing_list, target_list, rnge):\n guesses = []\n targets = []\n number_correct = 0\n rnge = range(rnge[0],rnge[1])\n for n in rnge:\n\n guesses.append(output_reader(propforward(network, testing_list[n])[1]))\n targets.append(target_list[n])\n\n for guess, target in zip(guesses, targets):\n if guess == target:\n number_correct+=1\n number_total = len(rnge)\n print(number_correct/number_total*100)\n print(\"%s/%s\" %(str(number_correct), str(number_total)))\n\n" ]
[ [ "numpy.random.shuffle", "numpy.append", "numpy.zeros", "numpy.random.seed", "numpy.random.randn", "numpy.exp", "numpy.array", "numpy.linspace" ] ]
cshreyastech/deep-reinforcement-learning
[ "f2c9a45c76afa65083eed6994785fd1c3e04b1ec" ]
[ "p1_navigation/model.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass QNetwork(nn.Module):\n \"\"\"Actor (Policy) Model.\"\"\"\n\n def __init__(self, state_size, action_size, seed, fc1_units=64, fc2_units=64):\n \"\"\"Initialize parameters and build model.\n Params\n ======\n state_size (int): Dimension of each state\n action_size (int): Dimension of each action\n seed (int): Random seed\n fc1_units (int): Number of nodes in first hidden layer\n fc2_units (int): Number of nodes in second hidden layer\n \"\"\"\n super(QNetwork, self).__init__()\n self.seed = torch.manual_seed(seed)\n self.fc1 = nn.Linear(state_size, fc1_units)\n self.fc2 = nn.Linear(fc1_units, fc2_units)\n self.fc3 = nn.Linear(fc2_units, action_size)\n\n def forward(self, state):\n \"\"\"Build a network that maps state -> action values.\"\"\"\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n return self.fc3(x)" ]
[ [ "torch.manual_seed", "torch.nn.Linear" ] ]
mkeshita/grove
[ "dc6bf6ec63e8c435fe52b1e00f707d5ce4cdb9b3" ]
[ "grove/tests/jordan_gradient/test_jordan_gradient.py" ]
[ "import numpy as np\nfrom unittest.mock import patch\nfrom pyquil import Program\nfrom pyquil.gates import H, CPHASE, SWAP, MEASURE\n\nfrom grove.alpha.phaseestimation.phase_estimation import controlled\nfrom grove.alpha.jordan_gradient.jordan_gradient import gradient_program, estimate_gradient\n\n\ndef test_gradient_program():\n f_h = 0.25\n precision = 2\n \n trial_prog = gradient_program(f_h, precision)\n \n result_prog = Program([H(0), H(1)])\n\n phase_factor = np.exp(1.0j * 2 * np.pi * abs(f_h))\n U = np.array([[phase_factor, 0],\n [0, phase_factor]])\n q_out = range(precision, precision+1)\n for i in range(precision):\n if i > 0:\n U = np.dot(U, U)\n cU = controlled(U)\n name = \"CONTROLLED-U{0}\".format(2 ** i)\n result_prog.defgate(name, cU)\n result_prog.inst((name, i) + tuple(q_out))\n\n result_prog.inst([SWAP(0, 1), H(0), CPHASE(-1.5707963267948966, 0, 1),\n H(1), MEASURE(0, 0), MEASURE(1, 1)])\n\n assert(trial_prog == result_prog)\n\n\ndef test_estimate_gradient():\n test_perturbation = .25\n test_precision = 3\n test_measurements = 10\n\n with patch(\"pyquil.api.QuantumComputer\") as qc:\n qc.run.return_value = np.asarray([[0, 1, 0, 0] for i in range(test_measurements)])\n\n gradient_estimate = estimate_gradient(test_perturbation, test_precision,\n n_measurements=test_measurements,\n qc=qc)\n\n assert(np.isclose(gradient_estimate, test_perturbation))\n" ]
[ [ "numpy.array", "numpy.dot", "numpy.isclose" ] ]
bioexcel/biobb_ml
[ "f99346ef7885d3a62de47dab738a01db4b27467a" ]
[ "biobb_ml/classification/classification_predict.py" ]
[ "#!/usr/bin/env python3\n\n\"\"\"Module containing the ClassificationPredict class and the command line interface.\"\"\"\nimport argparse\nimport pandas as pd\nimport joblib\nfrom biobb_common.generic.biobb_object import BiobbObject\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn import linear_model\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn import ensemble\nfrom sklearn import svm\nfrom biobb_common.configuration import settings\nfrom biobb_common.tools import file_utils as fu\nfrom biobb_common.tools.file_utils import launchlogger\nfrom biobb_ml.classification.common import *\n\nclass ClassificationPredict(BiobbObject):\n \"\"\"\n | biobb_ml ClassificationPredict\n | Makes predictions from an input dataset and a given classification model.\n | Makes predictions from an input dataset (provided either as a file or as a dictionary property) and a given classification model trained with `DecisionTreeClassifier <https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html>`_, `KNeighborsClassifier <https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html>`_, `LogisticRegression <https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html>`_, `RandomForestClassifier <https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html>`_, `Support Vector Machine <https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html>`_ methods.\n\n Args:\n input_model_path (str): Path to the input model. File type: input. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/data/classification/model_classification_predict.pkl>`_. Accepted formats: pkl (edam:format_3653).\n input_dataset_path (str) (Optional): Path to the dataset to predict. File type: input. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/data/classification/input_classification_predict.csv>`_. Accepted formats: csv (edam:format_3752).\n output_results_path (str): Path to the output results file. File type: output. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/reference/classification/ref_output_classification_predict.csv>`_. Accepted formats: csv (edam:format_3752).\n properties (dic - Python dictionary object containing the tool parameters, not input/output files):\n * **predictions** (*list*) - (None) List of dictionaries with all values you want to predict targets. It will be taken into account only in case **input_dataset_path** is not provided. 
Format: [{ 'var1': 1.0, 'var2': 2.0 }, { 'var1': 4.0, 'var2': 2.7 }] for datasets with headers and [[ 1.0, 2.0 ], [ 4.0, 2.7 ]] for datasets without headers.\n * **remove_tmp** (*bool*) - (True) [WF property] Remove temporal files.\n * **restart** (*bool*) - (False) [WF property] Do not execute if output files exist.\n\n Examples:\n This is a use example of how to use the building block from Python::\n\n from biobb_ml.classification.classification_predict import classification_predict\n prop = { \n 'predictions': [\n { \n 'var1': 1.0, \n 'var2': 2.0 \n }, \n { \n 'var1': 4.0, \n 'var2': 2.7 \n }\n ] \n }\n classification_predict(input_model_path='/path/to/myModel.pkl', \n output_results_path='/path/to/newPredictedResults.csv',\n input_dataset_path='/path/to/myDataset.csv', \n properties=prop)\n\n Info:\n * wrapped_software:\n * name: scikit-learn\n * version: >=0.24.2\n * license: BSD 3-Clause\n * ontology:\n * name: EDAM\n * schema: http://edamontology.org/EDAM.owl\n\n \"\"\"\n\n def __init__(self, input_model_path, output_results_path, \n input_dataset_path=None, properties=None, **kwargs) -> None:\n properties = properties or {}\n\n # Call parent class constructor\n super().__init__(properties)\n\n # Input/Output files\n self.io_dict = { \n \"in\": { \"input_model_path\": input_model_path, \"input_dataset_path\": input_dataset_path }, \n \"out\": { \"output_results_path\": output_results_path } \n }\n\n # Properties specific for BB\n self.predictions = properties.get('predictions', [])\n self.properties = properties\n\n # Check the properties\n self.check_properties(properties)\n\n def check_data_params(self, out_log, err_log):\n \"\"\" Checks all the input/output paths and parameters \"\"\"\n self.io_dict[\"in\"][\"input_model_path\"] = check_input_path(self.io_dict[\"in\"][\"input_model_path\"], \"input_model_path\", out_log, self.__class__.__name__)\n self.io_dict[\"out\"][\"output_results_path\"] = check_output_path(self.io_dict[\"out\"][\"output_results_path\"],\"output_results_path\", False, out_log, self.__class__.__name__)\n if self.io_dict[\"in\"][\"input_dataset_path\"]:\n self.io_dict[\"in\"][\"input_dataset_path\"] = check_input_path(self.io_dict[\"in\"][\"input_dataset_path\"], \"input_dataset_path\", out_log, self.__class__.__name__)\n\n @launchlogger\n def launch(self) -> int:\n \"\"\"Execute the :class:`ClassificationPredict <classification.classification_predict.ClassificationPredict>` classification.classification_predict.ClassificationPredict object.\"\"\"\n\n # check input/output paths and parameters\n self.check_data_params(self.out_log, self.err_log)\n\n # Setup Biobb\n if self.check_restart(): return 0\n self.stage_files()\n\n fu.log('Getting model from %s' % self.io_dict[\"in\"][\"input_model_path\"], self.out_log, self.global_log)\n\n with open(self.io_dict[\"in\"][\"input_model_path\"], \"rb\") as f:\n while True:\n try:\n m = joblib.load(f)\n if (isinstance(m, linear_model.LogisticRegression)\n or isinstance(m, KNeighborsClassifier)\n or isinstance(m, DecisionTreeClassifier)\n or isinstance(m, ensemble.RandomForestClassifier)\n or isinstance(m, svm.SVC)):\n new_model = m\n if isinstance(m, StandardScaler):\n scaler = m\n if isinstance(m, dict):\n variables = m\n except EOFError:\n break\n\n if self.io_dict[\"in\"][\"input_dataset_path\"]:\n # load dataset from input_dataset_path file\n fu.log('Getting dataset from %s' % self.io_dict[\"in\"][\"input_dataset_path\"], self.out_log, self.global_log)\n if 'columns' in variables['independent_vars']:\n labels = 
getHeader(self.io_dict[\"in\"][\"input_dataset_path\"])\n skiprows = 1\n else:\n labels = None\n skiprows = None\n new_data_table = pd.read_csv(self.io_dict[\"in\"][\"input_dataset_path\"], header = None, sep=\"\\s+|;|:|,|\\t\", engine=\"python\", skiprows=skiprows, names=labels)\n else:\n # load dataset from properties\n if 'columns' in variables['independent_vars']:\n # sorting self.properties in the correct order given by variables['independent_vars']['columns']\n index_map = { v: i for i, v in enumerate(variables['independent_vars']['columns']) }\n predictions = []\n for i, pred in enumerate(self.predictions):\n sorted_pred = sorted(pred.items(), key=lambda pair: index_map[pair[0]])\n predictions.append(dict(sorted_pred))\n new_data_table = pd.DataFrame(data=get_list_of_predictors(predictions),columns=get_keys_of_predictors(predictions))\n else:\n predictions = self.predictions\n new_data_table = pd.DataFrame(data=predictions) \n\n if variables['scale']: \n fu.log('Scaling dataset', self.out_log, self.global_log)\n new_data = scaler.transform(new_data_table)\n else: new_data = new_data_table\n\n p = new_model.predict_proba(new_data)\n\n # if headers, create target column with proper label\n if self.io_dict[\"in\"][\"input_dataset_path\"] or 'columns' in variables['independent_vars']:\n clss = ' (' + ', '.join(str(x) for x in variables['target_values']) + ')'\n new_data_table[variables['target']['column'] + ' ' + clss] = tuple(map(tuple, p))\n else:\n new_data_table[len(new_data_table.columns)] = tuple(map(tuple, p))\n fu.log('Predicting results\\n\\nPREDICTION RESULTS\\n\\n%s\\n' % new_data_table, self.out_log, self.global_log)\n fu.log('Saving results to %s' % self.io_dict[\"out\"][\"output_results_path\"], self.out_log, self.global_log)\n new_data_table.to_csv(self.io_dict[\"out\"][\"output_results_path\"], index = False, header=True, float_format='%.3f')\n\n return 0\n\ndef classification_predict(input_model_path: str, output_results_path: str, input_dataset_path: str = None, properties: dict = None, **kwargs) -> int:\n \"\"\"Execute the :class:`ClassificationPredict <classification.classification_predict.ClassificationPredict>` class and\n execute the :meth:`launch() <classification.classification_predict.ClassificationPredict.launch>` method.\"\"\"\n\n return ClassificationPredict(input_model_path=input_model_path, \n output_results_path=output_results_path, \n input_dataset_path=input_dataset_path,\n properties=properties, **kwargs).launch()\n\ndef main():\n \"\"\"Command line execution of this building block. Please check the command line documentation.\"\"\"\n parser = argparse.ArgumentParser(description=\"Makes predictions from an input dataset and a given classification model.\", formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))\n parser.add_argument('--config', required=False, help='Configuration file')\n\n # Specific args of each building block\n required_args = parser.add_argument_group('required arguments')\n required_args.add_argument('--input_model_path', required=True, help='Path to the input model. Accepted formats: pkl.')\n required_args.add_argument('--output_results_path', required=True, help='Path to the output results file. Accepted formats: csv.')\n parser.add_argument('--input_dataset_path', required=False, help='Path to the dataset to predict. 
Accepted formats: csv.')\n\n args = parser.parse_args()\n args.config = args.config or \"{}\"\n properties = settings.ConfReader(config=args.config).get_prop_dic()\n\n # Specific call of each building block\n classification_predict(input_model_path=args.input_model_path, \n output_results_path=args.output_results_path, \n input_dataset_path=args.input_dataset_path,\n properties=properties)\n\nif __name__ == '__main__':\n main()\n\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
YuXie96/time
[ "8539d55d2449c712f54331b06720ab7faf3593df" ]
[ "evaluate.py" ]
[ "import torch\nfrom utils.data_util import char_list\nfrom utils.train_util import data_init, model_init\n\n\ndef eval_total_acc(config):\n # initialize data loaders\n test_loader = data_init(mode='test', use_velocity=config.use_velocity,\n t_scale=config.t_scale, batch_s=config.batch_s,\n context=config.context, context_w=config.context_w)\n # initialize model\n inp_size = 3\n if config.context is not None:\n inp_size += config.context_w\n model = model_init(mode='test', model_type=config.rnn_type,\n input_size=inp_size, hidden_size=config.hidden_size,\n save_path=config.save_path)\n\n correct = 0\n total = 0\n with torch.no_grad():\n for test_data, test_label in test_loader:\n hid = model.init_hidden(config.batch_s)\n for t_ in range(test_data.shape[0]):\n output, hid = model(test_data[t_], hid)\n if t_ >= test_data.shape[0] - config.readout_steps:\n _, predicted = torch.max(output.detach(), 1)\n total += test_label.size(0)\n correct += (predicted == test_label).sum().item()\n # Accuracy and loss of the network on the test set:\n test_acc = 100 * correct / total\n print(\"Test Accuracy is: {:.1f} %\".format(test_acc))\n return test_acc\n\n\ndef eval_class_acc(config):\n # initialize data loaders\n test_loader = data_init(mode='test', use_velocity=config.use_velocity,\n t_scale=config.t_scale, batch_s=config.batch_s,\n context=config.context, context_w=config.context_w)\n\n # initialize model\n inp_size = 3\n if config.context is not None:\n inp_size += config.context_w\n model = model_init(mode='test', model_type=config.rnn_type,\n input_size=inp_size, hidden_size=config.hidden_size,\n save_path=config.save_path)\n\n # prepare to count predictions for each class\n classes = char_list\n correct_pred = {classname: 0 for classname in classes}\n total_pred = {classname: 0 for classname in classes}\n\n # again no gradients needed\n with torch.no_grad():\n for test_data, test_label in test_loader:\n hid = model.init_hidden(config.batch_s)\n for t_ in range(test_data.shape[0]):\n output, hid = model(test_data[t_], hid)\n if t_ >= test_data.shape[0] - config.readout_steps:\n _, predictions = torch.max(output.detach(), 1)\n # collect the correct predictions for each class\n for lab, prediction in zip(test_label, predictions):\n if lab == prediction:\n correct_pred[classes[lab]] += 1\n total_pred[classes[lab]] += 1\n\n # print accuracy for each class\n for classname, correct_count in correct_pred.items():\n accuracy = 100 * float(correct_count) / total_pred[classname]\n print(\"Accuracy for class {} is: {:.1f} %\".format(classname, accuracy))\n" ]
[ [ "torch.no_grad" ] ]
nuannuanhcc/mmdetection
[ "26162d7fd49d2b87ead2bf5d9d8fbabd2b8933bb" ]
[ "mmdet/apis/runner/base_runner.py" ]
[ "# Copyright (c) Open-MMLab. All rights reserved.\nimport logging\nimport os.path as osp\nimport warnings\nfrom abc import ABCMeta, abstractmethod\n\nimport torch\nfrom torch.optim import Optimizer\n\nimport mmcv\nfrom mmcv.parallel import is_module_wrapper\nfrom .checkpoint import load_checkpoint\nfrom .dist_utils import get_dist_info\nfrom .hooks import HOOKS, Hook, IterTimerHook\nfrom .log_buffer import LogBuffer\nfrom .priority import get_priority\nfrom .utils import get_time_str\n\n\nclass BaseRunner(metaclass=ABCMeta):\n \"\"\"The base class of Runner, a training helper for PyTorch.\n\n All subclasses should implement the following APIs:\n\n - ``run()``\n - ``train()``\n - ``val()``\n - ``save_checkpoint()``\n\n Args:\n model (:obj:`torch.nn.Module`): The model to be run.\n batch_processor (callable): A callable method that process a data\n batch. The interface of this method should be\n `batch_processor(model, data, train_mode) -> dict`\n optimizer (dict or :obj:`torch.optim.Optimizer`): It can be either an\n optimizer (in most cases) or a dict of optimizers (in models that\n requires more than one optimizer, e.g., GAN).\n work_dir (str, optional): The working directory to save checkpoints\n and logs. Defaults to None.\n logger (:obj:`logging.Logger`): Logger used during training.\n Defaults to None. (The default value is just for backward\n compatibility)\n meta (dict | None): A dict records some import information such as\n environment info and seed, which will be logged in logger hook.\n Defaults to None.\n max_epochs (int, optional): Total training epochs.\n max_iters (int, optional): Total training iterations.\n \"\"\"\n\n def __init__(self,\n model,\n batch_processor=None,\n optimizer=None,\n work_dir=None,\n logger=None,\n meta=None,\n max_iters=None,\n max_epochs=None):\n if batch_processor is not None:\n if not callable(batch_processor):\n raise TypeError('batch_processor must be callable, '\n f'but got {type(batch_processor)}')\n warnings.warn('batch_processor is deprecated, please implement '\n 'train_step() and val_step() in the model instead.')\n # raise an error is `batch_processor` is not None and\n # `model.train_step()` exists.\n if is_module_wrapper(model):\n _model = model.module\n else:\n _model = model\n if hasattr(_model, 'train_step') or hasattr(_model, 'val_step'):\n raise RuntimeError(\n 'batch_processor and model.train_step()/model.val_step() '\n 'cannot be both available.')\n else:\n assert hasattr(model, 'train_step')\n\n # check the type of `optimizer`\n if isinstance(optimizer, dict):\n for name, optim in optimizer.items():\n if not isinstance(optim, Optimizer):\n raise TypeError(\n f'optimizer must be a dict of torch.optim.Optimizers, '\n f'but optimizer[\"{name}\"] is a {type(optim)}')\n elif not isinstance(optimizer, Optimizer) and optimizer is not None:\n raise TypeError(\n f'optimizer must be a torch.optim.Optimizer object '\n f'or dict or None, but got {type(optimizer)}')\n\n # check the type of `logger`\n if not isinstance(logger, logging.Logger):\n raise TypeError(f'logger must be a logging.Logger object, '\n f'but got {type(logger)}')\n\n # check the type of `meta`\n if meta is not None and not isinstance(meta, dict):\n raise TypeError(\n f'meta must be a dict or None, but got {type(meta)}')\n\n self.model = model\n self.batch_processor = batch_processor\n self.optimizer = optimizer\n self.logger = logger\n self.meta = meta\n\n # create work_dir\n if mmcv.is_str(work_dir):\n self.work_dir = osp.abspath(work_dir)\n 
mmcv.mkdir_or_exist(self.work_dir)\n elif work_dir is None:\n self.work_dir = None\n else:\n raise TypeError('\"work_dir\" must be a str or None')\n\n # get model name from the model class\n if hasattr(self.model, 'module'):\n self._model_name = self.model.module.__class__.__name__\n else:\n self._model_name = self.model.__class__.__name__\n\n self._rank, self._world_size = get_dist_info()\n self.timestamp = get_time_str()\n self.mode = None\n self._hooks = []\n self._epoch = 0\n self._iter = 0\n self._inner_iter = 0\n\n if max_epochs is not None and max_iters is not None:\n raise ValueError(\n 'Only one of `max_epochs` or `max_iters` can be set.')\n\n self._max_epochs = max_epochs\n self._max_iters = max_iters\n # TODO: Redesign LogBuffer, it is not flexible and elegant enough\n self.log_buffer = LogBuffer()\n\n @property\n def model_name(self):\n \"\"\"str: Name of the model, usually the module class name.\"\"\"\n return self._model_name\n\n @property\n def rank(self):\n \"\"\"int: Rank of current process. (distributed training)\"\"\"\n return self._rank\n\n @property\n def world_size(self):\n \"\"\"int: Number of processes participating in the job.\n (distributed training)\"\"\"\n return self._world_size\n\n @property\n def hooks(self):\n \"\"\"list[:obj:`Hook`]: A list of registered hooks.\"\"\"\n return self._hooks\n\n @property\n def epoch(self):\n \"\"\"int: Current epoch.\"\"\"\n return self._epoch\n\n @property\n def iter(self):\n \"\"\"int: Current iteration.\"\"\"\n return self._iter\n\n @property\n def inner_iter(self):\n \"\"\"int: Iteration in an epoch.\"\"\"\n return self._inner_iter\n\n @property\n def max_epochs(self):\n \"\"\"int: Maximum training epochs.\"\"\"\n return self._max_epochs\n\n @property\n def max_iters(self):\n \"\"\"int: Maximum training iterations.\"\"\"\n return self._max_iters\n\n @abstractmethod\n def train(self):\n pass\n\n @abstractmethod\n def val(self):\n pass\n\n @abstractmethod\n def run(self, data_loaders, workflow, **kwargs):\n pass\n\n @abstractmethod\n def save_checkpoint(self,\n out_dir,\n filename_tmpl,\n save_optimizer=True,\n meta=None,\n create_symlink=True):\n pass\n\n def current_lr(self):\n \"\"\"Get current learning rates.\n\n Returns:\n list[float] | dict[str, list[float]]: Current learning rates of all\n param groups. If the runner has a dict of optimizers, this\n method will return a dict.\n \"\"\"\n if isinstance(self.optimizer, torch.optim.Optimizer):\n lr = [group['lr'] for group in self.optimizer.param_groups]\n elif isinstance(self.optimizer, dict):\n lr = dict()\n for name, optim in self.optimizer.items():\n lr[name] = [group['lr'] for group in optim.param_groups]\n else:\n raise RuntimeError(\n 'lr is not applicable because optimizer does not exist.')\n return lr\n\n def current_momentum(self):\n \"\"\"Get current momentums.\n\n Returns:\n list[float] | dict[str, list[float]]: Current momentums of all\n param groups. 
If the runner has a dict of optimizers, this\n method will return a dict.\n \"\"\"\n\n def _get_momentum(optimizer):\n momentums = []\n for group in optimizer.param_groups:\n if 'momentum' in group.keys():\n momentums.append(group['momentum'])\n elif 'betas' in group.keys():\n momentums.append(group['betas'][0])\n else:\n momentums.append(0)\n return momentums\n\n if self.optimizer is None:\n raise RuntimeError(\n 'momentum is not applicable because optimizer does not exist.')\n elif isinstance(self.optimizer, torch.optim.Optimizer):\n momentums = _get_momentum(self.optimizer)\n elif isinstance(self.optimizer, dict):\n momentums = dict()\n for name, optim in self.optimizer.items():\n momentums[name] = _get_momentum(optim)\n return momentums\n\n def register_hook(self, hook, priority='NORMAL'):\n \"\"\"Register a hook into the hook list.\n\n The hook will be inserted into a priority queue, with the specified\n priority (See :class:`Priority` for details of priorities).\n For hooks with the same priority, they will be triggered in the same\n order as they are registered.\n\n Args:\n hook (:obj:`Hook`): The hook to be registered.\n priority (int or str or :obj:`Priority`): Hook priority.\n Lower value means higher priority.\n \"\"\"\n assert isinstance(hook, Hook)\n if hasattr(hook, 'priority'):\n raise ValueError('\"priority\" is a reserved attribute for hooks')\n priority = get_priority(priority)\n hook.priority = priority\n # insert the hook to a sorted list\n inserted = False\n for i in range(len(self._hooks) - 1, -1, -1):\n if priority >= self._hooks[i].priority:\n self._hooks.insert(i + 1, hook)\n inserted = True\n break\n if not inserted:\n self._hooks.insert(0, hook)\n\n def register_hook_from_cfg(self, hook_cfg):\n \"\"\"Register a hook from its cfg.\n\n Args:\n hook_cfg (dict): Hook config. 
It should have at least keys 'type'\n and 'priority' indicating its type and priority.\n\n Notes:\n The specific hook class to register should not use 'type' and\n 'priority' arguments during initialization.\n \"\"\"\n hook_cfg = hook_cfg.copy()\n priority = hook_cfg.pop('priority', 'NORMAL')\n hook = mmcv.build_from_cfg(hook_cfg, HOOKS)\n self.register_hook(hook, priority=priority)\n\n def call_hook(self, fn_name):\n \"\"\"Call all hooks.\n\n Args:\n fn_name (str): The function name in each hook to be called, such as\n \"before_train_epoch\".\n \"\"\"\n for hook in self._hooks:\n getattr(hook, fn_name)(self)\n\n def load_checkpoint(self, filename, map_location='cpu', strict=False):\n self.logger.info('load checkpoint from %s', filename)\n return load_checkpoint(self.model, filename, map_location, strict,\n self.logger)\n\n def resume(self,\n checkpoint,\n resume_optimizer=True,\n map_location='default'):\n if map_location == 'default':\n if torch.cuda.is_available():\n device_id = torch.cuda.current_device()\n checkpoint = self.load_checkpoint(\n checkpoint,\n map_location=lambda storage, loc: storage.cuda(device_id))\n else:\n checkpoint = self.load_checkpoint(checkpoint)\n else:\n checkpoint = self.load_checkpoint(\n checkpoint, map_location=map_location)\n\n self._epoch = checkpoint['meta']['epoch']\n self._iter = checkpoint['meta']['iter']\n if 'optimizer' in checkpoint and resume_optimizer:\n if isinstance(self.optimizer, Optimizer):\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n elif isinstance(self.optimizer, dict):\n for k in self.optimizer.keys():\n self.optimizer[k].load_state_dict(\n checkpoint['optimizer'][k])\n else:\n raise TypeError(\n 'Optimizer should be dict or torch.optim.Optimizer '\n f'but got {type(self.optimizer)}')\n\n self.logger.info('resumed epoch %d, iter %d', self.epoch, self.iter)\n\n def register_lr_hook(self, lr_config):\n if isinstance(lr_config, dict):\n assert 'policy' in lr_config\n policy_type = lr_config.pop('policy')\n # If the type of policy is all in lower case, e.g., 'cyclic',\n # then its first letter will be capitalized, e.g., to be 'Cyclic'.\n # This is for the convenient usage of Lr updater.\n # Since this is not applicable for `\n # CosineAnnealingLrUpdater`,\n # the string will not be changed if it contains capital letters.\n if policy_type == policy_type.lower():\n policy_type = policy_type.title()\n hook_type = policy_type + 'LrUpdaterHook'\n lr_config['type'] = hook_type\n hook = mmcv.build_from_cfg(lr_config, HOOKS)\n else:\n hook = lr_config\n self.register_hook(hook)\n\n def register_momentum_hook(self, momentum_config):\n if momentum_config is None:\n return\n if isinstance(momentum_config, dict):\n assert 'policy' in momentum_config\n policy_type = momentum_config.pop('policy')\n # If the type of policy is all in lower case, e.g., 'cyclic',\n # then its first letter will be capitalized, e.g., to be 'Cyclic'.\n # This is for the convenient usage of momentum updater.\n # Since this is not applicable for\n # `CosineAnnealingMomentumUpdater`,\n # the string will not be changed if it contains capital letters.\n if policy_type == policy_type.lower():\n policy_type = policy_type.title()\n hook_type = policy_type + 'MomentumUpdaterHook'\n momentum_config['type'] = hook_type\n hook = mmcv.build_from_cfg(momentum_config, HOOKS)\n else:\n hook = momentum_config\n self.register_hook(hook)\n\n def register_optimizer_hook(self, optimizer_config):\n if optimizer_config is None:\n return\n if isinstance(optimizer_config, dict):\n 
optimizer_config.setdefault('type', 'OptimizerHook')\n hook = mmcv.build_from_cfg(optimizer_config, HOOKS)\n else:\n hook = optimizer_config\n self.register_hook(hook)\n\n def register_checkpoint_hook(self, checkpoint_config):\n if checkpoint_config is None:\n return\n if isinstance(checkpoint_config, dict):\n checkpoint_config.setdefault('type', 'CheckpointHook')\n hook = mmcv.build_from_cfg(checkpoint_config, HOOKS)\n else:\n hook = checkpoint_config\n self.register_hook(hook)\n\n def register_logger_hooks(self, log_config):\n if log_config is None:\n return\n log_interval = log_config['interval']\n for info in log_config['hooks']:\n logger_hook = mmcv.build_from_cfg(\n info, HOOKS, default_args=dict(interval=log_interval))\n self.register_hook(logger_hook, priority='VERY_LOW')\n\n def register_training_hooks(self,\n lr_config,\n optimizer_config=None,\n checkpoint_config=None,\n log_config=None,\n momentum_config=None):\n \"\"\"Register default hooks for training.\n\n Default hooks include:\n\n - LrUpdaterHook\n - MomentumUpdaterHook\n - OptimizerStepperHook\n - CheckpointSaverHook\n - IterTimerHook\n - LoggerHook(s)\n \"\"\"\n self.register_lr_hook(lr_config)\n self.register_momentum_hook(momentum_config)\n self.register_optimizer_hook(optimizer_config)\n self.register_checkpoint_hook(checkpoint_config)\n self.register_hook(IterTimerHook())\n self.register_logger_hooks(log_config)\n" ]
[ [ "torch.cuda.is_available", "torch.cuda.current_device" ] ]
ZJULearning/SRDet
[ "12d9302fad742f64ca3c8e05cd601d7dca1bf81e" ]
[ "mmdet3d/ops/furthest_point_sample/points_sampler.py" ]
[ "import torch\nfrom mmcv.runner import force_fp32\nfrom torch import nn as nn\nfrom typing import List\n\nfrom .furthest_point_sample import (furthest_point_sample,\n furthest_point_sample_with_dist)\nfrom .utils import calc_square_dist\n\n\ndef get_sampler_type(sampler_type):\n \"\"\"Get the type and mode of points sampler.\n\n Args:\n sampler_type (str): The type of points sampler.\n The valid value are \"D-FPS\", \"F-FPS\", or \"FS\".\n\n Returns:\n class: Points sampler type.\n \"\"\"\n if sampler_type == 'D-FPS':\n sampler = DFPS_Sampler\n elif sampler_type == 'F-FPS':\n sampler = FFPS_Sampler\n elif sampler_type == 'FS':\n sampler = FS_Sampler\n elif sampler_type == 'RS':\n sampler = RS_Sampler\n else:\n raise ValueError('Only \"sampler_type\" of \"D-FPS\", \"F-FPS\", or \"FS\"'\n f' are supported, got {sampler_type}')\n\n return sampler\n\n\nclass Points_Sampler(nn.Module):\n \"\"\"Points sampling.\n\n Args:\n num_point (list[int]): Number of sample points.\n fps_mod_list (list[str]: Type of FPS method, valid mod\n ['F-FPS', 'D-FPS', 'FS'], Default: ['D-FPS'].\n F-FPS: using feature distances for FPS.\n D-FPS: using Euclidean distances of points for FPS.\n FS: using F-FPS and D-FPS simultaneously.\n fps_sample_range_list (list[int]): Range of points to apply FPS.\n Default: [-1].\n \"\"\"\n\n def __init__(self,\n num_point: List[int],\n fps_mod_list: List[str] = ['D-FPS'],\n fps_sample_range_list: List[int] = [-1]):\n super(Points_Sampler, self).__init__()\n # FPS would be applied to different fps_mod in the list,\n # so the length of the num_point should be equal to\n # fps_mod_list and fps_sample_range_list.\n assert len(num_point) == len(fps_mod_list) == len(\n fps_sample_range_list)\n self.num_point = num_point\n self.fps_sample_range_list = fps_sample_range_list\n self.samplers = nn.ModuleList()\n for fps_mod in fps_mod_list:\n self.samplers.append(get_sampler_type(fps_mod)())\n self.fp16_enabled = False\n\n @force_fp32()\n def forward(self, points_xyz, features):\n \"\"\"forward.\n\n Args:\n points_xyz (Tensor): (B, N, 3) xyz coordinates of the features.\n features (Tensor): (B, C, N) Descriptors of the features.\n\n Return:\n Tensor: (B, npoint, sample_num) Indices of sampled points.\n \"\"\"\n indices = []\n last_fps_end_index = 0\n\n for fps_sample_range, sampler, npoint in zip(\n self.fps_sample_range_list, self.samplers, self.num_point):\n assert fps_sample_range < points_xyz.shape[1]\n\n if fps_sample_range == -1:\n sample_points_xyz = points_xyz[:, last_fps_end_index:]\n sample_features = features[:, :, last_fps_end_index:] if \\\n features is not None else None\n else:\n sample_points_xyz = \\\n points_xyz[:, last_fps_end_index:fps_sample_range]\n sample_features = \\\n features[:, :, last_fps_end_index:fps_sample_range] if \\\n features is not None else None\n\n fps_idx = sampler(sample_points_xyz.contiguous(), sample_features,\n npoint)\n\n indices.append(fps_idx + last_fps_end_index)\n last_fps_end_index += fps_sample_range\n indices = torch.cat(indices, dim=1)\n\n return indices\n\n\nclass DFPS_Sampler(nn.Module):\n \"\"\"DFPS_Sampling.\n\n Using Euclidean distances of points for FPS.\n \"\"\"\n\n def __init__(self):\n super(DFPS_Sampler, self).__init__()\n\n def forward(self, points, features, npoint):\n \"\"\"Sampling points with D-FPS.\"\"\"\n fps_idx = furthest_point_sample(points.contiguous(), npoint)\n return fps_idx\n\n\nclass FFPS_Sampler(nn.Module):\n \"\"\"FFPS_Sampler.\n\n Using feature distances for FPS.\n \"\"\"\n\n def __init__(self):\n 
super(FFPS_Sampler, self).__init__()\n\n def forward(self, points, features, npoint):\n \"\"\"Sampling points with F-FPS.\"\"\"\n assert features is not None, \\\n 'feature input to FFPS_Sampler should not be None'\n features_for_fps = torch.cat([points, features.transpose(1, 2)], dim=2)\n features_dist = calc_square_dist(\n features_for_fps, features_for_fps, norm=False)\n fps_idx = furthest_point_sample_with_dist(features_dist, npoint)\n return fps_idx\n\n\nclass FS_Sampler(nn.Module):\n \"\"\"FS_Sampling.\n\n Using F-FPS and D-FPS simultaneously.\n \"\"\"\n\n def __init__(self):\n super(FS_Sampler, self).__init__()\n\n def forward(self, points, features, npoint):\n \"\"\"Sampling points with FS_Sampling.\"\"\"\n assert features is not None, \\\n 'feature input to FS_Sampler should not be None'\n features_for_fps = torch.cat([points, features.transpose(1, 2)], dim=2)\n features_dist = calc_square_dist(\n features_for_fps, features_for_fps, norm=False)\n fps_idx_ffps = furthest_point_sample_with_dist(features_dist, npoint)\n fps_idx_dfps = furthest_point_sample(points, npoint)\n fps_idx = torch.cat([fps_idx_ffps, fps_idx_dfps], dim=1)\n return fps_idx\n\n\nclass RS_Sampler(nn.Module):\n\n def __init__(self):\n super(RS_Sampler, self).__init__()\n\n def forward(self, points, features, npoint):\n fps_idx = []\n for _ in range(points.shape[0]):\n fps_idx.append(torch.randperm(points.shape[1], dtype=torch.int32)[:npoint])\n fps_idx = torch.stack(fps_idx, dim=0).to(points.device)\n return fps_idx\n\n" ]
[ [ "torch.stack", "torch.nn.ModuleList", "torch.cat", "torch.randperm" ] ]
mdebony/gammapy
[ "015206d2418b1d254f1c9d3ea819ab0c5ece99e9" ]
[ "gammapy/datasets/io.py" ]
[ "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport abc\nfrom pathlib import Path\nimport numpy as np\nfrom astropy import units as u\nfrom astropy.io import fits\nfrom astropy.table import Table\nfrom gammapy.data import GTI\nfrom gammapy.utils.scripts import make_path\nfrom gammapy.maps import RegionNDMap\nfrom gammapy.irf import EDispKernelMap, EDispKernel\nfrom .spectrum import SpectrumDatasetOnOff\n\n\nclass DatasetReader(abc.ABC):\n \"\"\"Dataset reader base class\"\"\"\n\n @property\n @abc.abstractmethod\n def tag(self):\n pass\n\n @abc.abstractmethod\n def read(self):\n pass\n\n\nclass DatasetWriter(abc.ABC):\n \"\"\"Dataset writer base class\"\"\"\n\n @property\n @abc.abstractmethod\n def tag(self):\n pass\n\n @abc.abstractmethod\n def write(self, dataset):\n pass\n\n\nclass OGIPDatasetWriter(DatasetWriter):\n \"\"\"Write OGIP files.\n\n If you want to use the written files with Sherpa you have to use the\n ``ogip-sherpa`` format. Then all files will be written in units of 'keV' and\n 'cm2'.\n\n The naming scheme is fixed as following:\n\n * PHA file is named filename.fits\n * BKG file is named filename_bkg.fits\n * ARF file is named filename_arf.fits\n * RMF file is named filename_rmf.fits\n\n Parameters\n ----------\n filename : `pathlib.Path` or str\n Filename.\n format : {\"ogip\", \"ogip-sherpa\"}\n Which format to use.\n overwrite : bool\n Overwrite existing files?\n \"\"\"\n tag = [\"ogip\", \"ogip-sherpa\"]\n\n def __init__(self, filename, format=\"ogip\", overwrite=False):\n filename = make_path(filename)\n filename.parent.mkdir(exist_ok=True, parents=True)\n\n self.filename = filename\n self.format = format\n self.overwrite = overwrite\n\n @staticmethod\n def get_filenames(filename):\n \"\"\"Get filenames\n\n Parameters\n ----------\n filename : `~pathlib.Path`\n Filename\n\n Returns\n -------\n filenames : dict\n Dict of filenames.\n \"\"\"\n suffix = \"\".join(filename.suffixes)\n name = filename.name.replace(suffix, \"\")\n name = f\"{name}{{}}{suffix}\"\n return {\n \"respfile\": name.format(\"_rmf\"),\n \"backfile\": name.format(\"_bkg\"),\n \"ancrfile\": name.format(\"_arf\")\n }\n\n def get_ogip_meta(self, dataset, is_bkg=False):\n \"\"\"Meta info for the OGIP data format\"\"\"\n try:\n livetime = dataset.exposure.meta[\"livetime\"]\n except KeyError:\n raise ValueError(\n \"Storing in ogip format require the livetime \"\n \"to be defined in the exposure meta data\"\n )\n\n hdu_class = \"BKG\" if is_bkg else \"TOTAL\"\n\n meta = {\n \"HDUCLAS2\": hdu_class,\n \"HDUCLAS3\": \"COUNT\",\n \"HDUCLAS4\": \"TYPE:1\",\n \"EXPOSURE\": livetime.to_value(\"s\"),\n \"OBS_ID\": dataset.name,\n }\n\n filenames = OGIPDatasetWriter.get_filenames(self.filename)\n meta[\"ANCRFILE\"] = filenames[\"ancrfile\"]\n\n if dataset.edisp:\n meta[\"BACKFILE\"] = filenames[\"backfile\"]\n\n if dataset.counts_off:\n meta[\"RESPFILE\"] = filenames[\"respfile\"]\n\n return meta\n\n def write(self, dataset):\n \"\"\"Write dataset to files\n\n Parameters\n ----------\n dataset : `SpectrumDatasetOnOff`\n Dataset to write\n \"\"\"\n filenames = self.get_filenames(self.filename)\n\n self.write_pha(dataset, filename=self.filename)\n\n path = self.filename.parent\n self.write_arf(dataset, filename=path / filenames[\"ancrfile\"])\n\n if dataset.counts_off:\n self.write_bkg(dataset, filename=path / filenames[\"backfile\"])\n\n if dataset.edisp:\n self.write_rmf(dataset, filename=path / filenames[\"respfile\"])\n\n def write_rmf(self, dataset, filename):\n \"\"\"Write energy 
dispersion.\n\n Parameters\n ----------\n dataset : `SpectrumDatasetOnOff`\n Dataset to write\n filename : str or `Path`\n Filename to use.\n \"\"\"\n kernel = dataset.edisp.get_edisp_kernel()\n kernel.write(\n filename=filename,\n overwrite=self.overwrite,\n format=self.format\n )\n\n def write_arf(self, dataset, filename):\n \"\"\"Write effective area\n\n Parameters\n ----------\n dataset : `SpectrumDatasetOnOff`\n Dataset to write\n filename : str or `Path`\n Filename to use.\n\n \"\"\"\n aeff = dataset.exposure / dataset.exposure.meta[\"livetime\"]\n aeff.write(\n filename=filename,\n overwrite=self.overwrite,\n format=self.format.replace(\"ogip\", \"ogip-arf\"),\n )\n\n def to_counts_hdulist(self, dataset, is_bkg=False):\n \"\"\"Convert counts region map to hdulist\n\n Parameters\n ----------\n dataset : `SpectrumDatasetOnOff`\n Dataset to write\n is_bkg : bool\n Whether to use counts off.\n \"\"\"\n counts = dataset.counts_off if is_bkg else dataset.counts\n acceptance = dataset.acceptance_off if is_bkg else dataset.acceptance\n\n hdulist = counts.to_hdulist()\n\n table = Table.read(hdulist[\"SPECTRUM\"])\n meta = self.get_ogip_meta(dataset, is_bkg=is_bkg)\n\n if dataset.mask_safe is not None:\n mask_array = dataset.mask_safe.data[:, 0, 0]\n else:\n mask_array = np.ones(acceptance.data.size)\n\n table[\"QUALITY\"] = np.logical_not(mask_array)\n del table.meta[\"QUALITY\"]\n\n table[\"BACKSCAL\"] = acceptance.data[:, 0, 0]\n del table.meta[\"BACKSCAL\"]\n\n # adapt meta data\n table.meta.update(meta)\n hdulist[\"SPECTRUM\"] = fits.BinTableHDU(table)\n return hdulist\n\n def write_pha(self, dataset, filename):\n \"\"\"Write counts file\n\n Parameters\n ----------\n dataset : `SpectrumDatasetOnOff`\n Dataset to write\n filename : str or `Path`\n Filename to use.\n\n \"\"\"\n hdulist = self.to_counts_hdulist(dataset)\n\n if dataset.gti:\n hdu = fits.BinTableHDU(dataset.gti.table, name=\"GTI\")\n hdulist.append(hdu)\n\n hdulist.writeto(filename, overwrite=self.overwrite)\n\n def write_bkg(self, dataset, filename):\n \"\"\"Write off counts file\n\n Parameters\n ----------\n dataset : `SpectrumDatasetOnOff`\n Dataset to write\n filename : str or `Path`\n Filename to use.\n \"\"\"\n hdulist = self.to_counts_hdulist(dataset, is_bkg=True)\n hdulist.writeto(filename, overwrite=self.overwrite)\n\n\nclass OGIPDatasetReader(DatasetReader):\n \"\"\"Read `~gammapy.datasets.SpectrumDatasetOnOff` from OGIP files.\n\n BKG file, ARF, and RMF must be set in the PHA header and be present in\n the same folder.\n\n The naming scheme is fixed to the following scheme:\n\n * PHA file is named ``pha_obs{name}.fits``\n * BKG file is named ``bkg_obs{name}.fits``\n * ARF file is named ``arf_obs{name}.fits``\n * RMF file is named ``rmf_obs{name}.fits``\n with ``{name}`` the dataset name.\n\n Parameters\n ----------\n filename : str or `~pathlib.Path`\n OGIP PHA file to read\n \"\"\"\n tag = \"ogip\"\n\n def __init__(self, filename):\n self.filename = make_path(filename)\n\n def get_valid_path(self, filename):\n \"\"\"Get absolute or relative path\n\n The relative path is with respect to the name of the reference file.\n\n Parameters\n ----------\n filename : str or `Path`\n Filename\n\n Returns\n -------\n filename : `Path`\n Valid path\n \"\"\"\n filename = make_path(filename)\n\n if not filename.exists():\n return self.filename.parent / filename\n else:\n return filename\n\n def get_filenames(self, pha_meta):\n \"\"\"Get filenames\n\n Parameters\n ----------\n pha_meta : dict\n Meta data from the PHA file\n\n 
Returns\n -------\n filenames : dict\n Dict with filenames of \"arffile\", \"rmffile\" (optional)\n and \"bkgfile\" (optional)\n \"\"\"\n filenames = {\n \"arffile\": self.get_valid_path(pha_meta[\"ANCRFILE\"])\n }\n\n if \"BACKFILE\" in pha_meta:\n filenames[\"bkgfile\"] = self.get_valid_path(pha_meta[\"BACKFILE\"])\n\n if \"RESPFILE\" in pha_meta:\n filenames[\"rmffile\"] = self.get_valid_path(pha_meta[\"RESPFILE\"])\n\n return filenames\n\n @staticmethod\n def read_pha(filename):\n \"\"\"Read PHA file\n\n Parameters\n ----------\n filename : str or `Path`\n PHA file name\n\n Returns\n -------\n data : dict\n Dict with counts, acceptance and mask_safe\n \"\"\"\n data = {}\n\n with fits.open(filename, memmap=False) as hdulist:\n data[\"counts\"] = RegionNDMap.from_hdulist(hdulist, format=\"ogip\")\n data[\"acceptance\"] = RegionNDMap.from_hdulist(\n hdulist, format=\"ogip\", ogip_column=\"BACKSCAL\"\n )\n\n if \"GTI\" in hdulist:\n data[\"gti\"] = GTI(Table.read(hdulist[\"GTI\"]))\n\n data[\"mask_safe\"] = RegionNDMap.from_hdulist(\n hdulist, format=\"ogip\", ogip_column=\"QUALITY\"\n )\n\n return data\n\n @staticmethod\n def read_bkg(filename):\n \"\"\"Read PHA background file\n\n Parameters\n ----------\n filename : str or `Path`\n PHA file name\n\n Returns\n -------\n data : dict\n Dict with counts_off and acceptance_off\n \"\"\"\n with fits.open(filename, memmap=False) as hdulist:\n counts_off = RegionNDMap.from_hdulist(hdulist, format=\"ogip\")\n acceptance_off = RegionNDMap.from_hdulist(\n hdulist, ogip_column=\"BACKSCAL\"\n )\n return {\"counts_off\": counts_off, \"acceptance_off\": acceptance_off}\n\n @staticmethod\n def read_rmf(filename, exposure):\n \"\"\"Read RMF file\n\n Parameters\n ----------\n filename : str or `Path`\n PHA file name\n exposure : `RegionNDMap`\n Exposure map\n\n Returns\n -------\n data : `EDispKernelMap`\n Dict with edisp\n \"\"\"\n kernel = EDispKernel.read(filename)\n edisp = EDispKernelMap.from_edisp_kernel(kernel, geom=exposure.geom)\n\n # TODO: resolve this separate handling of exposure for edisp\n edisp.exposure_map.data = exposure.data[:, :, np.newaxis, :]\n return edisp\n\n @staticmethod\n def read_arf(filename, livetime):\n \"\"\"Read ARF file\n\n Parameters\n ----------\n filename : str or `Path`\n PHA file name\n livetime : `Quantity`\n Livetime\n\n Returns\n -------\n data : `RegionNDMap`\n Exposure map\n \"\"\"\n aeff = RegionNDMap.read(filename, format=\"ogip-arf\")\n exposure = aeff * livetime\n exposure.meta[\"livetime\"] = livetime\n return exposure\n\n def read(self):\n \"\"\"Read dataset\n\n Returns\n -------\n dataset : SpectrumDatasetOnOff\n Spectrum dataset\n \"\"\"\n kwargs = self.read_pha(self.filename)\n pha_meta = kwargs[\"counts\"].meta\n\n name = str(pha_meta[\"OBS_ID\"])\n livetime = pha_meta[\"EXPOSURE\"] * u.s\n\n filenames = self.get_filenames(pha_meta=pha_meta)\n exposure = self.read_arf(filenames[\"arffile\"], livetime=livetime)\n\n if \"bkgfile\" in filenames:\n bkg = self.read_bkg(filenames[\"bkgfile\"])\n kwargs.update(bkg)\n\n if \"rmffile\" in filenames:\n kwargs[\"edisp\"] = self.read_rmf(filenames[\"rmffile\"], exposure=exposure)\n\n return SpectrumDatasetOnOff(name=name, exposure=exposure, **kwargs)\n" ]
[ [ "numpy.logical_not", "numpy.ones" ] ]
QUANHAO-NCU/pytorch-visual-block
[ "f024541add5581026343aaaaeaf27d8415f3d4fe" ]
[ "Working/oc-cnn-master-Q/src/main/getAUC.py" ]
[ "import numpy as np\nimport h5py\n\nfrom sklearn import metrics\nfrom sklearn.metrics import accuracy_score\nfrom sklearn import svm\n\n# path variables\nscore_path = '../../temp_files/scores.mat'\nlabel_path = '../../temp_files/labels.mat'\n\nwith h5py.File(score_path, 'r') as f:\n test_features = f['scores'][()]\nwith h5py.File(label_path, 'r') as f:\n test_label = f['test_label'][()]\n\nfpr, tpr, thresholds = metrics.roc_curve(np.transpose(test_label), np.transpose(test_features))\nprint(metrics.auc(fpr, tpr))\n" ]
[ [ "sklearn.metrics.auc", "numpy.transpose" ] ]
SaitejaUtpala/geomstats
[ "5d4e16b3f30a86aab4725142f2263d8f10a30508" ]
[ "geomstats/geometry/hypersphere.py" ]
[ "\"\"\"The n-dimensional hypersphere.\n\nThe n-dimensional hypersphere embedded in (n+1)-dimensional\nEuclidean space.\n\"\"\"\n\nimport logging\nimport math\nfrom itertools import product\n\nfrom scipy.stats import beta\n\nimport geomstats.algebra_utils as utils\nimport geomstats.backend as gs\nfrom geomstats.geometry.base import EmbeddedManifold\nfrom geomstats.geometry.euclidean import Euclidean, EuclideanMetric\nfrom geomstats.geometry.riemannian_metric import RiemannianMetric\n\n\nclass _Hypersphere(EmbeddedManifold):\n \"\"\"Private class for the n-dimensional hypersphere.\n\n Class for the n-dimensional hypersphere embedded in the\n (n+1)-dimensional Euclidean space.\n\n By default, points are parameterized by their extrinsic\n (n+1)-coordinates.\n\n Parameters\n ----------\n dim : int\n Dimension of the hypersphere.\n \"\"\"\n\n def __init__(self, dim):\n super(_Hypersphere, self).__init__(\n dim=dim, embedding_space=Euclidean(dim + 1),\n submersion=lambda x: gs.sum(x ** 2, axis=-1), value=1.,\n tangent_submersion=lambda v, x: 2 * gs.sum(x * v, axis=-1))\n\n def projection(self, point):\n \"\"\"Project a point on the hypersphere.\n\n Parameters\n ----------\n point : array-like, shape=[..., dim + 1]\n Point in embedding Euclidean space.\n\n Returns\n -------\n projected_point : array-like, shape=[..., dim + 1]\n Point projected on the hypersphere.\n \"\"\"\n norm = gs.linalg.norm(point, axis=-1)\n projected_point = gs.einsum('...,...i->...i', 1. / norm, point)\n\n return projected_point\n\n def to_tangent(self, vector, base_point):\n \"\"\"Project a vector to the tangent space.\n\n Project a vector in Euclidean space\n on the tangent space of the hypersphere at a base point.\n\n Parameters\n ----------\n vector : array-like, shape=[..., dim + 1]\n Vector in Euclidean space.\n base_point : array-like, shape=[..., dim + 1]\n Point on the hypersphere defining the tangent space,\n where the vector will be projected.\n\n Returns\n -------\n tangent_vec : array-like, shape=[..., dim + 1]\n Tangent vector in the tangent space of the hypersphere\n at the base point.\n \"\"\"\n sq_norm = gs.sum(base_point ** 2, axis=-1)\n inner_prod = self.embedding_metric.inner_product(base_point, vector)\n coef = inner_prod / sq_norm\n tangent_vec = vector - gs.einsum('...,...j->...j', coef, base_point)\n\n return tangent_vec\n\n def spherical_to_extrinsic(self, point_spherical):\n \"\"\"Convert point from spherical to extrinsic coordinates.\n\n Convert from the spherical coordinates in the hypersphere\n to the extrinsic coordinates in Euclidean space.\n Only implemented in dimension 2.\n\n Parameters\n ----------\n point_spherical : array-like, shape=[..., dim]\n Point on the sphere, in spherical coordinates.\n\n Returns\n -------\n point_extrinsic : array_like, shape=[..., dim + 1]\n Point on the sphere, in extrinsic coordinates in Euclidean space.\n \"\"\"\n if self.dim != 2:\n raise NotImplementedError(\n 'The conversion from spherical coordinates'\n ' to extrinsic coordinates is implemented'\n ' only in dimension 2.')\n\n theta = point_spherical[..., 0]\n phi = point_spherical[..., 1]\n\n point_extrinsic = gs.stack(\n [gs.sin(theta) * gs.cos(phi),\n gs.sin(theta) * gs.sin(phi),\n gs.cos(theta)],\n axis=-1)\n\n if not gs.all(self.belongs(point_extrinsic)):\n raise ValueError('Points do not belong to the manifold.')\n\n return point_extrinsic\n\n def tangent_spherical_to_extrinsic(self, tangent_vec_spherical,\n base_point_spherical):\n \"\"\"Convert tangent vector from spherical to extrinsic 
coordinates.\n\n Convert from the spherical coordinates in the hypersphere\n to the extrinsic coordinates in Euclidean space for a tangent\n vector. Only implemented in dimension 2.\n\n Parameters\n ----------\n tangent_vec_spherical : array-like, shape=[..., dim]\n Tangent vector to the sphere, in spherical coordinates.\n base_point_spherical : array-like, shape=[..., dim]\n Point on the sphere, in spherical coordinates.\n\n Returns\n -------\n tangent_vec_extrinsic : array-like, shape=[..., dim + 1]\n Tangent vector to the sphere, at base point,\n in extrinsic coordinates in Euclidean space.\n \"\"\"\n if self.dim != 2:\n raise NotImplementedError(\n 'The conversion from spherical coordinates'\n ' to extrinsic coordinates is implemented'\n ' only in dimension 2.')\n\n axes = (2, 0, 1) if base_point_spherical.ndim == 2 else (0, 1)\n theta = base_point_spherical[..., 0]\n phi = base_point_spherical[..., 1]\n\n zeros = gs.zeros_like(theta)\n\n jac = gs.array([\n [gs.cos(theta) * gs.cos(phi), - gs.sin(theta) * gs.sin(phi)],\n [gs.cos(theta) * gs.sin(phi), gs.sin(theta) * gs.cos(phi)],\n [- gs.sin(theta), zeros]])\n jac = gs.transpose(jac, axes)\n\n tangent_vec_extrinsic = gs.einsum(\n '...ij,...j->...i', jac, tangent_vec_spherical)\n\n return tangent_vec_extrinsic\n\n def intrinsic_to_extrinsic_coords(self, point_intrinsic):\n \"\"\"Convert point from intrinsic to extrinsic coordinates.\n\n Convert from the intrinsic coordinates in the hypersphere,\n to the extrinsic coordinates in Euclidean space.\n\n Parameters\n ----------\n point_intrinsic : array-like, shape=[..., dim]\n Point on the hypersphere, in intrinsic coordinates.\n\n Returns\n -------\n point_extrinsic : array-like, shape=[..., dim + 1]\n Point on the hypersphere, in extrinsic coordinates in\n Euclidean space.\n \"\"\"\n sq_coord_0 = 1. 
- gs.sum(point_intrinsic ** 2, axis=-1)\n if gs.any(gs.less(sq_coord_0, 0.)):\n raise ValueError('Square-root of a negative number.')\n coord_0 = gs.sqrt(sq_coord_0)\n\n point_extrinsic = gs.concatenate([\n coord_0[..., None], point_intrinsic], axis=-1)\n\n return point_extrinsic\n\n def extrinsic_to_intrinsic_coords(self, point_extrinsic):\n \"\"\"Convert point from extrinsic to intrinsic coordinates.\n\n Convert from the extrinsic coordinates in Euclidean space,\n to some intrinsic coordinates in the hypersphere.\n\n Parameters\n ----------\n point_extrinsic : array-like, shape=[..., dim + 1]\n Point on the hypersphere, in extrinsic coordinates in\n Euclidean space.\n\n Returns\n -------\n point_intrinsic : array-like, shape=[..., dim]\n Point on the hypersphere, in intrinsic coordinates.\n \"\"\"\n point_intrinsic = point_extrinsic[..., 1:]\n\n return point_intrinsic\n\n def _replace_values(self, samples, new_samples, indcs):\n replaced_indices = [\n i for i, is_replaced in enumerate(indcs) if is_replaced]\n value_indices = list(product(replaced_indices, range(self.dim + 1)))\n return gs.assignment(samples, gs.flatten(new_samples), value_indices)\n\n def random_point(self, n_samples=1, bound=1.):\n \"\"\"Sample in the hypersphere from the uniform distribution.\n\n Parameters\n ----------\n n_samples : int\n Number of samples.\n Optional, default: 1.\n bound : unused\n\n Returns\n -------\n samples : array-like, shape=[..., dim + 1]\n Points sampled on the hypersphere.\n \"\"\"\n return self.random_uniform(n_samples)\n\n def random_uniform(self, n_samples=1):\n \"\"\"Sample in the hypersphere from the uniform distribution.\n\n Parameters\n ----------\n n_samples : int\n Number of samples.\n Optional, default: 1.\n\n Returns\n -------\n samples : array-like, shape=[..., dim + 1]\n Points sampled on the hypersphere.\n \"\"\"\n size = (n_samples, self.dim + 1)\n\n samples = gs.random.normal(size=size)\n while True:\n norms = gs.linalg.norm(samples, axis=1)\n indcs = gs.isclose(norms, 0.0, atol=gs.atol)\n num_bad_samples = gs.sum(indcs)\n if num_bad_samples == 0:\n break\n new_samples = gs.random.normal(\n size=(num_bad_samples, self.dim + 1))\n samples = self._replace_values(samples, new_samples, indcs)\n\n samples = gs.einsum('..., ...i->...i', 1 / norms, samples)\n if n_samples == 1:\n samples = gs.squeeze(samples, axis=0)\n return samples\n\n def random_von_mises_fisher(\n self, mu=None, kappa=10, n_samples=1, max_iter=100):\n \"\"\"Sample with the von Mises-Fisher distribution.\n\n This distribution corresponds to the maximum entropy distribution\n given a mean. In dimension 2, a closed form expression is available.\n In larger dimension, rejection sampling is used according to [Wood94]_\n\n References\n ----------\n https://en.wikipedia.org/wiki/Von_Mises-Fisher_distribution\n\n .. [Wood94] Wood, Andrew T. A. “Simulation of the von Mises Fisher\n Distribution.” Communications in Statistics - Simulation\n and Computation, June 27, 2007.\n https://doi.org/10.1080/03610919408813161.\n\n Parameters\n ----------\n mu : array-like, shape=[dim]\n Mean parameter of the distribution.\n kappa : float\n Kappa parameter of the von Mises distribution.\n Optional, default: 10.\n n_samples : int\n Number of samples.\n Optional, default: 1.\n max_iter : int\n Maximum number of trials in the rejection algorithm. 
In case it\n is reached, the current number of samples < n_samples is returned.\n Optional, default: 100.\n\n Returns\n -------\n point : array-like, shape=[n_samples, dim + 1]\n Points sampled on the sphere in extrinsic coordinates\n in Euclidean space of dimension dim + 1.\n \"\"\"\n dim = self.dim\n\n if dim == 2:\n angle = 2. * gs.pi * gs.random.rand(n_samples)\n angle = gs.to_ndarray(angle, to_ndim=2, axis=1)\n unit_vector = gs.hstack((gs.cos(angle), gs.sin(angle)))\n scalar = gs.random.rand(n_samples)\n\n coord_x = 1. + 1. / kappa * gs.log(\n scalar + (1. - scalar) * gs.exp(gs.array(-2. * kappa)))\n coord_x = gs.to_ndarray(coord_x, to_ndim=2, axis=1)\n coord_yz = gs.sqrt(1. - coord_x ** 2) * unit_vector\n sample = gs.hstack((coord_x, coord_yz))\n\n else:\n # rejection sampling in the general case\n sqrt = gs.sqrt(4 * kappa ** 2. + dim ** 2)\n envelop_param = (-2 * kappa + sqrt) / dim\n node = (1. - envelop_param) / (1. + envelop_param)\n correction = kappa * node + dim * gs.log(1. - node ** 2)\n\n n_accepted, n_iter = 0, 0\n result = []\n while (n_accepted < n_samples) and (n_iter < max_iter):\n sym_beta = beta.rvs(\n dim / 2, dim / 2, size=n_samples - n_accepted)\n sym_beta = gs.cast(sym_beta, node.dtype)\n coord_x = (1 - (1 + envelop_param) * sym_beta) / (\n 1 - (1 - envelop_param) * sym_beta)\n accept_tol = gs.random.rand(n_samples - n_accepted)\n criterion = (\n kappa * coord_x\n + dim * gs.log(1 - node * coord_x)\n - correction) > gs.log(accept_tol)\n result.append(coord_x[criterion])\n n_accepted += gs.sum(criterion)\n n_iter += 1\n if n_accepted < n_samples:\n logging.warning(\n 'Maximum number of iteration reached in rejection '\n 'sampling before n_samples were accepted.')\n coord_x = gs.concatenate(result)\n coord_rest = _Hypersphere(dim - 1).random_uniform(n_accepted)\n coord_rest = gs.einsum(\n '...,...i->...i', gs.sqrt(1 - coord_x ** 2), coord_rest)\n sample = gs.concatenate([coord_x[..., None], coord_rest], axis=1)\n\n if mu is not None:\n sample = utils.rotate_points(sample, mu)\n\n return sample if (n_samples > 1) else sample[0]\n\n def random_riemannian_normal(\n self, mean=None, precision=None, n_samples=1, max_iter=100):\n r\"\"\"Sample from the Riemannian normal distribution.\n\n The Riemannian normal distribution, or spherical normal in this case,\n is defined by the probability density function (with respect to the\n Riemannian volume measure) proportional to:\n .. math::\n \\exp \\Big \\left(- \\frac{\\lambda}{2} \\mathtm{arccos}^2(x^T\\mu)\n \\Big \\right)\n\n where :math: `\\mu` is the mean and :math: `\\lambda` is the isotropic\n precision. For the anisotropic case,\n :math: `\\log_{\\mu}(x)^T \\Lambda \\log_{\\mu}(x)` is used instead.\n\n A rejection algorithm is used to sample from this distribution [Hau18]_\n\n Parameters\n ----------\n mean : array-like, shape=[dim]\n Mean parameter of the distribution.\n Optional, default: (0,...,0,1) (the north pole).\n precision : float or array-like, shape=[dim, dim]\n Inverse of the covariance parameter of the normal distribution.\n If a float is passed, the covariance matrix is precision times\n identity.\n Optional, default: identity.\n n_samples : int\n Number of samples.\n Optional, default: 1.\n max_iter : int\n Maximum number of trials in the rejection algorithm. In case it\n is reached, the current number of samples < n_samples is returned.\n Optional, default: 100.\n\n Returns\n -------\n point : array-like, shape=[n_samples, dim + 1]\n Points sampled on the sphere.\n\n References\n ----------\n .. 
[Hau18] Hauberg, Soren. “Directional Statistics with the\n Spherical Normal Distribution.”\n In 2018 21st International Conference on Information\n Fusion (FUSION), 704–11, 2018.\n https://doi.org/10.23919/ICIF.2018.8455242.\n \"\"\"\n dim = self.dim\n n_accepted, n_iter = 0, 0\n result = []\n if precision is None:\n precision_ = gs.eye(self.dim)\n elif isinstance(precision, (float, int)):\n precision_ = precision * gs.eye(self.dim)\n else:\n precision_ = precision\n precision_2 = precision_ + (dim - 1) / gs.pi * gs.eye(dim)\n tangent_cov = gs.linalg.inv(precision_2)\n\n def threshold(random_v):\n \"\"\"Compute the acceptance threshold.\"\"\"\n squared_norm = gs.sum(random_v ** 2, axis=-1)\n sinc = utils.taylor_exp_even_func(\n squared_norm, utils.sinc_close_0) ** (dim - 1)\n threshold_val = sinc * gs.exp(squared_norm * (dim - 1) / 2 / gs.pi)\n return threshold_val, squared_norm ** .5\n\n while (n_accepted < n_samples) and (n_iter < max_iter):\n envelope = gs.random.multivariate_normal(\n gs.zeros(dim), tangent_cov, size=(n_samples - n_accepted,))\n thresh, norm = threshold(envelope)\n proposal = gs.random.rand(n_samples - n_accepted)\n criterion = gs.logical_and(norm <= gs.pi, proposal <= thresh)\n result.append(envelope[criterion])\n n_accepted += gs.sum(criterion)\n n_iter += 1\n if n_accepted < n_samples:\n logging.warning(\n 'Maximum number of iteration reached in rejection '\n 'sampling before n_samples were accepted.')\n tangent_sample_intr = gs.concatenate(result)\n tangent_sample = gs.concatenate(\n [tangent_sample_intr, gs.zeros(n_accepted)[:, None]], axis=1)\n\n metric = HypersphereMetric(dim)\n north_pole = gs.array([0.] * dim + [1.])\n if mean is not None:\n mean_from_north = metric.log(mean, north_pole)\n tangent_sample_at_pt = metric.parallel_transport(\n tangent_sample, mean_from_north, north_pole)\n else:\n tangent_sample_at_pt = tangent_sample\n mean = north_pole\n sample = metric.exp(tangent_sample_at_pt, mean)\n return sample[0] if (n_samples == 1) else sample\n\n\nclass HypersphereMetric(RiemannianMetric):\n \"\"\"Class for the Hypersphere Metric.\n\n Parameters\n ----------\n dim : int\n Dimension of the hypersphere.\n \"\"\"\n\n def __init__(self, dim):\n super(HypersphereMetric, self).__init__(\n dim=dim, signature=(dim, 0))\n self.embedding_metric = EuclideanMetric(dim + 1)\n self._space = _Hypersphere(dim=dim)\n\n def metric_matrix(self, base_point=None):\n \"\"\"Metric matrix at the tangent space at a base point.\n\n Parameters\n ----------\n base_point : array-like, shape=[..., dim + 1]\n Base point.\n Optional, default: None.\n\n Returns\n -------\n mat : array-like, shape=[..., dim + 1, dim + 1]\n Inner-product matrix.\n \"\"\"\n return gs.eye(self.dim + 1)\n\n def inner_product(self, tangent_vec_a, tangent_vec_b, base_point=None):\n \"\"\"Compute the inner-product of two tangent vectors at a base point.\n\n Parameters\n ----------\n tangent_vec_a : array-like, shape=[..., dim + 1]\n First tangent vector at base point.\n tangent_vec_b : array-like, shape=[..., dim + 1]\n Second tangent vector at base point.\n base_point : array-like, shape=[..., dim + 1], optional\n Point on the hypersphere.\n\n Returns\n -------\n inner_prod : array-like, shape=[...,]\n Inner-product of the two tangent vectors.\n \"\"\"\n inner_prod = self.embedding_metric.inner_product(\n tangent_vec_a, tangent_vec_b, base_point)\n\n return inner_prod\n\n def squared_norm(self, vector, base_point=None):\n \"\"\"Compute the squared norm of a vector.\n\n Squared norm of a vector associated 
with the inner-product\n at the tangent space at a base point.\n\n Parameters\n ----------\n vector : array-like, shape=[..., dim + 1]\n Vector on the tangent space of the hypersphere at base point.\n base_point : array-like, shape=[..., dim + 1], optional\n Point on the hypersphere.\n\n Returns\n -------\n sq_norm : array-like, shape=[..., 1]\n Squared norm of the vector.\n \"\"\"\n sq_norm = self.embedding_metric.squared_norm(vector)\n return sq_norm\n\n def exp(self, tangent_vec, base_point):\n \"\"\"Compute the Riemannian exponential of a tangent vector.\n\n Parameters\n ----------\n tangent_vec : array-like, shape=[..., dim + 1]\n Tangent vector at a base point.\n base_point : array-like, shape=[..., dim + 1]\n Point on the hypersphere.\n\n Returns\n -------\n exp : array-like, shape=[..., dim + 1]\n Point on the hypersphere equal to the Riemannian exponential\n of tangent_vec at the base point.\n \"\"\"\n hypersphere = Hypersphere(dim=self.dim)\n proj_tangent_vec = hypersphere.to_tangent(tangent_vec, base_point)\n norm2 = self.embedding_metric.squared_norm(proj_tangent_vec)\n\n coef_1 = utils.taylor_exp_even_func(\n norm2, utils.cos_close_0, order=4)\n coef_2 = utils.taylor_exp_even_func(\n norm2, utils.sinc_close_0, order=4)\n exp = (gs.einsum('...,...j->...j', coef_1, base_point)\n + gs.einsum('...,...j->...j', coef_2, proj_tangent_vec))\n\n return exp\n\n def log(self, point, base_point, **kwargs):\n \"\"\"Compute the Riemannian logarithm of a point.\n\n Parameters\n ----------\n point : array-like, shape=[..., dim + 1]\n Point on the hypersphere.\n base_point : array-like, shape=[..., dim + 1]\n Point on the hypersphere.\n\n Returns\n -------\n log : array-like, shape=[..., dim + 1]\n Tangent vector at the base point equal to the Riemannian logarithm\n of point at the base point.\n \"\"\"\n inner_prod = self.embedding_metric.inner_product(base_point, point)\n cos_angle = gs.clip(inner_prod, -1., 1.)\n squared_angle = gs.arccos(cos_angle) ** 2\n coef_1_ = utils.taylor_exp_even_func(\n squared_angle, utils.inv_sinc_close_0, order=5)\n coef_2_ = utils.taylor_exp_even_func(\n squared_angle, utils.inv_tanc_close_0, order=5)\n log = (gs.einsum('...,...j->...j', coef_1_, point)\n - gs.einsum('...,...j->...j', coef_2_, base_point))\n\n return log\n\n def dist(self, point_a, point_b):\n \"\"\"Compute the geodesic distance between two points.\n\n Parameters\n ----------\n point_a : array-like, shape=[..., dim + 1]\n First point on the hypersphere.\n point_b : array-like, shape=[..., dim + 1]\n Second point on the hypersphere.\n\n Returns\n -------\n dist : array-like, shape=[..., 1]\n Geodesic distance between the two points.\n \"\"\"\n norm_a = self.embedding_metric.norm(point_a)\n norm_b = self.embedding_metric.norm(point_b)\n inner_prod = self.embedding_metric.inner_product(point_a, point_b)\n\n cos_angle = inner_prod / (norm_a * norm_b)\n cos_angle = gs.clip(cos_angle, -1, 1)\n\n dist = gs.arccos(cos_angle)\n\n return dist\n\n def squared_dist(self, point_a, point_b):\n \"\"\"Squared geodesic distance between two points.\n\n Parameters\n ----------\n point_a : array-like, shape=[..., dim]\n Point on the hypersphere.\n point_b : array-like, shape=[..., dim]\n Point on the hypersphere.\n\n Returns\n -------\n sq_dist : array-like, shape=[...,]\n \"\"\"\n return self.dist(point_a, point_b) ** 2\n\n @staticmethod\n def parallel_transport(tangent_vec_a, tangent_vec_b, base_point):\n r\"\"\"Compute the parallel transport of a tangent vector.\n\n Closed-form solution for the parallel 
transport of a tangent vector a\n along the geodesic defined by :math: `t \\mapsto exp_(base_point)(t*\n tangent_vec_b)`.\n\n Parameters\n ----------\n tangent_vec_a : array-like, shape=[..., dim + 1]\n Tangent vector at base point to be transported.\n tangent_vec_b : array-like, shape=[..., dim + 1]\n Tangent vector at base point, along which the parallel transport\n is computed.\n base_point : array-like, shape=[..., dim + 1]\n Point on the hypersphere.\n\n Returns\n -------\n transported_tangent_vec: array-like, shape=[..., dim + 1]\n Transported tangent vector at `exp_(base_point)(tangent_vec_b)`.\n \"\"\"\n theta = gs.linalg.norm(tangent_vec_b, axis=-1)\n eps = gs.where(theta == 0., 1., theta)\n normalized_b = gs.einsum('...,...i->...i', 1 / eps, tangent_vec_b)\n pb = gs.einsum('...i,...i->...', tangent_vec_a, normalized_b)\n p_orth = tangent_vec_a - gs.einsum('...,...i->...i', pb, normalized_b)\n transported = \\\n - gs.einsum('...,...i->...i', gs.sin(theta) * pb, base_point)\\\n + gs.einsum('...,...i->...i', gs.cos(theta) * pb, normalized_b)\\\n + p_orth\n return transported\n\n def christoffels(self, point, point_type='spherical'):\n \"\"\"Compute the Christoffel symbols at a point.\n\n Only implemented in dimension 2 and for spherical coordinates.\n\n Parameters\n ----------\n point : array-like, shape=[..., dim]\n Point on hypersphere where the Christoffel symbols are computed.\n\n point_type: str, {'spherical', 'intrinsic', 'extrinsic'}\n Coordinates in which to express the Christoffel symbols.\n Optional, default: 'spherical'.\n\n Returns\n -------\n christoffel : array-like, shape=[..., contravariant index, 1st\n covariant index, 2nd covariant index]\n Christoffel symbols at point.\n \"\"\"\n if self.dim != 2 or point_type != 'spherical':\n raise NotImplementedError(\n 'The Christoffel symbols are only implemented'\n ' for spherical coordinates in the 2-sphere')\n\n point = gs.to_ndarray(point, to_ndim=2)\n christoffel = []\n for sample in point:\n gamma_0 = gs.array(\n [[0, 0], [0, - gs.sin(sample[0]) * gs.cos(sample[0])]])\n gamma_1 = gs.array([[0, gs.cos(sample[0]) / gs.sin(sample[0])],\n [gs.cos(sample[0]) / gs.sin(sample[0]), 0]])\n christoffel.append(gs.stack([gamma_0, gamma_1]))\n\n christoffel = gs.stack(christoffel)\n if gs.ndim(christoffel) == 4 and gs.shape(christoffel)[0] == 1:\n christoffel = gs.squeeze(christoffel, axis=0)\n return christoffel\n\n def curvature(\n self, tangent_vec_a, tangent_vec_b, tangent_vec_c,\n base_point):\n r\"\"\"Compute the curvature.\n\n For three tangent vectors at a base point :math: `x,y,z`,\n the curvature is defined by\n :math: `R(x, y)z = \\nabla_{[x,y]}z\n - \\nabla_x\\nabla_y z + \\nabla_y\\nabla_x z`, where :math: `\\nabla`\n is the Levi-Civita connection. In the case of the hypersphere,\n we have the closed formula\n :math: `R(x,y)z = \\langle x, z \\rangle y - \\langle y,z \\rangle x`.\n\n Parameters\n ----------\n tangent_vec_a : array-like, shape=[..., dim]\n Tangent vector at `base_point`.\n tangent_vec_b : array-like, shape=[..., dim]\n Tangent vector at `base_point`.\n tangent_vec_c : array-like, shape=[..., dim]\n Tangent vector at `base_point`.\n base_point : array-like, shape=[..., dim]\n Point on the hypersphere.\n\n Returns\n -------\n curvature : array-like, shape=[..., dim]\n Tangent vector at `base_point`.\n \"\"\"\n inner_ac = self.inner_product(tangent_vec_a, tangent_vec_c)\n inner_bc = self.inner_product(tangent_vec_b, tangent_vec_c)\n first_term = gs.einsum('...,...i->...i', inner_bc, tangent_vec_a)\n second_term = gs.einsum('...,...i->...i', inner_ac, tangent_vec_b)\n return - first_term + second_term\n\n def _normalization_factor_odd_dim(self, variances):\n \"\"\"Compute the normalization factor - odd dimension.\"\"\"\n dim = self.dim\n half_dim = int((dim + 1) / 2)\n area = 2 * gs.pi ** half_dim / math.factorial(half_dim - 1)\n comb = gs.comb(dim - 1, half_dim - 1)\n\n erf_arg = gs.sqrt(variances / 2) * gs.pi\n first_term = area / (2 ** dim - 1) * comb * gs.sqrt(\n gs.pi / (2 * variances)) * gs.erf(erf_arg)\n\n def summand(k):\n exp_arg = - (dim - 1 - 2 * k) ** 2 / 2 / variances\n erf_arg_2 = (gs.pi * variances - (dim - 1 - 2 * k) * 1j) / gs.sqrt(\n 2 * variances)\n sign = (- 1.) ** k\n comb_2 = gs.comb(dim - 1, k)\n return sign * comb_2 * gs.exp(exp_arg) * gs.real(gs.erf(erf_arg_2))\n\n if half_dim > 2:\n sum_term = gs.sum(\n gs.stack([summand(k) for k in range(half_dim - 2)]))\n else:\n sum_term = summand(0)\n coef = area / 2 / erf_arg * gs.pi ** .5 * (- 1.) ** (half_dim - 1)\n\n return first_term + coef / 2 ** (dim - 2) * sum_term\n\n def _normalization_factor_even_dim(self, variances):\n \"\"\"Compute the normalization factor - even dimension.\"\"\"\n dim = self.dim\n half_dim = (dim + 1) / 2\n area = 2 * gs.pi ** half_dim / math.gamma(half_dim)\n\n def summand(k):\n exp_arg = - (dim - 1 - 2 * k) ** 2 / 2 / variances\n erf_arg_1 = (dim - 1 - 2 * k) * 1j / gs.sqrt(2 * variances)\n erf_arg_2 = (gs.pi * variances - (dim - 1 - 2 * k) * 1j) / gs.sqrt(\n 2 * variances)\n sign = (- 1.) ** k\n comb = gs.comb(dim - 1, k)\n erf_terms = gs.imag(gs.erf(erf_arg_2) + gs.erf(erf_arg_1))\n return sign * comb * gs.exp(exp_arg) * erf_terms\n\n half_dim_2 = int((dim - 2) / 2)\n if half_dim_2 > 0:\n sum_term = gs.sum(\n gs.stack([summand(k) for k in range(half_dim_2)]))\n else:\n sum_term = summand(0)\n coef = area * (- 1.)
** half_dim_2 / 2 ** (dim - 2) * gs.sqrt(\n gs.pi / 2 / variances)\n\n return coef * sum_term\n\n def normalization_factor(self, variances):\n \"\"\"Return normalization factor of the Gaussian distribution.\n\n Parameters\n ----------\n variances : array-like, shape=[n,]\n Variance of the distribution.\n\n Returns\n -------\n norm_func : array-like, shape=[n,]\n Normalisation factor for all given variances.\n \"\"\"\n if self.dim % 2 == 0:\n return self._normalization_factor_even_dim(variances)\n return self._normalization_factor_odd_dim(variances)\n\n def norm_factor_gradient(self, variances):\n \"\"\"Compute the normalization factor and its gradient.\n\n Parameters\n ----------\n variances : array-like, shape=[n,]\n Variance of the distribution.\n\n Returns\n -------\n norm_func : array-like, shape=[n,]\n Normalisation factor for all given variances.\n grad : array-like, shape=[n,]\n Gradient of the normalisation factor with respect to the\n variances.\n \"\"\"\n\n def func(var):\n return gs.sum(self.normalization_factor(var))\n\n value, grad = gs.autograd.value_and_grad(func)(variances)\n return value, grad\n\n\nclass Hypersphere(_Hypersphere):\n \"\"\"Class for the n-dimensional hypersphere.\n\n Class for the n-dimensional hypersphere embedded in the\n (n+1)-dimensional Euclidean space.\n\n By default, points are parameterized by their extrinsic\n (n+1)-coordinates.\n\n Parameters\n ----------\n dim : int\n Dimension of the hypersphere.\n \"\"\"\n\n def __init__(self, dim):\n super(Hypersphere, self).__init__(dim)\n self.metric = HypersphereMetric(dim)\n" ]
[ [ "scipy.stats.beta.rvs" ] ]
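The record above looks like geomstats' hypersphere module (the `gs` backend calls and the `Hypersphere`/`HypersphereMetric` classes match that library). A minimal usage sketch of the sampling and metric API defined in the record, assuming geomstats is installed with its default numpy backend and that these classes are importable from `geomstats.geometry.hypersphere` as in that package:

# Sampling and an exp/log round-trip on S^2, using the API shown above.
# Assumption: geomstats (numpy backend) exposes these classes at this path.
import numpy as np
from geomstats.geometry.hypersphere import Hypersphere

sphere = Hypersphere(dim=2)                   # S^2, extrinsic coords in R^3
points = sphere.random_uniform(n_samples=4)   # shape (4, 3), unit-norm rows

north_pole = np.array([0., 0., 1.])           # vMF mean, extrinsic coords
vmf = sphere.random_von_mises_fisher(mu=north_pole, kappa=10, n_samples=4)

metric = sphere.metric                        # HypersphereMetric, set in __init__
tangent = metric.log(points[0], base_point=north_pole)
roundtrip = metric.exp(tangent, base_point=north_pole)
print(np.max(np.abs(roundtrip - points[0])))  # ~0: exp inverts log
print(metric.dist(points[0], points[1]))      # geodesic distance, in [0, pi]

Note that `dist` clips the cosine of the angle into [-1, 1] before `arccos`, so the round-trip check is stable up to floating-point error even for nearly antipodal points.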
ankitshah009/Object_Detection_Tracking
[ "90b0d5a04f87155c2a84b0d51ecb009f757ebf85" ]
[ "obj_detect_tracking.py" ]
[ "# coding=utf-8\n# run script\n\nimport sys, os, argparse\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # suppress the TF pool allocator info logs\n\n# remove all the annoying warnings from tf v1.10 to v1.13\nimport logging\nlogging.getLogger('tensorflow').disabled = True\n\nfrom tqdm import tqdm\nimport numpy as np\nimport tensorflow as tf\n\nimport cv2\n\nfrom models import get_model, resizeImage\nfrom nn import fill_full_mask\n\nimport math, time, json, random, operator\nimport pickle\nimport pycocotools.mask as cocomask\nfrom deep_sort import nn_matching\nfrom deep_sort.detection import Detection\nfrom deep_sort.tracker import Tracker\nfrom application_util import preprocessing\nfrom deep_sort.utils import create_obj_infos, linear_inter_bbox, filter_short_objs\nfrom utils import Dataset, Summary, get_op_tensor_name\n\nfrom class_ids import targetClass2id_new_nopo\n\ntargetClass2id = targetClass2id_new_nopo\n\ntargetid2class = {targetClass2id[one]: one for one in targetClass2id}\nfrom class_ids import coco_obj_class_to_id, coco_obj_id_to_class, coco_obj_to_actev_obj\n\ndef get_args():\n global targetClass2id, targetid2class\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--video_dir\", default=None)\n parser.add_argument(\"--video_lst_file\", default=None, help=\"video_file_path = os.path.join(video_dir, $line)\")\n\n parser.add_argument(\"--out_dir\", default=None, help=\"out_dir/$basename/%%d.json, start from 0 index. This is the object box output. Leave this blank when using tracking to avoid saving the obj class output and save IO time.\")\n\n parser.add_argument(\"--frame_gap\", default=8, type=int)\n\n parser.add_argument(\"--threshold_conf\", default=0.0001, type=float)\n\n parser.add_argument(\"--is_load_from_pb\", action=\"store_true\", help=\"load from a frozen graph\")\n\n # ------ for box feature extraction\n parser.add_argument(\"--get_box_feat\", action=\"store_true\",\n help=\"this will generate (num_box, 256, 7, 7) tensor for each frame\")\n parser.add_argument(\"--box_feat_path\", default=None,\n help=\"output will be out_dir/$basename/%%d.npy, start from 0 index\")\n\n parser.add_argument(\"--version\", type=int, default=4, help=\"model version\")\n parser.add_argument(\"--is_coco_model\", action=\"store_true\",\n help=\"is coco model, will output coco classes instead\")\n\n # ---- gpu params\n parser.add_argument(\"--gpu\", default=1, type=int, help=\"number of gpu\")\n parser.add_argument(\"--gpuid_start\", default=0, type=int, help=\"start of gpu id\")\n parser.add_argument('--im_batch_size', type=int, default=1)\n parser.add_argument(\"--use_all_mem\", action=\"store_true\")\n\n # --- for internal visualization\n parser.add_argument(\"--visualize\", action=\"store_true\")\n parser.add_argument(\"--vis_path\", default=None)\n parser.add_argument(\"--vis_thres\", default=0.7, type=float)\n\n # ----------- model params\n parser.add_argument(\"--num_class\", type=int, default=15, help=\"num categories + 1 background\")\n\n parser.add_argument(\"--model_path\", default=\"/app/object_detection_model\")\n\n parser.add_argument(\"--rpn_batch_size\", type=int, default=256, help=\"num roi per image for RPN training\")\n parser.add_argument(\"--frcnn_batch_size\", type=int, default=512, help=\"num roi per image for fastRCNN training\")\n\n parser.add_argument(\"--rpn_test_post_nms_topk\", type=int, default=1000, help=\"test post nms, input to fast rcnn\")\n\n parser.add_argument(\"--max_size\", type=int, default=1920, 
help=\"maximum size of the longer edge of the resized image\")\n parser.add_argument(\"--short_edge_size\", type=int, default=1080,\n help=\"the shorter edge of the image is resized to this size\")\n\n # ----------- tracking params\n parser.add_argument(\"--get_tracking\", action=\"store_true\",\n help=\"this will generate tracking results for each frame\")\n parser.add_argument(\"--tracking_dir\", default=\"/tmp\",\n help=\"output will be tracking_dir/$videoname/$object/$videoname.txt\")\n parser.add_argument(\"--tracking_objs\", default=\"Person,Vehicle\",\n help=\"Objects to be tracked, default are Person and Vehicle\")\n parser.add_argument(\"--min_confidence\", default=0.85, type=float,\n help=\"Detection confidence threshold. Disregard all detections \"\n \"that have a confidence lower than this value.\")\n parser.add_argument(\"--min_detection_height\", default=0, type=int,\n help=\"Threshold on the detection bounding box height. Detections \"\n \"with height smaller than this value are disregarded\")\n parser.add_argument(\"--nms_max_overlap\", default=0.85, type=float,\n help=\"Non-maxima suppression threshold: Maximum detection overlap.\")\n parser.add_argument(\"--max_cosine_distance\", type=float, default=0.5,\n help=\"Gating threshold for cosine distance metric (object appearance).\")\n parser.add_argument(\"--nn_budget\", type=int, default=5,\n help=\"Maximum size of the appearance descriptors gallery. If None, no budget is enforced.\")\n\n parser.add_argument(\"--bupt_exp\", action=\"store_true\", help=\"activity box experiment\")\n # ---- temporary: for activity detection model\n parser.add_argument(\"--actasobj\", action=\"store_true\")\n parser.add_argument(\"--actmodel_path\", default=\"/app/activity_detection_model\")\n\n parser.add_argument(\"--resnet152\", action=\"store_true\", help=\"\")\n parser.add_argument(\"--resnet50\", action=\"store_true\", help=\"\")\n parser.add_argument(\"--resnet34\", action=\"store_true\", help=\"\")\n parser.add_argument(\"--resnet18\", action=\"store_true\", help=\"\")\n parser.add_argument(\"--use_se\", action=\"store_true\", help=\"use squeeze and excitation in backbone\")\n parser.add_argument(\"--use_frcnn_class_agnostic\", action=\"store_true\", help=\"use class agnostic fc head\")\n parser.add_argument(\"--use_resnext\", action=\"store_true\", help=\"\")\n parser.add_argument(\"--use_att_frcnn_head\", action=\"store_true\",\n help=\"use attention to sum [K, 7, 7, C] feature into [K, C]\")\n\n # ---- COCO model\n parser.add_argument(\"--add_mask\", action=\"store_true\")\n\n # --------------- exp junk\n parser.add_argument(\"--use_dilations\", action=\"store_true\", help=\"use dilations=2 in res5\")\n parser.add_argument(\"--use_deformable\", action=\"store_true\", help=\"use deformable conv\")\n parser.add_argument(\"--add_act\", action=\"store_true\", help=\"add activity model\")\n parser.add_argument(\"--finer_resolution\", action=\"store_true\", help=\"fpn use finer resolution conv\")\n parser.add_argument(\"--fix_fpn_model\", action=\"store_true\",\n help=\"for finetuning an fpn model, whether to fix the lateral and posthoc weights\")\n parser.add_argument(\"--is_cascade_rcnn\", action=\"store_true\", help=\"cascade rcnn on top of fpn\")\n parser.add_argument(\"--add_relation_nn\", action=\"store_true\", help=\"add relation network feature\")\n\n parser.add_argument(\"--test_frame_extraction\", action=\"store_true\")\n parser.add_argument(\"--use_my_naming\", action=\"store_true\")\n\n # for efficient use of COCO model classes\n 
parser.add_argument(\"--use_partial_classes\", action=\"store_true\")\n\n args = parser.parse_args()\n\n if args.use_partial_classes:\n assert args.is_coco_model\n args.partial_classes = [classname for classname in coco_obj_to_actev_obj]\n\n assert args.gpu == args.im_batch_size # one gpu one image\n assert args.gpu == 1, \"Currently only support single-gpu inference\"\n\n if args.is_load_from_pb:\n args.load_from = args.model_path\n\n args.controller = \"/cpu:0\" # parameter server\n\n if args.actasobj:\n from class_ids import targetAct2id\n targetClass2id = targetAct2id\n targetid2class = {targetAct2id[one]: one for one in targetAct2id}\n if args.bupt_exp:\n from class_ids import targetAct2id_bupt\n targetClass2id = targetAct2id_bupt\n targetid2class = {targetAct2id_bupt[one]: one for one in targetAct2id_bupt}\n\n assert len(targetClass2id) == args.num_class, (len(targetClass2id), args.num_class)\n\n\n assert args.version in [2, 3, 4, 5, 6], \"Currently we only have version 2-6 model\"\n\n if args.version == 2:\n pass\n elif args.version == 3:\n args.use_dilations = True\n elif args.version == 4:\n args.use_frcnn_class_agnostic = True\n args.use_dilations = True\n elif args.version == 5:\n args.use_frcnn_class_agnostic = True\n args.use_dilations = True\n elif args.version == 6:\n args.use_frcnn_class_agnostic = True\n args.use_se = True\n\n if args.is_coco_model:\n assert args.version == 2\n targetClass2id = coco_obj_class_to_id\n targetid2class = coco_obj_id_to_class\n args.num_class = 81\n if args.use_partial_classes:\n partial_classes = [\"BG\"] + args.partial_classes\n targetClass2id = {classname: i\n for i, classname in enumerate(partial_classes)}\n targetid2class = {targetClass2id[o]: o for o in targetClass2id}\n\n # --------------- more defaults\n args.is_pack_model = False\n args.diva_class3 = True\n args.diva_class = False\n args.diva_class2 = False\n args.use_small_object_head = False\n args.use_so_score_thres = False\n args.use_so_association = False\n args.use_gn = False\n args.so_person_topk = 10\n args.use_conv_frcnn_head = False\n args.use_cpu_nms = False\n args.use_bg_score = False\n args.freeze_rpn = True\n args.freeze_fastrcnn = True\n args.freeze = 2\n args.small_objects = [\"Prop\", \"Push_Pulled_Object\", \"Prop_plus_Push_Pulled_Object\", \"Bike\"]\n args.no_obj_detect = False\n #args.add_mask = False\n args.is_fpn = True\n # args.new_tensorpack_model = True\n args.mrcnn_head_dim = 256\n args.is_train = False\n\n args.rpn_min_size = 0\n args.rpn_proposal_nms_thres = 0.7\n args.anchor_strides = (4, 8, 16, 32, 64)\n\n args.fpn_resolution_requirement = float(args.anchor_strides[3]) # [3] is 32, since we build FPN with r2,3,4,5?\n\n args.max_size = np.ceil(args.max_size / args.fpn_resolution_requirement) * args.fpn_resolution_requirement\n\n args.fpn_num_channel = 256\n\n args.fpn_frcnn_fc_head_dim = 1024\n\n # ---- all the mask rcnn config\n\n args.resnet_num_block = [3, 4, 23, 3] # resnet 101\n args.use_basic_block = False # for resnet-34 and resnet-18\n if args.resnet152:\n args.resnet_num_block = [3, 8, 36, 3]\n if args.resnet50:\n args.resnet_num_block = [3, 4, 6, 3]\n if args.resnet34:\n args.resnet_num_block = [3, 4, 6, 3]\n args.use_basic_block = True\n if args.resnet18:\n args.resnet_num_block = [2, 2, 2, 2]\n args.use_basic_block = True\n\n args.anchor_stride = 16 # has to be 16 to match the image feature total stride\n args.anchor_sizes = (32, 64, 128, 256, 512)\n\n args.anchor_ratios = (0.5, 
1, 2)\n\n args.num_anchors = len(args.anchor_sizes) * len(args.anchor_ratios)\n # iou thres to determine anchor label\n # args.positive_anchor_thres = 0.7\n # args.negative_anchor_thres = 0.3\n\n # when getting region proposal, avoid getting too large boxes\n args.bbox_decode_clip = np.log(args.max_size / 16.0)\n\n # fastrcnn\n args.fastrcnn_batch_per_im = args.frcnn_batch_size\n args.fastrcnn_bbox_reg_weights = np.array([10, 10, 5, 5], dtype='float32')\n\n args.fastrcnn_fg_thres = 0.5 # iou thres\n # args.fastrcnn_fg_ratio = 0.25 # 1:3 -> pos:neg\n\n # testing\n args.rpn_test_pre_nms_topk = 6000\n\n args.fastrcnn_nms_iou_thres = 0.5\n\n args.result_score_thres = args.threshold_conf\n args.result_per_im = 100\n\n return args\n\n\ndef initialize(config, sess):\n tf.global_variables_initializer().run()\n allvars = tf.global_variables()\n allvars = [var for var in allvars if \"global_step\" not in var.name]\n restore_vars = allvars\n opts = [\"Adam\", \"beta1_power\", \"beta2_power\", \"Adam_1\", \"Adadelta_1\", \"Adadelta\", \"Momentum\"]\n restore_vars = [var for var in restore_vars if var.name.split(\":\")[0].split(\"/\")[-1] not in opts]\n\n saver = tf.train.Saver(restore_vars, max_to_keep=5)\n\n load_from = config.model_path\n ckpt = tf.train.get_checkpoint_state(load_from)\n if ckpt and ckpt.model_checkpoint_path:\n loadpath = ckpt.model_checkpoint_path\n saver.restore(sess, loadpath)\n else:\n if os.path.exists(load_from):\n if load_from.endswith(\".ckpt\"):\n # load_from should be a single .ckpt file\n saver.restore(sess, load_from)\n elif load_from.endswith(\".npz\"):\n # load from dict\n weights = np.load(load_from)\n params = {get_op_tensor_name(n)[1]: v\n for n, v in dict(weights).items()}\n param_names = set(params.keys())\n\n variables = restore_vars\n\n variable_names = set([k.name for k in variables])\n\n intersect = variable_names & param_names\n\n restore_vars = [v for v in variables if v.name in intersect]\n\n with sess.as_default():\n for v in restore_vars:\n vname = v.name\n v.load(params[vname])\n\n not_used = [(one, weights[one].shape)\n for one in weights.keys()\n if get_op_tensor_name(one)[1] not in intersect]\n if not_used:\n print(\"warning, %s/%s in npz not restored: %s\" % (len(weights.keys()) - len(intersect), len(weights.keys()), not_used))\n\n else:\n raise Exception(\"Not recognized model type: %s\" % load_from)\n else:\n raise Exception(\"Model does not exist\")\n\n\n\n# check argument\ndef check_args(args):\n assert args.video_dir is not None\n assert args.video_lst_file is not None\n assert args.frame_gap >= 1\n if args.get_box_feat:\n assert args.box_feat_path is not None\n if not os.path.exists(args.box_feat_path):\n os.makedirs(args.box_feat_path)\n # print(\"cv2 version %s\" % cv2.__version__)\n\n\nif __name__ == \"__main__\":\n args = get_args()\n\n check_args(args)\n\n videolst = [os.path.join(args.video_dir, one.strip()) for one in open(args.video_lst_file).readlines()]\n\n if args.out_dir is not None:\n if not os.path.exists(args.out_dir):\n os.makedirs(args.out_dir)\n\n if args.visualize:\n from viz import draw_boxes\n\n vis_path = args.vis_path\n if not os.path.exists(vis_path):\n os.makedirs(vis_path)\n\n # 1. 
load the object detection model\n model = get_model(args, args.gpuid_start, controller=args.controller)\n\n tfconfig = tf.ConfigProto(allow_soft_placement=True)\n if not args.use_all_mem:\n tfconfig.gpu_options.allow_growth = True\n tfconfig.gpu_options.visible_device_list = \"%s\" % (\n \",\".join([\"%s\" % i for i in range(args.gpuid_start, args.gpuid_start + args.gpu)]))\n\n with tf.Session(config=tfconfig) as sess:\n\n if not args.is_load_from_pb:\n initialize(config=args, sess=sess)\n\n for videofile in tqdm(videolst, ascii=True):\n # 2. read the video file\n vcap = cv2.VideoCapture(videofile)\n if not vcap.isOpened():\n raise Exception(\"cannot open %s\" % videofile)\n\n # initialize tracking module\n if args.get_tracking:\n tracking_objs = args.tracking_objs.split(',')\n tracker_dict = {}\n tracking_results_dict = {}\n tmp_tracking_results_dict = {}\n for tracking_obj in tracking_objs:\n metric = nn_matching.NearestNeighborDistanceMetric(\n \"cosine\", args.max_cosine_distance, args.nn_budget)\n tracker_dict[tracking_obj] = Tracker(metric)\n tracking_results_dict[tracking_obj] = []\n tmp_tracking_results_dict[tracking_obj] = {}\n\n # videoname = os.path.splitext(os.path.basename(videofile))[0]\n videoname = os.path.basename(videofile)\n if args.out_dir is not None: # not saving box json to save time\n video_out_path = os.path.join(args.out_dir, videoname)\n if not os.path.exists(video_out_path):\n os.makedirs(video_out_path)\n\n # for box feature\n if args.get_box_feat:\n feat_out_path = os.path.join(args.box_feat_path, videoname)\n if not os.path.exists(feat_out_path):\n os.makedirs(feat_out_path)\n\n # opencv 2\n if cv2.__version__.split(\".\")[0] == \"2\":\n frame_count = vcap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)\n else:\n # opencv 3/4\n frame_count = vcap.get(cv2.CAP_PROP_FRAME_COUNT)\n\n # 3. read frame one by one\n cur_frame = 0\n vis_count = 0\n frame_stack = []\n while cur_frame < frame_count:\n suc, frame = vcap.read()\n if not suc:\n cur_frame += 1\n tqdm.write(\"warning, %s frame of %s failed\" % (cur_frame, videoname))\n continue\n\n # skip some frame if frame_gap >1\n if cur_frame % args.frame_gap != 0:\n cur_frame += 1\n continue\n\n # 4. 
run detection on the frame stack if there is enough\n\n im = frame.astype(\"float32\")\n\n if args.test_frame_extraction:\n frame_file = os.path.join(video_out_path, \"%d.jpg\" % cur_frame)\n cv2.imwrite(frame_file, im)\n cur_frame += 1\n continue\n\n resized_image = resizeImage(im, args.short_edge_size, args.max_size)\n\n scale = (resized_image.shape[0] * 1.0 / im.shape[0] + resized_image.shape[1] * 1.0 / im.shape[1]) / 2.0\n\n feed_dict = model.get_feed_dict_forward(resized_image)\n\n if args.get_box_feat:\n sess_input = [model.final_boxes, model.final_labels, model.final_probs, model.fpn_box_feat]\n\n final_boxes, final_labels, final_probs, box_feats = sess.run(sess_input, feed_dict=feed_dict)\n assert len(box_feats) == len(final_boxes)\n # save the box feature first\n\n featfile = os.path.join(feat_out_path, \"%d.npy\" % (cur_frame))\n np.save(featfile, box_feats)\n elif args.get_tracking:\n\n if args.add_mask:\n sess_input = [model.final_boxes, model.final_labels, model.final_probs, model.fpn_box_feat, model.final_masks]\n final_boxes, final_labels, final_probs, box_feats, final_masks = sess.run(sess_input, feed_dict=feed_dict)\n else:\n sess_input = [model.final_boxes, model.final_labels, model.final_probs, model.fpn_box_feat]\n final_boxes, final_labels, final_probs, box_feats = sess.run(sess_input, feed_dict=feed_dict)\n\n assert len(box_feats) == len(final_boxes)\n\n for tracking_obj in tracking_objs:\n target_tracking_obs = [tracking_obj]\n detections = create_obj_infos(cur_frame, final_boxes, final_probs, final_labels, box_feats,\n targetid2class, target_tracking_obs, args.min_confidence,\n args.min_detection_height, scale, is_coco_model=args.is_coco_model, coco_to_actev_mapping=coco_obj_to_actev_obj)\n # Run non-maxima suppression.\n boxes = np.array([d.tlwh for d in detections])\n scores = np.array([d.confidence for d in detections])\n indices = preprocessing.non_max_suppression(\n boxes, args.nms_max_overlap, scores)\n detections = [detections[i] for i in indices]\n\n # tracking\n tracker_dict[tracking_obj].predict()\n tracker_dict[tracking_obj].update(detections)\n\n # Store results\n for track in tracker_dict[tracking_obj].tracks:\n if not track.is_confirmed() or track.time_since_update > 1:\n if (not track.is_confirmed()) and track.time_since_update == 0:\n bbox = track.to_tlwh()\n if track.track_id not in tmp_tracking_results_dict[tracking_obj]:\n tmp_tracking_results_dict[tracking_obj][track.track_id] = [[cur_frame, track.track_id,\n bbox[0], bbox[1], bbox[2], bbox[3]]]\n else:\n tmp_tracking_results_dict[tracking_obj][track.track_id].append([cur_frame, track.track_id,\n bbox[0], bbox[1], bbox[2],\n bbox[3]])\n continue\n bbox = track.to_tlwh()\n if track.track_id in tmp_tracking_results_dict[tracking_obj]:\n pred_list = tmp_tracking_results_dict[tracking_obj][track.track_id]\n for pred_data in pred_list:\n tracking_results_dict[tracking_obj].append(pred_data)\n tmp_tracking_results_dict[tracking_obj].pop(track.track_id, None)\n tracking_results_dict[tracking_obj].append([\n cur_frame, track.track_id, bbox[0], bbox[1], bbox[2], bbox[3]])\n\n else:\n if args.add_mask:\n sess_input = [model.final_boxes, model.final_labels, model.final_probs, model.final_masks]\n final_boxes, final_labels, final_probs, final_masks = sess.run(sess_input, feed_dict=feed_dict)\n else:\n sess_input = [model.final_boxes, model.final_labels, model.final_probs]\n final_boxes, final_labels, final_probs = sess.run(sess_input, feed_dict=feed_dict)\n\n if args.out_dir is None:\n cur_frame += 1\n 
continue\n\n # scale back the box to original image size\n final_boxes = final_boxes / scale\n\n if args.add_mask:\n final_masks = [fill_full_mask(box, mask, im.shape[:2]) for box, mask in zip(final_boxes, final_masks)]\n\n # save as json\n pred = []\n\n for j, (box, prob, label) in enumerate(zip(final_boxes, final_probs, final_labels)):\n box[2] -= box[0]\n box[3] -= box[1] # produce x,y,w,h output\n\n cat_id = label\n cat_name = targetid2class[cat_id]\n\n # encode mask\n rle = None\n if args.add_mask:\n final_mask = final_masks[j] # [14, 14]\n rle = cocomask.encode(np.array(final_mask[:, :, None], order=\"F\"))[0]\n rle['counts'] = rle['counts'].decode(\"ascii\")\n\n res = {\n \"category_id\": cat_id,\n \"cat_name\": cat_name, # [0-80]\n \"score\": float(round(prob, 7)),\n \"bbox\": list(map(lambda x: float(round(x, 2)), box)),\n \"segmentation\": rle,\n }\n\n pred.append(res)\n\n # predfile = os.path.join(args.out_dir, \"%s_F_%08d.json\"%(videoname, cur_frame))\n if args.use_my_naming:\n predfile = os.path.join(video_out_path,\n \"%s_F_%08d.json\" % (os.path.splitext(videoname)[0], cur_frame))\n else:\n predfile = os.path.join(video_out_path, \"%d.json\" % (cur_frame))\n\n with open(predfile, \"w\") as f:\n json.dump(pred, f)\n\n # for visualization\n if args.visualize:\n good_ids = [i for i in range(len(final_boxes)) if final_probs[i] >= args.vis_thres]\n final_boxes, final_labels, final_probs = final_boxes[good_ids], final_labels[good_ids], final_probs[good_ids]\n vis_boxes = np.asarray([[box[0], box[1], box[2] + box[0], box[3] + box[1]] for box in final_boxes])\n vis_labels = [\"%s_%.2f\" % (targetid2class[cat_id], prob) for cat_id, prob in\n zip(final_labels, final_probs)]\n newim = draw_boxes(im, vis_boxes, vis_labels, color=np.array([255, 0, 0]), font_scale=0.5,\n thickness=2)\n\n vis_file = os.path.join(vis_path, \"%s_F_%08d.jpg\" % (videoname, vis_count))\n cv2.imwrite(vis_file, newim)\n vis_count += 1\n\n cur_frame += 1\n\n if args.get_tracking:\n for tracking_obj in tracking_objs:\n output_dir = os.path.join(args.tracking_dir, videoname, tracking_obj)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n output_file = os.path.join(output_dir, \"%s.txt\" % os.path.splitext(videoname)[0])\n\n tracking_results = sorted(tracking_results_dict[tracking_obj], key=lambda x: (x[0], x[1]))\n # print(len(tracking_results))\n tracking_data = np.asarray(tracking_results)\n # print(tracking_data.shape)\n tracking_data = linear_inter_bbox(tracking_data, args.frame_gap)\n tracking_data = filter_short_objs(tracking_data)\n tracking_results = tracking_data.tolist()\n with open(output_file, 'w') as fw:\n for row in tracking_results:\n line = '%d,%d,%.2f,%.2f,%.2f,%.2f,1,-1,-1,-1' % (\n row[0], row[1], row[2], row[3], row[4], row[5])\n fw.write(line + '\\n')\n\n if args.test_frame_extraction:\n tqdm.write(\n \"video %s got %s frames, opencv said frame count is %s\" % (videoname, cur_frame, frame_count))\n\n" ]
[ [ "numpy.load", "numpy.save", "numpy.ceil", "tensorflow.global_variables_initializer", "tensorflow.train.get_checkpoint_state", "numpy.asarray", "tensorflow.global_variables", "numpy.log", "tensorflow.train.Saver", "tensorflow.Session", "numpy.array", "tensorflow.ConfigProto" ] ]
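The tracking path of the script above writes one `%d,%d,%.2f,%.2f,%.2f,%.2f,1,-1,-1,-1` line per box, a MOT-style `frame,track_id,x,y,w,h,...` format, into `tracking_dir/$videoname/$object/$videoname_noext.txt`. A small sketch of reading such a file back into per-track box lists; the `tracking_out` prefix and the `video1.mp4` / `Person` names are hypothetical example values:

# Parse the MOT-style tracking lines written at the end of the main loop.
# Path components below are made-up; only the line format comes from the script.
import csv
from collections import defaultdict

tracks = defaultdict(list)  # track_id -> [(frame, x, y, w, h), ...]
with open("tracking_out/video1.mp4/Person/video1.txt") as f:
    for row in csv.reader(f):
        frame, track_id = int(row[0]), int(row[1])
        x, y, w, h = (float(v) for v in row[2:6])
        tracks[track_id].append((frame, x, y, w, h))

for track_id, boxes in sorted(tracks.items()):
    boxes.sort()  # rows were already written sorted by (frame, track_id)
    print("track %d: %d boxes, frames %d..%d"
          % (track_id, len(boxes), boxes[0][0], boxes[-1][0]))

Since the script only runs detection every `frame_gap` frames and then calls `linear_inter_bbox`, consecutive frames inside a track are interpolated rather than independently detected.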