repo_name: string (6–130 chars)
hexsha: list
file_path: list
code: list
apis: list
caudaz/RoboND-Perception-Project
[ "f676c0613331d5ac53986e0dfeea002cf320a89a" ]
[ "sensor_stick/scripts/capture_features.py" ]
[ "#!/usr/bin/env python\nimport numpy as np\nimport pickle\nimport rospy\n\nfrom sensor_stick.pcl_helper import *\nfrom sensor_stick.training_helper import spawn_model\nfrom sensor_stick.training_helper import delete_model\nfrom sensor_stick.training_helper import initial_setup\nfrom sensor_stick.training_helper import capture_sample\nfrom sensor_stick.features import compute_color_histograms\nfrom sensor_stick.features import compute_normal_histograms\nfrom sensor_stick.srv import GetNormals\nfrom geometry_msgs.msg import Pose\nfrom sensor_msgs.msg import PointCloud2\n\n\ndef get_normals(cloud):\n get_normals_prox = rospy.ServiceProxy('/feature_extractor/get_normals', GetNormals)\n return get_normals_prox(cloud).cluster\n\n\nif __name__ == '__main__':\n rospy.init_node('capture_node')\n\n models = [\\\n 'biscuits',\n 'soap',\n 'soap2',\n 'book',\n 'glue',\n 'sticky_notes',\n 'snacks',\n 'eraser']\n\n # Disable gravity and delete the ground plane\n initial_setup()\n labeled_features = []\n\n for model_name in models:\n spawn_model(model_name)\n\n for i in range(15):\n # make five attempts to get a valid a point cloud then give up\n sample_was_good = False\n try_count = 0\n while not sample_was_good and try_count < 5:\n sample_cloud = capture_sample()\n sample_cloud_arr = ros_to_pcl(sample_cloud).to_array()\n\n # Check for invalid clouds.\n if sample_cloud_arr.shape[0] == 0:\n print('Invalid cloud detected')\n try_count += 1\n else:\n sample_was_good = True\n\n # Extract histogram features\n chists = compute_color_histograms(sample_cloud, using_hsv=True)\n normals = get_normals(sample_cloud)\n nhists = compute_normal_histograms(normals)\n feature = np.concatenate((chists, nhists))\n labeled_features.append([feature, model_name])\n\n delete_model()\n\n\n pickle.dump(labeled_features, open('training_set.sav', 'wb'))\n\n" ]
[ [ "numpy.concatenate" ] ]
anatoliy-kuznetsov/ExtractionScore
[ "f98721eba382abac379351a03dd5ab96b0fd537b" ]
[ "draw.py" ]
[ "import rdkit.Chem as Chem\nimport rdkit.Chem.AllChem as AllChem\nimport rdkit.Chem.Draw as Draw\nfrom PIL import Image, ImageOps\nfrom collections import defaultdict\n# from rdkit.Chem.Draw.cairoCanvas import Canvas\nimport os\nimport numpy as np\nimport re\n'''\nMany of these functions are taken from RDKit.\n'''\n\n\ndef mols_from_smiles_list(all_smiles):\n '''Given a list of smiles strings, this function creates rdkit\n molecules'''\n mols = []\n for smiles in all_smiles:\n if not smiles:\n continue\n mols.append(Chem.MolFromSmiles(smiles))\n return mols\n\n\ndef defaultDrawOptions():\n '''This function returns an RDKit drawing options object with \n default drawing options.'''\n\n opts = Draw.DrawingOptions()\n opts.elemDict = defaultdict(lambda: (0,0,0)) # all atoms are black\n opts.noCarbonSymbols = True\n opts.bondLineWidth = 3\n opts.selectColor = (0, 0, 0)\n opts.wedgeBonds = True\n return opts\n\n\ndef StripAlphaFromImage(img):\n '''This function takes an RGBA PIL image and returns an RGB image'''\n\n if len(img.split()) == 3:\n return img\n return Image.merge('RGB', img.split()[:3])\n\n\ndef MolToImage(mol, max_size=(1000, 1000), kekulize=True, options=None,\n canvas=None, **kwargs):\n '''Wrapper for RDKit's MolToImage. If mol == None, an arrow is drawn'''\n\n if not options:\n options = defaultDrawOptions()\n if mol == '->':\n subImgSize = (160, 160)\n img, canvas = Draw._createCanvas(subImgSize)\n p0 = (10, subImgSize[1]//2)\n p1 = (subImgSize[0]-10, subImgSize[1]//2)\n p3 = (subImgSize[0]-20, subImgSize[1]//2-10)\n p4 = (subImgSize[0]-20, subImgSize[1]//2+10)\n canvas.addCanvasLine(p0, p1, lineWidth=2, color=(0, 0, 0))\n canvas.addCanvasLine(p3, p1, lineWidth=2, color=(0, 0, 0))\n canvas.addCanvasLine(p4, p1, lineWidth=2, color=(0, 0, 0))\n if hasattr(canvas, 'flush'):\n canvas.flush()\n else:\n canvas.save()\n return img\n elif mol is not None:\n return Draw.MolToImage(mol, size=max_size, kekulize=kekulize, options=options,\n canvas=canvas, **kwargs)\n else: # retro arrow or error\n subImgSize = (160, 160)\n (a, b) = subImgSize\n img, canvas = Draw._createCanvas(subImgSize)\n canvas.addCanvasLine((10, b//2-7), (a-17, b//2-7),\n lineWidth=2, color=(0, 0, 0))\n canvas.addCanvasLine((10, b//2+7), (a-17, b//2+7),\n lineWidth=2, color=(0, 0, 0))\n canvas.addCanvasLine((a-24, b//2-14), (a-10, b//2),\n lineWidth=2, color=(0, 0, 0))\n canvas.addCanvasLine((a-24, b//2+14), (a-10, b//2),\n lineWidth=2, color=(0, 0, 0))\n if hasattr(canvas, 'flush'):\n canvas.flush()\n else:\n canvas.save()\n return img\n\n\ndef TrimImgByWhite(img, padding=0):\n '''This function takes a PIL image, img, and crops it to the minimum rectangle \n based on its whiteness/transparency. 5 pixel padding used automatically.'''\n\n # Convert to array\n as_array = np.array(img) # N x N x (r,g,b,a)\n\n # Set previously-transparent pixels to white\n as_array[as_array[:, :, 2] == 0] = [255, 255, 255]\n\n # Content defined as non-white and non-transparent pixel\n has_content = np.sum(as_array, axis=2, dtype=np.uint32) != 255 * 4\n xs, ys = np.nonzero(has_content)\n\n # Crop down\n x_range = max([min(xs) - 5, 0]), min([max(xs) + 5, as_array.shape[0]])\n y_range = max([min(ys) - 5, 0]), min([max(ys) + 5, as_array.shape[1]])\n as_array_cropped = as_array[\n x_range[0]:x_range[1], y_range[0]:y_range[1], 0:3]\n\n img = Image.fromarray(as_array_cropped, mode='RGB')\n\n return ImageOps.expand(img, border=padding, fill=(255, 255, 255, 0))\n\n\ndef StitchPILsHorizontally(imgs):\n '''This function takes a list of PIL images and concatenates\n them onto a new image horizontally, with each one\n vertically centered.'''\n\n # Create blank image (def: transparent white)\n heights = [img.size[1] for img in imgs]\n height = max(heights)\n widths = [img.size[0] for img in imgs]\n width = sum(widths)\n res = Image.new('RGBA', (width, height), (255, 255, 255, 255))\n\n # Add in sub-images\n for i, img in enumerate(imgs):\n offset_x = sum(widths[:i]) # left to right\n offset_y = (height - heights[i]) // 2\n res.paste(img, (offset_x, offset_y))\n\n return res\n\n\ndef CheckAtomForGeneralization(atom):\n '''Given an RDKit atom, this function determines if that atom's SMARTS \n representation was likely a result of generalization. This assumes that\n the transform string was generated using explicit Hs with aliphatic \n carbons as C, aromatic carbons as c, and non-carbons as #N where N is the \n atomic number of the generalized species.'''\n\n smarts = atom.GetSmarts()\n\n # Check if this was a result of generalization\n # non-carbon atom, generalized\n if '#' in smarts:\n atomSymbol = atom.GetSymbol()\n atom.SetAtomicNum(0)\n atom.SetProp('dummyLabel', '[{}]'.format(atomSymbol))\n atom.UpdatePropertyCache()\n # aliphatic carbon, generalized (all non-generalized use explicit Hs)\n elif '[C:' in smarts and 'H' not in smarts:\n atom.SetAtomicNum(0)\n atom.SetProp('dummyLabel', 'C[al]')\n atom.UpdatePropertyCache()\n elif '[c:' in smarts and 'H' not in smarts:\n atom.SetAtomicNum(0)\n atom.SetProp('dummyLabel', 'C[ar]')\n atom.UpdatePropertyCache()\n\n # Clear atom map number of 0 -> this is a dummy assignment!\n if ':0]' in smarts:\n atom.ClearProp('molAtomMapNumber')\n\n\ndef ReactionToImage(rxn, dummyAtoms=False, kekulize=True, options=None, **kwargs):\n '''Modification of RDKit's ReactionToImage to allow for each molecule \n to have a different drawn size. rxn is an RDKit reaction object\n\n warning: this function adds hydrogens as it sees fit'''\n # Extract mols from reaction\n mols = []\n for i in range(rxn.GetNumReactantTemplates()):\n mol = rxn.GetReactantTemplate(i)\n mol.UpdatePropertyCache(False)\n mols.append(mol)\n if dummyAtoms:\n [CheckAtomForGeneralization(atom) for atom in mol.GetAtoms()]\n\n mols.append(None) # placeholder for arrow\n for j in range(rxn.GetNumProductTemplates()):\n mol = rxn.GetProductTemplate(j)\n mol.UpdatePropertyCache(False)\n mols.append(mol)\n if dummyAtoms:\n [CheckAtomForGeneralization(atom) for atom in mol.GetAtoms()]\n\n # Generate images for all molecules/arrow\n imgs = [TrimImgByWhite(MolToImage(\n mol, kekulize=kekulize, options=options), padding=15) for mol in mols]\n\n # Combine\n return StitchPILsHorizontally(imgs)\n\n\ndef ReactionStringToImage(rxn_string, strip=True, update=True, options=None,\n\t\tretro=False, **kwargs):\n '''This function takes a SMILES rxn_string as input, not an \n RDKit reaction object, and draws it.'''\n\n reactants, agents, products = [mols_from_smiles_list(x) for x in\n [mols.split('.') for mols in rxn_string.split('>')]]\n if None in reactants + products:\n raise ValueError(\n 'Could not parse entirety of reaction: {}'.format(rxn_string))\n\n # Stitch together mols (ignore agents)\n mols = reactants + ['->'] + products\n if update:\n [mol.UpdatePropertyCache(False) for mol in mols if mol is not None and type(mol) != str]\n if strip:\n for mol in mols:\n if mol is not None and type(mol) != str:\n [a.ClearProp('molAtomMapNumber') for a in mol.GetAtoms()]\n\n # Generate images\n # imgs = [TrimImgByWhite(MolToImage(mol, kekulize=True, options=options), padding=15) for mol in mols]\n imgs = [MolToImage(mol, kekulize=True, options=options) for mol in mols]\n\n # Combine\n return StitchPILsHorizontally(imgs)\n\n\ndef TransformStringToImage(transform, retro=True, **kwargs):\n '''Wrapper function meant to take a SMARTS transform and return a PIL image\n of that transform.\n\n TODO: Need to improve generalization visually! Right now it still shows'''\n\n options = defaultDrawOptions()\n options.dotsPerAngstrom = 50\n\n # To generalize un-mapped atoms in transform, need to identify square brackets\n # without colon in the middle (e.g., [C]) and replace with dummy label [C:0] so\n # generalization display works\n old_tags = re.findall('\\[[^:]+\\]', transform)\n for old_tag in old_tags:\n new_tag = old_tag.replace(']', ':0]')\n transform = transform.replace(old_tag, new_tag)\n rxn = AllChem.ReactionFromSmarts(transform)\n return ReactionToImage(rxn, dummyAtoms=True, options=options, retro=retro, **kwargs)\n\n\ndef MolsSmilesToImage(smiles, options=None, **kwargs):\n '''This function takes a SMILES string of one or more molecules\n and generates a combined image for that molecule set.'''\n\n # Generate mols\n mols = mols_from_smiles_list(smiles.split('.'))\n # Generate images\n imgs = [TrimImgByWhite(MolToImage(\n mol, kekulize=True, options=options), padding=15) for mol in mols]\n # Combine\n return StitchPILsHorizontally(imgs)\n\n\nif __name__ == '__main__':\n\n # Simple test cases\n rxn_string = '[Na+].[CH3:2][C:3](=[O:5])[O-].[CH3:6][c:7]1[cH:12][cH:11][cH:10][cH:9][cH:8]1>>CN3[C@H]1CC[C@@H]3C[C@@H](C1)OC(=O)C(CO)c2ccccc2.[c:7]1([CH3:6])[c:12]([C:3]([c:2]2[cH:11][cH:12][cH:7][cH:8][c:9]2[CH3:10])=[O:5])[cH:11][cH:10][cH:9][cH:8]1'\n rxn = AllChem.ReactionFromSmarts(rxn_string)\n rxn_image = ReactionToImage(rxn)\n rxn_image.save('test_rxn.png')\n rxn_image_string = ReactionStringToImage(rxn_string, strip=True)\n rxn_image_string.save('test_rxn_string.png')\n tform = '([O;H0:3]=[C;H0:4](-[C:5])-[NH:2]-[C:1])>>([C:1]-[NH2:2]).([OH:3]-[C;H0:4](=O)-[C:5])'\n img = TransformStringToImage(tform)\n img.save('transform.png')\n" ]
[ [ "numpy.array", "numpy.sum", "numpy.nonzero" ] ]
BenjSta/spaudiopy
[ "c54dd32386812084926bb9e284650efc0f38d7d9" ]
[ "examples/SH.py" ]
[ "# ---\n# jupyter:\n# jupytext:\n# comment_magics: 'false'\n# formats: ipynb,py:percent\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.1'\n# jupytext_version: 0.8.4\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# language_info:\n# codemirror_mode:\n# name: ipython\n# version: 3\n# file_extension: .py\n# mimetype: text/x-python\n# name: python\n# nbconvert_exporter: python\n# pygments_lexer: ipython3\n# version: 3.6.8\n# ---\n\n# %%\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom spaudiopy import utils, IO, sph, plots, grids\n\n\n# %% Spherical Harmonics Order\nN = 1\n\n# %%\n# Grids\nt = grids.load_t_design(2)\nt_az, t_colat, t_r = utils.cart2sph(t[:, 0], t[:, 1], t[:, 2])\nazi = t_az\ncolat = t_colat\n\n# %%\n# First, check condition number to which SH order the SHT is stable\n# Tetraeder is not suited for SHT N>1:\nsph.check_cond_sht(3, t_az, t_colat, 'real')\n\n# %% Real and Complex SHs\nY_nm_c = sph.sh_matrix(N, azi, colat, 'complex')\nY_nm_r = sph.sh_matrix(N, azi, colat, 'real')\n\n# %%\n# Look at some SHTs\nsig = np.array([1, 1, 1, 1])\nsig_t = np.c_[np.eye(4), np.eye(4)] # second axis s(t)\nsig_B = sph.soundfield_to_b(sig)\nF_nm = sph.sht(sig, N, azi, colat, SH_type='real')\nF_nm_t = sph.sht(sig_t, N, azi, colat, SH_type='real')\n\n# %%\nF_nm_c = sph.sht(sig, N, azi, colat, SH_type='complex')\nF_nm_c_t = sph.sht(sig_t, N, azi, colat, SH_type='complex')\n\n# %%\nF_nm_lst = sph.sht_lstsq(sig, N, azi, colat, SH_type='complex')\nF_nm_lst_t = sph.sht_lstsq(sig_t, N, azi, colat, SH_type='real')\n\n# %% inverse SHT\nf = sph.inverse_sht(F_nm, azi, colat, SH_type='real')\nf_c_t = sph.inverse_sht(F_nm_c_t, azi, colat, SH_type='complex')\nf_lst_t = sph.inverse_sht(F_nm_lst_t, azi, colat, SH_type='real')\n\n# %% Checks\nprint(\"Single dimension signal:\")\nutils.test_diff(sig, f)\nprint(\"Complex valued SHT of time signal:\")\nutils.test_diff(sig_t, f_c_t)\nprint(\"Real valued least-squares SHT of time signal:\")\nutils.test_diff(sig_t, f_lst_t)\n\n# %%\n# Check B format conversion\nB_sig = np.array([1, 1, 0, 0]) # W, X, Y, Z\nF_B = sph.b_to_sh(B_sig)\nB_sig_re = sph.sh_to_b(F_B)\nprint(\"B format to SH conversion:\")\nutils.test_diff(B_sig, B_sig_re)\n\n# %%\n# Some plots\nplots.sh_coeffs(F_nm, title=\"Ambeo: all channels max\")\nplots.sh_coeffs(F_B, title=\"b_to_sh: W+X\")\n\n# %%\nplots.sh_coeffs_subplot([np.array([1, 0, 0, 0]),\n np.array([0, 1, 0, 0]),\n np.array([0, 0, 1, 0]),\n np.array([0, 0, 0, 1])],\n titles=[\"0\", \"1, -1\", \"1, 0\", \"1, 1\"])\n\n# %%\nplots.sh_coeffs(np.sqrt(2) * np.array([1, 0, 0, -1]), 'complex',\n title=\"sqrt(2) * [1, 0, 0, -1] complex coeffs\")\n\n# %%\n# Look at simple B format generator\nsig2 = np.ones(8)\nB = sph.src_to_B(sig2, np.pi / 4, np.pi / 4)\nB_nm = sph.b_to_sh(B)\nplots.sh_coeffs(B_nm[:, 0], title=\"Sig 2 B\")\n\n# %%\nplt.show()\n" ]
[ [ "numpy.sqrt", "numpy.eye", "numpy.ones", "numpy.array", "matplotlib.pyplot.show" ] ]
goyalrinki/ivy
[ "0aa999073709e1d9f4471b9adc7f528a2488041b" ]
[ "ivy/functional/backends/torch/core/linear_algebra.py" ]
[ "\"\"\"\nCollection of PyTorch linear algebra functions, wrapped to fit Ivy syntax and signature.\n\"\"\"\n\n# global\nimport torch as _torch\nfrom typing import List\nimport ivy as _ivy\nfrom typing import Union, Tuple\n\n\n\n\ndef matrix_norm(x, p=2, axes=None, keepdims=False):\n axes = [-2, -1] if axes is None else axes\n if isinstance(axes, int):\n raise Exception('if specified, axes must be a length-2 sequence of ints,'\n 'but found {} of type {}'.format(axes, type(axes)))\n ret = _torch.linalg.matrix_norm(x, ord=p, dim=axes, keepdim=keepdims)\n if ret.shape == ():\n return _torch.unsqueeze(ret, 0)\n return ret\n\n\ndef inv(x):\n return _torch.inverse(x)\n\n\ndef pinv(x):\n return _torch.pinverse(x)\n\n\ndef cholesky(x):\n return _torch.linalg.cholesky(x)\n\n\ndef vector_to_skew_symmetric_matrix(vector):\n batch_shape = list(vector.shape[:-1])\n # BS x 3 x 1\n vector_expanded = _torch.unsqueeze(vector, -1)\n # BS x 1 x 1\n a1s = vector_expanded[..., 0:1, :]\n a2s = vector_expanded[..., 1:2, :]\n a3s = vector_expanded[..., 2:3, :]\n # BS x 1 x 1\n zs = _torch.zeros(batch_shape + [1, 1], device=vector.device)\n # BS x 1 x 3\n row1 = _torch.cat((zs, -a3s, a2s), -1)\n row2 = _torch.cat((a3s, zs, -a1s), -1)\n row3 = _torch.cat((-a2s, a1s, zs), -1)\n # BS x 3 x 3\n return _torch.cat((row1, row2, row3), -2)\n\ndef qr(x, mode):\n return _torch.linalg.qr(x, mode=str(mode)) # str(mode) is required for JIT type inference\n" ]
[ [ "torch.linalg.cholesky", "torch.zeros", "torch.cat", "torch.pinverse", "torch.unsqueeze", "torch.inverse", "torch.linalg.matrix_norm" ] ]
berndverst/Hackathon-CaptureImageForFaceDetection
[ "723ead3cd86ae527c27b645ccb36023645e51d7f" ]
[ "app.py" ]
[ "import os, io, base64\nfrom flask import Flask, render_template, request, jsonify\n\nimport cv2\nimport tensorflow as tf\nfrom PIL import Image\nimport numpy as np\n\n# Load the TensorFlow model exported from Azure Cognitive Services Custom Vision\n\nCWD = os.getcwd()\n\nMODELFILE = CWD + '/model/model.pb'\nLABELFILE = CWD + '/model/labels.txt'\n\nOUTPUT_LAYER = 'loss:0'\nINPUT_NODE = 'Placeholder:0'\n\nGRAPH_DEF = None\n\nwith tf.compat.v1.gfile.FastGFile(MODELFILE, 'rb') as f:\n GRAPH_DEF = tf.compat.v1.GraphDef()\n GRAPH_DEF.ParseFromString(f.read())\n tf.compat.v1.import_graph_def(GRAPH_DEF, name='')\n\nLABELS = [line.rstrip() for line in tf.compat.v1.gfile.GFile(LABELFILE)]\n\n\n# Initialize the web application\n\napp = Flask(__name__)\n\n# The root route, returns the rendered 'home.html' template page\[email protected]('/')\ndef home():\n page_data = {}\n return render_template('home.html', page_data = page_data)\n\n# Our custom API endpoint where we will receive images\[email protected]('/process_image', methods=['POST'])\ndef check_results():\n\n # Get the JSON passed to the request and extract the image\n body = request.get_json()\n image_bytes = base64.b64decode(body['image_base64'].split(',')[1])\n image = io.BytesIO(image_bytes)\n\n tf.compat.v1.reset_default_graph()\n tf.compat.v1.import_graph_def(GRAPH_DEF, name='')\n\n # Convert the image to the appropriate format and size for our tensorflow model\n augmented_image = prepare_image(image)\n\n with tf.compat.v1.Session() as sess:\n prob_tensor = sess.graph.get_tensor_by_name(OUTPUT_LAYER)\n predictions, = sess.run(prob_tensor, {INPUT_NODE: augmented_image})\n\n # Get the highest probability label\n highest_probability_index = np.argmax(predictions)\n probability = predictions[highest_probability_index]\n \n return jsonify({'predicted': str(LABELS[highest_probability_index]),\n 'probability': str(probability),\n 'opponent': LABELS[np.random.randint(5)]})\n\n\ndef convert_to_opencv(image):\n # RGB -> BGR conversion is performed as well.\n image = image.convert('RGB')\n r,g,b = np.array(image).T\n opencv_image = np.array([b,g,r]).transpose()\n return opencv_image\n\ndef crop_center(img,cropx,cropy):\n h, w = img.shape[:2]\n startx = w//2-(cropx//2)\n starty = h//2-(cropy//2)\n return img[starty:starty+cropy, startx:startx+cropx]\n\ndef resize_to_256_square(image):\n h, w = image.shape[:2]\n return cv2.resize(image, (256, 256), interpolation = cv2.INTER_LINEAR)\n\ndef prepare_image(image):\n image = Image.open(image)\n image = convert_to_opencv(image)\n\n # We next get the largest center square\n h, w = image.shape[:2]\n min_dim = min(w,h)\n max_square_image = crop_center(image, min_dim, min_dim)\n\n # Resize that square down to 256x256\n augmented_image = resize_to_256_square(max_square_image)\n\n # Get the input size of the model\n with tf.compat.v1.Session() as sess:\n input_tensor_shape = sess.graph.get_tensor_by_name(INPUT_NODE).shape.as_list()\n network_input_size = input_tensor_shape[1]\n\n # Crop the center for the specified network_input_Size\n augmented_image = crop_center(augmented_image, network_input_size, network_input_size)\n\n # Need to introduce an additional tensor dimension\n augmented_image = np.expand_dims(augmented_image, axis=0)\n\n return augmented_image" ]
[ [ "numpy.expand_dims", "tensorflow.compat.v1.import_graph_def", "tensorflow.compat.v1.gfile.FastGFile", "tensorflow.compat.v1.Session", "numpy.argmax", "tensorflow.compat.v1.gfile.GFile", "tensorflow.compat.v1.GraphDef", "numpy.array", "tensorflow.compat.v1.reset_default_graph", "numpy.random.randint" ] ]
XWan2021/yaglm
[ "a0454427a04dc6be37a15d7031e0d77dbb21538e" ]
[ "yaglm/infer/Inferencer.py" ]
[ "from numbers import Number\nfrom pandas.core.dtypes.inference import is_array_like\nfrom copy import deepcopy\nimport numpy as np\n\nfrom yaglm.config.base import Config\nfrom yaglm.autoassign import autoassign\nfrom yaglm.config.penalty import Lasso\nfrom yaglm.infer.dof import est_dof_support\nfrom yaglm.utils import is_fitted\nfrom yaglm.config.loss import get_loss_config\n\n\nclass Inferencer(Config):\n \"\"\"\n An object that runs statistical inference routines for penalized GLMs. This includes estimating:\n\n - GLM exponential family scale parameter (if there is one)\n - degrees of freedom of the estimated coefficient.\n\n Parameters\n ----------\n dof: str\n Method to estimate the number of degrees of freedom.\n\n scale: None, float, array-like, str\n Method to estimate the GLM scale parameter (e.g. linear regression noise standard deviation) if one is required. The scale parameter(s) can be manually set by providing a float or array-like\n\n Attributes\n ----------\n dof_: int, None\n The estimated degrees of freedom. If no dof estimation procedure is available this is None.\n\n\n scale_: None, float, array-like\n The estimated scale parameter. Only for GLMs that have scale parameters. If no scale estimation procedure is available this is None.\n\n scale_est_: ScaleEstimator\n The estimator object used to estimate the GLM scale parameter, if one was used.\n \"\"\"\n\n @autoassign\n def __init__(self, dof='support', scale=None): pass\n\n def pre_fit(self, estimator, X, y, sample_weight=None):\n \"\"\"\n Runs inference procedures before fitting the estimator e.g. estimating the scale parameter.\n\n Parameters\n ----------\n estimator: Estimator\n The fit estimator object we want to run inference on.\n\n X: array-like, shape (n_samples, n_features)\n The training covariate data.\n\n y: array-like, shape (n_samples, ) or (n_samples, n_responses)\n The training response data.\n\n sample_weight: array-like, shape (n_samples, )\n The training sample weights.\n\n Output\n ------\n self\n \"\"\"\n self.X_shape_ = X.shape\n\n ################################\n # estimate GLM scale parameter #\n ################################\n\n loss = get_loss_config(estimator.loss)\n if loss.has_scale:\n # TODO: we need to pass the loss to the scale estimator\n # e.g. even just to validate we have the right scale estimator\n\n # if a float or array was provided use this value\n if isinstance(self.scale, Number):\n self.scale_ = deepcopy(self.scale)\n elif is_array_like(self.scale):\n self.scale_ = np.array(self.scale).reshape(-1)\n\n else:\n # TODO: do we want a copy here?\n scale_est = self.scale\n\n # fit the scale estimator if we have not already fit it\n if not is_fitted(scale_est):\n scale_est.fit(X=X, y=y,\n sample_weight=sample_weight)\n\n self.scale_ = scale_est.scale_\n\n def after_fit(self, estimator, X, y, sample_weight=None):\n \"\"\"\n Runs inference procedures after fitting the estimator e.g. estimating the degrees of freedom.\n\n Parameters\n ----------\n estimator: Estimator\n The fit estimator object we want to run inference on.\n\n X: array-like, shape (n_samples, n_features)\n The training covariate data.\n\n y: array-like, shape (n_samples, ) or (n_samples, n_responses)\n The training response data.\n\n sample_weight: array-like, shape (n_samples, )\n The training sample weights.\n\n Output\n ------\n self\n \"\"\"\n # TODO-THINK-THROUGH: Do we want all inference procedures to use the raw data? Should they sometimes use the processed data?\n\n zero_tol = 1e-6\n\n #########################################\n # estimate number of degrees of freedom #\n #########################################\n if self.dof == 'support':\n\n if isinstance(estimator.fit_penalty_, Lasso):\n\n self.dof_ = est_dof_support(coef=estimator.coef_,\n intercept=estimator.intercept_,\n transform=None,\n zero_tol=zero_tol)\n\n else:\n # we don't currently support estimating the DoF for this model\n self.dof_ = None\n\n elif isinstance(self.dof, Number):\n # user provided DOF value\n self.dof_ = deepcopy(self.dof)\n\n else:\n raise NotImplementedError(\"We don't currently support dof={}\".\n format(self.dof))\n return self\n" ]
[ [ "numpy.array", "pandas.core.dtypes.inference.is_array_like" ] ]
prbronze/Kaggle
[ "aeba51432df18aae5ba521343f584b8b98db3955" ]
[ "toxic_comment_challenge/src/visualize/plot_learning_curve.py" ]
[ "from sklearn.model_selection import learning_curve\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef plot_curve(estimator, X, yt):\n \"\"\"Plot learning curves for each label of a multilabel problem.\n \n Parameters:\n -----------\n estimator : sklearn-type classifier\n Estimator used to as classifier.\n \n X : DataFrame/Matrix\n Features used to fit the estimator.\n \n yt : DataFrame\n DataFrame of multiple labels.\n \"\"\"\n \n for col in yt.columns:\n y = yt[col]\n _, axes = plt.subplots(1, 1, figsize=(5, 5))\n \n axes.set_title(f'{col}')\n\n axes.set_xlabel(\"Training examples\")\n axes.set_ylabel(\"Score\")\n\n train_sizes, train_scores, test_scores = \\\n learning_curve(estimator,X,y,\n train_sizes=np.linspace(.1, 1.0, 5),\n cv=5,\n n_jobs=4,\n scoring='roc_auc') # uses stratified as default\n\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n\n axes.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n axes.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1,\n color=\"g\")\n axes.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n axes.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n \n axes.legend(loc=\"best\")\n axes.grid(True)\n plt.show()" ]
[ [ "numpy.linspace", "matplotlib.pyplot.subplots", "numpy.std", "numpy.mean", "matplotlib.pyplot.show" ] ]
Maercoli/sqlalchemy-challenge
[ "a07878815c02d4ccb1a55aa5fcb2dd982bebb324" ]
[ "app.py" ]
[ "#Import dependecies\nfrom flask import Flask, jsonify\nimport datetime as dt\n\nimport sqlalchemy\nimport pandas as pd\nimport numpy as np\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func, asc\nfrom sqlalchemy import inspect\n\n#SQLAlchemy\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\nBase = automap_base()\nBase.prepare(engine, reflect=True)\nsession = Session(engine)\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\n\n# Flask Setup\napp = Flask(__name__)\n\n# Flask Routes\n# Homepage: List all routes that are available\[email protected](\"/\")\ndef home():\n \"\"\"List all available api routes.\"\"\"\n return (\n f\"Welcome to the Climate API!<br/>\"\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/<start><br/>\"\n f\"Note: API (/api/v1.0/) request takes one parameter: Start date (e.g. 2014-05-02)<br/>\"\n f\"/api/v1.0/<start>/<end><br/>\"\n f\"Note: API (/api/v1.0//) request takes two parameter: Start date/ End date (e.g. 2014-05-02/2015-10-10)\"\n )\n\n# Return the JSON representation of your dictionary\[email protected](\"/api/v1.0/precipitation\")\ndef precipitation():\n maxDate = dt.date(2017, 8 ,23)\n year_ago = maxDate - dt.timedelta(days=365)\n\n past_temp = (session.query(Measurement.date, Measurement.prcp)\n .filter(Measurement.date <= maxDate)\n .filter(Measurement.date >= year_ago)\n .order_by(Measurement.date).all())\n \n precip = {date: prcp for date, prcp in past_temp}\n \n return jsonify(precip)\n\n\n# Return a JSON-list of stations from the dataset.\[email protected]('/api/v1.0/stations')\ndef stations():\n\n stations_all = session.query(Station.station).all()\n \n return jsonify(stations_all)\n\n# Return a JSON-list of Temperature Observations from the dataset.\[email protected]('/api/v1.0/tobs') \ndef tobs(): \n maxDate = dt.date(2017, 8 ,23)\n year_ago = maxDate - dt.timedelta(days=365)\n\n lastyear = (session.query(Measurement.tobs)\n .filter(Measurement.station == 'USC00519281')\n .filter(Measurement.date <= maxDate)\n .filter(Measurement.date >= year_ago)\n .order_by(Measurement.tobs).all())\n \n return jsonify(lastyear)\n\n# Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given\[email protected]('/api/v1.0/<start>') \ndef start(start=None):\n\n #start = Measurement.date <= '2010-01-01'\n #end = Measurement.date >= '2017-08-23'\n\n tobs_only = (session.query(Measurement.tobs).filter(Measurement.date.between(start, '2017-08-23')).all())\n \n tobs_df = pd.DataFrame(tobs_only)\n\n tavg = tobs_df[\"tobs\"].mean()\n tmax = tobs_df[\"tobs\"].max()\n tmin = tobs_df[\"tobs\"].min()\n \n return jsonify(tavg, tmax, tmin)\n\n# Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start-end range.\n\[email protected]('/api/v1.0/<start>/<end>') \ndef startend(start=None, end=None):\n\n #start = Measurement.date <= '2010-01-01'\n #end = Measurement.date >= '2017-08-23'\n\n tobs_only = (session.query(Measurement.tobs).filter(Measurement.date.between(start, end)).all())\n \n tobs_df = pd.DataFrame(tobs_only)\n\n tavg = tobs_df[\"tobs\"].mean()\n tmax = tobs_df[\"tobs\"].max()\n tmin = tobs_df[\"tobs\"].min()\n \n return jsonify(tavg, tmax, tmin)\n\nif __name__ == '__main__':\n app.run(debug=True)\n" ]
[ [ "pandas.DataFrame" ] ]
EloDoyard/sentiment-evolution-in-characters-network
[ "94a2b0e9d9c95989c9d07b8f9ad9a0019677a70e" ]
[ "src/evaluation_metrics.py" ]
[ "import sklearn\nfrom sklearn.cluster import KMeans\nfrom sklearn.decomposition import PCA\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nimport pandas as pd\n\n\nground_truth_data_df = pd.DataFrame([ \n('agde', 'Monsieur Agde',13,'M',36930), \n('altamira','Comte Altamira',26,'M',97835),\n('amanda', 'Mme Amanda Binet',17,'F',60381),\n('appert', 'M Appert',8,'M',2363),\n('castanède','Abbé Castanède',19,'M',64861),\n('caylus','Comte Caylus',24,'M',94378),\n('chélan','Abbé Chélan',6,'M',1929),\n('croisenois','Monsieur Croisenois',23,'M',94374),\n('danton','Monsieur Danton',1,'M', 15),\n('derville','Madame Derville',12,'F',13130),\n('falcoz','Monsieur Falcoz',14,'M',45151),\n('fervaques','Madame Fervaques',25,'F',96924),\n('fouqué','Monsieur Fouqué',10,'M',7451),\n('frilair','Monsieur Frilair',15,'M',53833),\n('geronimo','Monsieur Geronimo',16,'M',55797),\n('korasoff','Monsieur Korasoff',27,'M',102772),\n('julien','Monsieur Julien Sorel',3,'M',4751),\n('louise','Madame Louise Rênal',7,'F',45391),\n('maslon','Monsieur Maslon',5,'M',1900),\n('mathilde','Mademoiselle Mathilde Sorel',21,'F',90709),\n('norbert','Monsieur Norbert Mole',20,'M',87123),\n('pirard','Monsieur Pirard',18,'M',62166),\n('rênal','Monsieur de Rênal',2,'M', 605),\n('rênal','Madame Louise Rênal',7,'F', 2214),\n('sorel','Monsieur Julien Sorel',3,'M', 940),\n('tanbeau','Monsieur Tanbeau',22,'M',92323),\n('valenod','Monsieur Valenod',4,'M',1724),\n('élisa','Mademoiselle Élisa',11,'F',12267),\n('mole', 'Mademoiselle Mathilde Sorel', 21,'F',90768),\n('mole', 'Monsieur de la Mole',9,'M',2610)],\ncolumns=['name', 'entity','entity_ID', 'gender','first_appearance' ])\n\ndef get_clustering_metrics(embeddings, embeddings_type):\n '''Given embeddings, and their ground truth data type, computes several clustering performance\n metrics. The right `ground_truth_data_df`, `textually_close_ent_ground_truth_df` or \n `lax_ent_ground_truth_df` should have been loaded into memory before calling this function.\n\n Parameters\n ----------\n embeddings : dictionary\n The dictionary containing each entity and their associated embedding vector\n embeddings_type : str\n The matching ground truth data type for the given embeddings (either 'first_version',\n 'textually_close' or 'lax')\n\n Returns\n -------\n same_entityness : list\n A list containing the performance metrics with regards to the 'same_entityness' axis\n gender : list\n A list containing the performance metrics with regards to the 'gender' axis\n first_appearance : list\n A list containing the performance metrics with regards to the 'first_appearance' axis\n '''\n \n # SAME ENTITY-NESS\n same_entityness = []\n \n mask_embs_entity = [(k, \n embeddings[k], \n ground_truth_data_df[ground_truth_data_df['name'] == k]['entity_ID'].values[0]) \n for k in embeddings \n if k.lower() in ground_truth_data_df['name'].tolist()]\n \n tmp_df = pd.DataFrame(mask_embs_entity)\n same_entityness.append(sklearn.metrics.silhouette_score(np.array(tmp_df[1].tolist()), \n np.array(tmp_df[2]), \n metric='euclidean', \n random_state=0))\n \n same_entityness.append(sklearn.metrics.calinski_harabasz_score(np.array(tmp_df[1].tolist()), \n np.array(tmp_df[2])))\n \n same_entityness.append(sklearn.metrics.davies_bouldin_score(np.array(tmp_df[1].tolist()), \n np.array(tmp_df[2])))\n \n tmp_df = pd.DataFrame(mask_embs_entity)\n entityness_matrix = np.array([np.array(emb) for emb in tmp_df[1]])\n k_choice = 21 # obtained by the elbow method\n kmean = KMeans(n_clusters=k_choice, random_state=0).fit(entityness_matrix, )\n predicted_clusters = kmean.predict(np.array([np.array(emb) for emb in tmp_df[1]]))\n \n \n same_entityness.append(sklearn.metrics.rand_score(np.array(tmp_df[2]), predicted_clusters))\n same_entityness.append(sklearn.metrics.adjusted_rand_score(np.array(tmp_df[2]), predicted_clusters))\n same_entityness.append(sklearn.metrics.mutual_info_score(np.array(tmp_df[2]), predicted_clusters))\n same_entityness.append(sklearn.metrics.adjusted_mutual_info_score(np.array(tmp_df[2]), \n predicted_clusters, \n average_method='arithmetic'))\n \n \n # GENDER\n gender = []\n \n mask_embs_gender = [(k, \n embeddings[k], \n ground_truth_data_df[ground_truth_data_df['name'] == k]['gender'].values[0]) \n for k in embeddings \n if k.lower() in ground_truth_data_df['name'].tolist()]\n\n tmp_df = pd.DataFrame(mask_embs_gender)\n gender.append(sklearn.metrics.silhouette_score(np.array(tmp_df[1].tolist()), \n np.array(tmp_df[2] == 'M').astype(int), \n metric='euclidean', \n random_state=0))\n gender.append(sklearn.metrics.calinski_harabasz_score(np.array(tmp_df[1].tolist()), np.array(tmp_df[2])))\n gender.append(sklearn.metrics.davies_bouldin_score(np.array(tmp_df[1].tolist()), np.array(tmp_df[2])))\n \n tmp_df = pd.DataFrame(mask_embs_gender)\n gender_matrix = np.array([np.array(emb) for emb in tmp_df[1]])\n k_choice = 2 # two genders in PG literature (men and women)\n kmean = KMeans(n_clusters=k_choice, random_state=0).fit(gender_matrix)\n predicted_clusters = kmean.predict(np.array([np.array(emb) for emb in tmp_df[1]]))\n \n gender.append(sklearn.metrics.rand_score(np.array(tmp_df[2]), predicted_clusters))\n gender.append(sklearn.metrics.adjusted_rand_score(np.array(tmp_df[2]), predicted_clusters))\n gender.append(sklearn.metrics.mutual_info_score(np.array(tmp_df[2]), predicted_clusters))\n gender.append(sklearn.metrics.adjusted_mutual_info_score(np.array(tmp_df[2]), predicted_clusters, \n average_method='arithmetic'))\n \n # FIRST APPEARANCE\n first_appearance = []\n \n # build distance matrix \n mask_embs_appear = [(k, \n embeddings[k], \n ground_truth_data_df[ground_truth_data_df['name'] == k]['first_appearance'].values[0]) \n for k in embeddings \n if k.lower() in ground_truth_data_df['name'].tolist()]\n \n tmp_df = pd.DataFrame(mask_embs_appear)\n appear_matrix = np.array(tmp_df[2]).reshape(-1, 1)\n\n # k based both on \"vector\" being predict (first appearance in book) and overall clustering\n # using elbow shape\n k_choice = 20\n kmean = KMeans(n_clusters=k_choice, random_state=0).fit(appear_matrix)\n\n first_appearance.append(sklearn.metrics.silhouette_score(np.array(tmp_df[1].tolist()), \n kmean.predict(np.array(tmp_df[2]).reshape(-1,1)), \n metric='euclidean', \n random_state=0))\n \n first_appearance.append(sklearn.metrics.calinski_harabasz_score(np.array(tmp_df[1].tolist()), \n kmean.predict(np.array(tmp_df[2]).reshape(-1,1))))\n \n first_appearance.append(sklearn.metrics.davies_bouldin_score(np.array(tmp_df[1].tolist()), \n kmean.predict(np.array(tmp_df[2]).reshape(-1,1))))\n \n tmp_df = pd.DataFrame(mask_embs_appear)\n ground_truth_based_clusters = kmean.predict(np.array(tmp_df[2]).reshape(-1,1))\n appear_matrix = np.array([np.array(emb) for emb in tmp_df[1]])\n kmean = KMeans(n_clusters=k_choice, random_state=0).fit(appear_matrix)\n predicted_clusters = kmean.predict(np.array([np.array(emb) for emb in tmp_df[1]]))\n \n first_appearance.append(sklearn.metrics.rand_score(ground_truth_based_clusters, predicted_clusters))\n first_appearance.append(sklearn.metrics.adjusted_rand_score(ground_truth_based_clusters, predicted_clusters))\n first_appearance.append(sklearn.metrics.mutual_info_score(ground_truth_based_clusters, predicted_clusters))\n first_appearance.append(sklearn.metrics.adjusted_mutual_info_score(ground_truth_based_clusters, predicted_clusters, \n average_method='arithmetic'))\n \n return same_entityness, gender, first_appearance\n\ndef print_clustering_metrics(embeddings, embeddings_type):\n '''Given embeddings, and their ground truth data type, display in a table several\n clustering performance metrics. The right `ground_truth_data_df`, \n `textually_close_ent_ground_truth_df` or `lax_ent_ground_truth_df` should have been \n loaded into memory before calling this function.\n\n Parameters\n ----------\n embeddings : dictionary\n The dictionary containing each entity and their associated embedding vector\n embeddings_type : str\n The matching ground truth data type for the given embeddings (either 'first_version',\n 'textually_close' or 'lax')\n '''\n \n same_entityness, gender, first_appearance = get_clustering_metrics(embeddings, embeddings_type)\n print('-------------------------------------------------------------------------------')\n print('| | Same Entity-ness | Gender | First Appearance |')\n print('-------------------------------------------------------------------------------')\n print(f'| Silhouette Score | {same_entityness[0]:8.5f} | {gender[0]:8.5f} | {first_appearance[0]:8.5f} |')\n print(f'| Calinski Harabasz Score | {same_entityness[1]:8.5f} | {gender[1]:8.5f} | {first_appearance[1]:8.5f} |')\n print(f'| Davies Bouldin Score | {same_entityness[2]:8.5f} | {gender[2]:8.5f} | {first_appearance[2]:8.5f} |')\n print(f'| Rand Score | {same_entityness[3]:8.5f} | {gender[3]:8.5f} | {first_appearance[3]:8.5f} |')\n print(f'| Adjusted Rand Score | {same_entityness[4]:8.5f} | {gender[4]:8.5f} | {first_appearance[4]:8.5f} |')\n print(f'| Mutual Info Score | {same_entityness[5]:8.5f} | {gender[5]:8.5f} | {first_appearance[5]:8.5f} |')\n print(f'| Adjusted Mutual Info Score | {same_entityness[6]:8.5f} | {gender[6]:8.5f} | {first_appearance[6]:8.5f} |')\n print('-------------------------------------------------------------------------------')" ]
[ [ "sklearn.cluster.KMeans", "sklearn.metrics.mutual_info_score", "sklearn.metrics.adjusted_mutual_info_score", "pandas.DataFrame", "sklearn.metrics.rand_score", "sklearn.metrics.adjusted_rand_score", "numpy.array" ] ]
K-H-Ismail/ConvNeXt
[ "664c758d0616aa9499c5878e1a0eaf7c5000ac61" ]
[ "engine.py" ]
[ "# Copyright (c) Meta Platforms, Inc. and affiliates.\n\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nimport math\nfrom typing import Iterable, Optional\nimport torch\nfrom timm.data import Mixup\nfrom timm.utils import accuracy, ModelEma\n\nimport utils\n\ndef train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,\n data_loader: Iterable, optimizer: torch.optim.Optimizer,\n device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,\n model_ema: Optional[ModelEma] = None, mixup_fn: Optional[Mixup] = None, log_writer=None,\n wandb_logger=None, start_steps=None, lr_schedule_values=None, wd_schedule_values=None,\n num_training_steps_per_epoch=None, update_freq=None, use_amp=False, use_dcls=False, dcls_kernel_size=7):\n model.train(True)\n metric_logger = utils.MetricLogger(delimiter=\" \")\n metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))\n metric_logger.add_meter('min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))\n header = 'Epoch: [{}]'.format(epoch)\n print_freq = 10\n\n optimizer.zero_grad()\n\n for data_iter_step, (samples, targets) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):\n step = data_iter_step // update_freq\n if step >= num_training_steps_per_epoch:\n continue\n it = start_steps + step # global training iteration\n # Update LR & WD for the first acc\n if lr_schedule_values is not None or wd_schedule_values is not None and data_iter_step % update_freq == 0:\n for i, param_group in enumerate(optimizer.param_groups):\n if lr_schedule_values is not None:\n if param_group[\"group_name\"] == \"no_decay_dcls\":\n param_group[\"lr\"] = 0.02*(0.98**epoch) \n else:\n param_group[\"lr\"] = lr_schedule_values[it] * param_group[\"lr_scale\"]\n if wd_schedule_values is not None and param_group[\"weight_decay\"] > 0:\n param_group[\"weight_decay\"] = wd_schedule_values[it]\n\n samples = samples.to(device, non_blocking=True)\n targets = targets.to(device, non_blocking=True)\n\n if mixup_fn is not None:\n samples, targets = mixup_fn(samples, targets)\n\n if use_amp:\n with torch.cuda.amp.autocast():\n output = model(samples)\n loss = criterion(output, targets)\n else: # full precision\n output = model(samples)\n loss = criterion(output, targets)\n \n loss_rep = torch.zeros_like(loss)\n loss_fit = loss.item()\n if use_dcls:\n layer_count = 0\n for name, param in model.named_parameters():\n if name.endswith(\".P\"):\n layer_count += 1\n chout, chin, k_count = param.size(1), param.size(2), param.size(3) \n P = param.view(2, chout * chin, k_count) \n P = P.permute(1,2,0).contiguous() \n distances = torch.cdist(P,P,p=2)\n distances_triu = (1-distances).triu(diagonal=1)\n loss_rep += 2*torch.sum(torch.clamp_min(distances_triu , min=0)) / (k_count*(k_count-1)*chout*chin)\n loss_rep /= layer_count\n\n loss = loss + loss_rep ** 2 if epoch > 20 else loss\n \n loss_value = loss.item() \n\n if not math.isfinite(loss_value): # this could trigger if using AMP\n print(\"Loss is {}, stopping training\".format(loss_value))\n assert math.isfinite(loss_value)\n\n if use_amp:\n # this attribute is added by timm on one optimizer (adahessian)\n is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order\n loss /= update_freq\n grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm,\n parameters=model.parameters(), create_graph=is_second_order,\n update_grad=(data_iter_step + 1) % update_freq == 0)\n if (data_iter_step + 1) % update_freq == 0:\n optimizer.zero_grad()\n if model_ema is not None:\n model_ema.update(model)\n else: # full precision\n loss /= update_freq\n loss.backward()\n if (data_iter_step + 1) % update_freq == 0:\n optimizer.step()\n optimizer.zero_grad()\n if model_ema is not None:\n model_ema.update(model)\n if use_dcls: \n with torch.no_grad():\n lim = dcls_kernel_size // 2\n for i in range(4):\n if hasattr(model, 'module'):\n for j in range(len(model.module.stages[i])):\n model.module.stages[i][j].dwconv.P.clamp_(-lim, lim)\n else:\n for j in range(len(model.stages[i])):\n model.stages[i][j].dwconv.P.clamp_(-lim, lim)\n torch.cuda.synchronize()\n\n if mixup_fn is None:\n class_acc = (output.max(-1)[-1] == targets).float().mean()\n else:\n class_acc = None\n metric_logger.update(loss=loss_value)\n metric_logger.update(loss_fit=loss_fit)\n metric_logger.update(loss_rep=loss_rep)\n metric_logger.update(lr_pos=0.02*(0.98**epoch))\n metric_logger.update(class_acc=class_acc)\n min_lr = 10.\n max_lr = 0.\n for group in optimizer.param_groups:\n min_lr = min(min_lr, group[\"lr\"])\n max_lr = max(max_lr, group[\"lr\"])\n\n metric_logger.update(lr=max_lr)\n metric_logger.update(min_lr=min_lr)\n weight_decay_value = None\n for group in optimizer.param_groups:\n if group[\"weight_decay\"] > 0:\n weight_decay_value = group[\"weight_decay\"]\n metric_logger.update(weight_decay=weight_decay_value)\n if use_amp:\n metric_logger.update(grad_norm=grad_norm)\n\n if log_writer is not None:\n log_writer.update(loss=loss_value, head=\"loss\")\n log_writer.update(class_acc=class_acc, head=\"loss\")\n log_writer.update(lr=max_lr, head=\"opt\")\n log_writer.update(min_lr=min_lr, head=\"opt\")\n log_writer.update(weight_decay=weight_decay_value, head=\"opt\")\n if use_amp:\n log_writer.update(grad_norm=grad_norm, head=\"opt\")\n log_writer.set_step()\n\n if wandb_logger:\n wandb_logger._wandb.log({\n 'Rank-0 Batch Wise/train_loss': loss_value, \n 'Rank-0 Batch Wise/train_max_lr': max_lr,\n 'Rank-0 Batch Wise/train_min_lr': min_lr\n }, commit=False)\n if class_acc:\n wandb_logger._wandb.log({'Rank-0 Batch Wise/train_class_acc': class_acc}, commit=False)\n if use_amp:\n wandb_logger._wandb.log({'Rank-0 Batch Wise/train_grad_norm': grad_norm}, commit=False)\n wandb_logger._wandb.log({'Rank-0 Batch Wise/global_train_step': it})\n \n\n # gather the stats from all processes\n metric_logger.synchronize_between_processes()\n print(\"Averaged stats:\", metric_logger)\n return {k: meter.global_avg for k, meter in metric_logger.meters.items()}\n\[email protected]_grad()\ndef evaluate(data_loader, model, device, use_amp=False):\n criterion = torch.nn.CrossEntropyLoss()\n\n metric_logger = utils.MetricLogger(delimiter=\" \")\n header = 'Test:'\n\n # switch to evaluation mode\n model.eval()\n for batch in metric_logger.log_every(data_loader, 10, header):\n images = batch[0]\n target = batch[-1]\n\n images = images.to(device, non_blocking=True)\n target = target.to(device, non_blocking=True)\n\n # compute output\n if use_amp:\n with torch.cuda.amp.autocast():\n output = model(images)\n loss = criterion(output, target)\n else:\n output = model(images)\n loss = criterion(output, target)\n\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n\n batch_size = images.shape[0]\n metric_logger.update(loss=loss.item())\n metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)\n metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)\n # gather the stats from all processes\n metric_logger.synchronize_between_processes()\n print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'\n .format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))\n\n return {k: meter.global_avg for k, meter in metric_logger.meters.items()}\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.cuda.synchronize", "torch.zeros_like", "torch.cuda.amp.autocast", "torch.cdist", "torch.no_grad", "torch.clamp_min" ] ]
irglbriz/goban_to_sgf
[ "bea415bc897ed901de7bffb154fea288da04699e" ]
[ "src/warp.py" ]
[ "\"\"\"Warps image to square top-down view given corner coordinates\nExpects grayscale image and corners in shape(4,2))\"\"\"\n\nimport numpy as np\nimport cv2\n\nTARGET_RES = 380 # 19x20 - goban is 19x19, patches will be 20x20\n\ndef sort_corners(corners):\n rect = np.zeros((4, 2), dtype=\"float32\")\n # the top-left point will have the smallest sum, whereas\n # the bottom-right point will have the largest sum\n s = np.sum(corners, axis=1)\n rect[0] = corners[np.argmin(s)]\n rect[2] = corners[np.argmax(s)]\n # now, compute the difference between the points, the\n # top-right point will have the smallest difference,\n # whereas the bottom-left will have the largest difference\n diff = np.diff(corners, axis=1)\n rect[1] = corners[np.argmin(diff)]\n rect[3] = corners[np.argmax(diff)]\n return rect\n\ndef warp(img, rect):\n target_corners = np.float32([[0, 0], [TARGET_RES, 0],\n [TARGET_RES, TARGET_RES], [0, TARGET_RES]])\n M = cv2.getPerspectiveTransform(rect, target_corners)\n warped = cv2.warpPerspective(img, M, (TARGET_RES, TARGET_RES))\n return warped\n\ndef top_down_view(img, corners):\n rect = sort_corners(corners)\n result = warp(img, rect)\n return result" ]
[ [ "numpy.argmax", "numpy.argmin", "numpy.diff", "numpy.float32", "numpy.zeros", "numpy.sum" ] ]
NathanKlineInstitute/OEvent
[ "51cc9b4dab7e96160a31b9b451cf9c32bd014271" ]
[ "cyc.py" ]
[ "# Most of the code in cyc.py is based on Cole & Voytek's \"Cycle-by-cycle analysis of neural oscillations\"\n# implementation by Sam Neymotin ([email protected])\nimport scipy.signal as sps\nfrom evstats import getCV2\nfrom numpy import diff\nimport numpy as np\nfrom pylab import *\nfrom collections import OrderedDict\n\ndef index2ms (idx, sampr): return 1e3*idx/sampr\ndef ms2index (ms, sampr): return int(sampr*ms/1e3)\n\n#\ndef getmidts (sig, leftidx, rightidx):\n leftamp = sig[leftidx]\n rightamp = sig[rightidx]\n midamp = (leftamp + rightamp) / 2.0\n idx = leftidx\n if leftamp > rightamp:\n for idx in range(leftidx,rightidx+1,1):\n if sig[idx] <= midamp:\n break\n else:\n for idx in range(rightidx,leftidx,-1):\n if sig[idx] <= midamp:\n break\n return idx\n\n#\ndef calcwidth (n, left, right, sampr):\n lwidth = []\n for i in range(n):\n if i in left and i in right:\n lwidth.append(index2ms(right[i] - left[i],sampr))\n else:\n lwidth.append(0) # invalid\n return lwidth\n\n# \ndef getcyclefeatures (sig, sampr, maxF, hthresh = None):\n d = ms2index(1e3/maxF,sampr) # minimum distance between peaks and troughs (in samples)\n if hthresh is None: hthresh = min(sig) # default is to find any maxima, regardless of amplitude\n peaks_positive, peak_prop = sps.find_peaks(sig, height = hthresh, threshold = None, distance=d)\n peaks_negative, trough_prop = sps.find_peaks(-sig, height = hthresh, threshold = None, distance=d)\n peakh = peak_prop['peak_heights'] # peak heights\n troughh = -trough_prop['peak_heights'] # trough heights\n peakt = [index2ms(x,sampr) for x in peaks_positive] # time of peaks\n trought = [index2ms(x,sampr) for x in peaks_negative] # time of troughs\n interpeakt = diff(peakt) # time between peaks\n intertrought = diff(trought) # time between troughs\n npk,ntrgh = len(peaks_positive),len(peaks_negative)\n decayt,decayh,decayslope = [],[],[] # decay time,height,slope\n riset,riseh,riseslope = [],[],[] # rise time,height,slope\n rdsym = [] # rise-decay symmetry\n amp = [] # amplitude of a cycle (average of two consecutive peak heights)\n midts = []\n pktrghsym = []\n i,j=0,0\n dmidpkleft,dmidpkright = {},{}\n dmidtrghleft,dmidtrghright = {},{}\n while i < npk and j < ntrgh:\n if peakt[i] < trought[j]:\n m = getmidts(sig,peaks_positive[i],peaks_negative[j])\n midts.append( m )\n dmidpkright[i] = dmidtrghleft[j] = m\n i += 1\n else:\n m = getmidts(sig,peaks_negative[j],peaks_positive[i])\n midts.append( m )\n dmidpkleft[i] = dmidtrghright[j] = m \n j += 1\n pkw = calcwidth(npk, dmidpkleft, dmidpkright, sampr)\n trghw = calcwidth(ntrgh, dmidtrghleft, dmidtrghright, sampr)\n i,j=0,0\n while i < npk and j < ntrgh:\n if peakt[i] < trought[j]:\n decayt.append( trought[j] - peakt[i] )\n decayh.append( peakh[i] - troughh[j] )\n decayslope.append( decayh[-1] / decayt[-1] )\n if pkw[i] > 0. and trghw[j] > 0.:\n pktrghsym.append( pkw[i] / (pkw[i] + trghw[j]) )\n i += 1\n else:\n riset.append( peakt[i] - trought[j] )\n riseh.append( peakh[i] - troughh[j] )\n riseslope.append( riseh[-1] / riset[-1] )\n if len(decayt) > 0 and (riset[-1] + decayt[-1]) > 0.:\n rdsym.append(riset[-1] / (riset[-1] + decayt[-1]))\n j += 1\n peakF = [1e3/x for x in interpeakt if x > 0.]\n troughF = [1e3/x for x in intertrought if x > 0.]\n if len(peakh) > 1:\n amp = [(x+y)/2.0 for x,y in zip(peakh,peakh[1:])]\n elif len(peakh) > 0:\n amp = peakh\n return OrderedDict({'peakidx':peaks_positive,'peakh':np.array(peakh),'peakt':np.array(peakt),'interpeakt':np.array(interpeakt),\\\n 'troughidx':peaks_negative,'troughh':np.array(troughh),'trought':np.array(trought),'intertrought':np.array(intertrought),\\\n 'decayt':np.array(decayt), 'decayh':np.array(decayh), 'decayslope':np.array(decayslope),\\\n 'riset':np.array(riset), 'riseh':np.array(riseh), 'riseslope':np.array(riseslope),\\\n 'rdsym':np.array(rdsym),'peakF':peakF,'troughF':troughF,\\\n 'npeak':len(peakh),'ntrough':len(troughh),\\\n 'peakCV2':getCV2(interpeakt),'troughCV2':getCV2(intertrought),\n 'amp':np.array(amp),'midts':midts,'peaktroughsym':np.array(pktrghsym),'peakw':np.array(pkw),'troughw':np.array(trghw)})\n\n#\ndef getcyclekeys ():\n return ['peakidx','peakh','peakt','interpeakt',\\\n 'troughidx','troughh','trought','intertrought',\\\n 'decayt', 'decayh', 'decayslope',\\\n 'riset', 'riseh', 'riseslope',\\\n 'rdsym','peakF','troughF',\\\n 'npeak','ntrough',\\\n 'peakCV2','troughCV2',\n 'amp','midts','peaktroughsym','peakw','troughw'] \n\n#\ndef drawcyclefeatures (sig, sampr, maxF = None, dprop = None):\n if dprop is None:\n dprop = getcyclefeatures(sig, sampr, maxF)\n tsig = np.linspace(0,(1e3/sampr)*len(sig),len(sig))\n plot(tsig,sig,'k')\n plot([tsig[i] for i in dprop['peakidx']], [sig[i] for i in dprop['peakidx']],'ro')\n plot([tsig[i] for i in dprop['troughidx']], [sig[i] for i in dprop['troughidx']],'go')\n plot([tsig[x] for x in dprop['midts']], [sig[x] for x in dprop['midts']], 'bo')\n\n \n" ]
[ [ "numpy.array", "scipy.signal.find_peaks", "numpy.diff" ] ]
wood-b/dihedral_model
[ "37b7b1648ff2c36f6319401aa50b8400183312f6" ]
[ "tests/test_utils.py" ]
[ "import numpy as np\n#import math\n#import json\nimport unittest\n\nfrom utils import utils\n\n__author__ = \"Brandon Wood\"\n\n\nclass TestUtilFunctions(unittest.TestCase):\n @staticmethod\n def test_uvec():\n pt1 = np.array([0.0, 0.0, 0.0])\n pt2 = np.array([1.0, 0.0, 0.0])\n uvec = np.array([1.0, 0.0, 0.0])\n np.testing.assert_array_equal(uvec, utils.unit_vector(pt1, pt2))\n\n def test_point_rotate(self):\n # rotate pt on x-axis around z-axis\n pt = np.array([2.0, 0.0, 0.0])\n uvec = np.array([0.0, 0.0, 1.0])\n pt_n90 = np.array([0.0, 2.0, 0.0])\n pt_0 = np.array([-2.0, 0.0, 0.0])\n pt_90 = np.array([0.0, -2.0, 0.0])\n pt_180 = pt\n pt_n180 = pt\n np.testing.assert_almost_equal(utils.point_rotation(pt, -90, uvec), pt_n90)\n np.testing.assert_almost_equal(utils.point_rotation(pt, -0, uvec), pt_0)\n np.testing.assert_almost_equal(utils.point_rotation(pt, 90, uvec), pt_90)\n np.testing.assert_almost_equal(utils.point_rotation(pt, 180, uvec), pt_180)\n np.testing.assert_almost_equal(utils.point_rotation(pt, -180, uvec), pt_n180)\n\n def test_eV_to_kJmol(self):\n value = 2 * 96.48533646\n test_val = utils.eV_to_kJmol([2])\n np.testing.assert_almost_equal(value, test_val)\n\n def test_eV_to_kcalmol(self):\n value = 2 * 23.06054887\n test_val = utils.eV_to_kcalmol([2])\n np.testing.assert_almost_equal(value, test_val)\n\n def test_rel_energy(self):\n energy = [2.0, 4.0, 7.0, 5.0]\n value = [0.0, 2.0, 5.0, 3.0]\n test_val = utils.relative_energy(energy)\n np.testing.assert_array_equal(value, test_val)\n\n def test_coor_fn(self):\n # test case 1\n pt1 = np.array([0.0, 0.0, 0.0])\n pt2 = np.array([1.0, 0.0, 0.0])\n pt3 = pt1\n pt4 = pt2\n test_val = utils.correlation(pt1, pt2, pt3, pt4)\n np.testing.assert_array_equal(test_val, 1.0)\n # test case 2\n pt2 = np.array([2.0, 0.0, 0.0])\n pt3 = np.array([2.5, 1.0, 0.0])\n pt4 = np.array([4.5, 1.5, 0.0])\n test_val = utils.correlation(pt1, pt2, pt3, pt4)\n np.testing.assert_almost_equal(test_val, 0.970142500145332)\n\n '''def test_planarity(self):\n # test case 1, all points in the xy plane\n pt1 = np.array([0.0, 0.0, 0.0])\n pt2 = np.array([1.48, 0.0, 0.0])\n pt3 = np.array([2.90957022, -0.38305219, 0.0])\n pt4 = pt2\n pt5 = pt3\n pt6 = np.array([4.38957022, -0.38305219, 0.0])\n test_val = utils.planarity(pt1, pt2, pt3, pt4, pt5, pt6)\n np.testing.assert_almost_equal(test_val, 1.0)\n # test case 2, 45 degree angle between planes\n pt6 = np.array([4.22032561, -1.0146817, -0.2708588])\n test_val = utils.planarity(pt1, pt2, pt3, pt4, pt5, pt6)\n ans = 0.25\n np.testing.assert_almost_equal(test_val, ans)\n # test case 3, 90 degree angle between planes\n pt6 = np.array([4.29042902, -0.75305219, -0.38305219])\n test_val = utils.planarity(pt1, pt2, pt3, pt4, pt5, pt6)\n ans = - 0.5\n np.testing.assert_almost_equal(test_val, ans)'''\n\n def test_RB_potential(self):\n value = 3.281250000000001\n test_val = utils.RB_potential(120.0, 5.0, 4.0, 3.0, 5.0, 4.0, 3.0)\n np.testing.assert_almost_equal(value, test_val)\n\n def test_boltz_dist(self):\n energies = [0.019, 0.15, 0.23, 0.026]\n temp = 300.0\n values = np.array([0.56517132, 0.00356022, 0.00016126, 0.43110720])\n test_vals = utils.boltz_dist(temp, energies)\n np.testing.assert_almost_equal(values, test_vals)\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.testing.assert_array_equal", "numpy.testing.assert_almost_equal", "numpy.array" ] ]
AI-secure/Uncovering-the-Connections-BetweenAdversarial-Transferability-and-Knowledge-Transferability
[ "a2fb10f56618c6d6dd1638967d59c4a83ffa1c05" ]
[ "image_exp/adv2know.py" ]
[ "import argparse\nimport os\nfrom datetime import datetime\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nfrom torch.optim.lr_scheduler import CosineAnnealingLR\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\nfrom subloader import CIFAR10_SubLoader, STL10_SubLoader, CIFAR100_SubLoader\nimport models\nfrom utils import progress_bar\nimport numpy as np\nfrom tqdm import tqdm\nfrom attack import attack, attack_feature\n\n# pylint: disable=invalid-name,redefined-outer-name,global-statement\n\nmodel_names = sorted(name for name in models.__dict__ if not name.startswith(\n \"__\") and callable(models.__dict__[name]))\nparser = argparse.ArgumentParser(description='attack and transfer')\nparser.add_argument('-d', '--data', default='cifar10', help = 'choice of dataset')\n\nparser.add_argument('-a', '--arch', metavar='ARCH', default='res_net18',\n choices=model_names,\n help='model architecture: ' +\n ' | '.join(model_names) +\n ' (default: res_net18)')\n\nparser.add_argument('-j', '--workers', default=2, type=int, metavar='N',\n help='number of data loading workers (default: 2)')\n\nparser.add_argument('-b', '--batch-size', default=1, type=int,\n metavar='N',\n help='mini-batch size (default: 64), this is the total '\n 'batch size of all GPUs on the current node when '\n 'using Data Parallel or Distributed Data Parallel')\n\n\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n\nparser.add_argument('--eps', default = 0.06, type = float)\nparser.add_argument('--attack', default = 'fgsm', type = str)\nparser.add_argument('--include_list', nargs='+', type=int)\nparser.add_argument('--superclass', default = \"aquatic_mammals\", type = str)\nparser.add_argument('--load_name', default = '', type=str, help='name of the checkpoint')\n\nargs = parser.parse_args()\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\n\nprint('==> Preparing data..')\n\nif args.data == 'cifar10':\n exclude_list = [i for i in range(10) if i not in args.include_list]\n transform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5))\n ])\n\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5))\n ])\n trainset = CIFAR10_SubLoader('../data/cifar10', exclude_list = exclude_list, train=True, transform=transform_train, download=True)\n trainloader = torch.utils.data.DataLoader(trainset,\n batch_size=args.batch_size,\n shuffle=True,\n num_workers=args.workers)\n testset = CIFAR10_SubLoader('../data/cifar10', exclude_list = exclude_list, train=False, transform=transform_test, download=True)\n testloader = torch.utils.data.DataLoader(testset,\n batch_size=1,\n shuffle=False,\n num_workers=args.workers)\n validateset = testset\n validateloader = torch.utils.data.DataLoader(validateset,\n batch_size=1,\n shuffle=False,\n num_workers=args.workers)\n num_classes = len(args.include_list)\n \nelif args.data == 'stl10':\n exclude_list = [i for i in range(10) if i not in args.include_list]\n transform_train = transforms.Compose([\n transforms.Resize(32),\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5)),\n ])\n transform_test = transforms.Compose([\n transforms.Resize(32),\n transforms.ToTensor(),\n 
transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5))]\n )\n trainset = STL10_SubLoader('../data/stl10', exclude_list = exclude_list, split='train',transform=transform_train, download=True)\n trainloader = torch.utils.data.DataLoader(trainset,\n batch_size=args.batch_size,\n shuffle=True,\n num_workers=args.workers)\n\n testset = STL10_SubLoader('../data/stl10', exclude_list = exclude_list, split='test', transform=transform_test,download=True)\n testloader = torch.utils.data.DataLoader(testset,\n batch_size=1,\n shuffle=False,\n num_workers=args.workers)\n num_classes = len(args.include_list)\n validateset = testset\n validateloader = torch.utils.data.DataLoader(validateset,\n batch_size=1,\n shuffle=False,\n num_workers=args.workers)\nelif args.data == 'cifar100':\n transform_train = transforms.Compose([\n transforms.Resize(32),\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5)), \n ])\n transform_test = transforms.Compose([\n transforms.Resize(32),\n transforms.ToTensor(),\n transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5))]\n )\n trainset = CIFAR100_SubLoader('../data/cifar100', superclass = args.superclass, train = True,transform=transform_train, download=True)\n trainloader = torch.utils.data.DataLoader(trainset,\n batch_size=args.batch_size,\n shuffle=True,\n num_workers=args.workers)\n\n testset = CIFAR100_SubLoader('../data/cifar100', superclass = args.superclass, train = False, transform=transform_test,download=True)\n testloader = torch.utils.data.DataLoader(testset,\n batch_size=100,\n shuffle=True,\n num_workers=args.workers)\n num_classes = 5\n validateset = testset\n validateloader = torch.utils.data.DataLoader(validateset,\n batch_size=100,\n shuffle=True,\n num_workers=args.workers)\nelse:\n raise Exception('no such dataset!')\n\nmean = np.array([0.0, 0.0, 0.0]).reshape((3, 1, 1))\nstd = np.array([1.0, 1.0, 1.0]).reshape((3, 1, 1))\n\nref_model = models.__dict__['res_net18'](num_classes = num_classes)\nif args.cuda:\n ref_model = ref_model.cuda()\nref_state_dict = torch.load('checkpoint/' + 'stl10_resnet18.pth')['model']\nnew_ref_state_dict = {}\nfor k,v in ref_state_dict.items():\n new_ref_state_dict[k[7:]] = v\nref_model.load_state_dict(new_ref_state_dict)\nref_model.eval()\n\ntransfer_model = models.__dict__[args.arch](num_classes = num_classes)\nif args.cuda:\n transfer_model = transfer_model.cuda()\ntransfer_state_dict = torch.load('checkpoint/' + args.load_name)['model']\nnew_transfer_state_dict = {}\nfor k,v in transfer_state_dict.items():\n new_transfer_state_dict[k[7:]] = v\ntransfer_model.load_state_dict(new_transfer_state_dict)\ntransfer_model.eval()\nprint('finish loading')\n\n\ninference_model = models.__dict__[args.arch](num_classes = num_classes)\nif args.cuda:\n inference_model = inference_model.cuda()\ninf_state_dict = torch.load('checkpoint/transfer_last_'+args.load_name)['model']\nnew_inf_state_dict = {}\nfor k,v in inf_state_dict.items():\n new_inf_state_dict[k[7:]] = v\ninference_model.load_state_dict(new_inf_state_dict)\ninference_model.eval()\n\neps = args.eps\nattack_dict = {\n 'fgsm':lambda model,image:attack(model, image, eps = eps, itr = 1),\n 'pgd':lambda model,image:attack(model, image, eps = eps, itr = 50)\n}\n\nsuccess = 0\nbase_success = 0\ntotal = 0\nalpha = 0\ngamma = 0\ncombined = 0\nattack_method = attack_dict[args.attack]\ncriterion = nn.CrossEntropyLoss()\nalphas = []\nlosses = []\n\nfor batch_idx,(images,labels) in 
enumerate(tqdm(testloader)):\n images = images.cuda()\n labels = labels.cuda()\n \n if ref_model(images).max(1, keepdim = True)[1].item()!= labels[0].item():\n continue\n \n losses.append(criterion(inference_model(images), labels).item()) \n true_outputs = transfer_model(images)\n \n adversarials = attack_method(ref_model, images) \n outputs = transfer_model(adversarials)\n \n base_adversarials = attack_method(transfer_model, images)\n base_outputs = transfer_model(base_adversarials)\n \n total+= images.shape[0]\n if outputs.max(1, keepdim = True)[1].item()!= labels[0].item():\n success+=1\n if base_outputs.max(1, keepdim = True)[1].item()!= labels[0].item():\n base_success+=1\n \n a = (torch.norm(true_outputs - outputs)/torch.norm(true_outputs - base_outputs)).item()\n alphas.append(a)\n alpha += a\n \n v1 = (ref_model(images) - ref_model(adversarials)).flatten().data.cpu().numpy()\n v1 = v1/np.linalg.norm(v1)\n v2 = (true_outputs - outputs).flatten().data.cpu().numpy()\n v2 = v2/np.linalg.norm(v2)\n g = np.outer(v1, v2)\n gamma += g\n \n c = a * np.outer(v1, v2)\n combined += c\n\ngamma = np.linalg.norm(gamma / total)**2\ncombined = np.linalg.norm(combined / total)**2\nsuccess = success/total\nbase_success = base_success/total\nalpha = alpha /total\nprint(' alpha:', alpha, 'gamma:', gamma, 'combined:', combined)\nprint(' alpha:', alpha, 'gamma:', gamma, 'combined:', combined)\nnp.save('alphas/alpha_'+args.load_name,np.array(alphas))\nnp.save('alphas/loss_'+args.load_name,np.array(losses))\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.norm", "torch.load", "torch.utils.data.DataLoader", "numpy.linalg.norm", "torch.cuda.is_available", "numpy.outer", "numpy.array" ] ]
uwaa-ndcl/ACC_2019_Avant
[ "d03e3715a030e52135baa9bf4e6a4d7a8b2c0881" ]
[ "pose_estimation/gramian/trajectories.py" ]
[ "import os\nimport math\nimport pickle\nimport numpy as np\nimport transforms3d as t3d\n\nimport pose_estimation.directories as dirs\nimport pose_estimation.tools.math as tm\nimport pose_estimation.blender.render as br\nimport pose_estimation.gramian.functions as gf\nfrom pose_estimation.blender.render_properties import RenderProperties\n\n# object and camera\n#model_name = 'cone'\nmodel_name = 'car2'\n\n# where to put object\nxyz = np.array([0, 0, 0])\nxyz_col = np.expand_dims(xyz, 1)\n\n# center and default rotation of object\nxyz_cent = np.array([0, 0, 0])\nquat = np.array([1, 0, 0, 0])\nxyz_cent_col = np.expand_dims(xyz_cent, 1)\nquat_col = np.expand_dims(quat, 1)\n\n# angles\nif model_name == 'cone':\n rad = 5 # radius of semicircle\nelif model_name == 'car2':\n rad = 8\n\nn_pts = 10 # points along the trajectory for taking images\nn_ang_x = 10\nn_ang_z = 10\nn_ang = n_ang_x * n_ang_z\nang_x_vals = np.linspace(-math.pi/2, math.pi/2, n_ang_x)\nang_z_vals = np.linspace(0, math.pi, n_ang_z)\nang_x, ang_z = np.meshgrid(ang_x_vals, ang_z_vals)\nang_x = np.reshape(ang_x, n_ang)\nang_z = np.reshape(ang_z, n_ang)\nang_xz = np.stack((ang_x, ang_z), 1)\n\n# camera properties\nlens = 32\nsensor_width = 36\nsensor_height = 36\n\n# keys, colors, and plot names\nkys = ['ang_xz_det_min', 'ang_xz_det_max',\n 'ang_xz_trace_min', 'ang_xz_trace_max',\n 'ang_xz_min_eval_min', 'ang_xz_min_eval_max',\n 'ang_xz_cond_num_min', 'ang_xz_cond_num_max']\n# red blue yellow white\nclrs_4 = [[.8,0,0], [0,0,.8], [.8,.8,0], [1,1,1]]\nclrs = [val for pair in zip(clrs_4, clrs_4) for val in pair]\n\ntex_names = [\n r'det($\\widehat{\\mathbf{W}}$)',\n r'det($\\widehat{\\mathbf{W}}$)',\n r'tr($\\widehat{\\mathbf{W}}$)',\n r'tr($\\widehat{\\mathbf{W}}$)',\n r'$\\lambda_{\\text{min}}(\\widehat{\\mathbf{W}}$)',\n r'$\\lambda_{\\text{min}}(\\widehat{\\mathbf{W}}$)',\n r'$\\frac{\\lambda_{\\text{max}}}{\\lambda_{\\text{min}}}' \\\n '(\\widehat{\\mathbf{W}}$)',\n r'$\\frac{\\lambda_{\\text{max}}}{\\lambda_{\\text{min}}}' \\\n '(\\widehat{\\mathbf{W}}$)']\n\n\ndef semicircle(rad, n_coord, ang_x, ang_z, xyz_offset):\n '''\n create a semi-circular curve\n '''\n\n # coordinates of the points which define the curve\n theta = np.linspace(0, math.pi, n_coord)\n coord = np.full((3, n_coord), np.nan)\n \n # camera\n # rotation matrix for camera when ang_x = ang_z = 0\n # blender frame to default frame (y forward, x right, z up)\n R_def = tm.R_x(math.pi/2)\n R_semi = tm.R_z(-math.pi/2) # default frame to frame of 1st semicircle\n cam_quat = np.full((4, n_coord), np.nan) # to be filled\n\n # calculate each coordinate\n for i in range(n_coord):\n # coordinates of semicircle, negative-to-positive in x-direction\n coord[:,i] = [-rad*math.cos(theta[i]), 0, rad*math.sin(theta[i])]\n\n # rotation matrix from camera frame to default Blender camera\n R_ij = tm.R_z(ang_z) @ tm.R_x(ang_x) @ tm.R_y(theta[i]) @ R_semi \\\n @ R_def\n\n cam_quat[:,i] = t3d.quaternions.mat2quat(R_ij)\n\n # rotate all coordinates\n coord = tm.R_z(ang_z) @ tm.R_x(ang_x) @ coord\n coord += np.tile(xyz_offset, n_coord)\n\n return coord, cam_quat\n\n\ndef evaluate_all_trajectories():\n '''\n evaluate all candidate trajectories, calculate the Gramian for each, and\n calculate the optimal Gramian over all trajectories\n '''\n\n # rendering\n save_dir = dirs.trajectories_dir\n # integrated gramian for all trajectories\n gram_all = np.full((6, 6, n_ang), np.nan)\n # loop over semicircles\n for i in range(n_ang):\n gram = np.full((6, 6), 0.0)\n coord, cam_quat = semicircle(rad, 
n_pts, ang_x[i], ang_z[i],\n xyz_cent_col)\n\n # loop over points along semicircle\n for j in range(n_pts):\n\n # render\n cam_pos_ij = np.expand_dims(coord[:,j], 1)\n cam_quat_ij = np.expand_dims(cam_quat[:,j], 1)\n\n # render properties object\n to_render_pkl = os.path.join(save_dir, 'to_render.pkl')\n render_props = RenderProperties()\n render_props.model_name = model_name\n render_props.xyz = xyz_col\n render_props.quat = quat_col\n render_props.cam_xyz = cam_pos_ij\n render_props.cam_quat = cam_quat_ij\n render_props.lens = lens\n render_props.sensor_width = sensor_width\n render_props.sensor_height = sensor_height\n render_props.compute_gramian = True\n render_props.alpha = False\n render_props.image_names=['%03d' % j]\n\n with open(to_render_pkl, 'wb') as output:\n pickle.dump(render_props, output, pickle.HIGHEST_PROTOCOL)\n br.blender_render(save_dir)\n \n # load gramian\n gram_npz = os.path.join(save_dir, 'gramian.npz')\n gram_data = np.load(gram_npz)\n gram_ij = gram_data['gram']\n gram += gram_ij[:,:,0]\n\n # save integrated gramian\n gram_all[:,:,i] = gram\n \n # calculate measures of all integrated gramians\n grm = gf.gramian_measures(gram_all)\n det_min_ind = np.argmin(grm['det'])\n det_max_ind = np.argmax(grm['det'])\n trace_min_ind = np.argmin(grm['trace'])\n trace_max_ind = np.argmax(grm['trace'])\n min_eval_min_ind = np.argmin(grm['min_eval'])\n min_eval_max_ind = np.argmax(grm['min_eval'])\n cond_num_min_ind = np.argmin(grm['cond_num'])\n cond_num_max_ind = np.argmax(grm['cond_num'])\n\n opt_ang = {\n kys[0]: ang_xz[det_min_ind],\n kys[1]: ang_xz[det_max_ind],\n kys[2]: ang_xz[trace_min_ind],\n kys[3]: ang_xz[trace_max_ind],\n kys[4]: ang_xz[min_eval_min_ind],\n kys[5]: ang_xz[min_eval_max_ind],\n kys[6]: ang_xz[cond_num_min_ind],\n kys[7]: ang_xz[cond_num_max_ind]}\n opt_ang_npz = os.path.join(save_dir, 'opt_ang.npz')\n np.savez(opt_ang_npz, xyz=xyz, ang_x=ang_x, ang_z=ang_z, opt_ang=opt_ang)\n\n\nif __name__ == '__main__':\n evaluate_all_trajectories()\n" ]
[ [ "numpy.expand_dims", "numpy.savez", "numpy.linspace", "numpy.meshgrid", "numpy.reshape", "numpy.tile", "numpy.stack", "numpy.full", "numpy.argmax", "numpy.argmin", "numpy.load", "numpy.array" ] ]
leewujung/echopype-lfs-test
[ "b76dcf42631d0ac9cef0efeced9be4afdc15e659" ]
[ "echopype/convert/utils/set_groups_base.py" ]
[ "from __future__ import absolute_import, division, print_function\nimport os\nimport numpy as np\nimport netCDF4\nimport zarr\nimport xarray as xr\n\n\nclass SetGroupsBase:\n \"\"\"Base class for setting groups in netCDF file.\n \"\"\"\n\n def __init__(self, file_path='test.nc', compress=True):\n self.file_path = file_path\n filename, ext = os.path.splitext(file_path)\n self.format = ext\n self.compress = compress\n\n def set_toplevel(self, tl_dict):\n \"\"\"Set attributes in the Top-level group.\"\"\"\n if self.format == '.nc':\n with netCDF4.Dataset(self.file_path, \"w\", format=\"NETCDF4\") as ncfile:\n [ncfile.setncattr(k, v) for k, v in tl_dict.items()]\n elif self.format == '.zarr':\n zarrfile = zarr.open(self.file_path, mode=\"w\")\n for k, v in tl_dict.items():\n zarrfile.attrs[k] = v\n else:\n raise ValueError(\"Unsupported file format\")\n\n def set_provenance(self, src_file_names, prov_dict):\n \"\"\"Set the Provenance group in the nc file.\n\n Parameters\n ----------\n src_file_names\n list of source filenames\n prov_dict\n dictionary containing file conversion parameters\n prov_dict['conversion_software_name']\n prov_dict['conversion_software_version']\n prov_dict['conversion_time']\n \"\"\"\n # Save the source filenames as a data variable\n ds = xr.Dataset(\n {\n 'filenames': ('file_num', src_file_names, {'long_name': 'Source filenames'})\n },\n coords={'file_num': np.arange(len(src_file_names))},\n )\n\n # Save all attributes\n for k, v in prov_dict.items():\n ds.attrs[k] = v\n\n # save to file\n if self.format == '.nc':\n ds.to_netcdf(path=self.file_path, mode='a', group='Provenance')\n elif self.format == '.zarr':\n ds.to_zarr(store=self.file_path, mode='a', group='Provenance')\n\n def set_sonar(self, sonar_dict):\n \"\"\"Set the Sonar group in the nc file.\n\n Parameters\n ----------\n sonar_dict\n dictionary containing sonar parameters\n \"\"\"\n # create group\n if self.format == '.nc':\n ncfile = netCDF4.Dataset(self.file_path, \"a\", format=\"NETCDF4\")\n snr = ncfile.createGroup(\"Sonar\")\n\n # set group attributes\n for k, v in sonar_dict.items():\n snr.setncattr(k, v)\n\n # close nc file\n ncfile.close()\n elif self.format == '.zarr':\n zarrfile = zarr.open(self.file_path, mode='a')\n snr = zarrfile.create_group('Sonar')\n\n for k, v in sonar_dict.items():\n snr.attrs[k] = v\n\n def set_nmea(self, nmea_dict):\n \"\"\"Set the Platform/NMEA group in the nc file.\n\n Parameters\n ----------\n nmea_dict\n dictionary containing platform parameters\n \"\"\"\n # Only save platform group if file_path exists\n if not os.path.exists(self.file_path):\n print('netCDF file does not exist, exiting without saving Platform group...')\n else:\n # Convert np.datetime64 numbers to seconds since 1900-01-01\n # due to xarray.to_netcdf() error on encoding np.datetime64 objects directly\n time = (nmea_dict['nmea_time'] - np.datetime64('1900-01-01T00:00:00')) \\\n / np.timedelta64(1, 's')\n ds = xr.Dataset(\n {'NMEA_datagram': (['time'], nmea_dict['nmea_datagram'],\n {'long_name': 'NMEA datagram'})\n },\n coords={'time': (['time'], time,\n {'axis': 'T',\n 'calendar': 'gregorian',\n 'long_name': 'Timestamps for NMEA datagrams',\n 'standard_name': 'time',\n 'units': 'seconds since 1900-01-01'})},\n attrs={'description': 'All NMEA sensor datagrams'})\n # save to file\n if self.format == '.nc':\n ds.to_netcdf(path=self.file_path, mode='a', group='Platform/NMEA')\n elif self.format == '.zarr':\n ds.to_zarr(store=self.file_path, mode='a', group='Platform/NMEA')\n" ]
[ [ "numpy.timedelta64", "numpy.datetime64" ] ]
irmaps/easy-few-shot-learning
[ "afb315589c42ea9380f908380b46b5cb3a200dad" ]
[ "easyfsl/methods/relation_networks.py" ]
[ "\"\"\"\nSee original implementation at\nhttps://github.com/floodsung/LearningToCompare_FSL\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nfrom easyfsl.methods import AbstractMetaLearner\nfrom easyfsl.utils import compute_prototypes\n\n\nclass RelationNetworks(AbstractMetaLearner):\n \"\"\"\n Sung, Flood, Yongxin Yang, Li Zhang, Tao Xiang, Philip HS Torr, and Timothy M. Hospedales.\n \"Learning to compare: Relation network for few-shot learning.\" (2018)\n https://openaccess.thecvf.com/content_cvpr_2018/papers/Sung_Learning_to_Compare_CVPR_2018_paper.pdf\n\n In the Relation Networks algorithm, we first extract feature maps for both support and query\n images. Then we compute the mean of support features for each class (called prototypes).\n To predict the label of a query image, its feature map is concatenated with each class prototype\n and fed into a relation module, i.e. a CNN that outputs a relation score. Finally, the\n classification vector of the query is its relation score to each class prototype.\n\n Note that for most other few-shot algorithms we talk about feature vectors, because for each\n input image, the backbone outputs a 1-dim feature vector. Here we talk about feature maps,\n because for each input image, the backbone outputs a \"feature map\" of shape\n (n_channels, width, height). This raises different constraints on the architecture of the\n backbone: while other algorithms require a \"flatten\" operation in the backbone, here \"flatten\"\n operations are forbidden.\n \"\"\"\n\n def __init__(self, *args, inner_relation_module_channels: int = 8):\n \"\"\"\n Build Relation Networks by calling the constructor of AbstractMetaLearner.\n Args:\n *args: all arguments of the init method of AbstractMetaLearner\n inner_relation_module_channels: number of hidden channels between the linear layers of\n the relation module. Defaults to 8.\n\n Raises:\n ValueError: if the backbone doesn't output feature maps, i.e. if its output for a\n given image is not a tensor of shape (n_channels, width, height)\n \"\"\"\n super().__init__(*args)\n\n if len(self.backbone_output_shape) != 3:\n raise ValueError(\n \"Illegal backbone for Relation Networks. Expected output for an image is a 3-dim \"\n \"tensor of shape (n_channels, width, height).\"\n )\n\n # Relation Networks use Mean Square Error.\n # This is unusual because this is a classification problem.\n # The authors justify this choice by the fact that the output of the model is a relation\n # score, which makes it a regression problem. See the article for more details.\n self.loss_function = nn.MSELoss()\n\n # Here we build the relation module that will output the relation score for each\n # (query, prototype) pair. 
See the function docstring for more details.\n self.relation_module = self.build_relation_module(\n inner_relation_module_channels\n )\n\n # Here we create the field so that the model can store the prototypes for a support set\n self.prototypes = None\n\n def build_relation_module(self, inner_relation_module_channels: int) -> nn.Module:\n \"\"\"\n Build the relation module that takes as input the concatenation of two feature\n maps (in our case the feature map of a query and the feature map of a class prototype).\n In order to make the network robust to any change in the dimensions of the input images,\n we made some changes to the architecture defined in the original implementation (typically\n the use of adaptive pooling).\n Args:\n inner_relation_module_channels: number of hidden channels between the linear layers of\n the relation module\n\n Returns:\n the constructed relation module\n \"\"\"\n return nn.Sequential(\n nn.Sequential(\n nn.Conv2d(\n self.feature_dimension * 2,\n self.feature_dimension,\n kernel_size=3,\n padding=1,\n ),\n nn.BatchNorm2d(self.feature_dimension, momentum=1, affine=True),\n nn.ReLU(),\n nn.AdaptiveMaxPool2d((5, 5)),\n ),\n nn.Sequential(\n nn.Conv2d(\n self.feature_dimension,\n self.feature_dimension,\n kernel_size=3,\n padding=0,\n ),\n nn.BatchNorm2d(self.feature_dimension, momentum=1, affine=True),\n nn.ReLU(),\n nn.AdaptiveMaxPool2d((1, 1)),\n ),\n nn.Flatten(),\n nn.Linear(self.feature_dimension, inner_relation_module_channels),\n nn.ReLU(),\n nn.Linear(inner_relation_module_channels, 1),\n nn.Sigmoid(),\n )\n\n def process_support_set(\n self,\n support_images: torch.Tensor,\n support_labels: torch.Tensor,\n ):\n \"\"\"\n Overrides process_support_set of AbstractMetaLearner.\n Extract feature maps from the support set and store class prototypes.\n\n Args:\n support_images: images of the support set\n support_labels: labels of support set images\n \"\"\"\n\n support_features = self.backbone(support_images)\n self.prototypes = compute_prototypes(support_features, support_labels)\n\n def forward(self, query_images: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Overrides method forward in AbstractMetaLearner.\n Predict the label of a query image by concatenating its feature map with each class\n prototype and feeding the result into a relation module, i.e. a CNN that outputs a relation\n score. Finally, the classification vector of the query is its relation score to each class\n prototype.\n\n Args:\n query_images: images of the query set\n Returns:\n a prediction of classification scores for query images\n \"\"\"\n query_features = self.backbone(query_images)\n\n # For each pair (query, prototype), we compute the concatenation of their feature maps\n # Given that query_features is of shape (n_queries, n_channels, width, height), the\n # constructed tensor is of shape (n_queries * n_prototypes, 2 * n_channels, width, height)\n # (2 * n_channels because prototypes and queries are concatenated)\n query_prototype_feature_pairs = torch.cat(\n (\n self.prototypes.unsqueeze(dim=0).expand(\n query_features.shape[0], -1, -1, -1, -1\n ),\n query_features.unsqueeze(dim=1).expand(\n -1, self.prototypes.shape[0], -1, -1, -1\n ),\n ),\n dim=2,\n ).view(-1, 2 * self.feature_dimension, *query_features.shape[2:])\n\n # Each pair (query, prototype) is assigned a relation score in [0,1]. 
Then we reshape the\n # tensor so that relation_scores is of shape (n_queries, n_prototypes).\n relation_scores = self.relation_module(query_prototype_feature_pairs).view(\n -1, self.prototypes.shape[0]\n )\n\n return relation_scores\n\n def compute_loss(\n self, classification_scores: torch.Tensor, query_labels: torch.Tensor\n ) -> torch.Tensor:\n \"\"\"\n Overrides the method compute_loss of AbstractMetaLearner because Relation Networks\n use the Mean Square Error (MSE) loss. MSE is a regression loss, so it requires the ground\n truth to be of the same shape as the predictions. In our case, this means that labels\n must be provided in a one hot fashion.\n\n Note that we need to enforce the number of classes by using the last computed prototypes,\n in case query_labels doesn't contain all possible labels.\n\n Args:\n classification_scores: predicted classification scores of shape (n_query, n_classes)\n query_labels: one hot ground truth labels of shape (n_query, n_classes)\n\n Returns:\n MSE loss between the prediction and the ground truth\n \"\"\"\n return self.loss_function(\n classification_scores,\n nn.functional.one_hot(\n query_labels, num_classes=self.prototypes.shape[0]\n ).float(),\n )\n" ]
[ [ "torch.nn.AdaptiveMaxPool2d", "torch.nn.Conv2d", "torch.nn.Flatten", "torch.nn.Sigmoid", "torch.nn.Linear", "torch.nn.BatchNorm2d", "torch.nn.functional.one_hot", "torch.nn.ReLU", "torch.nn.MSELoss" ] ]
xaviercucurull/FACIL
[ "41cb16211b2c086e6970500b6de5da5807495081" ]
[ "src/main_incremental.py" ]
[ "import os\nimport time\nimport torch\nimport argparse\nimport importlib\nimport numpy as np\nfrom functools import reduce\nimport pathlib\n\nimport utils\nimport approach\nfrom loggers.exp_logger import MultiLogger\nfrom datasets.data_loader import get_loaders\nfrom datasets.dataset_config import dataset_config\nfrom last_layer_analysis import last_layer_analysis\nfrom networks import tvmodels, allmodels, set_tvmodel_head_var\n\n\ndef main(argv=None):\n tstart = time.time()\n # Arguments\n parser = argparse.ArgumentParser(description='FACIL - Framework for Analysis of Class Incremental Learning')\n\n # miscellaneous args\n parser.add_argument('--gpu', type=int, default=0,\n help='GPU (default=%(default)s)')\n parser.add_argument('--results-path', type=str, default=str(pathlib.Path(__file__).parent.parent.joinpath('results')),\n help='Results path (default=%(default)s)')\n parser.add_argument('--exp-name', default=None, type=str,\n help='Experiment name (default=%(default)s)')\n parser.add_argument('--seed', type=int, default=0,\n help='Random seed (default=%(default)s)')\n parser.add_argument('--log', default=['disk'], type=str, choices=['disk', 'tensorboard'],\n help='Loggers used (disk, tensorboard) (default=%(default)s)', nargs='*', metavar=\"LOGGER\")\n parser.add_argument('--save-models', action='store_true',\n help='Save trained models (default=%(default)s)')\n parser.add_argument('--last-layer-analysis', action='store_true',\n help='Plot last layer analysis (default=%(default)s)')\n parser.add_argument('--no-cudnn-deterministic', action='store_true',\n help='Disable CUDNN deterministic (default=%(default)s)')\n # dataset args\n parser.add_argument('--datasets', default=['cifar100'], type=str, choices=list(dataset_config.keys()),\n help='Dataset or datasets used (default=%(default)s)', nargs='+', metavar=\"DATASET\")\n parser.add_argument('--num-workers', default=4, type=int, required=False,\n help='Number of subprocesses to use for dataloader (default=%(default)s)')\n parser.add_argument('--pin-memory', default=False, type=bool, required=False,\n help='Copy Tensors into CUDA pinned memory before returning them (default=%(default)s)')\n parser.add_argument('--batch-size', default=64, type=int, required=False,\n help='Number of samples per batch to load (default=%(default)s)')\n parser.add_argument('--num-tasks', default=4, type=int, required=False,\n help='Number of tasks per dataset (default=%(default)s)')\n parser.add_argument('--nc-first-task', default=None, type=int, required=False,\n help='Number of classes of the first task (default=%(default)s)')\n parser.add_argument('--use-valid-only', action='store_true',\n help='Use validation split instead of test (default=%(default)s)')\n parser.add_argument('--stop-at-task', default=0, type=int, required=False,\n help='Stop training after specified task (default=%(default)s)')\n # model args\n parser.add_argument('--network', default='resnet32', type=str, choices=allmodels,\n help='Network architecture used (default=%(default)s)', metavar=\"NETWORK\")\n parser.add_argument('--keep-existing-head', action='store_true',\n help='Disable removing classifier last layer (default=%(default)s)')\n parser.add_argument('--pretrained', action='store_true',\n help='Use pretrained backbone (default=%(default)s)')\n # training args\n parser.add_argument('--approach', default='finetuning', type=str, choices=approach.__all__,\n help='Learning approach used (default=%(default)s)', metavar=\"APPROACH\")\n parser.add_argument('--nepochs', default=200, 
type=int, required=False,\n help='Number of epochs per training session (default=%(default)s)')\n parser.add_argument('--lr', default=0.1, type=float, required=False,\n help='Starting learning rate (default=%(default)s)')\n parser.add_argument('--lr-min', default=1e-4, type=float, required=False,\n help='Minimum learning rate (default=%(default)s)')\n parser.add_argument('--lr-factor', default=3, type=float, required=False,\n help='Learning rate decreasing factor (default=%(default)s)')\n parser.add_argument('--lr-patience', default=5, type=int, required=False,\n help='Maximum patience to wait before decreasing learning rate (default=%(default)s)')\n parser.add_argument('--clipping', default=10000, type=float, required=False,\n help='Clip gradient norm (default=%(default)s)')\n parser.add_argument('--momentum', default=0.0, type=float, required=False,\n help='Momentum factor (default=%(default)s)')\n parser.add_argument('--weight-decay', default=0.0, type=float, required=False,\n help='Weight decay (L2 penalty) (default=%(default)s)')\n parser.add_argument('--warmup-nepochs', default=0, type=int, required=False,\n help='Number of warm-up epochs (default=%(default)s)')\n parser.add_argument('--warmup-lr-factor', default=1.0, type=float, required=False,\n help='Warm-up learning rate factor (default=%(default)s)')\n parser.add_argument('--multi-softmax', action='store_true',\n help='Apply separate softmax for each task (default=%(default)s)')\n parser.add_argument('--fix-bn', action='store_true',\n help='Fix batch normalization after first task (default=%(default)s)')\n parser.add_argument('--eval-on-train', action='store_true',\n help='Show train loss and accuracy (default=%(default)s)')\n # gridsearch args\n parser.add_argument('--gridsearch-tasks', default=-1, type=int,\n help='Number of tasks to apply GridSearch (-1: all tasks) (default=%(default)s)')\n\n # Args -- Incremental Learning Framework\n args, extra_args = parser.parse_known_args(argv)\n args.results_path = os.path.expanduser(args.results_path)\n base_kwargs = dict(nepochs=args.nepochs, lr=args.lr, lr_min=args.lr_min, lr_factor=args.lr_factor,\n lr_patience=args.lr_patience, clipgrad=args.clipping, momentum=args.momentum,\n wd=args.weight_decay, multi_softmax=args.multi_softmax, wu_nepochs=args.warmup_nepochs,\n wu_lr_factor=args.warmup_lr_factor, fix_bn=args.fix_bn, eval_on_train=args.eval_on_train)\n\n if args.no_cudnn_deterministic:\n print('WARNING: CUDNN Deterministic will be disabled.')\n utils.cudnn_deterministic = False\n\n utils.seed_everything(seed=args.seed)\n print('=' * 108)\n print('Arguments =')\n for arg in np.sort(list(vars(args).keys())):\n print('\\t' + arg + ':', getattr(args, arg))\n print('=' * 108)\n\n # Args -- CUDA\n if torch.cuda.is_available():\n torch.cuda.set_device(args.gpu)\n device = 'cuda'\n else:\n print('WARNING: [CUDA unavailable] Using CPU instead!')\n device = 'cpu'\n # Multiple gpus\n # if torch.cuda.device_count() > 1:\n # self.C = torch.nn.DataParallel(C)\n # self.C.to(self.device)\n ####################################################################################################################\n\n # Args -- Network\n from networks.network import LLL_Net\n if args.network in tvmodels: # torchvision models\n tvnet = getattr(importlib.import_module(name='torchvision.models'), args.network)\n if args.network == 'googlenet':\n init_model = tvnet(pretrained=args.pretrained, aux_logits=False)\n else:\n init_model = tvnet(pretrained=args.pretrained)\n set_tvmodel_head_var(init_model)\n else: 
# other models declared in networks package's init\n net = getattr(importlib.import_module(name='networks'), args.network)\n # WARNING: fixed to pretrained False for other model (non-torchvision)\n init_model = net(pretrained=False)\n\n # Args -- Continual Learning Approach\n from approach.incremental_learning import Inc_Learning_Appr\n Appr = getattr(importlib.import_module(name='approach.' + args.approach), 'Appr')\n assert issubclass(Appr, Inc_Learning_Appr)\n appr_args, extra_args = Appr.extra_parser(extra_args)\n print('Approach arguments =')\n for arg in np.sort(list(vars(appr_args).keys())):\n print('\\t' + arg + ':', getattr(appr_args, arg))\n print('=' * 108)\n\n # Args -- Exemplars Management\n from datasets.exemplars_dataset import ExemplarsDataset\n Appr_ExemplarsDataset = Appr.exemplars_dataset_class()\n if Appr_ExemplarsDataset:\n assert issubclass(Appr_ExemplarsDataset, ExemplarsDataset)\n appr_exemplars_dataset_args, extra_args = Appr_ExemplarsDataset.extra_parser(extra_args)\n print('Exemplars dataset arguments =')\n for arg in np.sort(list(vars(appr_exemplars_dataset_args).keys())):\n print('\\t' + arg + ':', getattr(appr_exemplars_dataset_args, arg))\n print('=' * 108)\n else:\n appr_exemplars_dataset_args = argparse.Namespace()\n\n # Args -- GridSearch\n if args.gridsearch_tasks > 0:\n from gridsearch import GridSearch\n gs_args, extra_args = GridSearch.extra_parser(extra_args)\n Appr_finetuning = getattr(importlib.import_module(name='approach.finetuning'), 'Appr')\n assert issubclass(Appr_finetuning, Inc_Learning_Appr)\n GridSearch_ExemplarsDataset = Appr.exemplars_dataset_class()\n print('GridSearch arguments =')\n for arg in np.sort(list(vars(gs_args).keys())):\n print('\\t' + arg + ':', getattr(gs_args, arg))\n print('=' * 108)\n\n assert len(extra_args) == 0, \"Unused args: {}\".format(' '.join(extra_args))\n ####################################################################################################################\n\n # Log all arguments\n full_exp_name = reduce((lambda x, y: x[0] + y[0]), args.datasets) if len(args.datasets) > 0 else args.datasets[0]\n full_exp_name += '_' + args.approach\n if args.exp_name is not None:\n full_exp_name += '_' + args.exp_name\n logger = MultiLogger(args.results_path, full_exp_name, loggers=args.log, save_models=args.save_models)\n logger.log_args(argparse.Namespace(**args.__dict__, **appr_args.__dict__, **appr_exemplars_dataset_args.__dict__))\n\n # Loaders\n utils.seed_everything(seed=args.seed)\n trn_loader, val_loader, tst_loader, taskcla = get_loaders(args.datasets, args.num_tasks, args.nc_first_task,\n args.batch_size, num_workers=args.num_workers,\n pin_memory=args.pin_memory)\n # Apply arguments for loaders\n if args.use_valid_only:\n tst_loader = val_loader\n max_task = len(taskcla) if args.stop_at_task == 0 else args.stop_at_task\n\n # Network and Approach instances\n utils.seed_everything(seed=args.seed)\n net = LLL_Net(init_model, remove_existing_head=not args.keep_existing_head)\n utils.seed_everything(seed=args.seed)\n # taking transformations and class indices from first train dataset\n first_train_ds = trn_loader[0].dataset\n transform, class_indices = first_train_ds.transform, first_train_ds.class_indices\n appr_kwargs = {**base_kwargs, **dict(logger=logger, **appr_args.__dict__)}\n if Appr_ExemplarsDataset:\n appr_kwargs['exemplars_dataset'] = Appr_ExemplarsDataset(transform, class_indices,\n is_img_dataset=first_train_ds.is_img_dataset,\n **appr_exemplars_dataset_args.__dict__)\n 
utils.seed_everything(seed=args.seed)\n appr = Appr(net, device, **appr_kwargs)\n\n # GridSearch\n if args.gridsearch_tasks > 0:\n ft_kwargs = {**base_kwargs, **dict(logger=logger,\n exemplars_dataset=GridSearch_ExemplarsDataset(transform, class_indices))}\n appr_ft = Appr_finetuning(net, device, **ft_kwargs)\n gridsearch = GridSearch(appr_ft, args.seed, gs_args.gridsearch_config, gs_args.gridsearch_acc_drop_thr,\n gs_args.gridsearch_hparam_decay, gs_args.gridsearch_max_num_searches)\n\n # Loop tasks\n print(taskcla)\n acc_taw = np.zeros((max_task, max_task))\n acc_tag = np.zeros((max_task, max_task))\n forg_taw = np.zeros((max_task, max_task))\n forg_tag = np.zeros((max_task, max_task))\n for t, (_, ncla) in enumerate(taskcla):\n # Early stop tasks if flag\n if t >= max_task:\n continue\n\n print('*' * 108)\n print('Task {:2d}'.format(t))\n print('*' * 108)\n\n # Add head for current task\n net.add_head(taskcla[t][1])\n net.to(device)\n\n # GridSearch\n if t < args.gridsearch_tasks:\n\n # Search for best finetuning learning rate -- Maximal Plasticity Search\n print('LR GridSearch')\n best_ft_acc, best_ft_lr = gridsearch.search_lr(appr.model, t, trn_loader[t], val_loader[t])\n # Apply to approach\n appr.lr = best_ft_lr\n gen_params = gridsearch.gs_config.get_params('general')\n for k, v in gen_params.items():\n if not isinstance(v, list):\n setattr(appr, k, v)\n\n # Search for best forgetting/intransigence tradeoff -- Stability Decay\n print('Trade-off GridSearch')\n best_tradeoff, tradeoff_name = gridsearch.search_tradeoff(args.approach, appr,\n t, trn_loader[t], val_loader[t], best_ft_acc)\n # Apply to approach\n if tradeoff_name is not None:\n setattr(appr, tradeoff_name, best_tradeoff)\n\n print('-' * 108)\n\n # Train\n appr.train(t, trn_loader[t], val_loader[t])\n print('-' * 108)\n\n # Test\n for u in range(t + 1):\n test_loss, acc_taw[t, u], acc_tag[t, u] = appr.eval(u, tst_loader[u])\n if u < t:\n forg_taw[t, u] = acc_taw[:t, u].max(0) - acc_taw[t, u]\n forg_tag[t, u] = acc_tag[:t, u].max(0) - acc_tag[t, u]\n print('>>> Test on task {:2d} : loss={:.3f} | TAw acc={:5.1f}%, forg={:5.1f}%'\n '| TAg acc={:5.1f}%, forg={:5.1f}% <<<'.format(u, test_loss,\n 100 * acc_taw[t, u], 100 * forg_taw[t, u],\n 100 * acc_tag[t, u], 100 * forg_tag[t, u]))\n logger.log_scalar(task=t, iter=u, name='loss', group='test', value=test_loss)\n logger.log_scalar(task=t, iter=u, name='acc_taw', group='test', value=100 * acc_taw[t, u])\n logger.log_scalar(task=t, iter=u, name='acc_tag', group='test', value=100 * acc_tag[t, u])\n logger.log_scalar(task=t, iter=u, name='forg_taw', group='test', value=100 * forg_taw[t, u])\n logger.log_scalar(task=t, iter=u, name='forg_tag', group='test', value=100 * forg_tag[t, u])\n\n # Save\n print('Save at ' + os.path.join(args.results_path, full_exp_name))\n logger.log_result(acc_taw, name=\"acc_taw\", step=t)\n logger.log_result(acc_tag, name=\"acc_tag\", step=t)\n logger.log_result(forg_taw, name=\"forg_taw\", step=t)\n logger.log_result(forg_tag, name=\"forg_tag\", step=t)\n logger.save_model(net.state_dict(), task=t)\n logger.log_result(acc_taw.sum(1) / np.tril(np.ones(acc_taw.shape[0])).sum(1), name=\"avg_accs_taw\", step=t)\n logger.log_result(acc_tag.sum(1) / np.tril(np.ones(acc_tag.shape[0])).sum(1), name=\"avg_accs_tag\", step=t)\n aux = np.tril(np.repeat([[tdata[1] for tdata in taskcla[:max_task]]], max_task, axis=0))\n logger.log_result((acc_taw * aux).sum(1) / aux.sum(1), name=\"wavg_accs_taw\", step=t)\n logger.log_result((acc_tag * aux).sum(1) / aux.sum(1), 
name=\"wavg_accs_tag\", step=t)\n\n # Last layer analysis\n if args.last_layer_analysis:\n weights, biases = last_layer_analysis(net.heads, t, taskcla, y_lim=True)\n logger.log_figure(name='weights', iter=t, figure=weights)\n logger.log_figure(name='bias', iter=t, figure=biases)\n\n # Output sorted weights and biases\n weights, biases = last_layer_analysis(net.heads, t, taskcla, y_lim=True, sort_weights=True)\n logger.log_figure(name='weights', iter=t, figure=weights)\n logger.log_figure(name='bias', iter=t, figure=biases)\n # Print Summary\n utils.print_summary(acc_taw, acc_tag, forg_taw, forg_tag)\n print('[Elapsed time = {:.1f} h]'.format((time.time() - tstart) / (60 * 60)))\n print('Done!')\n\n return acc_taw, acc_tag, forg_taw, forg_tag, logger.exp_path\n ####################################################################################################################\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.cuda.set_device", "numpy.ones", "torch.cuda.is_available", "numpy.repeat", "numpy.zeros" ] ]
garrettkatz/poppy-muffin
[ "43ac00e6a151346ca7df005c009fcb762f16bd35" ]
[ "pybullet/tasks/pick_and_place/eager_rl.py" ]
[ "import pickle as pk\nimport numpy as np\nimport torch as tr\nimport sys, time\nsys.path.append('../../envs')\nfrom blocks_world import BlocksWorldEnv, MovementPenaltyTracker\nfrom abstract_machine import make_abstract_machine, memorize_problem\nfrom nvm import virtualize\nimport neural_virtual_machine as nv\nimport block_stacking_problem as bp\nfrom restack import compute_symbolic_reward\nfrom failure_case import find_failure_case\n\ndef get_rvm_timesteps(rvm, problem, simulate=False, dbg=False):\n # run nvm for time-steps\n rvm.reset({\"jnt\": \"rest\"})\n rvm.mount(\"main\") # sets tick counter to 0\n memorize_problem(rvm, problem)\n if dbg: rvm.dbg()\n while True:\n done = rvm.tick()\n if dbg: rvm.dbg()\n if simulate and rvm.registers[\"jnt\"].content != rvm.registers[\"jnt\"].old_content:\n position = rvm.ik[rvm.registers[\"jnt\"].content]\n rvm.env.goto_position(position)\n if done: break\n return rvm.tick_counter\n\ndef run_nvm(nvm, batch_time_steps, W_init, v_init, dbg=False):\n nvm.net.clear_ticks()\n for t in range(max(batch_time_steps)):\n nvm.net.tick(W_init, v_init)\n if dbg:\n nvm.pullback(t)\n nvm.dbg()\n input('.')\n return nvm.net.weights, nvm.net.activities\n\nif __name__ == \"__main__\":\n \n tr.set_printoptions(precision=8, sci_mode=False, linewidth=1000)\n \n showresults = True\n run_exp = False\n\n results_file = \"erl.pkl\"\n\n detach_gates = True\n sigma = 0.0174 # stdev in random angular sampling (radians)\n\n batch_size = 1\n num_episodes = 8\n num_batch_iters = 3\n \n num_descent_iters = 3\n descent_error_tol = 6*(sigma/10)**2\n descent_lr = 1.0\n optimizer = tr.optim.SGD\n # descent_lr = 0.001\n # optimizer = tr.optim.Adam\n\n max_levels = 3\n num_blocks = 5\n num_bases = 5\n \n # prob_freq = \"batch\"\n prob_freq = \"once\"\n only_fails = True\n \n if run_exp:\n\n domain = bp.BlockStackingDomain(num_blocks, num_bases, max_levels)\n mp_tracker = MovementPenaltyTracker(period=5)\n env = BlocksWorldEnv(show=False, step_hook=mp_tracker.step_hook)\n \n # set up rvm and virtualize\n rvm = make_abstract_machine(env, domain)\n rvm.reset({\"jnt\": \"rest\"})\n rvm.mount(\"main\")\n \n nvm = virtualize(rvm, σ=nv.default_activator, detach_gates=detach_gates)\n nvm.mount(\"main\")\n W_init = {name: {0: nvm.net.batchify_weights(conn.W)} for name, conn in nvm.connections.items()}\n v_init = {name: {0: nvm.net.batchify_activities(reg.content)} for name, reg in nvm.registers.items()}\n v_init[\"jnt\"][0] = nvm.net.batchify_activities(tr.tensor(rvm.ik[\"rest\"]).float())\n \n # set up trainable connections\n inputable = (\"obj\",\"loc\",\"goal\")\n # trainable = [\"ik\", \"to\", \"tc\", \"po\", \"pc\", \"right\", \"above\", \"base\"]\n trainable = tuple(set(nvm.connections.keys()) - set(nvm.net.plastic_connections + inputable))\n train_params = {name: W_init[name][0] for name in trainable}\n for p in train_params.values(): p.requires_grad_()\n \n # save original values for comparison\n orig_params = {name: train_params[name].clone().detach() for name in trainable}\n \n # size up for problem instance batches\n for name in inputable: W_init[name][0] = tr.zeros((batch_size,) + W_init[name][0].shape[1:])\n \n if prob_freq == \"once\":\n if only_fails:\n problems, sym_rewards = zip(*[find_failure_case(env, domain, sym_cutoff=-2) for b in range(batch_size)])\n print(\" sym rewards: %s\" % str(sym_rewards))\n else: problems = [domain.random_problem_instance() for b in range(batch_size)]\n\n results = []\n for batch_iter in range(num_batch_iters):\n batch_iter_counter = 
time.perf_counter()\n \n # setup weights for problem instances\n if prob_freq == \"batch\":\n if only_fails:\n problems, sym_rewards = zip(*[find_failure_case(env, domain, sym_cutoff=-2) for b in range(batch_size)])\n print(\" sym rewards: %s\" % str(sym_rewards))\n else: problems = [domain.random_problem_instance() for b in range(batch_size)]\n\n batch_weights = {name: list() for name in inputable}\n batch_time_steps = []\n for b, problem in enumerate(problems):\n batch_time_steps.append(get_rvm_timesteps(rvm, problem, simulate=False, dbg=False))\n memorize_problem(nvm, problem)\n for name in inputable: batch_weights[name].append(nvm.connections[name].W)\n for name in inputable:\n W_init[name][0] = tr.stack(batch_weights[name]).clone().detach()\n \n # run nvm on instances\n perf_counter = time.perf_counter()\n with tr.no_grad():\n W, v = run_nvm(nvm, batch_time_steps, W_init, v_init)\n joint_output, time_index = {}, {}\n for b, num_time_steps in enumerate(batch_time_steps):\n joint_output[b], time_index[b] = [], []\n for t in range(2, num_time_steps):\n if nvm.decode(\"tar\", t-2, b) != nvm.decode(\"tar\", t-1, b):\n joint_output[b].append(nvm.net.activities[\"jnt\"][t][b,:,0])\n time_index[b].append(t)\n print(\" NVM run took %fs\" % (time.perf_counter() - perf_counter))\n \n # generate random nearby actions\n perf_counter = time.perf_counter()\n positions = {b: {e: list() for e in range(num_episodes)} for b in range(batch_size)}\n for b in range(batch_size):\n for k, mu in enumerate(joint_output[b]):\n dist = tr.distributions.normal.Normal(mu, sigma)\n for e in range(num_episodes):\n position = dist.sample() if e > 0 else mu # noiseless first episode\n positions[b][e].append(position)\n num_motions = [len(positions[b][0]) for b in range(batch_size)]\n print(\" actions took %fs (%d-%d motions)\" % (time.perf_counter() - perf_counter, min(num_motions), max(num_motions)))\n \n # simulate to get rewards\n perf_counter = time.perf_counter()\n rewards, sym = tuple(np.zeros((batch_size, num_episodes)) for _ in [0,1])\n for b, problem in enumerate(problems):\n for e in range(num_episodes):\n env.reset()\n env.load_blocks(problem.thing_below)\n for position in positions[b][e]:\n mp_tracker.reset()\n env.goto_position(position.detach().numpy(), speed=1.5)\n rewards[b,e] -= mp_tracker.penalty\n sym[b,e] = compute_symbolic_reward(env, problem.goal_thing_below)\n rewards[b,e] += sym[b,e]\n # print(\" %d,%d: %f\" % (b,e,rewards[b,e]))\n avg_reward = rewards[:,0].mean() # noiseless episodes\n opt_index = rewards.argmax(axis=1)\n print(\" simulation rewards took %fs\" % (time.perf_counter() - perf_counter))\n print(\" %d problems with better noisy episodes\" % (opt_index > 0).sum())\n \n final_batch = batch_iter+1 == num_batch_iters\n all_opt = (opt_index == 0).all()\n if final_batch or all_opt:\n print(\" batch iter %d took %fs, avg reward = %f\" % (batch_iter, time.perf_counter() - batch_iter_counter, avg_reward))\n results.append((avg_reward, rewards, {}, 0, []))\n with open(results_file, \"wb\") as f: pk.dump(results, f)\n if final_batch: break\n if all_opt: continue\n \n # descent on error from optimal\n descent_counter = time.perf_counter()\n targets = [tr.stack(positions[b][opt_index[b]]).clone().detach() for b in range(batch_size)]\n W_start = {name: W_init[name][0].clone().detach() for name in trainable}\n opt = optimizer(train_params.values(), lr=descent_lr)\n \n descent_log = []\n for descent_iter in range(num_descent_iters):\n descent_iter_counter = time.perf_counter()\n \n run_counter = 
time.perf_counter()\n W, v = run_nvm(nvm, batch_time_steps, W_init, v_init)\n outputs = [\n tr.stack([v[\"jnt\"][t][b,:,0] for t in time_index[b]])\n for b in range(batch_size)]\n # print(\" %d NVM run took %f\" % (descent_iter, time.perf_counter() - run_counter))\n \n objective_counter = time.perf_counter()\n objective = tr.sum(tr.stack([tr.sum((outputs[b] - targets[b])**2) for b in range(batch_size)]))\n objective /= sum(batch_time_steps)\n # print(\" %d L=%f vs %f, took %f\" % (descent_iter, objective, descent_error_tol, time.perf_counter() - objective_counter))\n \n backward_counter = time.perf_counter()\n objective.backward()\n # print(\" %d L grad took %f\" % (descent_iter, time.perf_counter() - backward_counter))\n \n update_counter = time.perf_counter()\n grad_sq_norm = tr.sum(tr.stack([tr.tensor(0.) if p.grad is None else tr.sum(p.grad**2) for p in train_params.values()]))\n descent_log.append((objective.item(), grad_sq_norm.item()))\n if objective > descent_error_tol:\n\n if all([p.grad is None for p in train_params.values()]): raise ValueError(\"All grads none!\")\n # for name, p in train_params.items():\n # if p.grad is None: continue\n # p.data -= p.grad * descent_lr\n # p.grad *= 0\n opt.step()\n opt.zero_grad()\n print(\" %d update took %f\" % (descent_iter, time.perf_counter() - update_counter))\n \n print(\" descent iter %d took %fs, L = %f, |grad L|**2 = %f\" % (\n descent_iter, time.perf_counter() - descent_iter_counter, objective, grad_sq_norm))\n \n if objective <= descent_error_tol: break\n \n sq_delta = tr.sum(tr.stack([tr.sum((W_init[name][0] - W_start[name][0])**2) for name in trainable]))\n print(\" descent took %fs, |delta W|**2 = %f, |v-th|**2 = %f\" % (\n time.perf_counter() - descent_counter, sq_delta, objective))\n \n print(\" batch iter %d took %fs, avg reward = %f\" % (batch_iter, time.perf_counter() - batch_iter_counter, avg_reward))\n delta = {name: (orig_params[name] - train_params[name]).abs().max().item() for name in trainable}\n print(\" delta from 0 = %f\" % max(delta.values()))\n \n results.append((avg_reward, rewards, delta, sq_delta, descent_log))\n with open(results_file, \"wb\") as f: pk.dump(results, f)\n \n if showresults:\n import matplotlib.pyplot as pt\n with open(results_file, \"rb\") as f: results = pk.load(f)\n x_avg_rewards, y_avg_rewards = [], []\n x_rewards, y_rewards = [], []\n x_descent, y_descent = [], []\n for avg_reward, rewards, delta, sq_delta, descent_log in results:\n x_avg_rewards.append(len(x_descent))\n y_avg_rewards.append(avg_reward)\n x_rewards += list((np.random.rand(*rewards.shape)*.5 + len(x_descent)).flat)\n y_rewards += list(rewards.flat)\n for objective, grad_sq_norm in descent_log:\n x_descent.append(len(x_descent))\n # y_descent.append(objective)\n y_descent.append(grad_sq_norm)\n pt.subplot(2,1,1)\n pt.plot(x_rewards, y_rewards, '.', c=(.75,)*3)\n pt.plot(x_avg_rewards, y_avg_rewards, 'ko-')\n pt.xlim([-1, x_avg_rewards[-1]+1])\n pt.subplot(2,1,2)\n pt.plot(x_descent, y_descent, 'k-')\n pt.xlim([-1, x_avg_rewards[-1]+1])\n pt.show()\n \n" ]
[ [ "torch.zeros", "torch.set_printoptions", "torch.sum", "torch.tensor", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlim", "matplotlib.pyplot.subplot", "torch.no_grad", "numpy.random.rand", "torch.stack", "torch.distributions.normal.Normal", "matplotlib.pyplot.show", "numpy.zeros" ] ]
harshal306/radiometric_normalization
[ "5c3970a01954dfcb19938312b0f64b811f36deff" ]
[ "radiometric_normalization/wrappers/display_wrapper.py" ]
[ "'''\nCopyright 2015 Planet Labs, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n'''\nimport numpy\nfrom osgeo import gdal\n\nfrom radiometric_normalization import display\nfrom radiometric_normalization import gimage\n\n\ndef create_pixel_plots(candidate_path, reference_path, base_name,\n last_band_alpha=False, limits=None, custom_alpha=None):\n c_ds, c_alpha, c_band_count = _open_image_and_get_info(\n candidate_path, last_band_alpha)\n r_ds, r_alpha, r_band_count = _open_image_and_get_info(\n reference_path, last_band_alpha)\n\n _assert_consistent(c_alpha, r_alpha, c_band_count, r_band_count)\n\n if custom_alpha is not None:\n combined_alpha = custom_alpha\n else:\n combined_alpha = numpy.logical_and(c_alpha, r_alpha)\n valid_pixels = numpy.nonzero(combined_alpha)\n\n for band_no in range(1, c_band_count + 1):\n c_band = gimage.read_single_band(c_ds, band_no)\n r_band = gimage.read_single_band(r_ds, band_no)\n file_name = '{}_{}.png'.format(base_name, band_no)\n display.plot_pixels(file_name, c_band[valid_pixels],\n r_band[valid_pixels], limits)\n\n\ndef create_all_bands_histograms(candidate_path, reference_path, base_name,\n last_band_alpha=False,\n color_order=['b', 'g', 'r', 'y'],\n x_limits=None, y_limits=None):\n c_gimg = gimage.load(candidate_path, last_band_alpha=last_band_alpha)\n r_gimg = gimage.load(reference_path, last_band_alpha=last_band_alpha)\n\n gimage.check_comparable([c_gimg, r_gimg])\n\n combined_alpha = numpy.logical_and(c_gimg.alpha, r_gimg.alpha)\n valid_pixels = numpy.nonzero(combined_alpha)\n\n file_name = '{}_histograms.png'.format(base_name)\n display.plot_histograms(\n file_name,\n [c_band[valid_pixels] for c_band in c_gimg.bands],\n [r_band[valid_pixels] for r_band in r_gimg.bands],\n color_order, x_limits, y_limits)\n\n\ndef _open_image_and_get_info(path, last_band_alpha):\n gdal_ds = gdal.Open(path)\n alpha_band, band_count = gimage.read_alpha_and_band_count(\n gdal_ds, last_band_alpha=last_band_alpha)\n return gdal_ds, alpha_band, band_count\n\n\ndef _assert_consistent(c_alpha, r_alpha, c_band_count, r_band_count):\n assert r_band_count == c_band_count\n assert r_alpha.shape == c_alpha.shape\n" ]
[ [ "numpy.logical_and", "numpy.nonzero" ] ]
r1cc4rdo/marble_solitaire
[ "1b465a232bdd81a3c0c9f9cd29c83bc7367d25f6" ]
[ "search.py" ]
[ "import numpy as np\n\nfrom board_io import load_board\n\npowers_of_2 = None # used as bit masks for each bit in the board representation\nboard_template = None # see board_io. -1 on unused grid elements, 0 empty, 1 filled with marble/peg\nmove_bit_masks = None # has 1s on the corresponding move representation bits\nvalid_test_fwd = None # (bit_board & move_masks) ^ valid_test is 0 iff valid i.e. src, mid, dest == 1, 1, 0\nvalid_test_bwd = None # (bit_board & move_masks) ^ valid_test is 0 iff valid i.e. src, mid, dest == 0, 0, 1\nshuffle_bit_masks = None # powers_of_2 * (bit_board & shuffle_mask != 0) is an rotation/reflection equivalent board\n\n\ndef generate_moves_and_shuffles(template):\n \"\"\"\n Given an initial board representation, returns the list of valid moves' indexes and the permutations\n required to generate equivalent board positions (equivalent up to an arbitrary rotation and flip).\n Valid moves are returned as list of triples of indexes in the bit representation of the board, specifying\n respectively the index of the starting, intermediate and final location of a move.\n \"\"\"\n bit_index = template.copy()\n valid_rc = np.argwhere(bit_index >= 0)\n bit_index[valid_rc[:, 0], valid_rc[:, 1]] = np.arange(len(valid_rc)) # from location to corresponding bit index\n\n valid_moves = [] # list of (start, intermediate, end) indexes\n for displacement in ((0, -2), (-2, 0), (0, 2), (2, 0)): # left, up, right, down\n for source, destination in zip(valid_rc, valid_rc + displacement):\n if destination.tolist() in valid_rc.tolist(): # tolist() to disable broadcasting\n (sr, sc), (ir, ic), (dr, dc) = source, (source + destination) // 2, destination\n valid_moves.append((bit_index[sr, sc], bit_index[ir, ic], bit_index[dr, dc]))\n\n shuffles = [] # permutations to obtain equivalent boards\n flipped_index = np.fliplr(bit_index) # mirrored\n for _ in range(4): # rotated 90, 180, 270 degrees\n shuffles.extend((bit_index, flipped_index))\n bit_index, flipped_index = map(np.rot90, (bit_index, flipped_index))\n\n return valid_moves, tuple(tuple(s[s >= 0]) for s in shuffles)\n\n\ndef initialize_for_board(board_name):\n \"\"\"\n Initializes global variables powers_of_2, board_template, move_bit_masks, valid_test_fwd, valid_test_bwd and\n shuffle_bit_masks for a given board type. 
The variables are shared across all functions in this module.\n    See the comments at their declaration site for a detailed description of each.\n    \"\"\"\n    global powers_of_2, board_template, move_bit_masks, valid_test_fwd, valid_test_bwd, shuffle_bit_masks\n\n    board_template = np.array(load_board(board_name))\n    valid_moves, shuffles = generate_moves_and_shuffles(board_template)\n\n    valid_moves_bitmasks = np.power(2, valid_moves)\n    move_bit_masks = np.sum(valid_moves_bitmasks, axis=1)\n    valid_test_fwd = np.sum(valid_moves_bitmasks[:, 0:2], axis=1)\n    valid_test_bwd = valid_moves_bitmasks[:, 2]\n\n    shuffle_bit_masks = np.power(2, shuffles)\n    powers_of_2 = 2 ** np.arange(len(shuffles[0]))\n\n    return board_to_int(board_template)\n\n\ndef board_to_int(board):\n    return np.sum(powers_of_2[board[board >= 0] > 0])\n\n\ndef int_to_board(bit_board):\n    board = np.copy(board_template)\n    board[board >= 0] = (bit_board & powers_of_2) > 0\n    return board\n\n\ndef parents(bit_boards):\n    return next_boards(bit_boards, valid_test_bwd)\n\n\ndef children(bit_boards):\n    return next_boards(bit_boards, valid_test_fwd)\n\n\ndef next_boards(bit_boards, valid_test):\n    \"\"\"\n    Computes the board configurations that can be reached through valid forward and backward moves.\n    A forward move is the standard jump to remove mechanic; backward is the opposite process.\n    \"\"\"\n    local_move_mask = move_bit_masks\n    all_next_boards = set()\n    for bit_board in bit_boards:\n        valid = ((bit_board & local_move_mask) ^ valid_test) == 0\n        next_bit_boards = bit_board ^ local_move_mask[valid]\n        all_next_boards.update(next_bit_boards)\n    return all_next_boards\n\n\ndef equivalent_boards(bit_boards, include_self=True):\n\n    if not isinstance(bit_boards, set):\n        bit_boards = {bit_boards}\n    shuffle_masks = shuffle_bit_masks if include_self else shuffle_bit_masks[1:]\n    boards_bits = ((bit_board & powers_of_2) != 0 for bit_board in bit_boards)\n    return {np.sum(shuffle_mask[board_bits]) for board_bits in boards_bits for shuffle_mask in shuffle_masks}\n\n\ndef unique_boards(bit_boards):\n\n    unique_bit_boards = set()\n    while bit_boards:\n        bit_board = bit_boards.pop()\n        unique_bit_boards.add(bit_board)\n        bit_boards -= equivalent_boards(bit_board, include_self=False)\n    return unique_bit_boards\n\n\ndef canonical(bit_boards):\n    if not isinstance(bit_boards, set):\n        bit_boards = {bit_boards}\n\n    # canonical representative: the minimum over each board's equivalence class\n    return {min(equivalent_boards(bit_board)) for bit_board in bit_boards}" ]
[ [ "numpy.power", "numpy.fliplr", "numpy.argwhere", "numpy.copy", "numpy.sum" ] ]
RounakPython/reconcile-a-report-using-pandas
[ "b3308236cb7057dfb6b78adc8192b2f221899890" ]
[ "code.py" ]
[ "# --------------\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt \r\n\r\ndf = pd.read_csv(path)\r\n\r\ndf['state'] = df['state'].apply(lambda x:x.lower())\r\ndf['total'] = df['Jan'] + df['Feb'] + df['Mar']\r\n\r\nsum_row = df[['Jan', 'Feb', 'Mar', 'total']].sum()\r\nprint(sum_row)\r\n\r\ndf_final = df.append(sum_row, ignore_index=True)\n\n\n# --------------\nimport requests\n\n# Code starts here\nurl = 'https://en.wikipedia.org/wiki/List_of_U.S._state_abbreviations'\n\nresponse = requests.get(url)\ndf1 = pd.read_html(response.content)[0]\ndf1 = df1.iloc[11:, :]\ndf1 = df1.rename(columns=df1.iloc[0, :]).iloc[1:, :]\n\ndf1['United States of America'] = df1['United States of America'].apply(lambda x : x.replace(' ','')).astype(object)\n\n# Code ends here\n\n\n# --------------\ndf1['United States of America'] = df1['United States of America'].astype(str).apply(lambda x: x.lower())\ndf1['US'] = df1['US'].astype(str)\n\n# Code starts here\n\nmapping = df1.set_index('United States of America')['US'].to_dict()\ndf_final.insert(6, 'abbr', np.nan)\ndf_final['abbr'] = df_final['state'].map(mapping)\nprint(df_final.head(15))\n\n# Code ends here\n\n\n# --------------\n# Code stars here\n#df_final[df_final['state']=='mississipi']['abbr'].replace(np.nan, 'MS', inplace=True)\n#df_final[df_final['state']=='tenessee']['abbr'].replace(np.nan, 'TN', inplace=True)\n\ndf_mississipi = df_final[df_final['state'] == 'mississipi'].replace(np.nan, 'MS')\n\ndf_tenessee = df_final[df_final['state'] == 'tenessee'].replace(np.nan, 'TN')\n\n\n# replace the final_df\ndf_final.replace(df_final.iloc[6], df_mississipi, inplace=True)\ndf_final.replace(df_final.iloc[10], df_tenessee, inplace=True)\n\n# Code ends here\n\n\n# --------------\n# Code starts here\n\ndf_sub = df_final.groupby(['abbr'])['Jan', 'Feb', 'Mar', 'total'].sum()\n\nformatted_df = df_sub.applymap(lambda x:\"${:,.0f}\".format(x))\n\n# Code ends here\n\n\n# --------------\n# Code starts here\n\nsum_row = df_sub[['Jan', 'Feb', 'Mar', 'total']].sum()\ndf_sub_sum = pd.DataFrame(data=sum_row).T\n\ndf_sub_sum = df_sub_sum.applymap(lambda x: \"${:,.0f}\".format(x))\n\nfinal_table = formatted_df.append(df_sub_sum)\nprint(final_table)\n\nfinal_table = final_table.rename(index={0: 'Total'})\nprint(final_table)\n# Code ends here\n\n\n# --------------\n# Code starts here\n\ndf_sub['total'] = df_sub['Jan'] + df_sub['Feb'] + df_sub['Mar']\n#df_sub.head()\n#dict_total = df_sub['total'].value_counts().to_dict()\n\ndf_sub['total'].plot(kind= 'pie')\n\n#plt.figure(figsize=(10,10))\n#plt.pie(dict_total.values(), dict_total.keys(), autopct='%1.1f%%')\n#plt.axis('equal')\n# Code ends here\n\n\n" ]
[ [ "pandas.read_csv", "pandas.read_html", "pandas.DataFrame" ] ]
priyald17/emissions-assumptions
[ "3159182b84deaaa086af56c13a08dae6a510c0ac" ]
[ "data/date_helpers.py" ]
[ "#/usr/bin/env python3\n\n###############################################################################\n# Helper functions for managing dates and time zones.\n###############################################################################\n\nimport pandas as pd\nfrom datetime import datetime, timedelta\nimport pytz\nimport numpy as np\n\ndef get_df_dates_to_utc(df):\n '''Converts datetime index of data frame from Eastern to UTC.\n\n Note: pytz's localization function is a bit faulty during the EDT --> EST\n daylight savings transition (the time zone changes at the wrong hour), so\n this function is best-suited for situations where some off-by-one errors \n are not a problem during the daylight savings transition day.\n If off-by-one errors matter, use get_df_dates_tz_to_utc.\n\n Args:\n df: Pandas data frame with naive datetime index in eastern time.\n\n Returns:\n Input data frame with datetime index converted to UTC.\n '''\n\n # Ensure we don't modify the passed-in df\n df = df.copy()\n\n # Add time zone to dates\n eastern = pytz.timezone('US/Eastern')\n df['with_tz'] = df.index.map(lambda x: eastern.localize(x))\n \n # Convert to UTC\n df['DATE_UTC'] = df['with_tz'].map(lambda x: x.astimezone(pytz.utc))\n \n # Reorganize data frame\n df.set_index('DATE_UTC', inplace=True)\n df.drop('with_tz', axis=1, inplace=True)\n return df \n\ndef get_df_dates_tz_to_utc(df):\n '''Creates version of data frame with datetime index in UTC.\n\n Note: This function was developed to fix some issues with pytz's default\n localization. To use it, you must first create a column in the data frame\n with time zone information.\n\n Args:\n df: Pandas data frame with index containing naive datetimes\n and a 'tz' column indicating the time zone (EST or EDT)\n\n Returns:\n Input data frame with datetime index converted to UTC.\n '''\n\n # Ensure we don't modify the passed-in df\n df = df.copy()\n \n # Make date a normal column so we can add tzinfo\n df.index.name = 'DATE'\n df.reset_index(inplace=True)\n \n # Add time zone to dates\n get_offset = lambda tz: pytz.FixedOffset(-300 if tz == 'EST' else -240)\n df['with_tz'] = df.apply(lambda row: get_offset(row['tz']).localize(row['DATE']),\n axis=1)\n \n # Convert to UTC\n df['DATE_UTC'] = df['with_tz'].map(lambda x: x.astimezone(pytz.utc))\n\n # Reorganize data frame\n df.set_index('DATE_UTC', inplace=True)\n df.drop(['DATE', 'with_tz'], axis=1, inplace=True)\n return df\n\n\ndef get_tz_name(date_val, hr_val):\n '''Get time zone given date and hour.\n\n Note: This function was created to deal with some bugs in default \n pytz localization.\n\n Args:\n date_val: Datetime with correct year, month, and day values.\n hr_val: Hour value.\n\n Returns:\n Name of correct time zone (EST or EDT).\n '''\n\n # Get previous hour value, giving user the option to store fractional\n # hours when hour is ambiguous during EDT --> EST transition.\n # E.g. 
during the EDT --> EST transition, there are two 1ams.\n # For 1am EDT (stored as hr_val = 1), prev_hour = 0.\n # For 1am EST (stored as hr_val = 1.5), prev_hour = 1.\n prev_hour = max(0, int(np.floor(hr_val-0.5)))\n\n # Localize and get time zones for (prev_hour):59 and (curr_hour):00\n dt_59 = datetime(date_val.year, date_val.month, date_val.day,\n hour=prev_hour, minute=59)\n dt_00 = dt_59 + timedelta(minutes=1) \n \n eastern = pytz.timezone('US/Eastern')\n dt_minus_one = eastern.localize(dt_59)\n dt_orig_hr = eastern.localize(dt_00)\n\n minus_tz = dt_minus_one.tzinfo.tzname(dt_minus_one)\n orig_tz = dt_orig_hr.tzinfo.tzname(dt_orig_hr)\n \n # Figure out actual time zone\n if (minus_tz != orig_tz and dt_orig_hr.hour == 3):\n # If time zone transitioned and it's 3am, we're in EST --> EDT.\n # The EDT localization is correct.\n return orig_tz\n else:\n # In EDT --> EST case, the (prev_hour):59 localization is correct.\n # If no transition, we can return either tz.\n return minus_tz \n\nmonth_to_season = ['winter'] * 3 + ['trans'] + ['summer'] * 5 + ['trans'] + ['winter'] * 2\ndef add_season(df):\n '''Given DataFrame with datetime index, label season.\n\n Args: \n df: DataFrame with datetime index\n\n Returns:\n Copy of df with season column added\n '''\n df_out = pd.DataFrame(df)\n df_out['season'] = df_out.index.map(lambda x: month_to_season[x.month - 1])\n return df_out" ]
[ [ "numpy.floor", "pandas.DataFrame" ] ]
Tord-Zhang/DPT
[ "0fa1b6e65ce449c6576eca46492f4b84245e2022" ]
[ "hubconf.py" ]
[ "dependencies = [\"torch\"]\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport cv2\nimport util.io\nfrom torchvision.transforms import Compose\nfrom dpt.models import DPTDepthModel\nfrom dpt.transforms import Resize, NormalizeImage, PrepareForNet\n\ndef DPT(pretrained=True, model_type=\"dpt_hybrid\", optimize=False):\n \n default_models = {\n \"dpt_large\": \"dpt_large-midas-2f21e586.pt\",\n \"dpt_hybrid\": \"dpt_hybrid-midas-501f0c75.pt\",\n }\n\n if model_type not in default_models.keys():\n raise ValueError(\"Only support model type dpt_large or dpt_hybrid, dpt_hybrid for default setting\")\n \n state_dict = None\n if pretrained:\n checkpoint = (\n \"https://github.com/Tord-Zhang/DPT/releases/download/torchhub/{}\".format(\n default_models[model_type]\n )\n )\n state_dict = torch.hub.load_state_dict_from_url(\n checkpoint, map_location=torch.device(\"cpu\"), progress=True, check_hash=True\n )\n\n if model_type == \"dpt_large\": # DPT-Large\n model = DPTDepthModel(\n state_dict=state_dict,\n backbone=\"vitl16_384\",\n non_negative=True,\n enable_attention_hooks=False,\n )\n elif model_type == \"dpt_hybrid\": # DPT-Hybrid\n model = DPTDepthModel(\n state_dict=state_dict,\n backbone=\"vitb_rn50_384\",\n non_negative=True,\n enable_attention_hooks=False,\n )\n \n model.eval()\n return model\n\n\ndef transforms(model_type=\"dpt_hybrid\"):\n import cv2\n from torchvision.transforms import Compose\n from dpt.models import DPTDepthModel\n from dpt.midas_net import MidasNet_large\n from dpt.transforms import Resize, NormalizeImage, PrepareForNet\n\n\n if model_type == \"dpt_large\": # DPT-Large\n normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n elif model_type == \"dpt_hybrid\": # DPT-Hybrid\n normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n else:\n raise ValueError(\"Only support model type dpt_large or dpt_hybrid, dpt_hybrid for default setting\")\n\n transform = Compose(\n [\n Resize(\n 384,\n 384,\n resize_target=None,\n keep_aspect_ratio=True,\n ensure_multiple_of=32,\n resize_method=\"minimal\",\n image_interpolation_method=cv2.INTER_CUBIC,\n ),\n normalization,\n PrepareForNet(),\n ]\n )\n\n return transform\n\n\ndef read_image():\n return util.io.read_image\n" ]
[ [ "torch.device" ] ]
egracheva/SMILES-X
[ "e2ec7b070b491ed9eb204685b790219da6c46377" ]
[ "SMILESX/inference.py" ]
[ "import numpy as np\nimport pandas as pd\nimport os\n\nfrom rdkit import Chem\n\nfrom keras.models import load_model\nfrom keras import backend as K\nfrom keras import metrics\nimport tensorflow as tf\n\nfrom SMILESX import utils, model, token, augm\n\n##\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True # dynamically grow the memory used on the GPU\nconfig.log_device_placement = True # to log device placement (on which device the operation ran)\nsess = tf.Session(config=config)\nK.set_session(sess) # set this TensorFlow session as the default session for Keras\n\n## Inference on the SMILESX predictions\n# smiles_list: targeted SMILES list for property inference (Default: ['CC','CCC','C=O'])\n# data_name: dataset's name\n# data_units: property's SI units\n# k_fold_number: number of k-folds used for inference\n# augmentation: SMILES's augmentation (Default: False)\n# outdir: directory for outputs (plots + .txt files) -> 'Inference/'+'{}/{}/'.format(data_name,p_dir_temp) is then created\n# returns:\n# Array of SMILES with their inferred property (mean, standard deviation) from models ensembling\ndef Inference(data_name, \n smiles_list = ['CC','CCC','C=O'], \n data_units = '',\n k_fold_number = 8,\n augmentation = False, \n outdir = \"../data/\"):\n \n if augmentation:\n p_dir_temp = 'Augm'\n else:\n p_dir_temp = 'Can'\n \n input_dir = outdir+'Main/'+'{}/{}/'.format(data_name,p_dir_temp)\n save_dir = outdir+'Inference/'+'{}/{}/'.format(data_name,p_dir_temp)\n os.makedirs(save_dir, exist_ok=True)\n \n print(\"***SMILES_X for inference starts...***\\n\\n\")\n np.random.seed(seed=123)\n seed_list = np.random.randint(int(1e6), size = k_fold_number).tolist()\n \n print(\"***Checking the SMILES list for inference***\\n\")\n smiles_checked = list()\n smiles_rejected = list()\n for ismiles in smiles_list:\n mol_tmp = Chem.MolFromSmiles(ismiles)\n if mol_tmp != None:\n smiles_can = Chem.MolToSmiles(mol_tmp)\n smiles_checked.append(smiles_can)\n else:\n smiles_rejected.append(ismiles)\n \n if len(smiles_rejected) > 0:\n with open(save_dir+'rejected_smiles.txt','w') as f:\n for ismiles in smiles_rejected:\n f.write(\"%s\\n\" % ismiles)\n \n if len(smiles_checked) == 0:\n print(\"***Process of inference automatically aborted!***\")\n print(\"The provided SMILES are all incorrect and could not be verified via RDKit.\")\n return\n \n smiles_x = np.array(smiles_checked)\n smiles_y = np.array([[np.nan]*len(smiles_checked)]).flatten()\n \n # data augmentation or not\n if augmentation == True:\n print(\"***Data augmentation.***\\n\")\n canonical = False\n rotation = True\n else:\n print(\"***No data augmentation has been required.***\\n\")\n canonical = True\n rotation = False\n\n smiles_x_enum, smiles_x_enum_card, smiles_y_enum = \\\n augm.Augmentation(smiles_x, smiles_y, canon=canonical, rotate=rotation)\n\n print(\"Enumerated SMILES: {}\\n\".format(smiles_x_enum.shape[0]))\n \n print(\"***Tokenization of SMILES.***\\n\")\n # Tokenize SMILES \n smiles_x_enum_tokens = token.get_tokens(smiles_x_enum)\n\n # models ensembling\n smiles_y_pred_mean_array = np.empty(shape=(0,len(smiles_checked)), dtype='float')\n for ifold in range(k_fold_number):\n \n # Tokens as a list\n tokens = token.get_vocab(input_dir+data_name+'_Vocabulary.txt')\n # Add 'pad', 'unk' tokens to the existing list\n vocab_size = len(tokens)\n tokens, vocab_size = token.add_extra_tokens(tokens, vocab_size)\n\n # Transformation of tokenized SMILES to vector of intergers and vice-versa\n token_to_int = 
token.get_tokentoint(tokens)\n int_to_token = token.get_inttotoken(tokens)\n \n # Best architecture to visualize from\n model_train = load_model(input_dir+'LSTMAtt_'+data_name+'_model.best_seed_'+str(seed_list[ifold])+'.hdf5', \n custom_objects={'AttentionM': model.AttentionM()})\n\n if ifold == 0:\n # Maximum of length of SMILES to process\n max_length = model_train.layers[0].output_shape[-1]\n print(\"Full vocabulary: {}\\nOf size: {}\\n\".format(tokens, vocab_size))\n print(\"Maximum length of tokenized SMILES: {} tokens\\n\".format(max_length))\n\n model_train.compile(loss=\"mse\", optimizer='adam', metrics=[metrics.mae,metrics.mse])\n\n # predict and compare for the training, validation and test sets\n smiles_x_enum_tokens_tointvec = token.int_vec_encode(tokenized_smiles_list = smiles_x_enum_tokens, \n max_length = max_length, \n vocab = tokens)\n\n smiles_y_pred = model_train.predict(smiles_x_enum_tokens_tointvec)\n\n # compute a mean per set of augmented SMILES\n smiles_y_pred_mean, _ = utils.mean_median_result(smiles_x_enum_card, smiles_y_pred)\n \n smiles_y_pred_mean_array = np.append(smiles_y_pred_mean_array, smiles_y_pred_mean.reshape(1,-1), axis = 0)\n \n if ifold == (k_fold_number-1):\n smiles_y_pred_mean_ensemble = np.mean(smiles_y_pred_mean_array, axis = 0)\n smiles_y_pred_sd_ensemble = np.std(smiles_y_pred_mean_array, axis = 0)\n\n pred_from_ens = pd.DataFrame(data=[smiles_x,\n smiles_y_pred_mean_ensemble,\n smiles_y_pred_sd_ensemble]).T\n pred_from_ens.columns = ['SMILES', 'ens_pred_mean', 'ens_pred_sd']\n \n print(\"***Inference of SMILES property done.***\")\n \n return pred_from_ens\n##\n" ]
[ [ "numpy.random.seed", "pandas.DataFrame", "tensorflow.ConfigProto", "numpy.std", "numpy.mean", "tensorflow.Session", "numpy.array" ] ]
MimansaSharma15/Animo
[ "351edbbaeb6f8bc529602a888fb39e9d2bee010d" ]
[ "src/main.py" ]
[ "import torch \nfrom torch import nn as nn \nfrom torch.nn import functional as f \nfrom data import main_data\nfrom torch.utils.data import DataLoader\nfrom models import Encoder, Decoder\nimport random\nimport time \nfrom tqdm import tqdm\nfrom models import Seq2Seq, Encoder, Decoder\nfrom torch.utils.tensorboard import SummaryWriter\nfrom tqdm import tqdm\n\n'''\n\nMAIN FILE TO TRAIN THE\nseq2seq MODEL\n-->\nIMPORTING THE PREVIOUSLY CREATED MODEL \n-->\nSETTING A TRAINING LOOP\n\n'''\n\n\ndevice = \"cpu\"\nbs = 3\nteacher_forcing_ratio = 0.5\n\ndata = main_data()\nmax, vocab_enc, vocab_dec = data.len_all()\n\ntrain_loader = DataLoader(data,batch_size=bs)\nencoder_net = Encoder(vocab_enc, 150, 200, 1, 0.3).to(\"cpu\")\ndecoder_net = Decoder(vocab_dec,150,200,vocab_dec,1,0.3,).to(\"cpu\")\n\nmodel = Seq2Seq(encoder_net, decoder_net,vocab_dec)\n\noptimizer = torch.optim.Adam(model.parameters(),lr=0.001)\ncriterion = nn.CrossEntropyLoss()\n\nwriter = SummaryWriter(f\"runs/tensor_board_loss\")\nstep = 0\n\ndef save_checkpoint(state, filename=\"my_checkpoint.pth.tar\"):\n print(\"Saving.. \")\n torch.save(state, filename)\n\n\nnum_epochs = 50\nsave_model = True\nload_model = False\n\nfor epoch in range(num_epochs):\n print(f\"[Epoch {epoch} / {num_epochs}]\")\n\n if save_model:\n checkpoint = {\n \"state_dict\": model.state_dict(),\n \"optimizer\": optimizer.state_dict(),\n }\n save_checkpoint(checkpoint) \n\n model.train()\n\n for batch_idx, batch in tqdm(enumerate(train_loader),total=len(train_loader),desc=\"Training\"):\n inp_data, target = batch\n\n try:\n output = model(inp_data, target)\n except:\n break\n\n \n output = output[1:].reshape(-1, output.shape[2])\n target = target[1:].reshape(-1)\n optimizer.zero_grad()\n loss = criterion(output, target)\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)\n\n optimizer.step()\n\n # Plot to tensorboard\n writer.add_scalar(\"Training loss\", loss.item(), global_step=step)\n step += 1 \n\ntorch.save(model.state_dict(),\"model_for_faq.pt\")\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.utils.data.DataLoader", "torch.utils.tensorboard.SummaryWriter", "torch.save" ] ]
AWilcke/Dissertation
[ "b85ad38a7f336ee290d5883f5e942f54e140d0d0" ]
[ "src/svmreg/dataloader.py" ]
[ "import os\nimport torch\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\nfrom PIL import Image\nimport scipy.io as sio\nimport pickle\nfrom pathlib import Path\nimport numpy as np\n\nclass BasicDataset(Dataset):\n\n def __init__(self, labels_file, root_dir):\n \"\"\"\n Args:\n labels_file (string) : the .mat file containing the labels\n root_dir (string) : the path where the dataset is stored\n \"\"\"\n\n self.labels = sio.loadmat(os.path.join(root_dir, labels_file))['labels'][0]\n self.root_dir = root_dir\n self.preprocess = self._preprocess_fn()\n\n def _preprocess_fn(self):\n \"\"\"\n Resize image to 256x256, take a center crop of 224x224,\n squeeze between 0 and 1 and normalise according to pretraining.\n Args:\n image (array) : image to preprocess\n Returns:\n image (array) : preprocessed image\n \"\"\"\n data_transforms = transforms.Compose([\n transforms.Scale((256,256)),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n\n return data_transforms\n\n def __len__(self):\n return len(self.labels)\n\n def __getitem__(self, idx):\n img_name = os.path.join(self.root_dir, 'jpg', 'image_{:05d}.jpg'.format(idx + 1))\n image = Image.open(img_name)\n image = self.preprocess(image)\n sample = {'image' : image, 'label' : self.labels[idx]}\n return sample\n\nclass FeatureDataset(Dataset):\n\n def __init__(self, features_file, labels_file, label, n, val_labels):\n\n with open(labels_file,'rb') as f:\n self.labels = pickle.load(f)\n\n with open(features_file,'rb') as f:\n self.features = pickle.load(f)\n \n # get all OTHER val indexes\n val_indexes = []\n for i, lab in enumerate(self.labels):\n if lab in val_labels and lab != label:\n val_indexes.append(i)\n\n lab_indexes = np.where(self.labels == label)[0]\n\n label_samples = np.random.choice(lab_indexes, size=n, replace=False)\n val_samples = np.random.choice(val_indexes, size=n, replace=False)\n\n self.data = [(i, 1) for i in label_samples] + [(i, 0) for i in val_samples]\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n index, label = self.data[idx]\n return (torch.from_numpy(self.features[index]).float(), label)\n\n\nclass SVMDataset(Dataset):\n\n def __init__(self, w0_folder, w1_file, features_file, split='train'):\n \"\"\"\n Args:\n w0_folder (string) : path to folder where w0 files are stored\n w1_file (string) : path to file where w1 files are stored\n features_file (string) : path to file where features are stored\n split (string) : train or val split\n \"\"\"\n\n self.w0_list = [os.path.join(w0_folder, split, filename) \n for filename in os.listdir(os.path.join(w0_folder, split))]\n\n with open(w1_file, 'rb') as f:\n self.w1 = pickle.load(f)\n\n with open(features_file, 'rb') as f:\n self.features = pickle.load(f)\n\n def __len__(self):\n return len(self.w0_list)\n\n def __getitem__(self, idx):\n with open(self.w0_list[idx], 'rb') as f:\n sample = pickle.load(f)\n\n sample['w0'] = torch.from_numpy(sample['w0'])\n sample['w1'] = torch.from_numpy(self.w1[sample['label']-1])\n\n correct_i = torch.from_numpy(self.features[sample['correct_i']])\n wrong_i = torch.from_numpy(self.features[sample['wrong_i']])\n\n # take negative of wrong samples and concat with correct samples\n # makes the hinge loss easier, as it removes the need for the\n # correct label to determine the correct sign\n sample['train'] = torch.cat([correct_i, -wrong_i], 0)\n\n del sample['label'], sample['correct_i'], 
sample['wrong_i']\n\n return sample\n" ]
[ [ "numpy.where", "torch.cat", "torch.from_numpy", "numpy.random.choice" ] ]
Meng-Xiang-Rui/qusource
[ "44aad9fa6c3477d145e2bdd76edbbe79eec10267" ]
[ "qusource/qusource.py" ]
[ "import numpy as np\nfrom scipy.linalg import expm\nimport matplotlib.pyplot as plt\nimport scipy.stats as stats\nfrom scipy.stats import bernoulli\nimport random\n\ntrans_mat = np.zeros((4, 10))\ntrans_mat[0, 1] = trans_mat[1, 3] = trans_mat[2, 4] = trans_mat[3, 8] = 1\n\n\ndef int2bin(n, count=24):\n return \"\".join([str((n >> y) & 1) for y in range(count-1, -1, -1)])\n\n\ndef loc(x):\n \"\"\"\n turn a string of 0/1 into its decimal number\n :param x: string of 0/1\n :return: Decimal number\n \"\"\"\n return int(x, 2)\n\n\ndef dagger(x):\n return x.T.conj()\n\n\ndef state_init(N, site):\n \"\"\"\n :param N: length of state\n :param site: bit string of the site\n :type site: str\n :return: state\n \"\"\"\n init_state = np.zeros(2**N)\n init_state[loc(site)] = 1\n return init_state\n\n\ndef W_state(N, log = False):\n\n coe = 1/pow(N, 0.5)\n state = np.zeros(2**N, dtype = 'complex')\n for i in range(N):\n state[2**i] = coe\n if log:\n f = open('w_psi'+str(N)+'.txt', mode='w')\n for i in range(2**N):\n f.write(str(state[i].real)+' '+str(state[i].imag)+'\\n')\n f.close()\n return state\n\n\ndef state_save(state, path=None):\n N = int(np.log2(len(state)))\n path = path if path else str(N)+' qubits_state'+'.txt'\n f = open(path, mode='w')\n for i in range(2 ** N):\n f.write(str(state[i].real) + ' ' + str(state[i].imag) + '\\n')\n f.close()\n print(path)\n\n\ndef amp_save(state, path=None):\n N = int(np.log2(len(state)))\n path = path if path else str(N)+' qubits_state_amp'+'.txt'\n f = open(path, mode='w')\n for i in range(2 ** N):\n f.write(str(np.abs(state[i])) + ' ' + str(0.0000) + '\\n')\n f.close()\n print(path)\n\n\ndef sparse_check(x):\n tmp = x.flatten(order='C')\n nonzero = 0\n for i in range(len(tmp)):\n if tmp[i] != 0:\n nonzero += 1\n return nonzero, nonzero/len(tmp)\n\n\ndef unitary_check(x):\n threshold = 1e-10\n distance = np.linalg.norm(np.dot(dagger(x),x)-np.eye(len(x)))\n if distance<threshold:\n return True\n else:\n print('not unitary, error = {}'.format(distance))\n\n\ndef set_bit_val(byte, index, N, val):\n \"\"\"\n 更改某个字节中某一位(Bit)的值\n\n :param byte: 准备更改的字节原值\n :param index: 待更改位的序号,从右向左0开始,0-7为一个完整字节的8个位\n :param val: 目标位预更改的值,0或1\n :returns: 返回更改后字节的值\n \"\"\"\n if val:\n return byte | (1 << (N-index))\n else:\n return byte & ~(1 << (N-index))\n\n\ndef site(data, N, i):\n return data >> (N-i) & 1\n\n\ndef fastmul(m,n, gate, state):\n N = int(np.log2(len(state)))\n index = [2*site(i,N,m)+site(i,N,n) for i in range(2**N)]\n gate = gate.T\n tmat = gate[:, index]\n v = np.arange(2**N).reshape(1,2**N).repeat(4,0)\n for i in range(4):\n p = site(i, 2, 1)\n q = site(i, 2, 2)\n v[i, :] = set_bit_val(v[i, :], m, N, p)\n v[i, :] = set_bit_val(v[i, :], n, N, q)\n v = state[v]\n tmat *= v\n res = tmat.sum(0)\n return res\n\n\n\n\n\ndef swap(U, J, t, Delta=0):\n H = np.array([[U, -np.sqrt(2)*J, 0, 0, 0, 0, 0, 0, 0,0],\n [-np.sqrt(2)*J, Delta, -np.sqrt(2)*J, 0, 0, 0, 0, 0, 0,0],\n [0, -np.sqrt(2)*J, U+2*Delta, 0, 0, 0, 0, 0, 0,0],\n [0, 0, 0, Delta, 0, -J, -J, 0, 0, 0],\n [0, 0, 0, 0, Delta, -J, -J, 0, 0, 0],\n [0, 0, 0, -J, -J, U, 0, 0, 0, 0],\n [0, 0, 0, -J, -J, 0, U+2*Delta, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, U, -np.sqrt(2)*J, 0],\n [0, 0, 0, 0, 0, 0, 0, -np.sqrt(2)*J, Delta, -np.sqrt(2)*J],\n [0, 0, 0, 0, 0, 0, 0, 0, -np.sqrt(2)*J, U+2*Delta]])\n Evolution = expm(H * 2*np.pi*t*-1j)\n swap = np.dot(trans_mat, Evolution)\n swap = np.dot(swap, trans_mat.T)\n swap /= 1j\n return swap\n\n\ndef sto(t, Delta):\n \"\"\"\n only for 01/10 base\n :param t:\n :param Delta:\n :return:\n 
\"\"\"\n phase = np.array([[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 2*Delta, 0],\n [0, 0, 0, 0]])\n Evolution = expm(phase * 2*np.pi*t*-1j)\n return Evolution\n\n\ndef noise(x):\n mu, sigma = x, x/25.76\n np.random.seed()\n return np.random.normal(mu, sigma, 1)[0]\n\n\ndef swap_noise(U, J, t, Delta = 0):\n # U, J, t, Delta = noise(U), noise(J), noise(t), Delta = noise(Delta)\n return swap(noise(U), noise(J), noise(t), noise(Delta))\n\n\ndef sto_noise(t, Delta):\n return sto(noise(t), noise(Delta))\n\n\ndef NumberOf1(n):\n count = 0\n while n&0xffffffff != 0:\n count += 1\n n = n & (n-1)\n return count\n\n\ndef phase_shift(n):\n phase = 0\n for _ in range(n):\n phase += noise(2*np.pi)\n return phase\n\n\ndef dephase(n):\n numbers = []\n for i in range(2 ** n):\n numbers.append(NumberOf1(i))\n numbers = np.array(numbers)\n dephase_mat = np.diag([np.exp(phase_shift(i)*-1j) for i in numbers])\n return dephase_mat\n\n\ndef density_mat(x):\n n = len(x)\n sam_shape = len(x.shape)\n if sam_shape == 1:\n dim = len(x)\n state = x.reshape(1, dim)\n state /= np.linalg.norm(state)\n den_mat = np.dot(dagger(state), state)\n else:\n dim = len(x[0])\n den_mat = np.zeros((dim, dim))\n for i in range(n):\n state = x[i].reshape(1, dim)\n state /= np.linalg.norm(state)\n if not i:\n den_mat = np.dot(dagger(state), state)\n else:\n den_mat += np.dot(dagger(state), state)\n den_mat /= n\n return den_mat\n\n\ndef fidelity_vec(x, y):\n return (np.dot(x.conj(), y)*np.dot(y.conj(), x)/np.linalg.norm(x)**2/np.linalg.norm(y)**2).real\n\n\ndef fidelity_essemble(x,y):\n n = len(y)\n fidelity = 0\n for i in range(n):\n fidelity += fidelity_vec(x, y[i])\n return fidelity/n\n\n\ndef purity(x):\n return (np.trace(np.dot(x, x))).real\n\n\ndef distribution(x):\n x /= np.linalg.norm(x, 2)\n prob = np.zeros(x.size)\n for i in range(len(x)):\n prob[i] = np.abs(x[i])**2\n return prob\n\n\ndef dis2state(x):\n state = np.array([np.sqrt(x[i]) for i in range(len(x))])\n return state\n\n\ndef sample(x, n=1):\n N = int(np.log2(len(x)))\n res = stats.rv_discrete(values=(range(len(distribution(x))), distribution(x))).rvs(size=n)\n if n == 1:\n return res[0]\n else:\n dis = sample_distribution(res, N)\n kl = KL(distribution(x), dis)\n print(kl)\n return res, dis\n\n\ndef sample_distribution(sample, N):\n n = len(sample)\n sample_dis = np.zeros(2**N)\n for i in sample:\n sample_dis[i] += 1\n sample_dis /= n\n return sample_dis\n\n\ndef essemble_distribution(x):\n n = len(x)\n dis = np.array([x[i][i] for i in range(n)])\n return dis.real\n\n\ndef KL(p, q):\n divergence = 0\n for i in range(len(p)):\n if p[i] and q[i]:\n divergence += p[i]*np.log(p[i]/q[i])\n return divergence\n\n\ndef KL_new(P,Q):\n N = len(P)\n epsilon = 0.01/N\n P = P + epsilon\n Q = Q + epsilon\n divergence = np.sum(P*np.log(P/Q))\n return divergence\n\n\ndef sample_plot(dis, N, M, KL=None):\n x = [int2bin(i,N) for i in range(len(dis))]\n plt.bar(x, dis)\n plt.ylim(0,1)\n for x, y in enumerate(dis):\n plt.text(x, y+0.02, '%s' %y, ha='center')\n if not KL:\n plt.title('{} qubits with {} measurements'.format(N, M))\n else:\n plt.title('{} qubits with {} measurements\\n KL = {}'.format(N, M, KL))\n plt.ylabel('Probility')\n plt.show()\n\n\ndef trans_base(bases, x):\n Z2Z = np.eye(2)\n Z2X = 1 / np.sqrt(2) * np.array([[1, 1], [1, -1]])\n Z2Y = 1 / np.sqrt(2) * np.array([[1, -1j], [1, 1j]])\n decode = {'Z': Z2Z, 'X': Z2X, 'Y': Z2Y, 'z': Z2Z, 'x': Z2X,'y': Z2Y}\n tmp_mat = decode[bases[0]]\n for i in range(1,len(bases)):\n tmp_mat = np.kron(tmp_mat, decode[bases[i]])\n return 
np.dot(tmp_mat, x)\n\n\ndef sample_bases(bases, state, M):\n N = int(np.log2(len(state)))\n f1 = open(str(N)+' qubits_measurement.txt', mode='w')\n f2 = open(str(N)+' qubits_measurement_bases.txt', mode='w')\n f3 = open(str(N)+' qubits_measurement_bases_set.txt', mode='w')\n for i in bases:\n measure = sample(trans_base(i, state), M)\n for j in measure:\n tmp = int2bin(j, N)\n for k in tmp:\n f1.write(k+' ')\n f1.write('\\n')\n for k in i:\n f2.write(k+' ')\n f2.write('\\n')\n for j in i:\n f3.write(j+' ')\n f3.write('\\n')\n f1.close()\n f2.close()\n f3.close()\n\n\ndef Z_sample(state, M, error=0):\n N = int(np.log2(len(state)))\n f1 = open(str(N)+' qubits_measurement_z.txt', mode='w')\n measure = sample(state, M)\n dis = sample_distribution(measure, N)\n kl = KL(dis, distribution(state))\n print(kl)\n if error:\n measure = sample_error(measure, N, error)\n dis = sample_distribution(measure, N)\n kl = KL(dis, distribution(state))\n print(kl)\n for j in measure:\n tmp = int2bin(j, N)\n for k in tmp:\n f1.write(k+' ')\n f1.write('\\n')\n f1.close()\n\n\ndef sample_error(samples, n, error):\n size = len(samples)\n flip = bernoulli.rvs(n * error, size=size)\n ker = [2 ** i for i in range(n)]\n count = 0\n for i in range(size):\n if flip[i]:\n count += 1\n flip[i] = random.choice(ker)\n print(count, count / size)\n sample_new = np.array([samples[i] ^ flip[i] for i in range(size)])\n return sample_new\n\n\ndef sample_save(samples, N, path=None):\n path = path if path else str(N)+' qubits_measurement_z.txt'\n f1 = open(path, mode='w')\n for j in samples:\n tmp = int2bin(j, N)\n for k in tmp:\n f1.write(k+' ')\n f1.write('\\n')\n f1.close()\n print(path)" ]
[ [ "numpy.dot", "numpy.sqrt", "numpy.kron", "scipy.stats.bernoulli.rvs", "numpy.arange", "numpy.eye", "matplotlib.pyplot.text", "numpy.zeros", "numpy.log", "matplotlib.pyplot.ylim", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "numpy.abs", "numpy.random.seed", "numpy.linalg.norm", "scipy.linalg.expm", "numpy.random.normal", "matplotlib.pyplot.bar" ] ]
forman/dectree
[ "5895967d3a91ce4d76806417689c66349fce2a85" ]
[ "examples/intertidal_flat_classif/verification_main.py" ]
[ "import time\n\nimport numpy as np\nimport pandas as pd\n#from intertidal_flat_classif import Input, Output, apply_rules\n#from intertidal_flat_classif_opt import Input, Output, apply_rules\nfrom intertidal_flat_classif_fuz import Input, Output, apply_rules\n\nINPUT_TXT = \"verification_input.txt\"\nEXPECTED_TXT = \"verification_expected.txt\"\n#OUTPUT_TXT = \"verification_output.txt\"\n#OUTPUT_TXT = \"verification_output_opt.txt\"\nOUTPUT_TXT = \"verification_output_fuz.txt\"\n\ninput_frame = pd.read_csv(INPUT_TXT, delimiter='\\t', skip_blank_lines=True, comment='#')\noutput_frame = pd.read_csv(EXPECTED_TXT, delimiter='\\t', skip_blank_lines=True, comment='#')\n\ninput_frame.sort_values(by=\"Label\")\noutput_frame.sort_values(by=\"Label\")\n\n# print(input_frame['Label'])\n# print(output_frame['Label'])\n\ninput_names = [\n (\"b1\", \"sand-tr_abundance\"),\n (\"b2\", \"sand-wc_abundance\"),\n (\"b3\", \"schatten_abundance\"),\n (\"b4\", \"summary_error\"),\n (\"b5\", \"steigung_red_nIR\"),\n (\"b6\", \"steigung_nIR_SWIR1\"),\n (\"b7\", \"flh\"),\n (\"b8\", \"ndvi\"),\n (\"b12\", \"reflec_483\"),\n (\"b13\", \"reflec_561\"),\n (\"b14\", \"reflec_655\"),\n (\"b15\", \"reflec_865\"),\n (\"b19\", \"muschelindex\"),\n (\"b16\", \"reflec_1609\"),\n (\"bsum\", \"reflec_sum\"),\n]\n\noutput_names = [\n \"nodata\",\n \"Wasser\",\n \"Schill\",\n \"Muschel\",\n \"dense2\",\n \"dense1\",\n \"Strand\",\n \"Sand\",\n \"Misch\",\n \"Misch2\",\n \"Schlick\",\n \"schlick_t\",\n \"Wasser2\",\n]\n\n\ndef to_array(frame, name):\n return np.array(frame[name].values)\n\n\nexpected_class = to_array(output_frame, \"Band_1\")\n\ndectree_input = Input(expected_class.size)\ndectree_output = Output(expected_class.size)\n\nfor input_name, column_name in input_names:\n setattr(dectree_input, input_name, to_array(input_frame, column_name))\n\nt0 = time.clock()\napply_rules(dectree_input, dectree_output)\nms_first_time = (time.clock() - t0) * 1000\n\nprint('Inputs:')\nfor input_name, _ in input_names:\n print('{}: {}'.format(input_name, getattr(dectree_input, input_name)))\n\nprint('Outputs:')\nprint('{}: {}'.format('expected_class', expected_class))\nfor output_name in output_names:\n print('{}: {}'.format(output_name, getattr(dectree_output, output_name)))\n\nframe = pd.DataFrame.from_items(zip(['expected_class'] + [output_name for output_name in output_names],\n [expected_class] + [getattr(dectree_output, output_name) for output_name in\n output_names]))\n\nframe.to_csv(path_or_buf=OUTPUT_TXT, sep='\\t')\n\n\n# Performance check:\n\nn = 25\ntsum = 0\nfor i in range(n):\n for input_name, column_name in input_names:\n setattr(dectree_input, input_name, to_array(input_frame, column_name))\n t0 = time.clock()\n apply_rules(dectree_input, dectree_output)\n tsum += time.clock() - t0\n\nms_per_pixel = (tsum / n / expected_class.size) * 1000\npixel_per_sec = 1000 / ms_per_pixel\nprint('apply_rules() took {} ms for the first time'.format(ms_first_time))\nprint('apply_rules() took {} ms per pixel according to {} pixels per second'.format(ms_per_pixel, pixel_per_sec))\n" ]
[ [ "numpy.array", "pandas.read_csv" ] ]
abelcarreras/symgroup
[ "66233426d5457aa606e720794dd3a3679ef830e8" ]
[ "python/setup.py" ]
[ "from numpy.distutils.core import setup, Extension\nfrom distutils.dir_util import copy_tree\nfrom distutils.errors import DistutilsFileError\n\n#import shutil\n\n\ndef get_version_number():\n for l in open('symgroupy/__init__.py', 'r').readlines():\n if not(l.find('__version__')):\n exec(l, globals())\n return __version__\n\n\n# Make python package\n\ntry:\n copy_tree('../src', './src', update=True)\nexcept DistutilsFileError:\n pass\n\ns_dir = 'src/'\n\nsymgroupy = Extension('symgroupy.symgrouplib',\n #include_dirs=include_dirs_numpy,\n extra_f77_compile_args=['-ffixed-line-length-0'],\n libraries=['lapack', 'blas'],\n sources=['symgrouplib.pyf',\n s_dir + 'symgrouplib.F',\n s_dir + 'radius.F',\n s_dir + 'connectivity.F',\n s_dir + 'jacobi.F',\n s_dir + 'linear_algebra.F',\n s_dir + 'mass.F',\n s_dir + 'measure.F',\n s_dir + 'operations.F'])\n\nsetup(name='symgroupy',\n version=get_version_number(),\n description='symgroupy',\n author='Abel Carreras',\n author_email='[email protected]',\n packages=['symgroupy'],\n ext_modules=[symgroupy])\n" ]
[ [ "numpy.distutils.core.Extension" ] ]
mcooper/tmtoolkit
[ "cdfbaf7f20095ea45edbdf9e773544e3bb63089d" ]
[ "tests/test_utils.py" ]
[ "import string\nimport random\n\nimport pytest\nimport hypothesis.strategies as st\nfrom hypothesis import given\nimport numpy as np\n\nfrom tmtoolkit.utils import (pickle_data, unpickle_file, require_listlike, require_dictlike, require_types,\n simplified_pos, apply_to_mat_column, flatten_list, tuplize, greedy_partitioning,\n mat2d_window_from_indices, normalize_to_unit_range)\n\nPRINTABLE_ASCII_CHARS = [chr(c) for c in range(32, 127)]\n\n\ndef test_pickle_unpickle():\n pfile = 'tests/data/test_pickle_unpickle.pickle'\n input_data = ('foo', 123, [])\n pickle_data(input_data, pfile)\n\n output_data = unpickle_file(pfile)\n\n for i, o in zip(input_data, output_data):\n assert i == o\n\n\ndef test_require_listlike():\n require_listlike([])\n require_listlike([123])\n require_listlike(tuple())\n require_listlike((1, 2, 3))\n require_listlike(set())\n require_listlike({1, 2, 3})\n\n with pytest.raises(ValueError): require_listlike({})\n with pytest.raises(ValueError): require_listlike({'x': 'y'})\n with pytest.raises(ValueError): require_listlike('a string')\n\n\ndef test_require_dictlike():\n from collections import OrderedDict\n require_dictlike({})\n require_dictlike(OrderedDict())\n\n with pytest.raises(ValueError): require_dictlike(set())\n\n\ndef test_require_types():\n types = (set, tuple, list, dict)\n for t in types:\n require_types(t(), (t, ))\n\n types_shifted = types[1:] + types[:1]\n\n for t1, t2 in zip(types, types_shifted):\n with pytest.raises(ValueError): require_types(t1, (t2, ))\n\n\ndef test_simplified_pos():\n assert simplified_pos('') is None\n assert simplified_pos('N') == 'N'\n assert simplified_pos('V') == 'V'\n assert simplified_pos('ADJ') == 'ADJ'\n assert simplified_pos('ADV') == 'ADV'\n assert simplified_pos('AD') is None\n assert simplified_pos('ADX') is None\n assert simplified_pos('PRP') is None\n assert simplified_pos('XYZ') is None\n assert simplified_pos('NN') == 'N'\n assert simplified_pos('NNP') == 'N'\n assert simplified_pos('VX') == 'V'\n assert simplified_pos('ADJY') == 'ADJ'\n assert simplified_pos('ADVZ') == 'ADV'\n\n assert simplified_pos('NNP', tagset='penn') == 'N'\n assert simplified_pos('VFOO', tagset='penn') == 'V'\n assert simplified_pos('JJ', tagset='penn') == 'ADJ'\n assert simplified_pos('JJX', tagset='penn') == 'ADJ'\n assert simplified_pos('RB', tagset='penn') == 'ADV'\n assert simplified_pos('RBFOO', tagset='penn') == 'ADV'\n\n\n@given(l=st.lists(st.integers(0, 10), min_size=2, max_size=2).flatmap(\n lambda size: st.lists(st.lists(st.integers(), min_size=size[0], max_size=size[0]),\n min_size=size[1], max_size=size[1])))\ndef test_flatten_list(l):\n l_ = flatten_list(l)\n\n assert type(l_) is list\n assert len(l_) == sum(map(len, l))\n\n\n@given(seq=st.lists(st.integers()))\ndef test_tuplize(seq):\n seq_ = tuplize(seq)\n\n for i, x in enumerate(seq_):\n assert type(x) is tuple\n assert x[0] == seq[i]\n\n\n@given(mat=st.lists(st.integers(0, 10), min_size=2, max_size=2).flatmap(\n lambda size: st.lists(\n st.lists(\n st.integers(),\n min_size=size[0],\n max_size=size[0]\n ),\n min_size=size[1],\n max_size=size[1]\n )\n), col_idx=st.integers(-1, 11))\ndef test_apply_to_mat_column_identity(mat, col_idx):\n identity_fn = lambda x: x\n\n # transform to list of tuples\n mat = [tuple(row) for row in mat]\n\n n_rows = len(mat)\n\n if n_rows > 0: # make sure the supplied matrix is not ragged\n unique_n_cols = set(map(len, mat))\n assert len(unique_n_cols) == 1\n n_cols = unique_n_cols.pop()\n else:\n n_cols = 0\n\n if n_rows == 0 or (n_rows > 0 
and n_cols == 0) or col_idx < 0 or col_idx >= n_cols:\n with pytest.raises(ValueError):\n apply_to_mat_column(mat, col_idx, identity_fn)\n else:\n assert _mat_equality(mat, apply_to_mat_column(mat, col_idx, identity_fn))\n\n\n@given(mat=st.lists(st.integers(1, 5), min_size=2, max_size=2).flatmap(\n lambda size: st.lists(\n st.lists(\n st.text(PRINTABLE_ASCII_CHARS, max_size=5),\n min_size=size[0],\n max_size=size[0]\n ),\n min_size=size[1],\n max_size=size[1]\n )\n))\ndef test_apply_to_mat_column_transform(mat):\n # transform to list of tuples\n mat = [tuple(row) for row in mat]\n\n n_rows = len(mat)\n\n unique_n_cols = set(map(len, mat))\n assert len(unique_n_cols) == 1\n n_cols = unique_n_cols.pop()\n col_idx = random.randrange(0, n_cols)\n\n mat_t = apply_to_mat_column(mat, col_idx, lambda x: x.upper())\n\n assert n_rows == len(mat_t)\n\n for orig, trans in zip(mat, mat_t):\n assert len(orig) == len(trans)\n for x, x_t in zip(orig, trans):\n assert x.upper() == x_t.upper()\n\n\n@given(mat=st.lists(st.integers(1, 5), min_size=2, max_size=2).flatmap(\n lambda size: st.lists(\n st.lists(\n st.text(PRINTABLE_ASCII_CHARS, max_size=5),\n min_size=size[0],\n max_size=size[0]\n ),\n min_size=size[1],\n max_size=size[1]\n )\n))\ndef test_apply_to_mat_column_transform_expand(mat):\n # transform to list of tuples\n mat = [tuple(row) for row in mat]\n\n n_rows = len(mat)\n\n unique_n_cols = set(map(len, mat))\n assert len(unique_n_cols) == 1\n n_cols = unique_n_cols.pop()\n col_idx = random.randrange(0, n_cols)\n\n mat_t = apply_to_mat_column(mat, col_idx, lambda x: (x, x.lower(), x.upper()), expand=True)\n\n assert n_rows == len(mat_t)\n\n for orig, trans in zip(mat, mat_t):\n assert len(orig) == len(trans) - 2\n\n before, x, after = orig[:col_idx+1], orig[col_idx], orig[col_idx+1:]\n before_t, x_t, after_t = trans[:col_idx+1], trans[col_idx:col_idx+3], trans[col_idx+3:]\n\n assert before == before_t\n assert after == after_t\n\n assert len(x_t) == 3\n assert x == x_t[0]\n assert x.lower() == x_t[1]\n assert x.upper() == x_t[2]\n\n\n@given(mat=st.lists(st.integers(1, 10), min_size=2, max_size=2).flatmap(\n lambda size: st.lists(\n st.lists(\n st.integers(0, 99),\n min_size=size[0],\n max_size=size[0]\n ),\n min_size=size[1],\n max_size=size[1]\n )\n ),\n n_row_indices=st.integers(0, 10),\n n_col_indices=st.integers(0, 10),\n copy=st.booleans()\n)\ndef test_mat2d_window_from_indices(mat, n_row_indices, n_col_indices, copy):\n mat = np.array(mat)\n\n n_rows, n_cols = mat.shape\n\n if n_row_indices == 0:\n row_indices = None\n else:\n row_indices = np.random.choice(np.arange(n_rows), size=min(n_rows, n_row_indices), replace=False)\n\n if n_col_indices == 0:\n col_indices = None\n else:\n col_indices = np.random.choice(np.arange(n_cols), size=min(n_cols, n_col_indices), replace=False)\n\n window = mat2d_window_from_indices(mat, row_indices, col_indices, copy)\n\n if row_indices is None:\n asserted_y_shape = n_rows\n else:\n asserted_y_shape = len(row_indices)\n assert window.shape[0] == asserted_y_shape\n\n if col_indices is None:\n asserted_x_shape = n_cols\n else:\n asserted_x_shape = len(col_indices)\n assert window.shape[1] == asserted_x_shape\n\n if row_indices is None:\n row_indices_check = np.arange(n_rows)\n else:\n row_indices_check = row_indices\n\n if col_indices is None:\n col_indices_check = np.arange(n_cols)\n else:\n col_indices_check = col_indices\n\n for w_y, m_y in enumerate(row_indices_check):\n for w_x, m_x in enumerate(col_indices_check):\n assert window[w_y, w_x] == mat[m_y, 
m_x]\n\n\n@given(elems_dict=st.dictionaries(st.text(string.printable), st.floats(allow_nan=False, allow_infinity=False)),\n k=st.integers())\ndef test_greedy_partitioning(elems_dict, k):\n if k <= 0:\n with pytest.raises(ValueError):\n greedy_partitioning(elems_dict, k)\n else:\n bins = greedy_partitioning(elems_dict, k)\n\n if 1 < k <= len(elems_dict):\n assert k == len(bins)\n else:\n assert len(bins) == len(elems_dict)\n\n if k == 1:\n assert bins == elems_dict\n else:\n assert sum(len(b.keys()) for b in bins) == len(elems_dict)\n assert all((k in elems_dict.keys() for k in b.keys()) for b in bins)\n\n if k > len(elems_dict):\n assert all(len(b) == 1 for b in bins)\n\n\n@given(values=st.lists(st.floats(min_value=-1e10, max_value=1e10, allow_nan=False, allow_infinity=False)))\ndef test_normalize_to_unit_range(values):\n values = np.array(values)\n\n if len(values) < 2:\n with pytest.raises(ValueError):\n normalize_to_unit_range(values)\n else:\n min_ = np.min(values)\n max_ = np.max(values)\n if max_ - min_ == 0:\n with pytest.raises(ValueError):\n normalize_to_unit_range(values)\n else:\n norm = normalize_to_unit_range(values)\n assert isinstance(norm, np.ndarray)\n assert norm.shape == values.shape\n assert np.isclose(np.min(norm), 0)\n assert np.isclose(np.max(norm), 1)\n\n\ndef _mat_equality(a, b):\n return len(a) == len(b) and all(row_a == row_b for row_a, row_b in zip(a, b))\n\n\n# @given(example_list=st.lists(st.text()), example_matches=st.lists(st.booleans()), negate=st.booleans())\n# def test_filter_elements_in_dict(example_list, example_matches, negate):\n# d = {'foo': example_list}\n# matches = {'foo': example_matches}\n#\n# if len(example_list) != len(example_matches):\n# with pytest.raises(ValueError):\n# filter_elements_in_dict(d, matches, negate_matches=negate)\n# else:\n# d_ = filter_elements_in_dict(d, matches, negate_matches=negate)\n# if negate:\n# n = len(example_matches) - sum(example_matches)\n# else:\n# n = sum(example_matches)\n# assert len(d_['foo']) == n\n#\n#\n# def test_filter_elements_in_dict_differentkeys():\n# with pytest.raises(ValueError):\n# filter_elements_in_dict({'foo': []}, {'bar': []})\n# filter_elements_in_dict({'foo': []}, {'bar': []}, require_same_keys=False)\n" ]
[ [ "numpy.arange", "numpy.max", "numpy.array", "numpy.min" ] ]
johndunne2019/pands-project
[ "4a544be7a4074dc8a277775981b3619239c45872" ]
[ "Histogram.py" ]
[ "# 2019-04-15\n# John Dunne\n# To create a histogram of the data using matplotlib.pyplot \n# Script adapted from example analysis read here: https://machinelearningmastery.com/machine-learning-in-python-step-by-step/\n\nprint(\"The Histogram will appear on your screen momentarily\")\n# printed sentence to the screen as the historgram takes a few seconds to appear on the screen\nimport matplotlib.pyplot as pl\n# imported matplotlib.pyplot which will be used to plot the histogram\n# matplotlib.pyplot documentation: https://matplotlib.org/api/_as_gen/matplotlib.pyplot.hist.html and https://matplotlib.org/gallery/statistics/hist.html\nimport pandas as pd\n# pandas imported - will be used to read in the data set from my repository\n# pandas documentation: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.plot.hist.html\ndata = \"https://raw.githubusercontent.com/johndunne2019/pands-project/master/Fishers_Iris_data_set.csv\"\n# link to the data set saved in my repository\ndataset = pd.read_csv(data, header=0)\n# pandas.read used to read in the data set from my repository, header set to first row of data in the data set\n# I read about pandas.read here: https://pandas.pydata.org/pandas-docs/stable/getting_started/10min.html\ndataset.hist()\n# A histogram is plotted of the data set \n# Adapted from the example analysis here: https://machinelearningmastery.com/machine-learning-in-python-step-by-step/\n# Further reading: https://matplotlib.org/gallery/statistics/histogram_features.html\n# pyplot.hist: https://matplotlib.org/api/_as_gen/matplotlib.pyplot.hist.html\npl.show()\n# pyplot.show() command shows the histogram on the screen \n# I read about the show command here: https://matplotlib.org/api/_as_gen/matplotlib.pyplot.show.html" ]
[ [ "pandas.read_csv", "matplotlib.pyplot.show" ] ]
propelwise/ct-net-models
[ "d25f0a43eac316766f80fa93177add3bc5130bd6" ]
[ "load_dataset/custom_datasets.py" ]
[ "#custom_datasets.py\n#Copyright (c) 2020 Rachel Lea Ballantyne Draelos\n\n#MIT License\n\n#Permission is hereby granted, free of charge, to any person obtaining a copy\n#of this software and associated documentation files (the \"Software\"), to deal\n#in the Software without restriction, including without limitation the rights\n#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n#copies of the Software, and to permit persons to whom the Software is\n#furnished to do so, subject to the following conditions:\n\n#The above copyright notice and this permission notice shall be included in all\n#copies or substantial portions of the Software.\n\n#THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n#SOFTWARE\n\nimport os\nimport pickle\nimport numpy as np\nimport pandas as pd\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\n\nfrom . import utils\n\n#Set seeds\nnp.random.seed(0)\ntorch.manual_seed(0)\ntorch.cuda.manual_seed(0)\ntorch.cuda.manual_seed_all(0)\n\n###################################################\n# PACE Dataset for Data Stored in 2019-10-BigData #-----------------------------\n###################################################\nclass CTDataset_2019_10(Dataset): \n def __init__(self, setname, label_type_ld,\n label_meanings, num_channels, pixel_bounds,\n data_augment, crop_type,\n selected_note_acc_files):\n \"\"\"CT Dataset class that works for preprocessed data in 2019-10-BigData.\n A single example (for crop_type == 'single') is a 4D CT volume:\n if num_channels == 3, shape [134,3,420,420]\n if num_channels == 1, shape [402,420,420]\n \n Variables:\n <setname> is either 'train' or 'valid' or 'test'\n <label_type_ld> is 'disease_new'\n <label_meanings>: list of strings indicating which labels should\n be kept. Alternatively, can be the string 'all' in which case\n all labels are kept.\n <num_channels>: number of channels to reshape the image to.\n == 3 if the model uses a pretrained feature extractor.\n == 1 if the model uses only 3D convolutions.\n <pixel_bounds>: list of ints e.g. [-1000,200]\n Determines the lower bound, upper bound of pixel value clipping\n and normalization.\n <data_augment>: if True, perform data augmentation.\n <crop_type>: is 'single' for an example consisting of one 4D numpy array\n <selected_note_acc_files>: This should be a dictionary\n with key equal to setname and value that is a string. If the value\n is a path to a file, the file must be a CSV. Only note accessions\n in this file will be used. If the value is not a valid file path,\n all available note accs will be used, i.e. 
the model will be\n trained on the whole dataset.\"\"\"\n self.setname = setname\n self.define_subsets_list()\n self.label_type_ld = label_type_ld\n self.label_meanings = label_meanings\n self.num_channels = num_channels\n self.pixel_bounds = pixel_bounds\n if self.setname == 'train':\n self.data_augment = data_augment\n else:\n self.data_augment = False\n print('For dataset',self.setname,'data_augment is',self.data_augment)\n self.crop_type = crop_type\n assert self.crop_type == 'single'\n self.selected_note_acc_files = selected_note_acc_files\n \n #Define location of the CT volumes\n self.main_clean_path = './load_dataset/fakedata'\n self.volume_log_df = pd.read_csv('./load_dataset/fakedata/CT_Scan_Preprocessing_Log_File_FINAL_SMALL.csv',header=0,index_col=0)\n \n #Get the example ids\n self.volume_accessions = self.get_volume_accessions()\n \n #Get the ground truth labels\n self.labels_df = self.get_labels_df()\n \n # Pytorch Required Methods #------------------------------------------------\n def __len__(self):\n return len(self.volume_accessions)\n \n def __getitem__(self, idx):\n \"\"\"Return a single sample at index <idx>. The sample is a Python\n dictionary with keys 'data' and 'gr_truth' for the image and label,\n respectively\"\"\"\n return self._get_pace(self.volume_accessions[idx])\n \n # Volume Accession Methods #------------------------------------------------\n def get_note_accessions(self):\n setname_file = self.selected_note_acc_files[self.setname]\n if os.path.isfile(setname_file):\n print('\\tObtaining note accessions from',setname_file)\n sel_accs = pd.read_csv(setname_file,header=0) \n assert sorted(list(set(sel_accs['Subset_Assigned'].values.tolist())))==sorted(self.subsets_list)\n note_accs = sel_accs.loc[:,'Accession'].values.tolist()\n print('\\tTotal theoretical note accessions in subsets:',len(note_accs))\n return note_accs\n else: \n print('\\tObtaining note accessions from complete identifiers file')\n #Read in identifiers file, which contains note_accessions\n #Columns are MRN, Accession, Set_Assigned, Set_Should_Be, Subset_Assigned\n all_ids = pd.read_csv('./load_dataset/fakedata/all_identifiers.csv',header=0)\n \n #Extract the note_accessions\n note_accs = []\n for subset in self.subsets_list: #e.g. ['imgvalid_a','imgvalid_b']\n subset_note_accs = all_ids[all_ids['Subset_Assigned']==subset].loc[:,'Accession'].values.tolist()\n note_accs += subset_note_accs\n print('\\tTotal theoretical note accessions in subsets:',len(note_accs))\n return note_accs\n \n def get_volume_accessions(self):\n note_accs = self.get_note_accessions()\n #Translate note_accessions to volume_accessions based on what data has been\n #preprocessed successfully. 
volume_log_df has note accessions as the\n #index, and the column 'full_filename_npz' for the volume accession.\n #The column 'status' should equal 'success' if the volume has been\n #preprocessed correctly.\n print('\\tTotal theoretical volumes in whole dataset:',self.volume_log_df.shape[0])\n self.volume_log_df = self.volume_log_df[self.volume_log_df['status']=='success']\n print('\\tTotal successfully preprocessed volumes in whole dataset:',self.volume_log_df.shape[0])\n volume_accs = []\n for note_acc in note_accs:\n if note_acc in self.volume_log_df.index.values.tolist():\n volume_accs.append(self.volume_log_df.at[note_acc,'full_filename_npz'])\n print('\\tFinal total successfully preprocessed volumes in requested subsets:',len(volume_accs))\n #According to this thread: https://github.com/pytorch/pytorch/issues/13246\n #it is better to use a numpy array than a list to reduce memory leaks.\n return np.array(volume_accs)\n \n # Ground Truth Label Methods #----------------------------------------------\n def get_labels_df(self):\n #Get the ground truth labels based on requested label type.\n labels_df = read_in_labels(self.label_type_ld, self.setname)\n \n #Now filter the ground truth labels based on the desired label meanings:\n if self.label_meanings != 'all': #i.e. if you want to filter\n labels_df = labels_df[self.label_meanings]\n return labels_df\n \n # Fetch a CT Volume (__getitem__ implementation) #--------------------------\n def _get_pace(self, volume_acc):\n \"\"\"<volume_acc> is for example RHAA12345_6.npz\"\"\"\n #Load compressed npz file: [slices, square, square]\n ctvol = np.load(os.path.join(self.main_clean_path, volume_acc))['ct']\n \n #Prepare the CT volume data (already torch Tensors)\n data = utils.prepare_ctvol_2019_10_dataset(ctvol, self.pixel_bounds, self.data_augment, self.num_channels, self.crop_type)\n \n #Get the ground truth:\n note_acc = self.volume_log_df[self.volume_log_df['full_filename_npz']==volume_acc].index.values.tolist()[0]\n gr_truth = self.labels_df.loc[note_acc, :].values\n gr_truth = torch.from_numpy(gr_truth).squeeze().type(torch.float)\n \n #When training on only one abnormality you must unsqueeze to prevent\n #a dimensions error when training the model:\n if len(self.label_meanings)==1:\n gr_truth = gr_truth.unsqueeze(0)\n \n #Create the sample\n sample = {'data': data, 'gr_truth': gr_truth, 'volume_acc': volume_acc}\n return sample\n \n # Sanity Check #------------------------------------------------------------\n def define_subsets_list(self):\n assert self.setname in ['train','valid','test']\n if self.setname == 'train':\n self.subsets_list = ['imgtrain']\n elif self.setname == 'valid':\n self.subsets_list = ['imgvalid_a']\n elif self.setname == 'test':\n self.subsets_list = ['imgtest_a','imgtest_b','imgtest_c','imgtest_d']\n print('Creating',self.setname,'dataset with subsets',self.subsets_list)\n\n#######################\n# Ground Truth Labels #---------------------------------------------------------\n#######################\n\ndef read_in_labels(label_type_ld, setname):\n \"\"\"Return a pandas dataframe with the dataset labels.\n Accession numbers are the index and labels (e.g. \"pneumonia\") are the columns.\n <setname> can be 'train', 'valid', or 'test'.\"\"\"\n assert label_type_ld == 'disease_new'\n labels_file = './load_dataset/fakedata/2019-12-18_duke_disease/img'+setname+'_BinaryLabels.csv'\n return pd.read_csv(labels_file, header=0, index_col = 0)\n " ]
[ [ "pandas.read_csv", "torch.cuda.manual_seed", "numpy.random.seed", "torch.manual_seed", "torch.from_numpy", "torch.cuda.manual_seed_all", "numpy.array" ] ]
bencevans/CameraTraps
[ "7e86a3e48602510e412bcaccdc9259388e9863b5" ]
[ "api/batch_processing/postprocessing/find_repeat_detections.py" ]
[ "########\n#\n# find_repeat_detections.py\n#\n# Looks through a sequence of detections in the API output json file, and finds candidates\n# that might be \"repeated false positives\", i.e. that random branch that the detector\n# thinks is an animal.\n#\n# Writes out a new .json file where \"suspicious\" detections have had their\n# probabilities multiplied by -1. Optionally (and slowly) also writes an html\n# result set so you can examine what was deemed \"suspicious\"\n#\n# Currently the unit within which images are compared is a *directory*.\n#\n########\n\n# %% Imports and environment\n\nimport argparse\nimport os\nimport sys\nimport warnings\nfrom datetime import datetime\nfrom itertools import compress\n\nimport jsonpickle\nimport pandas as pd\nfrom joblib import Parallel, delayed\nfrom tqdm import tqdm\n\n# from ai4eutils; this is assumed to be on the path, as per repo convention\nimport write_html_image_list\nimport path_utils\n\nfrom api.batch_processing.postprocessing.load_api_results import load_api_results, write_api_results\nimport ct_utils\nfrom visualization.visualization_utils import open_image, render_detection_bounding_boxes\n\n# Imports I'm not using but use when I tinker with parallelization\n#\n# from multiprocessing import Pool\n# from multiprocessing.pool import ThreadPool\n# import multiprocessing\n# import joblib\n\n# ignoring all \"PIL cannot read EXIF metainfo for the images\" warnings\nwarnings.filterwarnings('ignore', '(Possibly )?corrupt EXIF data', UserWarning)\n# Metadata Warning, tag 256 had too many entries: 42, expected 1\nwarnings.filterwarnings('ignore', 'Metadata warning', UserWarning)\n\n\n##%% Classes\n\nclass RepeatDetectionOptions:\n # inputFlename = r'D:\\temp\\tigers_20190308_all_output.csv'\n\n # Relevant for rendering HTML or filtering folder of images\n imageBase = ''\n outputBase = ''\n\n # Don't consider detections with confidence lower than this as suspicious\n confidenceMin = 0.849\n\n # Don't consider detections with confidence higher than this as suspicious\n confidenceMax = 1.0\n\n # What's the IOU threshold for considering two boxes the same?\n iouThreshold = 0.9\n\n # How many occurrences of a single location (as defined by the IOU threshold)\n # are required before we declare it suspicious?\n occurrenceThreshold = 15\n\n # Ignore \"suspicious\" detections larger than some size; these are often animals\n # taking up the whole image. This is expressed as a fraction of the image size.\n maxSuspiciousDetectionSize = 0.2\n\n # A list of classes we don't want to treat as suspicious. Each element is an int.\n excludeClasses = [] # [annotation_constants.bbox_category_name_to_id['person']]\n\n # Set to zero to disable parallelism\n nWorkers = 10 # joblib.cpu_count()\n\n viz_target_width = 800\n\n # Load detections from a filter file rather than finding them from the detector output\n\n # .json file containing detections, should be called detectionIndex.json in the filtering_* folder \n # produced in the first pass\n filterFileToLoad = ''\n\n # (optional) List of filenames remaining after deletion of identified \n # repeated detections that are actually animals. This should be a flat\n # text file, one relative filename per line. 
See enumerate_images().\n filteredFileListToLoad = None\n\n # Turn on/off optional outputs\n bRenderHtml = False\n bWriteFilteringFolder = True\n\n debugMaxDir = -1\n debugMaxRenderDir = -1\n debugMaxRenderDetection = -1\n debugMaxRenderInstance = -1\n bParallelizeComparisons = True\n bParallelizeRendering = True\n\n bPrintMissingImageWarnings = True\n missingImageWarningType = 'once' # 'all'\n\n # State variables\n pbar = None\n\n # Replace filename tokens after reading, useful when the directory structure\n # has changed relative to the structure the detector saw\n filenameReplacements = {}\n\n # How many folders up from the leaf nodes should we be going to aggregate images?\n nDirLevelsFromLeaf = 0\n\n\nclass RepeatDetectionResults:\n \"\"\"\n The results of an entire repeat detection analysis\n \"\"\"\n\n # The data table (Pandas DataFrame), as loaded from the input json file via \n # load_api_results()\n detectionResults = None\n\n # The other fields in the input json file, loaded via load_api_results()\n otherFields = None\n\n # The data table after modification\n detectionResultsFiltered = None\n\n # dict mapping folder names to whole rows from the data table\n rowsByDirectory = None\n\n # dict mapping filenames to rows in the master table\n filenameToRow = None\n\n # An array of length nDirs, where each element is a list of DetectionLocation \n # objects for that directory that have been flagged as suspicious\n suspiciousDetections = None\n\n masterHtmlFile = None\n\n filterFile = None\n\n\nclass IndexedDetection:\n\n def __init__(self, iDetection=-1, filename='', bbox=[], confidence=-1, category='unknown'):\n \"\"\"\n A single detection event on a single image\n\n Args:\n iDetection: order in API output file\n filename: path to the image of this detection\n bbox: [x_min, y_min, width_of_box, height_of_box]\n \"\"\"\n self.iDetection = iDetection\n self.filename = filename\n self.bbox = bbox\n self.confidence = confidence\n self.category = category\n\n def __repr__(self):\n s = ct_utils.pretty_print_object(self, False)\n return s\n\n\nclass DetectionLocation:\n \"\"\"\n A unique-ish detection location, meaningful in the context of one\n directory\n \"\"\"\n\n def __init__(self, instance, detection, relativeDir):\n self.instances = [instance] # list of IndexedDetections\n self.bbox = detection['bbox']\n self.relativeDir = relativeDir\n self.sampleImageRelativeFileName = ''\n\n def __repr__(self):\n s = ct_utils.pretty_print_object(self, False)\n return s\n \n def to_api_detection(self):\n \"\"\"\n Converts to a 'detection' dictionary, making the semi-arbitrary assumption that\n the first instance is representative of confidence.\n \"\"\"\n detection = {'conf':self.instances[0].confidence,'bbox':self.bbox,'category':self.instances[0].category}\n return detection\n\n\n##%% Helper functions\n\ndef enumerate_images(dirName,outputFileName=None):\n \"\"\"\n Non-recursively enumerates all image files in *dirName* to the text file \n *outputFileName*, as relative paths. 
This is used to produce a file list\n after removing true positives from the image directory.\n \n Not used directly in this module, but provides a consistent way to enumerate\n files in the format expected by this module.\n \"\"\"\n imageList = path_utils.find_images(dirName)\n imageList = [os.path.basename(fn) for fn in imageList]\n \n if outputFileName is not None:\n with open(outputFileName,'w') as f:\n for s in imageList:\n f.write(s + '\\n')\n \n return imageList\n \n\ndef render_bounding_box(detection, inputFileName, outputFileName, lineWidth):\n \n im = open_image(inputFileName)\n d = detection.to_api_detection()\n render_detection_bounding_boxes([d],im,thickness=lineWidth,confidence_threshold=-10)\n im.save(outputFileName)\n\n\n##%% Look for matches (one directory) (function)\n\ndef find_matches_in_directory(dirName, options, rowsByDirectory):\n \n if options.pbar is not None:\n options.pbar.update()\n\n # List of DetectionLocations\n candidateDetections = []\n\n rows = rowsByDirectory[dirName]\n\n # iDirectoryRow = 0; row = rows.iloc[iDirectoryRow]\n for iDirectoryRow, row in rows.iterrows():\n\n filename = row['file']\n if not ct_utils.is_image_file(filename):\n continue\n\n # Don't bother checking images with no detections above threshold\n maxP = float(row['max_detection_conf'])\n if maxP < options.confidenceMin:\n continue\n\n # Array of dict, where each element is\n # {\n # 'category': '1', # str value, category ID\n # 'conf': 0.926, # confidence of this detections\n # 'bbox': [x_min, y_min, width_of_box, height_of_box] # (x_min, y_min) is upper-left,\n # all in relative coordinates and length\n # }\n detections = row['detections']\n assert len(detections) > 0\n\n # For each detection in this image\n for iDetection, detection in enumerate(detections):\n \n assert 'category' in detection and 'conf' in detection and 'bbox' in detection\n\n confidence = detection['conf']\n \n assert confidence >= 0.0 and confidence <= 1.0\n if confidence < options.confidenceMin:\n continue\n if confidence > options.confidenceMax:\n continue\n\n # Optionally exclude some classes from consideration as suspicious\n if len(options.excludeClasses) > 0:\n iClass = int(detection['category'])\n if iClass in options.excludeClasses:\n continue\n\n bbox = detection['bbox']\n confidence = detection['conf']\n \n # Is this detection too big to be suspicious?\n w, h = bbox[2], bbox[3]\n area = h * w\n\n # These are relative coordinates\n assert area >= 0.0 and area <= 1.0\n\n if area > options.maxSuspiciousDetectionSize:\n # print('Ignoring very large detection with area {}'.format(area))\n continue\n\n category = detection['category']\n \n instance = IndexedDetection(iDetection=iDetection,\n filename=row['file'], bbox=bbox, \n confidence=confidence, category=category)\n\n bFoundSimilarDetection = False\n\n # For each detection in our candidate list\n for iCandidate, candidate in enumerate(candidateDetections):\n\n # Is this a match? \n iou = ct_utils.get_iou(bbox, candidate.bbox)\n\n if iou >= options.iouThreshold:\n \n bFoundSimilarDetection = True\n\n # If so, add this example to the list for this detection\n candidate.instances.append(instance)\n\n # We *don't* break here; we allow this instance to possibly\n # match multiple candidates. 
There isn't an obvious right or\n # wrong here.\n\n # ...for each detection on our candidate list\n\n # If we found no matches, add this to the candidate list\n if not bFoundSimilarDetection:\n candidate = DetectionLocation(instance, detection, dirName)\n candidateDetections.append(candidate)\n\n # ...for each detection\n\n # ...for each row\n\n return candidateDetections\n\n# ...def find_matches_in_directory(dirName)\n\n \n##%% Render problematic locations to html (function)\n\ndef render_images_for_directory(iDir, directoryHtmlFiles, suspiciousDetections, options):\n \n nDirs = len(directoryHtmlFiles)\n\n if options.pbar is not None:\n options.pbar.update()\n\n if options.debugMaxRenderDir > 0 and iDir > options.debugMaxRenderDir:\n return None\n\n dirName = 'dir{:0>4d}'.format(iDir)\n\n # suspiciousDetectionsThisDir is a list of DetectionLocation objects\n suspiciousDetectionsThisDir = suspiciousDetections[iDir]\n\n if len(suspiciousDetectionsThisDir) == 0:\n return None\n\n timeStr = datetime.now().strftime('%H:%M:%S')\n print('Processing directory {} of {} ({})'.format(iDir, nDirs, timeStr))\n\n dirBaseDir = os.path.join(options.outputBase, dirName)\n os.makedirs(dirBaseDir, exist_ok=True)\n\n directoryDetectionHtmlFiles = []\n directoryDetectionImageInfo = []\n\n # For each problematic detection in this directory\n #\n # iDetection = 0; detection = suspiciousDetectionsThisDir[iDetection];\n nDetections = len(suspiciousDetectionsThisDir)\n bPrintedMissingImageWarning = False\n\n # iDetection = 0; detection = suspiciousDetectionsThisDir[0]\n for iDetection, detection in enumerate(suspiciousDetectionsThisDir):\n\n if options.debugMaxRenderDetection > 0 and iDetection > options.debugMaxRenderDetection:\n break\n\n nInstances = len(detection.instances)\n print('Processing detection {} of {} ({} instances)'.format(\n iDetection, nDetections, nInstances))\n detectionName = 'detection{:0>4d}'.format(iDetection)\n detectionBaseDir = os.path.join(dirBaseDir, detectionName)\n os.makedirs(detectionBaseDir, exist_ok=True)\n\n # _ = pretty_print_object(detection)\n assert (nInstances >= options.occurrenceThreshold)\n\n imageInfo = []\n\n # Render images\n\n # iInstance = 0; instance = detection.instances[iInstance]\n for iInstance, instance in enumerate(detection.instances):\n\n if options.debugMaxRenderInstance >= 0 and iInstance >= options.debugMaxRenderInstance:\n break\n\n imageRelativeFilename = 'image{:0>4d}.jpg'.format(iInstance)\n imageOutputFilename = os.path.join(detectionBaseDir,\n imageRelativeFilename)\n thisImageInfo = {}\n thisImageInfo['filename'] = imageRelativeFilename\n confidence = instance.confidence\n confidenceStr = '{:.2f}'.format(confidence)\n t = confidenceStr + ' (' + instance.filename + ')'\n thisImageInfo['title'] = t\n imageInfo.append(thisImageInfo)\n\n inputFileName = os.path.join(options.imageBase, instance.filename)\n if not os.path.isfile(inputFileName):\n if options.bPrintMissingImageWarnings:\n if (options.missingImageWarningType == 'all') or (not bPrintedMissingImageWarning):\n print('Warning: could not find file {}'.format(inputFileName))\n bPrintedMissingImageWarning = True\n else:\n render_bounding_box(detection, inputFileName, imageOutputFilename, 15)\n\n # ...for each instance\n\n # Write html for this detection\n detectionHtmlFile = os.path.join(detectionBaseDir, 'index.html')\n\n htmlOptions = write_html_image_list.write_html_image_list()\n htmlOptions['defaultImageStyle'] = 'max-width:650px;'\n 
write_html_image_list.write_html_image_list(detectionHtmlFile, imageInfo, htmlOptions)\n\n thisDirectoryImageInfo = {}\n directoryDetectionHtmlFiles.append(detectionHtmlFile)\n\n # Use the first image from this detection (arbitrary) as the canonical example\n # that we'll render for the directory-level page.\n thisDirectoryImageInfo['filename'] = os.path.join(detectionName, imageInfo[0]['filename'])\n detectionHtmlFileRelative = os.path.relpath(detectionHtmlFile, dirBaseDir)\n title = '<a href=\"{}\">{}</a>'.format(detectionHtmlFileRelative, detectionName)\n thisDirectoryImageInfo['title'] = title\n directoryDetectionImageInfo.append(thisDirectoryImageInfo)\n\n # ...for each detection\n\n # Write the html file for this directory\n directoryHtmlFile = os.path.join(dirBaseDir, 'index.html')\n\n htmlOptions = write_html_image_list.write_html_image_list()\n htmlOptions['defaultImageStyle'] = 'max-width:650px;'\n write_html_image_list.write_html_image_list(directoryHtmlFile,\n directoryDetectionImageInfo,\n htmlOptions)\n\n return directoryHtmlFile\n\n# ...def render_images_for_directory(iDir)\n\n\n##%% Update the detection table based on suspicious results, write .csv output\n\ndef update_detection_table(RepeatDetectionResults, options, outputFilename=None):\n \n detectionResults = RepeatDetectionResults.detectionResults\n\n # An array of length nDirs, where each element is a list of DetectionLocation \n # objects for that directory that have been flagged as suspicious\n suspiciousDetectionsByDirectory = RepeatDetectionResults.suspiciousDetections\n\n nBboxChanges = 0\n\n print('Updating output table')\n\n # For each suspicious detection (two loops)\n for iDir, directoryEvents in enumerate(suspiciousDetectionsByDirectory):\n\n for iDetectionEvent, detectionEvent in enumerate(directoryEvents):\n\n locationBbox = detectionEvent.bbox\n\n for iInstance, instance in enumerate(detectionEvent.instances):\n\n instanceBbox = instance.bbox\n\n # This should match the bbox for the detection event\n iou = ct_utils.get_iou(instanceBbox, locationBbox)\n # There are instances where iou is very close to the threshold so cannot use >\n assert iou >= options.iouThreshold\n\n assert instance.filename in RepeatDetectionResults.filenameToRow\n iRow = RepeatDetectionResults.filenameToRow[instance.filename]\n row = detectionResults.iloc[iRow]\n rowDetections = row['detections']\n detectionToModify = rowDetections[instance.iDetection]\n\n # Make sure the bounding box matches\n assert (instanceBbox[0:3] == detectionToModify['bbox'][0:3])\n\n # Make the probability negative, if it hasn't been switched by\n # another bounding box\n if detectionToModify['conf'] >= 0:\n detectionToModify['conf'] = -1 * detectionToModify['conf']\n nBboxChanges += 1\n\n # ...for each instance\n\n # ...for each detection\n\n # ...for each directory \n\n # Update maximum probabilities\n\n # For each row...\n nProbChanges = 0\n nProbChangesToNegative = 0\n nProbChangesAcrossThreshold = 0\n\n for iRow, row in detectionResults.iterrows():\n\n detections = row['detections']\n if len(detections) == 0:\n continue\n\n maxPOriginal = float(row['max_detection_conf'])\n assert maxPOriginal >= 0\n\n maxP = None\n nNegative = 0\n\n for iDetection, detection in enumerate(detections):\n p = detection['conf']\n\n if p < 0:\n nNegative += 1\n\n if (maxP is None) or (p > maxP):\n maxP = p\n \n if abs(maxP - maxPOriginal) > 1e-3:\n\n # We should only be making detections *less* likely\n assert maxP < maxPOriginal\n # row['max_confidence'] = str(maxP)\n 
detectionResults.at[iRow, 'max_detection_conf'] = maxP\n\n nProbChanges += 1\n\n if maxP < 0:\n nProbChangesToNegative += 1\n\n if maxPOriginal >= options.confidenceMin and maxP < options.confidenceMin:\n nProbChangesAcrossThreshold += 1\n\n # Negative probabilities should be the only reason maxP changed, so\n # we should have found at least one negative value\n assert nNegative > 0\n\n # ...if there was a meaningful change to the max probability for this row\n\n # ...for each row\n\n if outputFilename is not None:\n write_api_results(detectionResults, RepeatDetectionResults.otherFields, outputFilename)\n\n print(\n 'Finished updating detection table\\nChanged {} detections that impacted {} maxPs ({} to negative) ({} across confidence threshold)'.format(\n nBboxChanges, nProbChanges, nProbChangesToNegative, nProbChangesAcrossThreshold))\n\n return detectionResults\n\n\n# ...def update_detection_table(RepeatDetectionResults,options)\n\n\n##%% Main function\n\ndef find_repeat_detections(inputFilename, outputFilename, options=None):\n \n ##%% Input handling\n\n if options is None:\n options = RepeatDetectionOptions()\n\n toReturn = RepeatDetectionResults()\n\n\n ##%% Load file\n\n detectionResults, otherFields = load_api_results(inputFilename, normalize_paths=True,\n filename_replacements=options.filenameReplacements)\n toReturn.detectionResults = detectionResults\n toReturn.otherFields = otherFields\n\n\n ##%% Separate files into directories\n\n # This will be a map from a directory name to smaller data frames\n rowsByDirectory = {}\n\n # This is a mapping back into the rows of the original table\n filenameToRow = {}\n\n # TODO: in the case where we're loading an existing set of FPs after manual filtering,\n # we should load these data frames too, rather than re-building them from the input.\n\n print('Separating files into directories...')\n\n # iRow = 0; row = detectionResults.iloc[0]\n for iRow, row in detectionResults.iterrows():\n relativePath = row['file']\n dirName = os.path.dirname(relativePath)\n \n if len(dirName) == 0:\n assert options.nDirLevelsFromLeaf == 0, 'Can''t use the dirLevelsFromLeaf option with flat filenames'\n else:\n if options.nDirLevelsFromLeaf > 0:\n iLevel = 0\n while (iLevel < options.nDirLevelsFromLeaf):\n iLevel += 1\n dirName = os.path.dirname(dirName)\n assert len(dirName) > 0\n\n if not dirName in rowsByDirectory:\n # Create a new DataFrame with just this row\n # rowsByDirectory[dirName] = pd.DataFrame(row)\n rowsByDirectory[dirName] = []\n\n rowsByDirectory[dirName].append(row)\n\n assert relativePath not in filenameToRow\n filenameToRow[relativePath] = iRow\n\n # Convert lists of rows to proper DataFrames\n dirs = list(rowsByDirectory.keys())\n for d in dirs:\n rowsByDirectory[d] = pd.DataFrame(rowsByDirectory[d])\n\n toReturn.rowsByDirectory = rowsByDirectory\n toReturn.filenameToRow = filenameToRow\n\n print('Finished separating {} files into {} directories'.format(len(detectionResults),\n len(rowsByDirectory)))\n\n\n ##% Look for matches (or load them from file)\n\n dirsToSearch = list(rowsByDirectory.keys())\n if options.debugMaxDir > 0:\n dirsToSearch = dirsToSearch[0:options.debugMaxDir]\n\n # length-nDirs list of lists of DetectionLocation objects\n suspiciousDetections = [None] * len(dirsToSearch)\n\n # Are we actually looking for matches, or just loading from a file?\n if len(options.filterFileToLoad) == 0:\n\n # We're actually looking for matches...\n print('Finding similar detections...')\n\n allCandidateDetections = [None] * 
len(dirsToSearch)\n\n if not options.bParallelizeComparisons:\n\n options.pbar = None\n # iDir = 0; dirName = dirsToSearch[iDir]\n for iDir, dirName in enumerate(tqdm(dirsToSearch)):\n allCandidateDetections[iDir] = find_matches_in_directory(dirName, options, rowsByDirectory)\n\n else:\n\n options.pbar = tqdm(total=len(dirsToSearch))\n allCandidateDetections = Parallel(n_jobs=options.nWorkers, prefer='threads')(\n delayed(find_matches_in_directory)(dirName, options, rowsByDirectory) for dirName in tqdm(dirsToSearch))\n\n print('\\nFinished looking for similar bounding boxes')\n\n ##%% Find suspicious locations based on match results\n\n print('Filtering out repeat detections...')\n\n nImagesWithSuspiciousDetections = 0\n nSuspiciousDetections = 0\n\n # For each directory\n #\n # iDir = 51\n for iDir in range(len(dirsToSearch)):\n\n # A list of DetectionLocation objects\n suspiciousDetectionsThisDir = []\n\n # A list of DetectionLocation objects\n candidateDetectionsThisDir = allCandidateDetections[iDir]\n\n for iLocation, candidateLocation in enumerate(candidateDetectionsThisDir):\n\n # occurrenceList is a list of file/detection pairs\n nOccurrences = len(candidateLocation.instances)\n\n if nOccurrences < options.occurrenceThreshold:\n continue\n\n nImagesWithSuspiciousDetections += nOccurrences\n nSuspiciousDetections += 1\n\n suspiciousDetectionsThisDir.append(candidateLocation)\n # Find the images corresponding to this bounding box, render boxes\n\n suspiciousDetections[iDir] = suspiciousDetectionsThisDir\n\n print(\n 'Finished searching for repeat detections\\nFound {} unique detections on {} images that are suspicious'.format(\n nSuspiciousDetections, nImagesWithSuspiciousDetections))\n\n else:\n\n print('Bypassing detection-finding, loading from {}'.format(options.filterFileToLoad))\n\n # Load the filtering file\n detectionIndexFileName = options.filterFileToLoad\n sIn = open(detectionIndexFileName, 'r').read()\n suspiciousDetections = jsonpickle.decode(sIn)\n filteringBaseDir = os.path.dirname(options.filterFileToLoad)\n assert len(suspiciousDetections) == len(dirsToSearch)\n\n nDetectionsRemoved = 0\n nDetectionsLoaded = 0\n\n # We're skipping detection-finding, but to see which images are actually legit false\n # positives, we may be looking for physical files or loading from a text file. \n fileList = None\n if options.filteredFileListToLoad is not None:\n with open(options.filteredFileListToLoad) as f:\n fileList = f.readlines()\n fileList = [x.strip() for x in fileList]\n nSuspiciousDetections = sum([len(x) for x in suspiciousDetections])\n print('Loaded false positive list from file, will remove {} of {} suspicious detections'.format(\n len(fileList), nSuspiciousDetections))\n\n # For each directory\n # iDir = 0; detections = suspiciousDetections[0]\n for iDir, detections in enumerate(suspiciousDetections):\n\n bValidDetection = [True] * len(detections)\n nDetectionsLoaded += len(detections)\n\n # For each detection that was present before filtering\n # iDetection = 0; detection = detections[iDetection]\n for iDetection, detection in enumerate(detections):\n\n # Are we checking the directory to see whether detections were actually false positives,\n # or reading from a list?\n if fileList is None:\n \n # Is the image still there? 
\n imageFullPath = os.path.join(filteringBaseDir, detection.sampleImageRelativeFileName)\n\n # If not, remove this from the list of suspicious detections\n if not os.path.isfile(imageFullPath):\n nDetectionsRemoved += 1\n bValidDetection[iDetection] = False\n\n else:\n \n if detection.sampleImageRelativeFileName not in fileList:\n nDetectionsRemoved += 1\n bValidDetection[iDetection] = False\n\n # ...for each detection\n\n nRemovedThisDir = len(bValidDetection) - sum(bValidDetection)\n if nRemovedThisDir > 0:\n print('Removed {} of {} detections from directory {}'.format(nRemovedThisDir,\n len(detections), iDir))\n\n detectionsFiltered = list(compress(detections, bValidDetection))\n suspiciousDetections[iDir] = detectionsFiltered\n\n # ...for each directory\n\n print('Removed {} of {} total detections via manual filtering'.format(nDetectionsRemoved, nDetectionsLoaded))\n\n # ...if we are/aren't finding detections (vs. loading from file)\n\n toReturn.suspiciousDetections = suspiciousDetections\n\n if options.bRenderHtml:\n\n # Render problematic locations with html (loop)\n\n print('Rendering html')\n\n nDirs = len(dirsToSearch)\n directoryHtmlFiles = [None] * nDirs\n\n if options.bParallelizeRendering:\n\n # options.pbar = tqdm(total=nDirs)\n options.pbar = None\n\n directoryHtmlFiles = Parallel(n_jobs=options.nWorkers, prefer='threads')(delayed(\n render_images_for_directory)(iDir, directoryHtmlFiles, suspiciousDetections, options) for iDir in\n tqdm(range(nDirs)))\n\n else:\n\n options.pbar = None\n\n # For each directory\n # iDir = 51\n for iDir in range(nDirs):\n # Add this directory to the master list of html files\n directoryHtmlFiles[iDir] = render_images_for_directory(iDir, directoryHtmlFiles, suspiciousDetections,\n options)\n\n # ...for each directory\n\n # Write master html file\n\n masterHtmlFile = os.path.join(options.outputBase, 'index.html')\n os.makedirs(options.outputBase, exist_ok=True)\n toReturn.masterHtmlFile = masterHtmlFile\n\n with open(masterHtmlFile, 'w') as fHtml:\n\n fHtml.write('<html><body>\\n')\n fHtml.write('<h2><b>Repeat detections by directory</b></h2></br>\\n')\n\n for iDir, dirHtmlFile in enumerate(directoryHtmlFiles):\n\n if dirHtmlFile is None:\n continue\n\n relPath = os.path.relpath(dirHtmlFile, options.outputBase)\n dirName = dirsToSearch[iDir]\n\n # Remove unicode characters before formatting\n relPath = relPath.encode('ascii', 'ignore').decode('ascii')\n dirName = dirName.encode('ascii', 'ignore').decode('ascii')\n\n fHtml.write('<a href={}>{}</a><br/>\\n'.format(relPath, dirName))\n\n fHtml.write('</body></html>\\n')\n\n # ...if we're rendering html\n\n toReturn.allRowsFiltered = update_detection_table(toReturn, options, outputFilename)\n\n # Create filtering directory\n if options.bWriteFilteringFolder:\n\n print('Creating filtering folder...')\n\n dateString = datetime.now().strftime('%Y.%m.%d.%H.%M.%S')\n filteringDir = os.path.join(options.outputBase, 'filtering_' + dateString)\n os.makedirs(filteringDir, exist_ok=True)\n\n # iDir = 0; suspiciousDetectionsThisDir = suspiciousDetections[iDir]\n for iDir, suspiciousDetectionsThisDir in enumerate(tqdm(suspiciousDetections)):\n\n # suspiciousDetectionsThisDir is a list of DetectionLocation objects\n # iDetection = 0; detection = suspiciousDetectionsThisDir[0]\n for iDetection, detection in enumerate(suspiciousDetectionsThisDir):\n \n instance = detection.instances[0]\n relativePath = instance.filename\n inputFullPath = os.path.join(options.imageBase, relativePath)\n assert 
(os.path.isfile(inputFullPath)), 'Not a file: {}'.format(inputFullPath)\n outputRelativePath = 'dir{:0>4d}_det{:0>4d}.jpg'.format(iDir, iDetection)\n outputFullPath = os.path.join(filteringDir, outputRelativePath)\n render_bounding_box(detection, inputFullPath, outputFullPath, 15)\n detection.sampleImageRelativeFileName = outputRelativePath\n\n # Write out the detection index\n detectionIndexFileName = os.path.join(filteringDir, 'detectionIndex.json')\n jsonpickle.set_encoder_options('json', sort_keys=True, indent=4)\n s = jsonpickle.encode(suspiciousDetections)\n with open(detectionIndexFileName, 'w') as f:\n f.write(s)\n toReturn.filterFile = detectionIndexFileName\n\n print('Done')\n\n # ...if we're writing filtering info\n\n return toReturn\n\n\n# ...find_repeat_detections()\n\n\n#%% Interactive driver\n\nif False:\n #%%\n\n baseDir = '/Users/siyuyang/Source/temp_data/CameraTrap/test_repeat_detection'\n\n options = RepeatDetectionOptions()\n options.bRenderHtml = True\n options.imageBase = baseDir\n options.outputBase = os.path.join(baseDir, 'repeat_detections')\n options.filenameReplacements = {} # {'20190430cameratraps\\\\':''}\n\n options.confidenceMin = 0.85\n options.confidenceMax = 1.01 # 0.99\n options.iouThreshold = 0.93 # 0.95\n options.occurrenceThreshold = 8 # 10\n options.maxSuspiciousDetectionSize = 0.2\n\n options.filterFileToLoad = ''\n options.filterFileToLoad = os.path.join(baseDir,\n r'repeatDetections\\filtering_2019.05.16.18.43.01\\detectionIndex.json')\n\n options.debugMaxDir = -1\n options.debugMaxRenderDir = -1\n options.debugMaxRenderDetection = -1\n options.debugMaxRenderInstance = -1\n\n options.bParallelizeComparisons = False # True\n options.bParallelizeRendering = False # True\n options.excludeClasses = [2]\n\n # inputFilename = os.path.join(baseDir, '5570_blah_detections.json')\n # outputFilename = mpt.insert_before_extension(inputFilename,\n # 'filtered')\n inputFilename = os.path.join(baseDir, 'detections_kitfox_20190620_short.json')\n outputFilename = os.path.join(baseDir, 'detections_kitfox_20190620_short_filter.json')\n\n results = find_repeat_detections(inputFilename, outputFilename, options)\n\n\n#%% Command-line driver\n\ndef main():\n # With HTML (debug)\n # python find_repeat_detections.py \"D:\\temp\\tigers_20190308_all_output.json\" \"D:\\temp\\tigers_20190308_all_output.filtered.json\" --renderHtml --debugMaxDir 100 --imageBase \"d:\\wildlife_data\\tigerblobs\" --outputBase \"d:\\temp\\repeatDetections\"\n\n # Without HTML (debug)\n # python find_repeat_detections.py \"D:\\temp\\tigers_20190308_all_output.json\" \"D:\\temp\\tigers_20190308_all_output.filtered.json\" --debugMaxDir 100 --imageBase \"d:\\wildlife_data\\tigerblobs\" --outputBase \"d:\\temp\\repeatDetections\"\n\n # With HTML (for real)\n # python find_repeat_detections.py \"D:\\temp\\tigers_20190308_all_output.json\" \"D:\\temp\\tigers_20190308_all_output.filtered.json\" --renderHtml --imageBase \"d:\\wildlife_data\\tigerblobs\" --outputBase \"d:\\temp\\repeatDetections\"\n\n defaultOptions = RepeatDetectionOptions()\n\n parser = argparse.ArgumentParser()\n parser.add_argument('inputFile')\n parser.add_argument('outputFile')\n parser.add_argument('--imageBase', action='store', type=str, default='',\n help='Image base dir, relevant if renderHtml is True or if omitFilteringFolder is not set')\n parser.add_argument('--outputBase', action='store', type=str, default='',\n help='Html or filtering folder output dir')\n parser.add_argument('--filterFileToLoad', action='store', type=str, 
default='', # checks for string length so default needs to be the empty string\n help='Path to detectionIndex.json, which should be inside a folder of images that are manually verified to _not_ contain valid animals')\n\n parser.add_argument('--confidenceMax', action='store', type=float,\n default=defaultOptions.confidenceMax,\n help='Detection confidence threshold; don\\'t process anything above this')\n parser.add_argument('--confidenceMin', action='store', type=float,\n default=defaultOptions.confidenceMin,\n help='Detection confidence threshold; don\\'t process anything below this')\n parser.add_argument('--iouThreshold', action='store', type=float,\n default=defaultOptions.iouThreshold,\n help='Detections with IOUs greater than this are considered \"the same detection\"')\n parser.add_argument('--occurrenceThreshold', action='store', type=int,\n default=defaultOptions.occurrenceThreshold,\n help='More than this many near-identical detections in a group (e.g. a folder) is considered suspicious')\n parser.add_argument('--nWorkers', action='store', type=int,\n default=defaultOptions.nWorkers,\n help='Level of parallelism for rendering and IOU computation')\n parser.add_argument('--maxSuspiciousDetectionSize', action='store', type=float,\n default=defaultOptions.maxSuspiciousDetectionSize,\n help='Detections larger than this fraction of image area are not considered suspicious')\n\n parser.add_argument('--renderHtml', action='store_true',\n dest='bRenderHtml', help='Should we render HTML output?')\n parser.add_argument('--omitFilteringFolder', action='store_false',\n dest='bWriteFilteringFolder',\n help='Should we create a folder of rendered detections for post-filtering?')\n parser.add_argument('--excludeClasses', action='store', nargs='+', type=int,\n default=defaultOptions.excludeClasses,\n help='List of classes (ints) to exclude from analysis, separated by spaces')\n\n parser.add_argument('--debugMaxDir', action='store', type=int, default=-1)\n parser.add_argument('--debugMaxRenderDir', action='store', type=int, default=-1)\n parser.add_argument('--debugMaxRenderDetection', action='store', type=int, default=-1)\n parser.add_argument('--debugMaxRenderInstance', action='store', type=int, default=-1)\n\n parser.add_argument('--forceSerialComparisons', action='store_false',\n dest='bParallelizeComparisons')\n parser.add_argument('--forceSerialRendering', action='store_false',\n dest='bParallelizeRendering')\n\n if len(sys.argv[1:]) == 0:\n parser.print_help()\n parser.exit()\n\n args = parser.parse_args()\n\n # Convert to an options object\n options = RepeatDetectionOptions()\n\n ct_utils.args_to_object(args, options)\n\n find_repeat_detections(args.inputFile, args.outputFile, options)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "pandas.DataFrame" ] ]
shangtse/cleverhans
[ "c8e224dd6105aa8d664ed06f59b795f3609d3dcb" ]
[ "cleverhans/loss.py" ]
[ "\"\"\"Loss functions for training models.\"\"\"\nimport json\nimport os\nimport warnings\n\nimport tensorflow as tf\n\nfrom cleverhans.compat import softmax_cross_entropy_with_logits\nfrom cleverhans.model import Model\nfrom cleverhans.utils import safe_zip\n\n\nclass Loss(object):\n \"\"\"\n An abstract interface for loss wrappers that allows flexible control of\n real examples, adversarial examples and labels. These losses are used\n for defenses (during model training).\n \"\"\"\n\n def __init__(self, model, hparams=None, attack=None):\n \"\"\"\n :param model: Model instance, the model on which to apply the loss.\n :param hparams: dict, hyper-parameters for the loss.\n :param attack: callable, the attack function for adv. training.\n \"\"\"\n assert isinstance(model, Model)\n assert attack is None or callable(attack)\n self.model = model\n self.hparams = hparams\n self.attack = attack\n\n def save(self, path):\n \"\"\"Save loss in json format\n \"\"\"\n json.dump(dict(loss=self.__class__.__name__,\n params=self.hparams),\n open(os.path.join(path, 'loss.json'), 'wb'))\n\n def fprop(self, x, y):\n \"\"\"Forward propagate the loss.\n Loss should be a scalar value, independent of batch size (i.e. use\n reduce_mean over batch axis, don't use reduce_sum or return a tensor).\n Scalar losses are easier to add together, e.g. through `WeightedSum`.\n Mean losses are easier to redistribute across multiple replicas without\n needing to change learning rates, etc.\n :param x: tensor, a batch of inputs.\n :param y: tensor, a batch of outputs (1-hot labels typically).\n \"\"\"\n raise NotImplementedError\n\n\nclass WeightedSum(Loss):\n \"\"\"\n A Loss that adds up a weighted sum of other losses.\n \"\"\"\n\n def __init__(self, model, terms):\n self.terms = terms\n\n Loss.__init__(self, model, locals())\n\n def fprop(self, x, y, **kwargs):\n weights, loss_objects = safe_zip(*self.terms)\n for weight in weights:\n if isinstance(weight, float):\n continue\n if hasattr(weight, 'ndim'):\n assert weight.ndim == 0\n continue\n raise TypeError(\"weight of %s is not a type that this function \"\n \"knows it can accept yet\" % str(weight))\n losses = [loss.fprop(x, y, **kwargs) for loss in loss_objects]\n for loss, loss_object in safe_zip(losses, loss_objects):\n if len(loss.get_shape()) > 0:\n raise ValueError(\"%s.fprop returned a non-scalar value\" %\n str(loss_object))\n terms = [weight * loss for weight, loss in safe_zip(weights, losses)]\n\n return tf.add_n(terms)\n\n\nclass CrossEntropy(Loss):\n \"\"\"Cross-entropy loss for a multiclass softmax classifier.\n :param model: Model instance, the model on which to apply the loss.\n :param smoothing: float, amount of label smoothing for cross-entropy.\n :param attack: function, given an input x, return an attacked x'.\n \"\"\"\n def __init__(self, model, smoothing=0., attack=None, **kwargs):\n if smoothing < 0 or smoothing > 1:\n raise ValueError('Smoothing must be in [0, 1]', smoothing)\n self.kwargs = kwargs\n Loss.__init__(self, model, locals(), attack)\n self.smoothing = smoothing\n\n def fprop(self, x, y, **kwargs):\n kwargs.update(self.kwargs)\n if self.attack is not None:\n x = x, self.attack(x)\n else:\n x = x,\n\n # Catching RuntimeError: Variable -= value not supported by tf.eager.\n try:\n y -= self.smoothing * (y - 1. / tf.cast(y.shape[-1], y.dtype))\n except RuntimeError:\n y.assign_sub(self.smoothing * (y - 1. 
/ tf.cast(y.shape[-1],\n y.dtype)))\n\n logits = [self.model.get_logits(x, **kwargs) for x in x]\n loss = sum(\n tf.reduce_mean(softmax_cross_entropy_with_logits(labels=y,\n logits=logit))\n for logit in logits)\n return loss\n\n\nclass MixUp(Loss):\n \"\"\"Mixup ( https://arxiv.org/abs/1710.09412 )\n :param model: Model instance, the model on which to apply the loss.\n :param beta: float, beta distribution parameter for MixUp.\n \"\"\"\n def __init__(self, model, beta, **kwargs):\n del kwargs\n Loss.__init__(self, model, locals())\n self.beta = beta\n\n def fprop(self, x, y, **kwargs):\n with tf.device('/CPU:0'):\n # Prevent error complaining GPU kernels unavailable for this.\n mix = tf.distributions.Beta(self.beta, self.beta)\n mix = mix.sample([tf.shape(x)[0]] + [1] * (len(x.shape) - 1))\n mix = tf.maximum(mix, 1 - mix)\n mix_label = tf.reshape(mix, [-1, 1])\n xm = x + mix * (x[::-1] - x)\n ym = y + mix_label * (y[::-1] - y)\n logits = self.model.get_logits(xm, **kwargs)\n loss = tf.reduce_mean(softmax_cross_entropy_with_logits(labels=ym,\n logits=logits))\n return loss\n\n\nclass FeaturePairing(Loss):\n \"\"\"Feature pairing loss.\n :param model: Model instance, the model on which to apply the loss.\n :param weight: float, with of logic pairing loss.\n :param attack: function, given an input x, return an attacked x'.\n \"\"\"\n\n def __init__(self, model, weight, attack, **kwargs):\n del kwargs\n Loss.__init__(self, model, locals(), attack)\n self.weight = weight\n\n def fprop(self, x, y, **kwargs):\n x_adv = self.attack(x)\n d1 = self.model.fprop(x, **kwargs)\n d2 = self.model.fprop(x_adv, **kwargs)\n pairing_loss = [tf.reduce_mean(tf.square(a - b))\n for a, b in\n zip(d1[Model.O_FEATURES], d2[Model.O_FEATURES])]\n pairing_loss = tf.reduce_mean(pairing_loss)\n loss = tf.reduce_mean(softmax_cross_entropy_with_logits(\n labels=y, logits=d1[Model.O_LOGITS]))\n loss += tf.reduce_mean(softmax_cross_entropy_with_logits(\n labels=y, logits=d2[Model.O_LOGITS]))\n return loss + self.weight * pairing_loss\n\n\nclass WeightDecay(Loss):\n \"\"\"Weight decay\"\"\"\n def fprop(self, x, y, **kwargs):\n terms = [tf.nn.l2_loss(param)\n for param in self.model.get_params()\n if len(param.get_shape()) > 1]\n out = tf.add_n(terms)\n assert len(out.get_shape()) == 0\n return out\n\n\ndef attack_softmax_cross_entropy(y, probs, mean=True):\n \"\"\"\n Define target loss for an Attack.\n :param y: 2D tensor, one hot labels.\n :param probs: 2D tensor, probability distribution output from the model.\n :param mean: bool, reduce mean loss when true.\n :return: return mean of loss if True, otherwise return vector with per\n sample loss\n \"\"\"\n logits = probs.op.inputs[0] if probs.op.type == 'Softmax' else probs\n out = softmax_cross_entropy_with_logits(logits=logits, labels=y)\n return tf.reduce_mean(out) if mean else out\n\n\nclass LossCrossEntropy(Loss):\n \"\"\"\n Deprecated version of `CrossEntropy` that returns per-example loss rather\n than mean loss.\n \"\"\"\n\n def __init__(self, model, smoothing=0., attack=None, **kwargs):\n \"\"\"Constructor.\n :param model: Model instance, the model on which to apply the loss.\n :param smoothing: float, amount of label smoothing for cross-entropy.\n :param attack: function, given an input x, return an attacked x'.\n \"\"\"\n if smoothing < 0 or smoothing > 1:\n raise ValueError('Smoothing must be in [0, 1]', smoothing)\n del kwargs\n Loss.__init__(self, model, locals(), attack)\n self.smoothing = smoothing\n\n def fprop(self, x, y, **kwargs):\n if self.attack is not 
None:\n x = x, self.attack(x)\n else:\n x = x,\n\n # Catching RuntimeError: Variable -= value not supported by tf.eager.\n try:\n y -= self.smoothing * (y - 1. / tf.cast(y.shape[-1], tf.float32))\n except RuntimeError:\n y.assign_sub(self.smoothing * (y - 1. / tf.cast(y.shape[-1],\n tf.float32)))\n\n logits = [self.model.get_logits(x, **kwargs) for x in x]\n loss = sum(\n softmax_cross_entropy_with_logits(labels=y,\n logits=logit)\n for logit in logits)\n warnings.warn(\"LossCrossEntropy is deprecated, switch to \"\n \"CrossEntropy. LossCrossEntropy may be removed on \"\n \"or after 2019-03-06.\")\n return loss\n\n\nclass LossFeaturePairing(Loss):\n \"\"\"Deprecated version of `FeaturePairing` that returns per-example loss\n rather than mean loss.\"\"\"\n\n def __init__(self, model, weight, attack, **kwargs):\n \"\"\"Constructor.\n :param model: Model instance, the model on which to apply the loss.\n :param weight: float, with of logic pairing loss.\n :param attack: function, given an input x, return an attacked x'.\n \"\"\"\n del kwargs\n Loss.__init__(self, model, locals(), attack)\n self.weight = weight\n\n def fprop(self, x, y, **kwargs):\n x_adv = self.attack(x)\n d1 = self.model.fprop(x, **kwargs)\n d2 = self.model.fprop(x_adv, **kwargs)\n pairing_loss = [tf.reduce_mean(tf.square(a - b))\n for a, b in\n zip(d1[Model.O_FEATURES], d2[Model.O_FEATURES])]\n pairing_loss = tf.reduce_mean(pairing_loss)\n loss = softmax_cross_entropy_with_logits(\n labels=y, logits=d1[Model.O_LOGITS])\n loss += softmax_cross_entropy_with_logits(\n labels=y, logits=d2[Model.O_LOGITS])\n warnings.warn(\"LossFeaturePairing is deprecated, switch to \"\n \"FeaturePairing. LossFeaturePairing may be removed \"\n \"on or after 2019-03-06.\")\n return loss + self.weight * pairing_loss\n\n\nclass LossMixUp(Loss):\n \"\"\"Deprecated version of `MixUp` that returns per-example loss\n rather than mean loss.\"\"\"\n\n def __init__(self, model, beta, **kwargs):\n \"\"\"Constructor.\n :param model: Model instance, the model on which to apply the loss.\n :param beta: float, beta distribution parameter for MixUp.\n \"\"\"\n del kwargs\n Loss.__init__(self, model, locals())\n self.beta = beta\n\n def fprop(self, x, y, **kwargs):\n mix = tf.distributions.Beta(self.beta, self.beta)\n mix = mix.sample([tf.shape(x)[0]] + [1] * (len(x.shape) - 1))\n xm = x + mix * (x[::-1] - x)\n ym = y + mix * (y[::-1] - y)\n logits = self.model.get_logits(xm, **kwargs)\n loss = softmax_cross_entropy_with_logits(labels=ym, logits=logits)\n warnings.warn(\"LossMixUp is deprecated, switch to \"\n \"MixUp. LossFeaturePairing may be removed \"\n \"on or after 2019-03-06.\")\n return loss\n" ]
[ [ "tensorflow.device", "tensorflow.reduce_mean", "tensorflow.shape", "tensorflow.maximum", "tensorflow.reshape", "tensorflow.cast", "tensorflow.nn.l2_loss", "tensorflow.square", "tensorflow.distributions.Beta", "tensorflow.add_n" ] ]
ezatterin/pySPM
[ "ae40bfb79678b3f10c85daadaa9cd5ce9e4e2e42" ]
[ "pySPM/ITA.py" ]
[ "# -- coding: utf-8 --\n\n# Copyright 2018 Olivier Scholder <[email protected]>\n\n\"\"\"\nThis module gives the ability to ready and parse the ToF-SIMS ITA files from iontof.\nYou can mainly retrieve images and spectra for each channel and scan.\n\"\"\"\nfrom __future__ import absolute_import\n\nimport numpy as np\nimport struct\nimport os.path\nimport zlib\nimport re\nimport copy\n\nfrom .ITM import ITM\nfrom .collection import Collection\nfrom .SPM import SPM_image\nfrom .Block import MissingBlock\nfrom .utils.misc import deprecated, aliased, alias, PB\nimport warnings\n\n@aliased\nclass ITA(ITM):\n def __init__(self, filename, *args, **kargs):\n \"\"\"\n Open an ITA file.\n \n Parameters\n ----------\n filename : string\n the path of the ita file\n \n Returns\n -------\n Class<ITA>\n ITA Object\n\n Examples\n --------\n >>> import pySPM\n >>> filename = \"myfile.ita\"\n >>> A = pySPM.ITA(filename)\n \"\"\"\n ITM.__init__(self, filename, *args, **kargs)\n try:\n self.sx = self.root.goto('filterdata/TofCorrection/ImageStack/Reduced Data/ImageStackScans/Image.XSize').get_ulong()\n self.sy = self.root.goto('filterdata/TofCorrection/ImageStack/Reduced Data/ImageStackScans/Image.YSize').get_ulong()\n except MissingBlock:\n self.sx = self.size['pixels']['x']\n self.sy = self.size['pixels']['y']\n try:\n # self.Nscan = int(self.root.goto('filterdata/TofCorrection/ImageStack/Reduced Data'\\\n # '/ImageStackScans/Image.NumberOfScans').getLong())\n self.Nimg = int(self.root.goto('filterdata/TofCorrection/ImageStack/Reduced Data'\n '/ImageStackScans/Image.NumberOfImages').get_ulong())\n except:\n self.Nimg = 0\n self.img = self.get_intensity()\n \n try:\n self.fov = self.root.goto('Meta/SI Image[0]/fieldofview').get_double()\n except MissingBlock:\n self.fov = self.get_value(\"Registration.Raster.FieldOfView\")['float']\n \n @alias(\"getIntensity\")\n def get_intensity(self):\n \"\"\"\n Retrieve the total Ion image\n \"\"\"\n try:\n X, Y = self.size['pixels']['x'], self.size['pixels']['y']\n img = self.image(np.flipud(np.array(self.root.goto('Meta/SI Image/intensdata').get_data(\"f\"), dtype=np.float32).reshape((Y, X))), channel=\"SI count\")\n except Exception as e:\n try:\n img = self.get_added_image(0).pixels\n except:\n try:\n img = self.get_added_image_by_SN(self.get_channel_SN(\"total\"))\n except:\n import warnings\n warn.warnings(\"SI image cannot be retrieved\")\n return None\n return img\n \n def get_channel_SN(self, channel):\n \"\"\"\n New ITA fileformat assign a serial number (SN) in the form of a UUID for each channel.\n The SN corresponding to a given channel name can be retrieved by this function.\n\n Parameters\n ----------\n channel : string\n The channel name assigned to a given peak\n \"\"\"\n for x in self.root.goto(\"MassIntervalList\"):\n if x.name == 'mi':\n l = x.dict_list()\n if l['assign']['utf16'] == channel or l['desc']['utf16'] == channel:\n return l['SN']['utf16']\n\n raise Exception(\"Channel name \\\"{channel}\\\" not found\".format(channel=channel))\n \n @alias(\"getChannelsByName\")\n def get_channels_by_name(self, name, strict=False):\n \"\"\"\n Retrieve the channels for a given assignment name in the form of a list of dictionaries.\n The output can be formatted in a human readable way with the pySPM.ITA.showChannels function (see examples).\n\n Parameters\n ----------\n name : string or list of strings\n A regular expression (regex) used for the search\n strict : bool\n If strict is True, the search name won't be treated as a regexp, but rather the whole name 
should match.\n\n Returns\n -------\n list\n A list of dictionaries where each dictionary is a description of the selected channel. Which contains:\n - clsid : class ID. A useless information for the end-user\n - desc : a description string encoded in utf16.\n - color : a 32bits color encoding of the peak\n - symbolID : Not used\n - id : The ID of the channel. (The total counts is 0, the sum\n of the rest 1 and the first peak is 2, ... )\n - SN : an utf16 serial number which is useless for the end-used\n - assign : a utf16 string with the element assignment of the\n peak (e.g.: CH-, Na+, ...)\n - lmass : a long value indicating the lower mass of the peak (in u)\n - umass : a long value indicating the upper mass of the peak (in u)\n - cmass : a long value indicating the center mass of the peak\n \n Examples\n --------\n >>> A = pySPM.ITA(\"myfile.ita\")\n >>> ch = A.getChannelsByName(\"C\")\n >>> A.showChannels(ch)\n \tCH- (), mass: 12.99 - 13.03\n C_2- (), mass: 23.97 - 24.03\n C_2H- (), mass: 24.98 - 25.04\n CN- (), mass: 25.97 - 26.04\n Cl- (), mass: 34.93 - 35.01\n C_2O- (), mass: 39.96 - 40.04\n CHNO- (), mass: 42.97 - 43.05\n CHO_2- (), mass: 44.95 - 45.04\n Cs- (), mass: 132.81 - 133.01\n >>> ch = A.getChannelsByName(\"C[^a-z]\") # Only carbon atoms (meaning that the char after C cannot be lowercase)\n >>> A.showChannels(ch)\n \tCH- (), mass: 12.99 - 13.03\n C_2- (), mass: 23.97 - 24.03\n C_2H- (), mass: 24.98 - 25.04\n CN- (), mass: 25.97 - 26.04\n C_2O- (), mass: 39.96 - 40.04\n CHNO- (), mass: 42.97 - 43.05\n CHO_2- (), mass: 44.95 - 45.04\n >>> ch = A.getChannelsByName(\"CH\", True) # Only CH channel and not CHNO and CHO_2\n >>> A.showChannels(ch)\n \tCH- (), mass: 12.99 - 13.03\n \"\"\"\n res = []\n if strict:\n if type(name) in [list, tuple]:\n name = ['^'+n+'[+-]?$' for n in name]\n else:\n name = '^'+name+'[+-]?$'\n if type(name) is not list:\n name = [name]\n for n in name:\n for P in self.peaks:\n p = self.peaks[P]\n ma = re.compile(n, re.U)\n if ma.match(p['assign']['utf16']) or ma.match(p['desc']['utf16']):\n res.append(p)\n return res\n\n @deprecated(\"showChannels\")\n def show_channels(self, ch):\n \"\"\"\n Format a list of channels where each channel is represented by a dictionary (like the ones produced by pySPM.ITA.getChannelsByName) to a human readable output.\n\n Parameters\n ----------\n ch : list\n A list of dictionaries representing the channels\n\n Returns\n -------\n None\n It will print a list of channels with the assignment, the description in parenthesis followed by the lower - upper mass range.\n \"\"\"\n for z in ch:\n print(\"\\t{name} ({desc}), mass: {lower:.2f} - {upper:.2f}\"\n .format(desc=z['desc']['utf16'], name=z['assign']['utf16'],\n lower=z['lmass']['float'], upper=z['umass']['float']))\n\n @alias(\"getChannelByMass\")\n def get_channel_by_mass(self, mass, full=False):\n \"\"\"\n Retrieves the first channel ID which has a mass range containing a given mass.\n\n Parameters\n ---------\n mass : int, float\n The mass. If zero, the channel 0 will be returned and correspond to the Total count channel.\n full : bool\n If True, not only the ID is retrieved but the whole dictionary similarly as with pySPM.ITA.getChannelsByName\n\n Returns\n -------\n int\n The first channel ID containing the mass given in argument. 
If a mass 0 is given, the output will be 0 which corresponds to the total count channel.\n \"\"\"\n if mass == 0:\n return 0\n for P in self.peaks:\n p = self.peaks[P]\n \n if p['id']['long'] > 1 and p['lmass']['float'] <= mass and mass <= p['umass']['float']:\n if full:\n return p\n return p['id']['long']\n raise ValueError('Mass {:.2f} Not Found'.format(mass))\n\n @alias(\"getShiftCorrectedImageByName\")\n def get_shift_corrected_image_by_name(self, names, **kargs):\n \"\"\"\n Retrieve the drift corrected (or shift corrected) image for the sum of all channels matching a given name. The shift correction applied is the one saved in the ITA file.\n\n Parameters\n ---------\n names : string or list of strings\n A channel name of a list of channel names to be summed up\n\n Returns\n -------\n pySPM.SPM.SPM_image\n The image of the sum of all the selected channels\n list of dictionaries\n The list of all the channels selected. This list can be displayed in a human readable form by the pySPM.ITA.showChannels function\n\n \"\"\"\n return self.get_sum_image_by_name(names, shifts=[(-x, -y) for x, y in self.get_saved_shift()], **kargs)\n \n def __get_sum_image(self, scans, channels, **kargs):\n \"\"\"\n An internal function to retrieve the sum of several scans for several channel ID.\n \"\"\"\n Z = np.zeros((self.sy, self.sx))\n if 'shifts' in kargs:\n shifts = kargs['shifts']\n elif 'Shifts' in kargs:\n shifts = kargs['Shifts']\n else:\n shifts = [(-x,-y) for x,y in self.get_saved_shift()] \n for ch in channels:\n ID = ch['id']['long']\n Z += self.fast_get_image(ID, scans, shifts)\n return Z\n\n @alias(\"getSumImageBySN\")\n def get_sum_image_by_sn(self, SN, scans=None, prog=False, raw=False, **kargs):\n \"\"\"\n Retrieve the image for the sum of several scans for a given channel SN.\n \"\"\"\n if scans is None:\n scans = range(self.Nscan)\n if type(scans) == int:\n scans = [scans]\n if prog:\n scans= PB(scans)\n\n Z = np.zeros((self.sy, self.sx))\n for s in scans:\n node = self.root.goto(\"filterdata/TofCorrection/ImageStack/Reduced Data/Images/{SN}/ScanData/EDROff/{scan}\".format(SN=SN, scan=s))\n dat = node.decompress()\n data = struct.unpack(\"<{}I\".format(len(dat)//4), dat)\n Z += np.array(data, dtype=np.float).reshape((self.sy, self.sx))\n if raw:\n return Z\n channel = self.get_channel_by_sn(SN)\n return self.image(np.flipud(Z), channel=channel)\n\n @alias(\"getSumImageByName\")\n def get_sum_image_by_name(self, names, scans=None, strict=False, prog=False, raw=False, **kargs):\n \"\"\"\n Retrieve the image for the sum of several scans and channels selected by their channel name.\n\n Parameters\n ----------\n names : string or list of strings\n Similar as for pySPM.ITA.getChannelsByName\n scans : int, list of int or None\n The list of the scan number to be summed up. For the case of None (default) all the available scans are taken.\n strict : bool\n Is the name selection strict? 
(see pySPM.ITA.getChannelsByName)\n prog : bool\n If True a progressbar will be displayed to show the summing progress as this might be quite slow.\n raw : bool\n If True a numpy array will be returned instead of a pySPM.SPM.SPM_image\n \"\"\"\n if scans is None:\n scans = range(self.Nscan)\n if type(scans) == int:\n scans = [scans]\n \n channels = self.get_added_image_by_name(names, strict)\n if prog:\n scans = PB(scans)\n Z = self.__get_sum_image(scans, channels)\n if raw:\n return Z, channels\n channel_title = \",\".join([z['assign']['utf16'] for z in channels])\n return self.image(np.flipud(Z), channel=channel_title), channels\n\n def show(self, ax=None):\n \"\"\"\n Shows the total SI image with the indication of the field of view.\n\n Parameters\n ----------\n ax : matplotlib axis or None\n The axis in which the image will be shown. If None the current axis will be used (ax = plt.gca())\n\n Returns\n -------\n None\n \"\"\"\n import matplotlib.pyplot as plt\n if ax is None:\n fig, ax = plt.subplots(1, 1, figsize=(5, 5))\n ax.imshow(self.img, extent=(0, self.fov*1e6, 0, self.fov*1e6))\n ax.set_title(\"Total SI\")\n ax.set_xlabel(\"x [$\\mu$m]\")\n ax.set_ylabel(\"y [$\\mu$m]\")\n\n @alias(\"getShiftsByMass\")\n def get_shifts_by_mass(self, masses, centered=True, prog=False, Filter=None):\n \"\"\"\n Deprecated. A relic function that the developer is not even sure what it was supposed to do ;)\n \"\"\"\n Shifts = [(0, 0)]\n if Filter is None:\n Filter = lambda z: z\n S0 = Filter(self.get_added_image_by_mass(masses, 0))\n Y = range(1, self.Nscan)\n if prog:\n Y = PB(Y)\n for i in Y:\n S = Filter(self.get_sum_image_by_mass(masses, i))\n Shift = np.real(np.fft.fftshift(np.fft.ifft2(\n np.conj(np.fft.fft2(S0)) * np.fft.fft2(S))))\n cord = np.unravel_index(np.argmax(Shift), S0.shape)\n trans = (cord[1]-S0.shape[1]/2, cord[0]-S0.shape[0]/2)\n Shifts.append(trans)\n if centered:\n avSx = np.round(np.mean([z[0] for z in Shifts]))\n avSy = np.round(np.mean([z[1] for z in Shifts]))\n Shifts = [(z[0]-avSx, z[1]-avSy) for z in Shifts]\n return Shifts\n\n @alias(\"getXsectionByMass\")\n def get_xsection_by_mass(self, x1, y1, x2, y2, masses, N=None, prog=False, ax=None, flip=False, col='w-', **kargs):\n \"\"\"\n Retrieves a Cross-Section for a given mass along the profile determined by coordinates (x1,y1) and (x2,y2).\n The output is a 2D image where the x-axis correspond to the position along the profile and the y-axis the scan number.\n\n Parameters\n ----------\n x1 : int\n y1 : int\n x2 : int\n y2 : int\n profile coordinates in pixel: (x1,y1) -> (x2,y2)\n masses : int, float, list of floats\n The masse or list of masses to sum\n N : int or None\n The number of value used along the profile (which will be interpolated).\n None (default) will take the roundest number of values closest to the pixel length of the profile\n prog : bool\n If True display a progressbar\n ax : None or matplotlib axis\n if not None, the axis representing the 2D image can be given in order to display the profile's position\n col : string (matplotlib color format)\n The color of the profile used in case ax is given\n flip : bool\n Flip the y-coordinates?\n **kargs : arguments\n All supplementary arguments are passed to the pySPM.ITA.getSumImageByMass\n\n Returns\n -------\n np.ndarray\n 2D numpy array containing the sum of all channels. 
The values are the count number\n \"\"\"\n y1 = self.sy-1-y1\n y2 = self.sy-1-y2 \n if N is None:\n N = int(np.sqrt((x2-x1)**2+(y2-y1)**2))+1\n x = np.linspace(x1, x2, N)\n y = np.linspace(y1, y2, N)\n out = np.zeros((self.Nscan, N))\n Y = range(self.Nscan)\n if ax is not None:\n if not flip:\n ax.plot([x1, x2], [self.sy-1-y1, self.sy-1-y2], col)\n if prog:\n Y = PB(Y)\n from scipy.ndimage import map_coordinates\n for s in Y:\n Z = self.get_sum_image_by_mass(masses, s, **kargs)\n P = map_coordinates(Z.pixels, np.vstack((y, x)))\n out[s, :] = P\n return out\n\n @alias(\"getAddedImageByName\")\n def get_added_image_by_name(self, names, strict=False, raw=False, **kargs):\n \"\"\"\n Retrieve the image for the sum of all scan (precomputed by iontof, but not shift-corrected) for given names\n\n Parameters\n ----------\n names : string or list of strings\n name of the channel (see pySPM.ITA.getChannelsByName)\n strict : bool\n If True the names are the exact names (see pySPM.ITA.getChannelsByName)\n raw : bool\n If True a 2D numpy array will be returned\n **kargs: supplementary arguments\n passed to pySPM.ITA.getAddedImage\n\n Returns\n -------\n pySPM.SPM.SPM_image\n Image of the result\n list of dictionaries\n List of all selected peaks used to compute the image.\n Note: Pass this list to pySPM.ITA.showChannels in order to print a human readable representation of it.\n \"\"\"\n Z = np.zeros((self.sy, self.sx))\n channels = self.get_channels_by_name(names, strict)\n for ch in channels:\n ID = ch['id']['long']\n Z += self.get_added_image(ID, **kargs)\n ch = self.get_masses(channels)\n if raw:\n return Z, ch\n return self.image(np.flipud(Z), channel=\",\".join([z['assign'] for z in ch])), ch\n\n @alias(\"getSavedShift\")\n def get_saved_shift(self):\n \"\"\"\n getSavedShift returns the shifts saved with the file. 
Usually this is the shift correction you perform with the IonToF software.\n\n        Returns\n        -------\n
        List of tuples\n            each tuple is a (Δx,Δy) in pixels (one for each scan).\n        \"\"\"\n        try:\n
            X = zlib.decompress(self.root.goto('filterdata/TofCorrection/ImageStack/Reduced Data'\n
                '/ImageStackScans/ShiftCoordinates/ImageStack.ShiftCoordinates').value)\n        except Exception:\n
            return [(0,0) for x in range(self.Nscan)]\n        D = struct.unpack('<'+str(len(X)//4)+'i', X)\n
        dx = D[::2]\n        dy = D[1::2]\n        return list(zip(dx, dy))\n\n    @alias(\"getShiftCorrectedImageByMass\")\n
    def get_shift_corrected_image_by_mass(self, masses, **kargs):\n        \"\"\"\n
        Shortcut function for pySPM.ITA.get_sum_image_by_mass using the saved shift corrections.\n        \"\"\"\n
        return self.get_sum_image_by_mass(masses, shifts=[(-x,-y) for x,y in self.get_saved_shift()], **kargs)\n\n
    @alias(\"getSumImageByMass\")\n    def get_sum_image_by_mass(self, masses, scans=None, prog=False, raw=False, **kargs):\n        \"\"\"\n
        Similar to pySPM.ITA.getSumImageByName but instead of the names, the mass or list of masses is provided.\n
        See pySPM.ITA.getSumImageByName for more details.\n        \"\"\"\n        if scans is None:\n
            scans = range(self.Nscan)\n        if type(scans) is int:\n            scans = [scans]\n
        if type(masses) is int or type(masses) is float:\n            masses = [masses]\n        if prog:\n
            scans = PB(scans, leave=False)\n        channels = [self.get_channel_by_mass(m, full=True) for m in masses]\n
        Z = self.__get_sum_image(scans, channels, **kargs)\n        if raw:\n            return Z, channels\n
        channels_name = [[\"{:.2f}u\".format(m['cmass']['float']),m['assign']['utf16']][m['assign']['utf16']!=''] for m in channels]\n
        return self.image(np.flipud(Z), channel=\"Masses: \"+\",\".join(channels_name))\n\n    @alias(\"getAddedImageByMass\")\n
    def get_added_image_by_mass(self, masses, raw=False, **kargs):\n        \"\"\"\n
        Retrieve the image for the sum of all scans (precomputed by IonToF, but not shift-corrected) for (a) given mass(es)\n\n
        Parameters\n        ----------\n        masses : float or list of floats\n            mass of the channels to be used\n
        raw : bool\n            If True a 2D numpy array will be returned\n        **kargs: supplementary arguments\n
            passed to pySPM.ITA.getAddedImage\n\n        Returns\n        -------\n        pySPM.SPM.SPM_image\n
            Image of the result\n        list of dictionaries\n            Only returned if raw is True\n
            List of all selected peaks used to compute the image.\n
            Note: Pass this list to pySPM.ITA.showChannels in order to print a human-readable representation of it.\n        \"\"\"\n
        if type(masses) is int or type(masses) is float:\n            masses = [masses]\n        Z = np.zeros((self.sy, self.sx))\n
        channels = []\n        for m in masses:\n            ch = self.get_channel_by_mass(m)\n            m = self.get_masses()[ch]\n
            if m['assign'] != '':\n                channels.append(m['assign'])\n            else:\n
                channels.append(\"{cmass:.2f}u\".format(**m))\n            Z += self.get_added_image(ch, **kargs)\n
        if raw:\n            return Z, channels\n        return self.image(np.flipud(Z), channel=\",\".join(channels))\n\n
    @alias(\"get_channel_by_sn\",\"getChannelBySN\")\n    def get_channel_by_SN(self, SN):\n
        for node in self.root.goto(\"MassIntervalList\"):\n            if node.name == \"mi\":\n                l = node.dict_list()\n
                if l['SN']['utf16']==SN:\n                    name = l['assign']['utf16']\n                    if not name:\n
                        name = l['desc']['utf16']\n                    if not name:\n
                        name = '{:.2f}u'.format(l['cmass']['float'])\n                    return name\n\n
    @alias(\"get_added_image_by_sn\",\"getAddedImageBySN\")\n    def get_added_image_by_SN(self, SN, raw=False):\n        \"\"\"\n
        Newer ITA file formats save images with their respective serial number (SN).\n
        This function returns the image for a given SN.\n\n        Parameters\n        ----------\n\n
        SN: Serial Number of the channel\n
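\n        Example\n        -------\n
        A hypothetical call (the file name and the SN string are placeholders; real SN values\n
        come from the file's MassIntervalList):\n\n
        >>> A = pySPM.ITA(\"myfile.ita\")\n
        >>> img = A.get_added_image_by_SN(\"204\")\n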
        \"\"\"\n
        node = self.root.goto(\"filterdata/TofCorrection/ImageStack/Reduced Data/Images/{SN}/SumImage/EDROff\".format(SN=SN))\n
        dat = node.decompress()\n        data = struct.unpack(\"<{}I\".format(len(dat)//4), dat)\n
        img = np.array(data, dtype=np.float64).reshape((self.sy, self.sx))\n        if raw:\n            return img\n
        channel = self.get_channel_by_SN(SN)\n        return self.image(np.flipud(img), channel=channel)\n\n
    @alias(\"getAddedImage\")\n    def get_added_image(self, channel, **kargs):\n        \"\"\"\n
        Retrieve the numpy 2D array of a given channel ID for the sum of all scans (precomputed by IonToF, but not shift-corrected).\n
        Note: It is preferable to use pySPM.ITA.getAddedImageByMass or pySPM.ITA.getAddedImageByName\n        \"\"\"\n
        assert type(channel) is int\n        assert channel >= 0 and channel < self.Nimg\n
        c = self.root.goto('filterdata/TofCorrection/ImageStack/Reduced Data/ImageStackScansAdded'\n
            '/Image['+str(channel)+']/ImageArray.Long')\n        D = zlib.decompress(c.value)\n
        V = np.array(struct.unpack('<'+str(self.sx*self.sy)+'I', D),\n
                     dtype=np.float64).reshape((self.sy, self.sx))\n        return V\n\n    @alias(\"fastGetImage\")\n
    def fast_get_image(self, channel, scans, shifts=False, prog=False, **kargs):\n        \"\"\"\n
        Retrieve a 2D numpy array corresponding to a given channel ID for the given scan(s) and return their sum.\n\n
        Parameters\n        ----------\n        channel : int\n            The channel ID\n
        scans: int, list of ints or 1D numpy array\n            List of scans\n        shifts : False or list of tuples\n
            List of the shift corrections in pixels for ALL the scans (not only the selected ones).\n
            If False, no shift correction is performed\n        prog : bool\n            Display a progressbar?\n\n
        Returns\n        -------\n        2D numpy array\n            array data of the image\n        \"\"\"\n
        # Old parameter name compatibility\n        if 'Shifts' in kargs:\n            shifts = kargs.pop(\"Shifts\")\n\n
        Z = np.zeros((self.sy, self.sx))\n        if prog:\n            scans = PB(scans)\n\n
        im_root = self.root.goto('filterdata/TofCorrection/ImageStack/Reduced Data/ImageStackScans/Image['+str(channel)+']')\n
        for scan in scans:\n            c = im_root.goto('ImageArray.Long['+str(scan)+']')\n
            V = np.array(c.get_data('I'), dtype=np.float64).reshape((self.sy, self.sx))\n            if shifts:\n
                r = [int(z) for z in shifts[scan]]\n                V = np.roll(np.roll(V, -r[0], axis=1), -r[1], axis=0)\n
                rx = [max(0,-r[0]), self.sx-max(1,r[0])]\n                ry = [max(0,-r[1]), self.sy-max(1,r[1])]\n
                Z[ry[0]:ry[1],rx[0]:rx[1]] += V[ry[0]:ry[1],rx[0]:rx[1]]\n            else:\n                Z += V\n
        return Z\n\n    @alias(\"getImage\")\n
    def get_image(self, channel, scan, shifts=None, shift_mode='roll', const=0, **kargs):\n        \"\"\"\n
        getImage retrieves the image of a specific channel (ID) and a specific scan.\n\n        Parameters\n        ----------\n
        channel : int\n            channel ID\n        scan : int\n            scan number (starting at 0)\n
        shifts : None or list of tuples\n            None: No shift\n            list of tuples (dx,dy), one for each scan\n
        shift_mode : string\n            roll : roll the data over (easy but non-physical)\n
            const : replace missing values by a constant (given by argument const)\n
            NaN : the same as const but with const=numpy.nan\n        const : float\n
            if shift_mode is 'const' then this parameter defines the constant used (default 0)\n
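\n        Example\n        -------\n
        A sketch that applies the shifts stored in the file (the channel and scan indices are placeholders):\n\n
        >>> shifts = A.get_saved_shift()\n
        >>> V = A.get_image(0, 5, shifts=shifts, shift_mode=\"NaN\")\n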
        \"\"\"\n        # Compatibility with old parameter names\n        if 'Shifts' in kargs:\n
            shifts = kargs.pop(\"Shifts\")\n        if 'ShiftMode' in kargs:\n            shift_mode = kargs.pop(\"ShiftMode\")\n\n
        assert type(channel) is int\n        assert type(scan) is int\n        assert channel >= 0 and channel < self.Nimg\n
        assert scan >= 0 and scan < self.Nscan\n
        c = self.root.goto('filterdata/TofCorrection/ImageStack/Reduced Data/ImageStackScans'\n
            '/Image['+str(channel)+']/ImageArray.Long['+str(scan)+']')\n
        V = np.array(c.get_data(), dtype=np.float64).reshape((self.sy, self.sx))\n        if shifts is not None:\n
            r = [int(z) for z in shifts[scan]]\n            V = np.roll(np.roll(V, -r[0], axis=1), -r[1], axis=0)\n
            if shift_mode == 'const' or shift_mode == 'NaN':\n                if shift_mode == 'NaN':\n
                    const = np.nan\n                if r[1] < 0:\n                    V[:-r[1], :] = const\n
                elif r[1] > 0:\n                    V[-r[1]:, :] = const\n                if r[0] < 0:\n
                    V[:, :-r[0]] = const\n                elif r[0] > 0:\n                    V[:, -r[0]:] = const\n
        return V\n\n    @alias(\"getOperation\")\n    def get_operation(self, OpID):\n        \"\"\"\n
        Test function to retrieve the operations used in the Worksheet.\n        \"\"\"\n
        Nop = self.root.goto('Presentation/Imaging Worksheet/Worksheet/OPERATIONS/OpCount').get_ulong()\n
        for i in range(Nop):\n
            blk = self.root.goto('Presentation/Imaging Worksheet/Worksheet/OPERATIONS/Operation[{}]'.format(i))\n
            if blk.goto_item('OpID').get_ulong() == OpID:\n                return blk\n        return None\n\n
    @alias(\"showWorksheet\")\n    def show_worksheet(self, page=0):\n        \"\"\"\n
        In-development function to display the worksheet.\n        \"\"\"\n        import matplotlib as mpl\n
        from .utils import sp\n
        num_pages = self.root.goto('Presentation/Imaging Worksheet/Worksheet/PAGES/COUNT').get_ulong()\n
        assert page < num_pages\n
        Nitems = self.root.goto('Presentation/Imaging Worksheet/Worksheet/PAGES/Page[{}]/ItemCount'.format(page)).get_ulong()\n
        sett = self.root.goto('Presentation/Imaging Worksheet/Worksheet/PAGES/Page[{}]/SETTINGS'.format(page)).dict_list()\n
        Nx = sett['Xsize']['ulong']\n        Ny = sett['Ysize']['ulong']\n
        items = self.root.goto('Presentation/Imaging Worksheet/Worksheet/PAGES/Page[{}]/Items'.format(page)).get_data()\n
        ax = sp(len(items))\n        IntV = {}\n        for x in self.root.goto(\"MassIntervalList\"):\n
            if x.name == 'mi':\n                d = x.dict_list()\n
                IntV[d['id']['long']] = d['desc']['utf16']+d['assign']['utf16']\n        for i, it in enumerate(items):\n
            blk = self.get_operation(it)\n            OPTYPE = blk.goto_item('OPTYPE').get_ulong()\n
            while OPTYPE != 3:\n                OPTYPE = blk.goto_item('OPTYPE').get_ulong()\n
                if OPTYPE == 4:\n                    blk = self.get_operation(blk.goto_item('ArgOpIDs').get_ulong())\n
                elif OPTYPE == 3:\n
                    palette = np.array(blk.goto_item('BMP-Palette').get_data('B')).reshape((256, 4))\n
                    B, G, R = palette[:, 0], palette[:, 1], palette[:, 2]\n
                    dimx = blk.goto('Cache/IImage-Cache-DimX').get_ulong()\n
                    dimy = blk.goto('Cache/IImage-Cache-DimY').get_ulong()\n
                    img = np.array(blk.goto('Cache/IImage-Cache-Intensities').get_data('d')).reshape((dimy, dimx))\n                    
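# Build a matplotlib colormap from the 256-entry BGR(A) bitmap palette\n                    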
RGB = np.hstack([R[:, None], G[:, None], B[:, None]])/256\n
                    cm = mpl.colors.ListedColormap(RGB)\n                    ax[i].imshow(img, cmap=cm)\n\n
    def add_new_images(self, miblock, scans=None, added=None, prog=False, **kargs):\n
        # Compatibility with old parameter names\n        if 'Scans' in kargs:\n            scans = kargs.pop(\"Scans\")\n
        if 'Added' in kargs:\n            added = kargs.pop(\"Added\")\n\n
        assert scans is not None or added is not None\n        lvl = 3 # zlib encoding level\n
        sy, sx = self.size['pixels']['y'], self.size['pixels']['x']\n        SN = miblock.goto(\"SN\").get_string()\n
        if added is None:\n            added_img = np.zeros((sy, sx), dtype=np.uint32)\n
        chID = miblock.goto(\"id\").get_ulong()\n        if scans is not None:\n
            N = self.root.goto(\"filterdata/TofCorrection/ImageStack/Reduced Data/ImageStackScans/Image.NumberOfImages\").get_ulong()\n
        AN = self.root.goto(\"filterdata/TofCorrection/ImageStack/Reduced Data/ImageStackScansAdded/Image.NumberOfImages\").get_ulong()\n
        self.root.edit_block(\"filterdata/TofCorrection/ImageStack/Reduced Data/ImageStackScansAdded/Image[{}]\".format(AN), \"Image.MassIntervalSN\", SN.encode('utf8'))\n
        self.root.edit_block(\"filterdata/TofCorrection/ImageStack/Reduced Data/ImageStackScansAdded/Image[{}]\".format(AN), \"Image.XSize\", struct.pack(\"<I\", sx))\n
        self.root.edit_block(\"filterdata/TofCorrection/ImageStack/Reduced Data/ImageStackScansAdded/Image[{}]\".format(AN), \"Image.YSize\", struct.pack(\"<I\", sy))\n
        if scans is not None:\n            RS = range(self.Nscan)\n            if prog:\n                RS = PB(RS)\n
            for i in RS:\n                img = np.flipud(scans[i].astype(np.uint32, casting='unsafe'))\n
                data = zlib.compress(struct.pack(\"<{}I\".format(sx*sy), *np.ravel(img)), level=lvl)\n
                self.root.edit_block(\"filterdata/TofCorrection/ImageStack/Reduced Data/ImageStackScans/Image[{}]\".format(N), \"ImageArray.Long\", data, id=i, _type=128)\n
                if added is None:\n                    added_img += img\n\n        if added is None:\n
            added = added_img\n        else:\n            added = np.flipud(added)\n
        data = zlib.compress(struct.pack(\"<{}I\".format(sx*sy), *np.ravel(added.astype(np.uint32, casting='unsafe'))), level=lvl)\n
        self.root.edit_block(\"filterdata/TofCorrection/ImageStack/Reduced Data/ImageStackScansAdded/Image[{}]\".format(AN), \"ImageArray.Long\", data, _type=128)\n\n
        self.root.edit_block(\"filterdata/TofCorrection/ImageStack/Reduced Data/ImageStackScansAdded/Image[{}]\".format(AN), \"Image.PulsesPerPixel\", struct.pack(\"<I\", self.spp*self.Nscan))\n
        self.root.edit_block(\"filterdata/TofCorrection/ImageStack/Reduced Data/ImageStackScansAdded/Image[{}]\".format(AN), \"Image.MaxCountsPerPixel\", struct.pack(\"<I\", int(np.max(added))))\n
        self.root.edit_block(\"filterdata/TofCorrection/ImageStack/Reduced Data/ImageStackScansAdded/Image[{}]\".format(AN), \"Image.MinCountsPerPixel\", struct.pack(\"<I\", int(np.min(added))))\n
        self.root.edit_block(\"filterdata/TofCorrection/ImageStack/Reduced Data/ImageStackScansAdded/Image[{}]\".format(AN), \"Image.TotalCountsDbl\", struct.pack(\"<d\", np.sum(added)))\n
        self.root.edit_block(\"filterdata/TofCorrection/ImageStack/Reduced Data/ImageStackScansAdded/Image[{}]\".format(AN), \"Image.TotalCounts\", struct.pack(\"<I\", int(np.sum(added))))\n\n
        if scans is not None:\n
            self.root.edit_block(\"filterdata/TofCorrection/ImageStack/Reduced Data/ImageStackScans\", \"Image.NumberOfImages\", struct.pack(\"<I\", N+1))\n
        self.root.edit_block(\"filterdata/TofCorrection/ImageStack/Reduced Data/ImageStackScansAdded\", \"Image.NumberOfImages\", struct.pack(\"<I\", AN+1))\n
        self.Nimg += 1\n\n@aliased\nclass ITA_collection(Collection):\n    \"\"\"\n
    ITA_collection is a class containing a collection of ToF-SIMS images.\n
    For details on Collection see pySPM.collection.Collection\n    \"\"\"\n
    def __init__(self, filename, channels1=None, channels2=None, name=None, mass=False, strict=False):\n        \"\"\"\n
        Opening a ToF-SIMS ITA file as an image collection\n\n        Parameters\n        ----------\n
        filename : string\n            The filename\n        channels1 : None or a list of names\n
        channels2 : None or a list of names\n
            channels1 and channels2 can be lists of names, or of masses if mass=True\n        name : string or None\n
            Name of the collection. If None, the basename of the filename is used (e.g. path/myfile.ita => name=myfile)\n
        mass : bool\n            if True the channel lists are masses and not names\n        strict : bool\n
            Is the channel name strict? (see pySPM.ITA.getChannelsByName)\n\n        Returns\n        -------\n
        pySPM.ITA_collection class\n
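\n        Example\n        -------\n
        A minimal sketch (the file name and channel names are placeholders):\n\n
        >>> col = pySPM.ITA_collection(\"myfile.ita\", channels1=['Au-', 'Si-'])\n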
        \"\"\"\n        self.ita = ITA(filename)\n        self.filename = filename\n        self.PCA = None\n
        if name is None:\n            name = os.path.basename(filename)\n        self.name = name\n
        Collection.__init__(self, sx=self.ita.fov, sy=self.ita.fov*self.ita.sy/self.ita.sx,\n
                            unit='m', name=name)\n        self.msg = \"\"\n        if channels1 is None:\n
            mass = True\n            masses = self.ita.get_masses()\n
            channels1 = [x['cmass'] for x in masses if x['id'] > 1]\n        CHS = [channels1]\n
        if channels2 is not None:\n            CHS.append(channels2)\n        for channels in CHS:\n
            if channels is channels2:\n                strict = False\n            if type(channels) is list:\n
                for x in channels:\n                    if mass:\n                        try:\n
                            I = self.ita.get_added_image_by_mass(x)\n                            m = masses[2+channels1.index(x)]\n
                            if m['assign'] != '':\n                                self.add(I, m['assign'])\n
                            else:\n                                self.add(I, \"{cmass:.2f}u\".format(cmass=x))\n
                        except Exception:\n                            pass\n                    else:\n
                        Z, ch = self.ita.get_added_image_by_name(x, strict)\n
                        self.msg += \"{0}\\n\".format(x)\n                        for z in ch:\n
                            self.msg += \"\\t{name} ({desc}), mass: {lower:.2f} - {upper:.2f}\\n\"\\\n
                                .format(desc=z['desc'], name=z['assign'],\n
                                        lower=z['lmass'], upper=z['umass'])\n                        self.add(Z, x)\n
            elif type(channels) is dict:\n                for x in channels:\n                    if mass:\n
                        self.add(self.ita.get_added_image_by_mass(channels[x]), x)\n                    else:\n
                        Z, ch = self.ita.get_added_image_by_name(\n                            channels[x], strict)\n
                        self.msg += \"{0}\\n\".format(x)\n                        for z in ch:\n
                            self.msg += \"\\t{name} ({desc}), mass: {lower:.2f} - {upper:.2f}\\n\"\\\n
                                .format(desc=z['desc'], name=z['assign'],\n
                                        lower=z['lmass'], upper=z['umass'])\n                        self.add(Z, x)\n
            else:\n                raise TypeError(\n
                    \"Channels should be a list or a dictionary. Got {}\".format(type(channels)))\n\n
    def __getitem__(self, key):\n        \"\"\"\n        Retrieve the image of a given channel\n\n
        Example\n        -------\n        >>> A = pySPM.ITA_collection(\"myfile.ita\")\n        >>> A['Au-']\n
        <pySPM.SPM.SPM_image at 0x????????>\n        \"\"\"\n        if key not in self.channels:\n            return None\n
        return self.channels[key]\n\n    @alias(\"runPCA\")\n    def run_pca(self, channels=None):\n        \"\"\"\n
        Perform a Principal Component Analysis (PCA) on the channels\n\n        Parameters\n        ----------\n
        channels : None or list of strings\n            List of channels to use for the PCA. If None all channels will be used.\n
        \"\"\"\n        from .PCA import ITA_PCA\n        if channels is None:\n            channels = self.channels.keys()\n
        self.PCA = ITA_PCA(self, channels)\n\n    @alias(\"showPCA\")\n
    def show_pca(self, num=None, loadings=True, **kargs):\n        \"\"\"\n
        Run PCA if not already done and display the PCA images.\n\n        Parameters\n        ----------\n
        num : int or None\n            The number of PC components to display. If None, display all PCs.\n
        **kargs : additional parameters\n            passed to pySPM.PCA.showPCA\n\n        Returns\n        -------\n
        None\n            Plots the first num PCs in a 1×num grid of subplots\n\n        \"\"\"\n
        if self.PCA is None:\n            self.run_pca()\n        self.PCA.show_pca(num=num, loadings=loadings, **kargs)\n\n
    def loadings(self, num=None, ax=None):\n        \"\"\"\n
        Return a pandas DataFrame with the first num loadings\n\n        Parameters\n        ----------\n
        num : int or None\n            The number of PCs to use. If None, use all PCs.\n\n        Note\n        ----\n
        The results can be used in combination with pySPM.PCA.hinton to create nice Hinton plots:\n\n
        >>> col = pySPM.ITA_collection(\"myfile.ita\")\n        >>> L = col.loadings(3)\n
        >>> col.PCA.hinton(matrix=L)\n\n
        This displays a Hinton plot with num lines representing the strength of each loading. Blue means negative loadings and red means positive ones.\n
        The size of each square is proportional to the absolute value of each loading.\n        \"\"\"\n
        if self.PCA is None:\n            self.run_pca()\n        if num is None:\n            L = self.PCA.loadings()\n
        else:\n            L = self.PCA.loadings()[:num]\n        if ax is not None:\n            if ax is True:\n
                self.PCA.hinton(matrix=L)\n            else:\n                self.PCA.hinton(matrix=L, ax=ax)\n
        return L\n\n    @deprecated(\"StitchCorrection\")\n
    def stitch_correction(self, channel, stitches, gauss=0, debug=False):\n        \"\"\"\n
        When an image is created by stitching several images (while moving the stage during the measurement), the resulting image can show artifacts due to charging.\n
        The goal of this function is to try to suppress these stitching artifacts, given a channel name which is known to be homogeneous everywhere.\n\n
        Parameters\n        ----------\n        channel : string\n
            name of a channel with a known homogeneous yield (i.e. where the visible variation of the yield is only due to charging and not to a material density variation)\n
        stitches : list or tuple of two ints\n            stitches=(N,M) where N×M is the number of stitched images\n
        gauss : float\n            if >0 a Gaussian filter will be applied on the reference image\n        debug : bool\n
            if True, additionally to the new collection, also return the reference image\n\n        Returns\n        -------\n
        pySPM.ITA_collection\n            A new collection with corrected data\n\n        \"\"\"\n        import copy\n
        from scipy.ndimage.filters import gaussian_filter\n        N = ITA_collection(self.filename, [], name=self.name)\n
        size = list(self.channels.values())[0].pixels.shape\n
        S = np.zeros((int(size[0]/stitches[0]), int(size[1]/stitches[1])))\n        sy, sx = S.shape\n
        for i in range(stitches[0]):\n            for j in range(stitches[1]):\n
                S += self.channels[channel].pixels[sy*i:sy*(i+1), sx*j:sx*(j+1)]\n        
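# Guard against division by zero where the reference channel has no counts\n        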
S[S == 0] = 1\n        if gauss > 0:\n            S = gaussian_filter(S, gauss)\n        for x in self.channels:\n
            F = np.zeros(size)\n            for i in range(stitches[0]):\n                for j in range(stitches[1]):\n
                    F[sy*i:sy*(i+1), sx*j:sx*(j+1)] = \\\n
                        self.channels[x].pixels[sy*i:sy*(i+1), sx*j:sx*(j+1)]/S\n
            new_channel = copy.deepcopy(self[x])\n            new_channel.pixels = F\n            N.add(new_channel, x)\n
        if debug:\n            return N, S\n        return N\n" ]
[ [ "numpy.fft.fft2", "numpy.hstack", "numpy.sum", "numpy.sqrt", "numpy.linspace", "numpy.min", "numpy.flipud", "matplotlib.pyplot.subplots", "numpy.max", "numpy.argmax", "numpy.mean", "scipy.ndimage.filters.gaussian_filter", "matplotlib.colors.ListedColormap", "numpy.ravel", "numpy.array", "numpy.zeros", "numpy.roll", "numpy.vstack" ] ]
chuankaizhao/SAXS-guidedAdaptiveSampling
[ "348eebdc59641398d09538118618e6b0d04521d6" ]
[ "05-AdaptiveSampling/pickAdaptiveSeeds.py" ]
[ "### Given the SAXS discrepancy scores, pick the N states with the least SAXS discrepancy scores\n### for next round of adaptive sampling. Other information can be incorporated together with SAXS\n### to pick the seeding structures for adaptive sampling. \n### Required packages: numpy\n### Output: input files for CPPTRAJ to extract the seeding structures and save as rst (Amber restart files) \n\nimport numpy as np\n\n### read the list of MD trajectories\nList = [ line.rstrip() for line in open(\"List\", \"r\") ]\n\n### read the saxs discrepancy scores and sort based on scores\nsaxs = np.loadtxt(\"ProteinG_discrepancy.txt\")\nsaxs.view('i8,i8,i8').sort(order=['f1'], axis=0)\n\n### load the cluster files\ncl = io.load('../cluster/clustering_tica.pkl')\ncl = cl.labels_\n\n### randomly pick M structures from each of the N states with the lowest SAXS discrepancy scores for adaptive sampling.\nM=5\nN=10\n\nimport random\n\n### define the list of frames to extract from the MD trajectories\nfinal_selects = []\n\nfor i in range(N):\n selects = []\n for j in range(len(cl)):\n for k in range(len(cl[j])):\n if cl[i][j] == saxs[i][0]:\n selects.append[j][k]\n sel = np.random.choice(range(len(selects)),size=M)\n for j in range(M):\n final_selects.append(sel[j])\n\n\n### write the CPPTRAJ input file to extract frames\nround = 1\npath = \"/home/czhao37/3-ABA/trajs_full/\"\nopath = \"/home/czhao37/3-ABA/Round\" + str(round+1) + \"/0-Minimization/\"\nselects = final_selects\n\nfor i in range(len(selects)):\n select = selects[i]\n select = [ int(select[0]), int(select[1]) ]\n print(select)\n name = \"r\" + str(round+1) + \"par\" + str(i+1)\n f = open(\"cpptraj_\" + name + \".in\", \"w\")\n f.write(\"parm \" + path + List[select[0]] + \".prmtop\\n\")\n f.write(\"trajin \" + path + List[select[0]] + \".mdcrd \" + str(select[1]+1) + \" \" + str(select[1]+2) + \" 4\\n\")\n f.write(\"autoimage\\n\")\n f.write(\"parmbox alpha 90 beta 90 gamma 90\\n\")\n f.write(\"trajout \" + opath + name + \".rst restart\\n\")\n f.write(\"parmwrite out \" + opath + name + \".prmtop\\n\")\n" ]
[ [ "numpy.loadtxt" ] ]
BigNerd/justmltools
[ "97133380f2675596fdd9c87d9bc570ccd5fc20b9" ]
[ "tests/nlp/test_word_embedder.py" ]
[ "import numpy as np\nimport os\nimport pathlib\nfrom unittest import TestCase\nfrom justmltools.nlp.word_embedder import WordEmbedder\n\n\nclass TestWordEmbedder(TestCase):\n\n \"\"\" sample_text consists of the 100 words contained in the embedding file,\n however, the three '/'-separated words, e.g. zürich/winterthur, will not be loaded by the word embedder\n because '/' is used as a word boundary in the same way as whitespace by its tokenizer\n \"\"\"\n sample_text = \"\"\"\n humboldtgesellschaft gallersbach risikopositionswert „sekunde gallesium kleindemsin eboracensium qorig pagamento \n beisammenbleiben homosexuellenprozesse polymerasekomplex unharmonisches brennstofflagerung balabin \n „dauerkonflikt bildgießereien vvitch durchkonjugiert gainsbarre flugbootpionier „organisiertes todiraș \n hintergrundannahme sorridere strategieprofil navesi quadripustulata rightmire halbjahresabonnement davorstand \n steenaben gläsgen herrschaftsstellung femtogramm forstlehrlinge sternenplatz memmingen/archiv/ axialvektoren \n pertubuhan tempolimiten aliyye plagiatmasche relevanzkriterieum wüstenbewohnende senstadtum xwars bartholdo \n anlagesystem benennungsschemas „nrated „forg börsenrechtliche blautopfes „forge waidhauser dolds assignierten \n voiß worowitz weirowa türsturze iovita supergravitations oradeafc labelgruppe metallblöcken dysphemistisch \n conferuntur kuntas einzelbestand bestandsaufnahme/diskussion börsenrechtlicher „spurensuche… hervicusgasse \n toteisblockes wirtschaftskontakten hohnohka drainageplatten flackenheide chaonnophris lindakoennecke \n möbelabteilung lehrgangsende ‚ecke‘ motife schutzbestrebungen corval corythucha mölmschen sanandi ysatis \n metronetze albumbilder døttre zürich/winterthur stevenisten forstén sandener batrachotoxine\n \"\"\"\n\n def setUp(self) -> None:\n dir_path: str = pathlib.Path(__file__).parent.absolute()\n embedding_file_path: str = os.path.join(dir_path, \"embedding_wiki_de_tail_100.vec\")\n self.word_embedder: WordEmbedder = WordEmbedder(embedding_file_path=embedding_file_path)\n\n def test_embedding_dim(self):\n self.assertEqual(300, self.word_embedder.embedding_dim())\n\n def test_tokenize_texts(self):\n token_matrix: np.nd_array = self.word_embedder.tokenize_texts([self.sample_text])\n self.assertEqual(1, token_matrix.shape[0]) # one row for one text\n self.assertEqual(3000, token_matrix.shape[1]) # 3000 columns for 3000 tokens (including padded 0s at the end)\n\n tokenized_text = token_matrix[0]\n\n number_of_non_zero_tokens: int = np.count_nonzero(tokenized_text)\n self.assertEqual(97, number_of_non_zero_tokens) # only 97, not 100 because '/'-separated words are skipped\n\n number_of_unique_tokens = np.unique(tokenized_text).shape[0]\n self.assertEqual(97 + 1, number_of_unique_tokens) # one unique token for each unique word plus one for 0\n\n def test_embed_texts(self):\n embedded_texts: np.nd_array = self.word_embedder.embed_texts([self.sample_text])\n self.assertEqual(3, len(embedded_texts)) # 3 embeddings, for text, left context and right context\n\n embedded_text = embedded_texts[0]\n self.assertEqual(3000, embedded_text.shape[1])\n\n embedded_left_context = embedded_texts[1]\n self.assertEqual(3000, embedded_left_context.shape[1])\n\n embedded_right_context = embedded_texts[2]\n self.assertEqual(3000, embedded_right_context.shape[1])\n" ]
[ [ "numpy.count_nonzero", "numpy.unique" ] ]
sanilrod/Contactless-Attendence-System
[ "a0246047df139807dd636ca102b8151c456292ae" ]
[ "UI/app/views.py" ]
[ "import glob\nfrom .models import *\nimport os\nimport datetime\nfrom django.shortcuts import render\n\n# Create your views here.\n\n\ndef getAttendance(request):\n print(\"abcbababbabababbababababa\")\n import face_recognition\n import cv2\n import numpy as np\n from django.http import JsonResponse\n\n # This is a demo of running face recognition on live video from your webcam. It's a little more complicated than the\n # other example, but it includes some basic performance tweaks to make things run a lot faster:\n # 1. Process each video frame at 1/4 resolution (though still display it at full resolution)\n # 2. Only detect faces in every other frame of video.\n\n # PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.\n # OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this\n # specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.\n\n # Get a reference to webcam #0 (the default one)\n video_capture = cv2.VideoCapture(0)\n\n\n # Create arrays of known face encodings and their names\n known_face_encodings = [\n ]\n known_face_names = [\n ]\n\n for filepath in glob.iglob('media/encoding/*.txt'):\n known_face_encodings.append(np.loadtxt(filepath, delimiter=\",\"))\n teacher = Teacher.objects.get(id=int(str(os.path.basename(filepath).split(\"_\")[0])))\n known_face_names.append(teacher.name)\n\n\n # Initialize some variables\n face_locations = []\n face_encodings = []\n face_names = []\n process_this_frame = True\n\n print(known_face_names)\n print(known_face_encodings)\n\n while True:\n # Grab a single frame of video\n ret, frame = video_capture.read()\n\n # Resize frame of video to 1/4 size for faster face recognition processing\n small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)\n\n # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)\n rgb_small_frame = small_frame[:, :, ::-1]\n\n # Only process every other frame of video to save time\n if process_this_frame:\n # Find all the faces and face encodings in the current frame of video\n face_locations = face_recognition.face_locations(rgb_small_frame)\n face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)\n\n face_names = []\n for face_encoding in face_encodings:\n try:\n # See if the face is a match for the known face(s)\n matches = face_recognition.compare_faces(known_face_encodings, face_encoding)\n name = \"Unknown\"\n\n # # If a match was found in known_face_encodings, just use the first one.\n # if True in matches:\n # first_match_index = matches.index(True)\n # name = known_face_names[first_match_index]\n\n # Or instead, use the known face with the smallest distance to the new face\n face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)\n best_match_index = np.argmin(face_distances)\n print(best_match_index)\n if matches[best_match_index]:\n name = known_face_names[best_match_index]\n\n face_names.append(name)\n except:\n pass\n\n process_this_frame = not process_this_frame\n\n # Display the results\n for (top, right, bottom, left), name in zip(face_locations, face_names):\n # Scale back up face locations since the frame we detected in was scaled to 1/4 size\n top *= 4\n right *= 4\n bottom *= 4\n left *= 4\n\n # Draw a box around the face\n cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)\n\n # Draw a label with a name below the face\n 
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)\n
            font = cv2.FONT_HERSHEY_DUPLEX\n
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)\n\n
        # Display the resulting image\n        cv2.imshow('Video', frame)\n\n
        # Hit 'q' on the keyboard to quit!\n        if cv2.waitKey(1) & 0xFF == ord('q'):\n
            saved_attendance_path = \"media/attendance/\"+str(name)+str(datetime.datetime.now())+\".jpg\"\n
            cv2.imwrite(saved_attendance_path, frame)\n            break\n\n    # Release handle to the webcam\n
    video_capture.release()\n    cv2.destroyAllWindows()\n
    teacher_name = name if name != \"Unknown\" else \"\"\n
    return JsonResponse({'teacher_name': teacher_name, \"image_path\": saved_attendance_path})\n\n" ]
[ [ "numpy.argmin", "numpy.loadtxt" ] ]
r0mainK/ml-core
[ "ac17828d58e817e771caf2b2c3de523d527874b8" ]
[ "sourced/ml/core/algorithms/swivel.py" ]
[ "#!/usr/bin/env python3\n#\n# Copyright 2016 Google Inc. All Rights Reserved.\n# Copyright 2017 Sourced Technologies S. L.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Submatrix-wise Vector Embedding Learner.\n\nImplementation of SwiVel algorithm described at:\nhttp://arxiv.org/abs/1602.02215\n\nThis program expects an input directory that contains the following files.\n\n row_vocab.txt, col_vocab.txt\n\n The row an column vocabulary files. Each file should contain one token per\n line; these will be used to generate a tab-separate file containing the\n trained embeddings.\n\n row_sums.txt, col_sum.txt\n\n The matrix row and column marginal sums. Each file should contain one\n decimal floating point number per line which corresponds to the marginal\n count of the matrix for that row or column.\n\n shards.recs\n\n A file containing the sub-matrix shards, stored as TFRecords. Each shard is\n expected to be a serialzed tf.Example protocol buffer with the following\n properties:\n\n global_row: the global row indices contained in the shard\n global_col: the global column indices contained in the shard\n sparse_local_row, sparse_local_col, sparse_value: three parallel arrays\n that are a sparse representation of the submatrix counts.\n\nIt will generate embeddings, training from the input directory for\nthe specified number of epochs. When complete, it will output the trained\nvectors to a tab-separated file that contains one line per embedding. Row and\ncolumn embeddings are stored in separate files.\n\n\"\"\"\n\nimport glob\nimport math\nimport os\nimport threading\nimport time\n\nimport numpy\nimport tensorflow as tf\nfrom tensorflow.python.client import device_lib\n\nflags = tf.app.flags\n\nflags.DEFINE_string(\"input_base_path\", None,\n \"Directory containing input shards, vocabularies, \"\n \"and marginals.\")\nflags.DEFINE_string(\"output_base_path\", None,\n \"Path where to write the trained embeddings.\")\nflags.DEFINE_integer(\"embedding_size\", 300, \"Size of the embeddings\")\nflags.DEFINE_boolean(\"trainable_bias\", False, \"Biases are trainable\")\nflags.DEFINE_integer(\"submatrix_rows\", 4096,\n \"Rows in each training submatrix. This must match \"\n \"the training data.\")\nflags.DEFINE_integer(\"submatrix_cols\", 4096,\n \"Rows in each training submatrix. 
\n\"\"\"\n\nimport glob\nimport math\nimport os\nimport threading\nimport time\n\nimport numpy\n
import tensorflow as tf\nfrom tensorflow.python.client import device_lib\n\nflags = tf.app.flags\n\n
flags.DEFINE_string(\"input_base_path\", None,\n
                    \"Directory containing input shards, vocabularies, \"\n
                    \"and marginals.\")\n
flags.DEFINE_string(\"output_base_path\", None,\n
                    \"Path where to write the trained embeddings.\")\n
flags.DEFINE_integer(\"embedding_size\", 300, \"Size of the embeddings\")\n
flags.DEFINE_boolean(\"trainable_bias\", False, \"Biases are trainable\")\n
flags.DEFINE_integer(\"submatrix_rows\", 4096,\n
                     \"Rows in each training submatrix. This must match \"\n
                     \"the training data.\")\n
flags.DEFINE_integer(\"submatrix_cols\", 4096,\n
                     \"Columns in each training submatrix. This must match \"\n
                     \"the training data.\")\n
flags.DEFINE_float(\"loss_multiplier\", 1.0 / 4096,\n
                   \"constant multiplier on loss.\")\n
flags.DEFINE_float(\"confidence_exponent\", 0.5,\n
                   \"Exponent for l2 confidence function\")\n
flags.DEFINE_float(\"confidence_scale\", 0.25,\n
                   \"Scale for l2 confidence function\")\n
flags.DEFINE_float(\"confidence_base\", 0.1, \"Base for l2 confidence function\")\n
flags.DEFINE_float(\"learning_rate\", 1.0, \"Initial learning rate\")\n
flags.DEFINE_string(\"optimizer\", \"Adagrad\",\n
                    \"SGD optimizer (tf.train.*Optimizer)\")\n
flags.DEFINE_integer(\"num_concurrent_steps\", 2,\n
                     \"Number of threads to train with\")\n
flags.DEFINE_integer(\"num_readers\", 4,\n
                     \"Number of threads to read the input data and feed it\")\n
flags.DEFINE_float(\"num_epochs\", 40, \"Number of epochs to train for\")\n
flags.DEFINE_float(\"per_process_gpu_memory_fraction\", 0,\n
                   \"Fraction of GPU memory to use, 0 means allow_growth\")\n
flags.DEFINE_integer(\"num_gpus\", 0,\n
                     \"Number of GPUs to use, 0 means all available\")\n
flags.DEFINE_string(\"logs\", \"\",\n
                    \"Path for TensorBoard logs (empty value disables them)\")\n\n
FLAGS = flags.FLAGS\n\n\ndef log(message, *args, **kwargs):\n    tf.logging.info(message, *args, **kwargs)\n\n\n
def get_available_gpus():\n    return [d.name for d in device_lib.list_local_devices()\n
            if d.device_type == \"GPU\"]\n\n\ndef embeddings_with_init(vocab_size, embedding_dim, name):\n
    \"\"\"Creates and initializes the embedding tensors.\"\"\"\n    return tf.get_variable(name=name,\n
                           shape=[vocab_size, embedding_dim],\n
                           initializer=tf.random_normal_initializer(\n
                               stddev=math.sqrt(1.0 / embedding_dim)))\n\n\n
def count_matrix_input(filenames, submatrix_rows, submatrix_cols):\n
    \"\"\"Reads submatrix shards from disk.\"\"\"\n
    filename_queue = tf.train.string_input_producer(filenames)\n    reader = tf.WholeFileReader()\n
    _, serialized_example = reader.read(filename_queue)\n    features = tf.parse_single_example(\n
        serialized_example,\n        features={\n
            \"global_row\": tf.FixedLenFeature([submatrix_rows], dtype=tf.int64),\n
            \"global_col\": tf.FixedLenFeature([submatrix_cols], dtype=tf.int64),\n
            \"sparse_local_row\": tf.VarLenFeature(dtype=tf.int64),\n
            \"sparse_local_col\": tf.VarLenFeature(dtype=tf.int64),\n
            \"sparse_value\": tf.VarLenFeature(dtype=tf.float32),\n        })\n\n
    global_row = features[\"global_row\"]\n    global_col = features[\"global_col\"]\n\n
    sparse_local_row = features[\"sparse_local_row\"].values\n
    sparse_local_col = features[\"sparse_local_col\"].values\n
    sparse_count = features[\"sparse_value\"].values\n\n
    sparse_indices = tf.concat(axis=1, values=[tf.expand_dims(sparse_local_row, 1),\n
                                               tf.expand_dims(sparse_local_col, 1)])\n    
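# Scatter the sparse (row, col, value) triplets back into a dense submatrix of counts\n    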
as tsv\"\"\"\n # Fetch the embedding values from the model\n embeddings = sess.run(embedding)\n\n with open(output_path, \"w\") as out_f:\n with open(vocab_path) as vocab_f:\n for index, word in enumerate(vocab_f):\n word = word.strip()\n embedding = embeddings[index]\n out_f.write(word + \"\\t\" + \"\\t\".join(\n [str(x) for x in embedding]) + \"\\n\")\n\n\ndef write_embeddings_to_disk(config, model, sess):\n \"\"\"Writes row and column embeddings disk\"\"\"\n # Row Embedding\n row_vocab_path = config.input_base_path + \"/row_vocab.txt\"\n row_embedding_output_path = config.output_base_path + \"/row_embedding.tsv\"\n log(\"Writing row embeddings to: %s\", row_embedding_output_path)\n write_embedding_tensor_to_disk(row_vocab_path, row_embedding_output_path,\n sess, model.row_embedding)\n\n # Column Embedding\n col_vocab_path = config.input_base_path + \"/col_vocab.txt\"\n col_embedding_output_path = config.output_base_path + \"/col_embedding.tsv\"\n log(\"Writing column embeddings to: %s\", col_embedding_output_path)\n write_embedding_tensor_to_disk(col_vocab_path, col_embedding_output_path,\n sess, model.col_embedding)\n\n\nclass SwivelModel:\n \"\"\"Small class to gather needed pieces from a Graph being built.\"\"\"\n\n def __init__(self, config):\n \"\"\"Construct graph for dmc.\"\"\"\n self._config = config\n\n # Create paths to input data files\n log(\"Reading model from: %s\", config.input_base_path)\n count_matrix_files = glob.glob(os.path.join(config.input_base_path, \"shard-*.pb\"))\n row_sums_path = os.path.join(config.input_base_path, \"row_sums.txt\")\n col_sums_path = os.path.join(config.input_base_path, \"col_sums.txt\")\n\n # Read marginals\n row_sums = read_marginals_file(row_sums_path)\n col_sums = read_marginals_file(col_sums_path)\n\n self.n_rows = len(row_sums)\n self.n_cols = len(col_sums)\n log(\"Matrix dim: (%d,%d) SubMatrix dim: (%d,%d)\",\n self.n_rows, self.n_cols, config.submatrix_rows,\n config.submatrix_cols)\n if self.n_cols < config.submatrix_cols:\n raise ValueError(\n \"submatrix_cols={0} can not be bigger than columns number={1} \"\n \"(specify submatrix_cols={1})\".format(config.submatrix_cols, self.n_cols))\n if self.n_rows < config.submatrix_rows:\n raise ValueError(\n \"submatrix_rows={0} can not be bigger than rows number={1} \"\n \"(specify submatrix_rows={1})\".format(config.submatrix_rows, self.n_cols))\n self.n_submatrices = (self.n_rows * self.n_cols /\n (config.submatrix_rows * config.submatrix_cols))\n log(\"n_submatrices: %d\", self.n_submatrices)\n\n with tf.device(\"/cpu:0\"):\n # ===== CREATE VARIABLES ======\n # Get input\n global_row, global_col, count = count_matrix_input(\n count_matrix_files, config.submatrix_rows,\n config.submatrix_cols)\n\n # Embeddings\n self.row_embedding = embeddings_with_init(\n embedding_dim=config.embedding_size,\n vocab_size=self.n_rows,\n name=\"row_embedding\")\n self.col_embedding = embeddings_with_init(\n embedding_dim=config.embedding_size,\n vocab_size=self.n_cols,\n name=\"col_embedding\")\n tf.summary.histogram(\"row_emb\", self.row_embedding)\n tf.summary.histogram(\"col_emb\", self.col_embedding)\n\n matrix_log_sum = math.log(numpy.sum(row_sums) + 1)\n row_bias_init = [math.log(x + 1) for x in row_sums]\n col_bias_init = [math.log(x + 1) for x in col_sums]\n self.row_bias = tf.Variable(\n row_bias_init, trainable=config.trainable_bias)\n self.col_bias = tf.Variable(\n col_bias_init, trainable=config.trainable_bias)\n tf.summary.histogram(\"row_bias\", self.row_bias)\n 
tf.summary.histogram(\"col_bias\", self.col_bias)\n\n # Add optimizer\n l2_losses = []\n sigmoid_losses = []\n self.global_step = tf.Variable(0, name=\"global_step\")\n learning_rate = tf.Variable(config.learning_rate,\n name=\"learning_rate\")\n opt = getattr(tf.train, FLAGS.optimizer + \"Optimizer\")(\n learning_rate)\n tf.summary.scalar(\"learning_rate\", learning_rate)\n\n all_grads = []\n\n devices = [\"/gpu:%d\" % i for i in range(FLAGS.num_gpus)] \\\n if FLAGS.num_gpus > 0 else get_available_gpus()\n self.devices_number = len(devices)\n if not self.devices_number:\n devices = [\"/cpu:0\"]\n self.devices_number = 1\n for dev in devices:\n with tf.device(dev):\n with tf.name_scope(dev[1:].replace(\":\", \"_\")):\n # ===== CREATE GRAPH =====\n # Fetch embeddings.\n selected_row_embedding = tf.nn.embedding_lookup(\n self.row_embedding, global_row)\n selected_col_embedding = tf.nn.embedding_lookup(\n self.col_embedding, global_col)\n\n # Fetch biases.\n selected_row_bias = tf.nn.embedding_lookup(\n [self.row_bias], global_row)\n selected_col_bias = tf.nn.embedding_lookup(\n [self.col_bias], global_col)\n\n # Multiply the row and column embeddings to generate\n # predictions.\n predictions = tf.matmul(\n selected_row_embedding, selected_col_embedding,\n transpose_b=True)\n\n # These binary masks separate zero from non-zero values.\n count_is_nonzero = tf.to_float(tf.cast(count, tf.bool))\n count_is_zero = 1 - count_is_nonzero\n\n objectives = count_is_nonzero * tf.log(count + 1e-30)\n objectives -= tf.reshape(\n selected_row_bias, [config.submatrix_rows, 1])\n objectives -= selected_col_bias\n objectives += matrix_log_sum\n\n err = predictions - objectives\n\n # The confidence function scales the L2 loss based on\n # the raw co-occurrence count.\n l2_confidence = (\n config.confidence_base +\n config.confidence_scale * tf.pow(\n count, config.confidence_exponent))\n\n l2_loss = config.loss_multiplier * tf.reduce_sum(\n 0.5 * l2_confidence * err * err * count_is_nonzero)\n l2_losses.append(tf.expand_dims(l2_loss, 0))\n\n sigmoid_loss = config.loss_multiplier * tf.reduce_sum(\n tf.nn.softplus(err) * count_is_zero)\n sigmoid_losses.append(tf.expand_dims(sigmoid_loss, 0))\n\n loss = l2_loss + sigmoid_loss\n grads = opt.compute_gradients(loss)\n all_grads.append(grads)\n\n with tf.device(\"/cpu:0\"):\n # ===== MERGE LOSSES =====\n l2_loss = tf.reduce_mean(tf.concat(axis=0, values=l2_losses), 0,\n name=\"l2_loss\")\n sigmoid_loss = tf.reduce_mean(\n tf.concat(axis=0, values=sigmoid_losses), 0,\n name=\"sigmoid_loss\")\n overall_loss = l2_loss + sigmoid_loss\n average = tf.train.ExponentialMovingAverage(0.999)\n loss_average_op = average.apply(\n (overall_loss, l2_loss, sigmoid_loss))\n self.loss = average.average(overall_loss)\n tf.summary.scalar(\"overall_loss\", self.loss)\n tf.summary.scalar(\"l2_loss\", average.average(l2_loss))\n tf.summary.scalar(\"sigmoid_loss\", average.average(sigmoid_loss))\n\n # Apply the gradients to adjust the shared variables.\n apply_gradient_ops = []\n for grads in all_grads:\n apply_gradient_ops.append(opt.apply_gradients(\n grads, global_step=self.global_step))\n\n self.train_op = tf.group(loss_average_op, *apply_gradient_ops)\n self.saver = tf.train.Saver(sharded=True)\n\n def initialize_summary(self, sess):\n log(\"creating TensorBoard stuff...\")\n self.summary = tf.summary.merge_all()\n self.writer = tf.summary.FileWriter(FLAGS.logs, sess.graph)\n projector_config = \\\n tf.contrib.tensorboard.plugins.projector.ProjectorConfig()\n embedding_config = 
projector_config.embeddings.add()\n length = min(10000, self.n_rows, self.n_cols)\n self.embedding10k = tf.Variable(\n tf.zeros((length, self._config.embedding_size)),\n name=\"top10k_embedding\")\n embedding_config.tensor_name = self.embedding10k.name\n embedding_config.metadata_path = os.path.join(\n self._config.input_base_path, \"row_vocab.txt\")\n tf.contrib.tensorboard.plugins.projector.visualize_embeddings(\n self.writer, projector_config)\n self.saver = tf.train.Saver((self.embedding10k,), max_to_keep=1)\n\n def write_summary(self, sess):\n log(\"writing the summary...\")\n length = min(10000, self.n_rows, self.n_cols)\n assignment = self.embedding10k.assign(\n (self.row_embedding[:length] + self.col_embedding[:length]) / 2)\n summary, _, global_step = sess.run(\n (self.summary, assignment, self.global_step))\n self.writer.add_summary(summary, global_step)\n self.saver.save(\n sess, os.path.join(FLAGS.logs, \"embeddings10k.checkpoint\"),\n global_step)\n\n\ndef main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n start_time = time.time()\n\n omitted = {\"handler\", \"command\"}\n\n log(\"Swivel parameters:\\n\" + \"\\n\".join(\n \"\\t{:20} {}\".format(key, value) for key, value in\n sorted(FLAGS.__dict__.items()) if key not in omitted))\n # Create the output path. If this fails, it really ought to fail now. :)\n if not os.path.isdir(FLAGS.output_base_path):\n os.makedirs(FLAGS.output_base_path)\n\n # Create and run model\n with tf.Graph().as_default():\n log(\"creating the model...\")\n model = SwivelModel(FLAGS)\n\n # Create a session for running Ops on the Graph.\n gpu_opts = {}\n if FLAGS.per_process_gpu_memory_fraction > 0:\n gpu_opts[\"per_process_gpu_memory_fraction\"] = \\\n FLAGS.per_process_gpu_memory_fraction\n else:\n gpu_opts[\"allow_growth\"] = True\n gpu_options = tf.GPUOptions(**gpu_opts)\n sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n if FLAGS.logs:\n model.initialize_summary(sess)\n\n # Run the Op to initialize the variables.\n log(\"initializing the variables...\")\n sess.run(tf.global_variables_initializer())\n\n # Start feeding input\n log(\"starting the input threads...\")\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n # Calculate how many steps each thread should run\n n_total_steps = int(FLAGS.num_epochs * model.n_rows * model.n_cols) / (\n FLAGS.submatrix_rows * FLAGS.submatrix_cols)\n n_steps_per_thread = n_total_steps / (\n FLAGS.num_concurrent_steps * model.devices_number)\n n_submatrices_to_train = model.n_submatrices * FLAGS.num_epochs\n t0 = [time.time()]\n n_steps_between_status_updates = 100\n n_steps_between_summary_updates = 10000\n status_i = [0, 0]\n status_lock = threading.Lock()\n msg = (\"%%%dd/%%d submatrices trained (%%.1f%%%%), \"\n \"%%5.1f submatrices/sec | loss %%f\") % \\\n len(str(n_submatrices_to_train))\n\n def TrainingFn():\n for _ in range(int(n_steps_per_thread)):\n _, global_step, loss = sess.run((\n model.train_op, model.global_step, model.loss))\n\n show_status = False\n update_summary = False\n with status_lock:\n new_i = global_step // n_steps_between_status_updates\n if new_i > status_i[0]:\n status_i[0] = new_i\n show_status = True\n new_i = global_step // n_steps_between_summary_updates\n if new_i > status_i[1]:\n status_i[1] = new_i\n update_summary = True\n if show_status:\n elapsed = float(time.time() - t0[0])\n log(msg, global_step, n_submatrices_to_train,\n 100.0 * global_step / n_submatrices_to_train,\n n_steps_between_status_updates / 
elapsed, loss)\n t0[0] = time.time()\n if update_summary and FLAGS.logs:\n model.write_summary(sess)\n\n # Start training threads\n train_threads = []\n for _ in range(FLAGS.num_concurrent_steps):\n t = threading.Thread(target=TrainingFn)\n train_threads.append(t)\n t.start()\n\n # Wait for threads to finish.\n for t in train_threads:\n t.join()\n\n coord.request_stop()\n coord.join(threads)\n\n # Write out vectors\n write_embeddings_to_disk(FLAGS, model, sess)\n\n # Shutdown\n sess.close()\n log(\"Elapsed: %s\", time.time() - start_time)\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n" ]
[ [ "tensorflow.device", "tensorflow.concat", "tensorflow.FixedLenFeature", "tensorflow.python.client.device_lib.list_local_devices", "tensorflow.zeros", "tensorflow.reduce_sum", "tensorflow.cast", "tensorflow.train.ExponentialMovingAverage", "tensorflow.GPUOptions", "tensorflow.train.batch", "tensorflow.group", "tensorflow.summary.scalar", "tensorflow.Graph", "tensorflow.sparse_to_dense", "tensorflow.Variable", "tensorflow.WholeFileReader", "tensorflow.ConfigProto", "tensorflow.logging.set_verbosity", "tensorflow.train.Saver", "tensorflow.app.run", "tensorflow.matmul", "tensorflow.pow", "tensorflow.train.Coordinator", "tensorflow.global_variables_initializer", "tensorflow.train.string_input_producer", "tensorflow.logging.info", "tensorflow.summary.merge_all", "tensorflow.VarLenFeature", "tensorflow.contrib.tensorboard.plugins.projector.ProjectorConfig", "numpy.sum", "tensorflow.nn.embedding_lookup", "tensorflow.summary.histogram", "tensorflow.summary.FileWriter", "tensorflow.train.start_queue_runners", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.contrib.tensorboard.plugins.projector.visualize_embeddings", "tensorflow.log", "tensorflow.nn.softplus" ] ]
dannyjacobs/PRISim
[ "89e544d771cf5c4113a4d5787a57c9586fa98eac" ]
[ "main/generate_antenna_power.py" ]
[ "import argparse\nimport numpy as NP \nfrom astropy.io import fits\nfrom astropy.io import ascii\nimport scipy.constants as FCNST\nimport matplotlib.pyplot as PLT\nimport matplotlib.colors as PLTC\nimport progressbar as PGB\nimport healpy as HP\nimport geometry as GEOM\nimport interferometry as RI\nimport catalog as SM\nimport constants as CNST\nimport my_operations as OPS\nimport primary_beams as PB\nimport ipdb as PDB\n\ndef Jy2K(fluxJy, freq, pixres):\n return fluxJy * CNST.Jy / pixres / (2.0* FCNST.k * (freq)**2 / FCNST.c**2)\n\ndef K2Jy(tempK, freq, pixres):\n return tempK * (2.0* FCNST.k * (freq)**2 / FCNST.c**2) * pixres / CNST.Jy\n\n## Parse input arguments\n\nparser = argparse.ArgumentParser(description='Program to simulate interferometer array data')\n\nproject_group = parser.add_mutually_exclusive_group(required=True)\nproject_group.add_argument('--project-MWA', dest='project_MWA', action='store_true')\nproject_group.add_argument('--project-HERA', dest='project_HERA', action='store_true')\nproject_group.add_argument('--project-beams', dest='project_beams', action='store_true')\nproject_group.add_argument('--project-drift-scan', dest='project_drift_scan', action='store_true')\nproject_group.add_argument('--project-global-EoR', dest='project_global_EoR', action='store_true')\n\ntelescope_group = parser.add_argument_group('Telescope parameters', 'Telescope/interferometer specifications')\ntelescope_group.add_argument('--label-prefix', help='Prefix for baseline labels [str, Default = \"\"]', default='', type=str, dest='label_prefix')\ntelescope_group.add_argument('--telescope', help='Telescope name [str, default=\"custom\"]', default='custom', type=str, dest='telescope_id', choices=['mwa', 'vla', 'gmrt', 'hera', 'mwa_dipole', 'paper_dipole', 'custom', 'mwa_tools'])\ntelescope_group.add_argument('--latitude', help='Latitude of interferometer array in degrees [float, Default=-26.701]', default=-26.701, type=float, dest='latitude')\ntelescope_group.add_argument('--A-eff', help='Effective area in m^2', type=float, dest='A_eff', nargs='?')\n\nantenna_element_group = parser.add_argument_group('Antenna element parameters', 'Antenna element specifications')\nantenna_element_group.add_argument('--shape', help='Shape of antenna element [no default]', type=str, dest='antenna_element_shape', default=None, choices=['dish', 'dipole', 'delta'])\nantenna_element_group.add_argument('--size', help='Size of dish or length of dipole (in meters) [float, no default]', default=None, type=float, dest='antenna_element_size')\nantenna_element_group.add_argument('--orientation', help='Orientation of dipole or pointing direction of dish [float, (altitude azimuth) or (l m [n])]', default=None, type=float, nargs='*', dest='antenna_element_orientation')\nantenna_element_group.add_argument('--ocoords', help='Coordinates of dipole orientation or dish pointing direction [str]', default=None, type=str, dest='antenna_element_orientation_coords', choices=['dircos', 'altaz'])\nantenna_element_group.add_argument('--phased-array', dest='phased_array', action='store_true')\nantenna_element_group.add_argument('--phased-array-file', help='Locations of antenna elements to be phased', default='/data3/t_nithyanandan/project_MWA/MWA_tile_dipole_locations.txt', type=file, dest='phased_elements_file')\nantenna_element_group.add_argument('--groundplane', help='Height of antenna element above ground plane (in meters) [float]', default=None, type=float, dest='ground_plane')\n\nobsparm_group = parser.add_argument_group('Observation 
setup', 'Parameters specifying the observation')\n
obsparm_group.add_argument('-f', '--freq', help='Foreground center frequency in Hz [float, Default=185e6]', default=185e6, type=float, dest='freq')\n
obsparm_group.add_argument('--dfreq', help='Frequency resolution in Hz [float, Default=40e3]', default=40e3, type=float, dest='freq_resolution')\n
obsparm_group.add_argument('--obs-mode', help='Observing mode [str, track/drift/dns/custom]', default=None, type=str, dest='obs_mode', choices=['track', 'drift', 'dns', 'custom'])\n
# obsparm_group.add_argument('--t-snap', help='Integration time (seconds) [float, Default=300.0]', default=5.0*60.0, type=float, dest='t_snap')\n
obsparm_group.add_argument('--nchan', help='Number of frequency channels [int, Default=256]', default=256, type=int, dest='n_channels')\n\n
duration_group = parser.add_argument_group('Observing duration parameters', 'Parameters specifying observing duration')\n
duration_group.add_argument('--t-obs', help='Duration of observation [seconds]', dest='t_obs', default=None, type=float, metavar='t_obs')\n
duration_group.add_argument('--n-snap', help='Number of snapshots or records that make up the observation', dest='n_snaps', default=None, type=int, metavar='n_snapshots')\n
duration_group.add_argument('--t-snap', help='Integration time of each snapshot [seconds]', dest='t_snap', default=None, type=int, metavar='t_snap')\n\n
pointing_group = parser.add_mutually_exclusive_group(required=True)\n
pointing_group.add_argument('--pointing-file', dest='pointing_file', type=str, nargs=1, default=None)\n
pointing_group.add_argument('--pointing-info', dest='pointing_info', type=float, nargs=3, metavar=('lst_init', 'ra_init', 'dec_init'))\n\n
snapshot_selection_group = parser.add_mutually_exclusive_group(required=False)\n
snapshot_selection_group.add_argument('--beam-switch', dest='beam_switch', action='store_true')\n
snapshot_selection_group.add_argument('--snap-pick', dest='pick_snapshots', default=None, type=int, nargs='*')\n
snapshot_selection_group.add_argument('--snap-range', dest='snapshots_range', default=None, nargs=2, type=int)\n
snapshot_selection_group.add_argument('--all-snaps', dest='all_snapshots', action='store_true')\n\n
fgmodel_group = parser.add_mutually_exclusive_group(required=True)\n
fgmodel_group.add_argument('--ASM', action='store_true') # Diffuse (GSM) + Compact (NVSS+SUMSS) All-sky model\n
fgmodel_group.add_argument('--DSM', action='store_true') # Diffuse all-sky model\n
fgmodel_group.add_argument('--CSM', action='store_true') # Point source model (NVSS+SUMSS)\n
fgmodel_group.add_argument('--SUMSS', action='store_true') # SUMSS catalog\n
fgmodel_group.add_argument('--NVSS', action='store_true') # NVSS catalog\n
fgmodel_group.add_argument('--MSS', action='store_true') # Molonglo Sky Survey\n
fgmodel_group.add_argument('--GLEAM', action='store_true') # GLEAM catalog\n
fgmodel_group.add_argument('--PS', action='store_true') # Point sources\n
fgmodel_group.add_argument('--USM', action='store_true') # Uniform all-sky model\n\n
fgparm_group = parser.add_argument_group('Foreground Setup', 'Parameters describing foreground sky')\n
fgparm_group.add_argument('--flux-unit', help='Units of flux density [str, Default=\"Jy\"]', type=str, dest='flux_unit', default='Jy', choices=['Jy','K'])\n
fgparm_group.add_argument('--spindex', help='Spectral index, ~ f^spindex [float, Default=0.0]', type=float, dest='spindex', default=0.0)\n
fgparm_group.add_argument('--spindex-rms', help='Spectral index rms [float, Default=0.0]', type=float, dest='spindex_rms', 
default=0.0)\n
fgparm_group.add_argument('--spindex-seed', help='Spectral index seed [float, Default=None]', type=int, dest='spindex_seed', default=None)\n
fgparm_group.add_argument('--nside', help='nside parameter for healpix map [int, Default=64]', type=int, dest='nside', default=64, choices=[64, 128])\n\n
fgcat_group = parser.add_argument_group('Catalog files', 'Catalog file locations')\n
fgcat_group.add_argument('--dsm-file-prefix', help='Diffuse sky model filename prefix [str]', type=str, dest='DSM_file_prefix', default='/data3/t_nithyanandan/project_MWA/foregrounds/gsmdata')\n
fgcat_group.add_argument('--sumss-file', help='SUMSS catalog file [str]', type=str, dest='SUMSS_file', default='/data3/t_nithyanandan/project_MWA/foregrounds/sumsscat.Mar-11-2008.txt')\n
fgcat_group.add_argument('--nvss-file', help='NVSS catalog file [str]', type=str, dest='NVSS_file', default='/data3/t_nithyanandan/project_MWA/foregrounds/NVSS_catalog.fits') # type was `file`, which only exists in Python 2\n
fgcat_group.add_argument('--GLEAM-file', help='GLEAM catalog file [str]', type=str, dest='GLEAM_file', default='/data3/t_nithyanandan/project_MWA/foregrounds/mwacs_b1_131016.csv')\n
fgcat_group.add_argument('--PS-file', help='Point source catalog file [str]', type=str, dest='PS_file', default='/data3/t_nithyanandan/project_MWA/foregrounds/PS_catalog.txt')\n
# parser.add_argument('--', help='', type=, dest='', required=True)\n\n
parser.add_argument('--plots', help='Create plots', action='store_true', dest='plots')\n\n
args = vars(parser.parse_args())\n\nrootdir = '/data3/t_nithyanandan/'\n\n
project_MWA = args['project_MWA']\nproject_HERA = args['project_HERA']\n
project_beams = args['project_beams']\nproject_drift_scan = args['project_drift_scan']\n
project_global_EoR = args['project_global_EoR']\n\n
if project_MWA: project_dir = 'project_MWA'\nif project_HERA: project_dir = 'project_HERA'\n
if project_beams: project_dir = 'project_beams'\n
if project_drift_scan: project_dir = 'project_drift_scan'\n
if project_global_EoR: project_dir = 'project_global_EoR'\n\n
telescope_id = args['telescope_id']\nelement_shape = args['antenna_element_shape']\n
element_size = args['antenna_element_size']\nelement_orientation = args['antenna_element_orientation']\n
element_ocoords = args['antenna_element_orientation_coords']\nphased_array = args['phased_array']\n
phased_elements_file = args['phased_elements_file']\n\n
if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole'):\n    element_size = 0.74\n
    element_shape = 'dipole'\n    if telescope_id == 'mwa': phased_array = True\n
elif telescope_id == 'vla':\n    element_size = 25.0\n    element_shape = 'dish'\n
elif telescope_id == 'gmrt':\n    element_size = 45.0\n    element_shape = 'dish'\n
elif telescope_id == 'hera':\n    element_size = 14.0\n    element_shape = 'dish'\n
elif telescope_id == 'custom':\n    if element_shape != 'delta':\n
        if (element_shape is None) or (element_size is None):\n
            raise ValueError('Both antenna element shape and size must be specified for the custom telescope type.')\n
        elif element_size <= 0.0:\n            raise ValueError('Antenna element size must be positive.')\n
elif telescope_id == 'mwa_tools':\n    pass\nelse:\n    raise ValueError('telescope ID must be specified.')\n\n
if telescope_id == 'custom':\n    if element_shape == 'delta':\n        telescope_id = 'delta'\n    else:\n
        telescope_id = '{0:.1f}m_{1:}'.format(element_size, element_shape)\n\n    if phased_array:\n
        telescope_id = telescope_id + '_array'\ntelescope_str = telescope_id+'_'\n\n
if element_orientation is None:\n    if element_ocoords is not None:\n
        if element_ocoords == 'altaz':\n            if (telescope_id == 'mwa') 
or (telescope_id == 'mwa_dipole') or (element_shape == 'dipole'):\n element_orientation = NP.asarray([0.0, 90.0]).reshape(1,-1)\n else:\n element_orientation = NP.asarray([90.0, 270.0]).reshape(1,-1)\n elif element_ocoords == 'dircos':\n if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole') or (element_shape == 'dipole'):\n element_orientation = NP.asarray([1.0, 0.0, 0.0]).reshape(1,-1)\n else:\n element_orientation = NP.asarray([0.0, 0.0, 1.0]).reshape(1,-1)\n else:\n raise ValueError('Invalid value specified antenna element orientation coordinate system.')\n else:\n if (telescope_id == 'mwa') or (telescope_id == 'mwa_dipole') or (element_shape == 'dipole'):\n element_orientation = NP.asarray([0.0, 90.0]).reshape(1,-1)\n else:\n element_orientation = NP.asarray([90.0, 270.0]).reshape(1,-1)\n element_ocoords = 'altaz'\nelse:\n if element_ocoords is None:\n raise ValueError('Antenna element orientation coordinate system must be specified to describe the specified antenna orientation.')\n\nelement_orientation = NP.asarray(element_orientation).reshape(1,-1)\nif (element_orientation.size < 2) or (element_orientation.size > 3):\n raise ValueError('Antenna element orientation must be a two- or three-element vector.')\nelif (element_ocoords == 'altaz') and (element_orientation.size != 2):\n raise ValueError('Antenna element orientation must be a two-element vector if using Alt-Az coordinates.')\n\nground_plane = args['ground_plane']\nif ground_plane is None:\n ground_plane_str = 'no_ground_'\nelse:\n if ground_plane > 0.0:\n ground_plane_str = '{0:.1f}m_ground_'.format(ground_plane)\n else:\n raise ValueError('Height of antenna element above ground plane must be positive.')\n\nlatitude = args['latitude']\nlatitude_str = 'lat_{0:.3f}_'.format(latitude)\n\ntelescope = {}\nif telescope_id in ['mwa', 'vla', 'gmrt', 'hera', 'mwa_dipole', 'mwa_tools']:\n telescope['id'] = telescope_id\ntelescope['shape'] = element_shape\ntelescope['size'] = element_size\ntelescope['orientation'] = element_orientation\ntelescope['ocoords'] = element_ocoords\ntelescope['groundplane'] = ground_plane\ntelescope['latitude'] = latitude\n\nfreq = args['freq']\nfreq_resolution = args['freq_resolution']\nn_channels = args['n_channels']\nnchan = n_channels\nchans = (freq + (NP.arange(nchan) - 0.5 * nchan) * freq_resolution)/ 1e9 # in GHz\nbw = n_channels * freq_resolution\nbandpass_str = '{0:0d}x{1:.1f}_kHz'.format(nchan, freq_resolution/1e3)\n\nif args['A_eff'] is None:\n if (telescope['shape'] == 'dipole') or (telescope['shape'] == 'delta'):\n A_eff = (0.5*FCNST.c/freq)**2\n if (telescope_id == 'mwa') or phased_array:\n A_eff *= 16\n if telescope['shape'] == 'dish':\n A_eff = NP.pi * (0.5*element_size)**2\nelse:\n A_eff = args['A_eff']\n\nobs_mode = args['obs_mode']\nt_snap = args['t_snap']\nt_obs = args['t_obs']\nn_snaps = args['n_snaps']\n\nsnapshot_type_str = obs_mode\n\npointing_file = args['pointing_file']\nif pointing_file is not None:\n pointing_file = pointing_file[0]\npointing_info = args['pointing_info']\n\nelement_locs = None\nif phased_array:\n try:\n element_locs = NP.loadtxt(phased_elements_file, skiprows=1, comments='#', usecols=(0,1,2))\n except IOError:\n raise IOError('Could not open the specified file for phased array of antenna elements.')\n\nif telescope_id == 'mwa':\n xlocs, ylocs = NP.meshgrid(1.1*NP.linspace(-1.5,1.5,4), 1.1*NP.linspace(1.5,-1.5,4))\n element_locs = NP.hstack((xlocs.reshape(-1,1), ylocs.reshape(-1,1), NP.zeros(xlocs.size).reshape(-1,1)))\n\nif pointing_file is not None:\n 
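# NOTE (assumption, inferred from the NP.loadtxt calls below, not from upstream docs):\n    # the pointing file is taken to be comma-separated with two header rows and columns\n    # (obs_id, Az, Alt, LST in hours[, ';'-separated beamformer delay settings]); the two\n    # pointing columns are reversed below to obtain (Alt, Az) ordering.\n    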
pointing_init = None\n    pointing_info_from_file = NP.loadtxt(pointing_file, skiprows=2, comments='#', usecols=(1,2,3), delimiter=',')\n    obs_id = NP.loadtxt(pointing_file, skiprows=2, comments='#', usecols=(0,), delimiter=',', dtype=str)\n    if (telescope_id == 'mwa') or (telescope_id == 'mwa_tools') or (phased_array):\n        delays_str = NP.loadtxt(pointing_file, skiprows=2, comments='#', usecols=(4,), delimiter=',', dtype=str)\n        delays_list = [NP.fromstring(delaystr, dtype=float, sep=';', count=-1) for delaystr in delays_str]\n        delay_settings = NP.asarray(delays_list)\n        delay_settings *= 435e-12 # convert MWA beamformer delay steps (435 ps each) to seconds\n        delays = NP.copy(delay_settings)\n    if n_snaps is None:\n        n_snaps = pointing_info_from_file.shape[0]\n    pointing_info_from_file = pointing_info_from_file[:min(n_snaps, pointing_info_from_file.shape[0]),:]\n    obs_id = obs_id[:min(n_snaps, pointing_info_from_file.shape[0])]\n    if (telescope_id == 'mwa') or (telescope_id == 'mwa_tools') or (phased_array):\n        delays = delay_settings[:min(n_snaps, pointing_info_from_file.shape[0]),:]\n    n_snaps = min(n_snaps, pointing_info_from_file.shape[0])\n    pointings_altaz = OPS.reverse(pointing_info_from_file[:,:2].reshape(-1,2), axis=1)\n    pointings_altaz_orig = OPS.reverse(pointing_info_from_file[:,:2].reshape(-1,2), axis=1)\n    lst = 15.0 * pointing_info_from_file[:,2]\n    lst_wrapped = lst + 0.0\n    lst_wrapped[lst_wrapped > 180.0] = lst_wrapped[lst_wrapped > 180.0] - 360.0\n    lst_edges = NP.concatenate((lst_wrapped, [lst_wrapped[-1]+lst_wrapped[-1]-lst_wrapped[-2]]))\n\n    if obs_mode is None:\n        obs_mode = 'custom'\n    if (obs_mode == 'dns') and beam_switch:\n        angle_diff = GEOM.sphdist(pointings_altaz[1:,1], pointings_altaz[1:,0], pointings_altaz[:-1,1], pointings_altaz[:-1,0])\n        angle_diff = NP.concatenate(([0.0], angle_diff))\n        shift_threshold = 1.0 # in degrees\n        # lst_edges = NP.concatenate(([lst_edges[0]], lst_edges[angle_diff > shift_threshold], [lst_edges[-1]]))\n        lst_wrapped = NP.concatenate(([lst_wrapped[0]], lst_wrapped[angle_diff > shift_threshold], [lst_wrapped[-1]]))\n        n_snaps = lst_wrapped.size - 1\n        pointings_altaz = NP.vstack((pointings_altaz[0,:].reshape(-1,2), pointings_altaz[angle_diff>shift_threshold,:].reshape(-1,2)))\n        obs_id = NP.concatenate(([obs_id[0]], obs_id[angle_diff>shift_threshold]))\n        if (telescope_id == 'mwa') or (telescope_id == 'mwa_tools') or (phased_array):\n            delays = NP.vstack((delay_settings[0,:], delay_settings[angle_diff>shift_threshold,:]))\n        obs_mode = 'custom'\n\n        lst_edges_left = lst_wrapped[:-1] + 0.0\n        lst_edges_right = NP.concatenate(([lst_edges[1]], lst_edges[NP.asarray(NP.where(angle_diff > shift_threshold)).ravel()+1]))\n    elif snapshots_range is not None:\n        snapshots_range[1] = snapshots_range[1] % n_snaps\n        if snapshots_range[0] > snapshots_range[1]:\n            raise IndexError('min snapshot # must be <= max snapshot #')\n        lst_wrapped = lst_wrapped[snapshots_range[0]:snapshots_range[1]+2]\n        lst_edges = NP.copy(lst_wrapped)\n        pointings_altaz = pointings_altaz[snapshots_range[0]:snapshots_range[1]+1,:]\n        obs_id = obs_id[snapshots_range[0]:snapshots_range[1]+1]\n        if (telescope_id == 'mwa') or (telescope_id == 'mwa_tools') or (phased_array):\n            delays = delay_settings[snapshots_range[0]:snapshots_range[1]+1,:]\n        n_snaps = snapshots_range[1]-snapshots_range[0]+1\n    elif pick_snapshots is not None:\n        pick_snapshots = NP.asarray(pick_snapshots)\n        lst_begin = NP.asarray(lst_wrapped[pick_snapshots])\n        lst_end = NP.asarray(lst_wrapped[pick_snapshots+1])\n        t_snap = (lst_end - lst_begin) / 15.0 * 3.6e3\n        n_snaps = t_snap.size\n        lst = 0.5 * (lst_begin + 
lst_end)\n pointings_altaz = pointings_altaz[pick_snapshots,:]\n obs_id = obs_id[pick_snapshots]\n if (telescope_id == 'mwa') or (phased_array) or (telescope_id == 'mwa_tools'):\n delays = delay_settings[pick_snapshots,:]\n obs_mode = 'custom'\n if pick_snapshots is None:\n if not beam_switch:\n lst = 0.5*(lst_edges[1:]+lst_edges[:-1])\n t_snap = (lst_edges[1:]-lst_edges[:-1]) / 15.0 * 3.6e3\n else:\n lst = 0.5*(lst_edges_left + lst_edges_right)\n t_snap = (lst_edges_right - lst_edges_left) / 15.0 * 3.6e3\n\n pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')\n pointings_hadec = GEOM.altaz2hadec(pointings_altaz, latitude, units='degrees')\n pointings_radec = NP.hstack(((lst-pointings_hadec[:,0]).reshape(-1,1), pointings_hadec[:,1].reshape(-1,1)))\n pointings_radec[:,0] = pointings_radec[:,0] % 360.0\n t_obs = NP.sum(t_snap)\nelif pointing_info is not None:\n pointing_init = NP.asarray(pointing_info[1:])\n lst_init = pointing_info[0]\n pointing_file = None\n if t_snap is None:\n raise NameError('t_snap must be provided for an automated observing run')\n\n if (n_snaps is None) and (t_obs is None):\n raise NameError('n_snaps or t_obs must be provided for an automated observing run')\n elif (n_snaps is not None) and (t_obs is not None):\n raise ValueError('Only one of n_snaps or t_obs must be provided for an automated observing run')\n elif n_snaps is None:\n n_snaps = int(t_obs/t_snap)\n else:\n t_obs = n_snaps * t_snap\n t_snap = t_snap + NP.zeros(n_snaps)\n lst = (lst_init + (t_snap/3.6e3) * NP.arange(n_snaps)) * 15.0 # in degrees\n if obs_mode is None:\n obs_mode = 'track'\n\n if obs_mode == 'track':\n pointings_radec = NP.repeat(NP.asarray(pointing_init).reshape(-1,2), n_snaps, axis=0)\n else:\n ha_init = lst_init * 15.0 - pointing_init[0]\n pointings_radec = NP.hstack((NP.asarray(lst-pointing_init[0]).reshape(-1,1), pointing_init[1]+NP.zeros(n_snaps).reshape(-1,1)))\n\n pointings_hadec = NP.hstack(((lst-pointings_radec[:,0]).reshape(-1,1), pointings_radec[:,1].reshape(-1,1)))\n pointings_altaz = GEOM.hadec2altaz(pointings_hadec, latitude, units='degrees')\n pointings_dircos = GEOM.altaz2dircos(pointings_altaz, units='degrees')\n\n pointings_radec_orig = NP.copy(pointings_radec)\n pointings_hadec_orig = NP.copy(pointings_hadec)\n pointings_altaz_orig = NP.copy(pointings_altaz)\n pointings_dircos_orig = NP.copy(pointings_dircos)\n\n lst_wrapped = lst + 0.0\n lst_wrapped[lst_wrapped > 180.0] = lst_wrapped[lst_wrapped > 180.0] - 360.0\n lst_edges = NP.concatenate((lst_wrapped, [lst_wrapped[-1]+lst_wrapped[-1]-lst_wrapped[-2]]))\n\nduration_str = ''\nif obs_mode in ['track', 'drift']:\n if (t_snap is not None) and (n_snaps is not None):\n duration_str = '_{0:0d}x{1:.1f}s'.format(n_snaps, NP.asarray(t_snap)[0])\n\npointing_info = {}\npointing_info['pointing_center'] = pointings_altaz\npointing_info['pointing_coords'] = 'altaz'\npointing_info['lst'] = lst\nif element_locs is not None:\n telescope['element_locs'] = element_locs\n\nplots = args['plots']\n\nuse_GSM = args['ASM']\nuse_DSM = args['DSM']\nuse_CSM = args['CSM']\nuse_NVSS = args['NVSS']\nuse_SUMSS = args['SUMSS']\nuse_MSS = args['MSS']\nuse_GLEAM = args['GLEAM']\nuse_PS = args['PS']\nuse_USM = args['USM']\n\nfg_str = ''\nnside = args['nside']\npixres = HP.nside2pixarea(nside)\nflux_unit = args['flux_unit']\nspindex_seed = args['spindex_seed']\nspindex_rms = args['spindex_rms']\nspindex_rms_str = ''\nspindex_seed_str = ''\nif spindex_rms > 0.0:\n spindex_rms_str = '{0:.1f}'.format(spindex_rms)\nelse:\n spindex_rms = 
0.0\n\nif spindex_seed is not None:\n spindex_seed_str = '{0:0d}_'.format(spindex_seed)\n\n\nif use_GSM:\n fg_str = 'asm'\n\n dsm_file = args['DSM_file_prefix']+'_{0:.1f}_MHz_nside_{1:0d}.fits'.format(freq*1e-6, nside)\n hdulist = fits.open(dsm_file)\n pixres = hdulist[0].header['PIXAREA']\n dsm_table = hdulist[1].data\n ra_deg_DSM = dsm_table['RA']\n dec_deg_DSM = dsm_table['DEC']\n temperatures = dsm_table['T_{0:.0f}'.format(freq/1e6)]\n fluxes_DSM = temperatures * (2.0 * FCNST.k * freq**2 / FCNST.c**2) * pixres / CNST.Jy\n spindex = dsm_table['spindex'] + 2.0\n freq_DSM = 0.185 # in GHz\n freq_catalog = freq_DSM * 1e9 + NP.zeros(fluxes_DSM.size)\n catlabel = NP.repeat('DSM', fluxes_DSM.size)\n ra_deg = ra_deg_DSM + 0.0\n dec_deg = dec_deg_DSM + 0.0\n majax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_DSM.size)\n minax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_DSM.size)\n # majax = NP.degrees(NP.sqrt(HP.nside2pixarea(64)*4/NP.pi) * NP.ones(fluxes_DSM.size))\n # minax = NP.degrees(NP.sqrt(HP.nside2pixarea(64)*4/NP.pi) * NP.ones(fluxes_DSM.size))\n fluxes = fluxes_DSM + 0.0\n\n freq_SUMSS = 0.843 # in GHz\n SUMSS_file = args['SUMSS_file']\n catalog = NP.loadtxt(SUMSS_file, usecols=(0,1,2,3,4,5,10,12,13,14,15,16))\n ra_deg_SUMSS = 15.0 * (catalog[:,0] + catalog[:,1]/60.0 + catalog[:,2]/3.6e3)\n dec_dd = NP.loadtxt(SUMSS_file, usecols=(3,), dtype=\"|S3\")\n sgn_dec_str = NP.asarray([dec_dd[i][0] for i in range(dec_dd.size)])\n sgn_dec = 1.0*NP.ones(dec_dd.size)\n sgn_dec[sgn_dec_str == '-'] = -1.0\n dec_deg_SUMSS = sgn_dec * (NP.abs(catalog[:,3]) + catalog[:,4]/60.0 + catalog[:,5]/3.6e3)\n fmajax = catalog[:,7]\n fminax = catalog[:,8]\n fpa = catalog[:,9]\n dmajax = catalog[:,10]\n dminax = catalog[:,11]\n PS_ind = NP.logical_and(dmajax == 0.0, dminax == 0.0)\n ra_deg_SUMSS = ra_deg_SUMSS[PS_ind]\n dec_deg_SUMSS = dec_deg_SUMSS[PS_ind]\n fint = catalog[PS_ind,6] * 1e-3\n if spindex_seed is None:\n spindex_SUMSS = -0.83 + spindex_rms * NP.random.randn(fint.size)\n else:\n NP.random.seed(spindex_seed)\n spindex_SUMSS = -0.83 + spindex_rms * NP.random.randn(fint.size)\n\n fmajax = fmajax[PS_ind]\n fminax = fminax[PS_ind]\n fpa = fpa[PS_ind]\n dmajax = dmajax[PS_ind]\n dminax = dminax[PS_ind]\n bright_source_ind = fint >= 10.0 * (freq_SUMSS*1e9/freq)**spindex_SUMSS\n ra_deg_SUMSS = ra_deg_SUMSS[bright_source_ind]\n dec_deg_SUMSS = dec_deg_SUMSS[bright_source_ind]\n fint = fint[bright_source_ind]\n fmajax = fmajax[bright_source_ind]\n fminax = fminax[bright_source_ind]\n fpa = fpa[bright_source_ind]\n dmajax = dmajax[bright_source_ind]\n dminax = dminax[bright_source_ind]\n spindex_SUMSS = spindex_SUMSS[bright_source_ind]\n valid_ind = NP.logical_and(fmajax > 0.0, fminax > 0.0)\n ra_deg_SUMSS = ra_deg_SUMSS[valid_ind]\n dec_deg_SUMSS = dec_deg_SUMSS[valid_ind]\n fint = fint[valid_ind]\n fmajax = fmajax[valid_ind]\n fminax = fminax[valid_ind]\n fpa = fpa[valid_ind]\n spindex_SUMSS = spindex_SUMSS[valid_ind]\n freq_catalog = NP.concatenate((freq_catalog, freq_SUMSS*1e9 + NP.zeros(fint.size)))\n catlabel = NP.concatenate((catlabel, NP.repeat('SUMSS', fint.size)))\n ra_deg = NP.concatenate((ra_deg, ra_deg_SUMSS))\n dec_deg = NP.concatenate((dec_deg, dec_deg_SUMSS))\n spindex = NP.concatenate((spindex, spindex_SUMSS))\n majax = NP.concatenate((majax, fmajax/3.6e3))\n minax = NP.concatenate((minax, fminax/3.6e3))\n fluxes = NP.concatenate((fluxes, fint))\n\n nvss_file = args['NVSS_file']\n freq_NVSS = 1.4 # in GHz\n hdulist = fits.open(nvss_file)\n ra_deg_NVSS = 
hdulist[1].data['RA(2000)']\n dec_deg_NVSS = hdulist[1].data['DEC(2000)']\n nvss_fpeak = hdulist[1].data['PEAK INT']\n nvss_majax = hdulist[1].data['MAJOR AX']\n nvss_minax = hdulist[1].data['MINOR AX']\n hdulist.close()\n\n if spindex_seed is None:\n spindex_NVSS = -0.83 + spindex_rms * NP.random.randn(nvss_fpeak.size)\n else:\n NP.random.seed(2*spindex_seed)\n spindex_NVSS = -0.83 + spindex_rms * NP.random.randn(nvss_fpeak.size)\n\n not_in_SUMSS_ind = NP.logical_and(dec_deg_NVSS > -30.0, dec_deg_NVSS <= min(90.0, latitude+90.0))\n bright_source_ind = nvss_fpeak >= 10.0 * (freq_NVSS*1e9/freq)**(spindex_NVSS)\n PS_ind = NP.sqrt(nvss_majax**2-(0.75/60.0)**2) < 14.0/3.6e3\n count_valid = NP.sum(NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind))\n nvss_fpeak = nvss_fpeak[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]\n freq_catalog = NP.concatenate((freq_catalog, freq_NVSS*1e9 + NP.zeros(count_valid)))\n catlabel = NP.concatenate((catlabel, NP.repeat('NVSS',count_valid)))\n ra_deg = NP.concatenate((ra_deg, ra_deg_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))\n dec_deg = NP.concatenate((dec_deg, dec_deg_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))\n spindex = NP.concatenate((spindex, spindex_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))\n majax = NP.concatenate((majax, nvss_majax[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))\n minax = NP.concatenate((minax, nvss_minax[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))\n fluxes = NP.concatenate((fluxes, nvss_fpeak))\n\n spec_parms = {}\n # spec_parms['name'] = NP.repeat('tanh', ra_deg.size)\n spec_parms['name'] = NP.repeat('power-law', ra_deg.size)\n spec_parms['power-law-index'] = spindex\n # spec_parms['freq-ref'] = freq/1e9 + NP.zeros(ra_deg.size)\n spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)\n spec_parms['flux-scale'] = fluxes\n spec_parms['flux-offset'] = NP.zeros(ra_deg.size)\n spec_parms['freq-width'] = NP.zeros(ra_deg.size)\n\n skymod = SM.SkyModel(catlabel, chans*1e9, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), 'func', spec_parms=spec_parms, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])\n\nelif use_DSM:\n fg_str = 'dsm'\n\n dsm_file = args['DSM_file_prefix']+'_{0:.1f}_MHz_nside_{1:0d}.fits'.format(freq*1e-6, nside)\n hdulist = fits.open(dsm_file)\n pixres = hdulist[0].header['PIXAREA']\n dsm_table = hdulist[1].data\n ra_deg_DSM = dsm_table['RA']\n dec_deg_DSM = dsm_table['DEC']\n temperatures = dsm_table['T_{0:.0f}'.format(freq/1e6)]\n fluxes_DSM = temperatures * (2.0 * FCNST.k * freq**2 / FCNST.c**2) * pixres / CNST.Jy\n spindex = dsm_table['spindex'] + 2.0\n freq_DSM = 0.185 # in GHz\n freq_catalog = freq_DSM * 1e9 + NP.zeros(fluxes_DSM.size)\n catlabel = NP.repeat('DSM', fluxes_DSM.size)\n ra_deg = ra_deg_DSM\n dec_deg = dec_deg_DSM\n majax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_DSM.size)\n minax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_DSM.size)\n # majax = NP.degrees(NP.sqrt(HP.nside2pixarea(64)*4/NP.pi) * NP.ones(fluxes_DSM.size))\n # minax = NP.degrees(NP.sqrt(HP.nside2pixarea(64)*4/NP.pi) * NP.ones(fluxes_DSM.size))\n fluxes = fluxes_DSM\n hdulist.close()\n\n spec_parms = {}\n # spec_parms['name'] = NP.repeat('tanh', ra_deg.size)\n 
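# NOTE (sketch of the assumed spectral convention, not from upstream docs): each\n    # source is modelled as a power law S(f) = S_ref * (f / f_ref)**alpha, where\n    # S_ref = spec_parms['flux-scale'], f_ref = spec_parms['freq-ref'] (in Hz) and\n    # alpha = spec_parms['power-law-index'].\n    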
spec_parms['name'] = NP.repeat('power-law', ra_deg.size)\n    spec_parms['power-law-index'] = spindex\n    # spec_parms['freq-ref'] = freq/1e9 + NP.zeros(ra_deg.size)\n    spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)\n    spec_parms['flux-scale'] = fluxes\n    spec_parms['flux-offset'] = NP.zeros(ra_deg.size)\n    spec_parms['freq-width'] = NP.zeros(ra_deg.size)\n\n    skymod = SM.SkyModel(catlabel, chans*1e9, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), 'func', spec_parms=spec_parms, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])\n\nelif use_USM:\n    fg_str = 'usm'\n\n    dsm_file = args['DSM_file_prefix']+'_{0:.1f}_MHz_nside_{1:0d}.fits'.format(freq*1e-6, nside)\n    hdulist = fits.open(dsm_file)\n    pixres = hdulist[0].header['PIXAREA']\n    dsm_table = hdulist[1].data\n    ra_deg = dsm_table['RA']\n    dec_deg = dsm_table['DEC']\n    temperatures = dsm_table['T_{0:.0f}'.format(freq/1e6)]\n    avg_temperature = NP.mean(temperatures)\n    fluxes_USM = avg_temperature * (2.0 * FCNST.k * freq**2 / FCNST.c**2) * pixres / CNST.Jy * NP.ones(temperatures.size)\n    spindex = NP.zeros(fluxes_USM.size)\n    freq_USM = 0.185 # in GHz\n    freq_catalog = freq_USM * 1e9 + NP.zeros(fluxes_USM.size)\n    catlabel = NP.repeat('USM', fluxes_USM.size)\n    majax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_USM.size)\n    minax = NP.degrees(HP.nside2resol(nside)) * NP.ones(fluxes_USM.size)\n    fluxes = fluxes_USM + 0.0 # fix: 'fluxes' was undefined in this branch; mirror the DSM branch above\n    hdulist.close() \n\n    spec_parms = {}\n    # spec_parms['name'] = NP.repeat('tanh', ra_deg.size)\n    spec_parms['name'] = NP.repeat('power-law', ra_deg.size)\n    spec_parms['power-law-index'] = spindex\n    # spec_parms['freq-ref'] = freq/1e9 + NP.zeros(ra_deg.size)\n    spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)\n    spec_parms['flux-scale'] = fluxes\n    spec_parms['flux-offset'] = NP.zeros(ra_deg.size)\n    spec_parms['freq-width'] = NP.zeros(ra_deg.size)\n\n    skymod = SM.SkyModel(catlabel, chans*1e9, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), 'func', spec_parms=spec_parms, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])\n    \nelif use_CSM:\n    fg_str = 'csm'\n    freq_SUMSS = 0.843 # in GHz\n    SUMSS_file = args['SUMSS_file']\n    catalog = NP.loadtxt(SUMSS_file, usecols=(0,1,2,3,4,5,10,12,13,14,15,16))\n    ra_deg_SUMSS = 15.0 * (catalog[:,0] + catalog[:,1]/60.0 + catalog[:,2]/3.6e3)\n    dec_dd = NP.loadtxt(SUMSS_file, usecols=(3,), dtype=\"|S3\")\n    sgn_dec_str = NP.asarray([dec_dd[i][0] for i in range(dec_dd.size)])\n    sgn_dec = 1.0*NP.ones(dec_dd.size)\n    sgn_dec[sgn_dec_str == '-'] = -1.0\n    dec_deg_SUMSS = sgn_dec * (NP.abs(catalog[:,3]) + catalog[:,4]/60.0 + catalog[:,5]/3.6e3)\n    fmajax = catalog[:,7]\n    fminax = catalog[:,8]\n    fpa = catalog[:,9]\n    dmajax = catalog[:,10]\n    dminax = catalog[:,11]\n    PS_ind = NP.logical_and(dmajax == 0.0, dminax == 0.0)\n    ra_deg_SUMSS = ra_deg_SUMSS[PS_ind]\n    dec_deg_SUMSS = dec_deg_SUMSS[PS_ind]\n    fint = catalog[PS_ind,6] * 1e-3\n    if spindex_seed is None:\n        spindex_SUMSS = -0.83 + spindex_rms * NP.random.randn(fint.size)\n    else:\n        NP.random.seed(spindex_seed)\n        spindex_SUMSS = -0.83 + spindex_rms * NP.random.randn(fint.size)\n\n    fmajax = fmajax[PS_ind]\n    fminax = fminax[PS_ind]\n    fpa = fpa[PS_ind]\n    dmajax = dmajax[PS_ind]\n    dminax = dminax[PS_ind]\n    bright_source_ind = fint >= 10.0 * (freq_SUMSS*1e9/freq)**spindex_SUMSS\n    ra_deg_SUMSS = ra_deg_SUMSS[bright_source_ind]\n    dec_deg_SUMSS = dec_deg_SUMSS[bright_source_ind]\n    
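# NOTE: the 'bright_source_ind' mask above keeps sources whose flux, extrapolated\n    # to the simulated frequency, exceeds 10 Jy, since\n    # S(freq) = fint * (freq / 843 MHz)**alpha >= 10 Jy is equivalent to\n    # fint >= 10 * (843 MHz / freq)**alpha as coded above.\n    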
fint = fint[bright_source_ind]\n fmajax = fmajax[bright_source_ind]\n fminax = fminax[bright_source_ind]\n fpa = fpa[bright_source_ind]\n dmajax = dmajax[bright_source_ind]\n dminax = dminax[bright_source_ind]\n spindex_SUMSS = spindex_SUMSS[bright_source_ind]\n valid_ind = NP.logical_and(fmajax > 0.0, fminax > 0.0)\n ra_deg_SUMSS = ra_deg_SUMSS[valid_ind]\n dec_deg_SUMSS = dec_deg_SUMSS[valid_ind]\n fint = fint[valid_ind]\n fmajax = fmajax[valid_ind]\n fminax = fminax[valid_ind]\n fpa = fpa[valid_ind]\n spindex_SUMSS = spindex_SUMSS[valid_ind]\n freq_catalog = freq_SUMSS*1e9 + NP.zeros(fint.size)\n catlabel = NP.repeat('SUMSS', fint.size)\n ra_deg = ra_deg_SUMSS + 0.0\n dec_deg = dec_deg_SUMSS\n spindex = spindex_SUMSS\n majax = fmajax/3.6e3\n minax = fminax/3.6e3\n fluxes = fint + 0.0\n nvss_file = args['NVSS_file']\n freq_NVSS = 1.4 # in GHz\n hdulist = fits.open(nvss_file)\n ra_deg_NVSS = hdulist[1].data['RA(2000)']\n dec_deg_NVSS = hdulist[1].data['DEC(2000)']\n nvss_fpeak = hdulist[1].data['PEAK INT']\n nvss_majax = hdulist[1].data['MAJOR AX']\n nvss_minax = hdulist[1].data['MINOR AX']\n hdulist.close()\n\n if spindex_seed is None:\n spindex_NVSS = -0.83 + spindex_rms * NP.random.randn(nvss_fpeak.size)\n else:\n NP.random.seed(2*spindex_seed)\n spindex_NVSS = -0.83 + spindex_rms * NP.random.randn(nvss_fpeak.size)\n\n not_in_SUMSS_ind = NP.logical_and(dec_deg_NVSS > -30.0, dec_deg_NVSS <= min(90.0, latitude+90.0))\n bright_source_ind = nvss_fpeak >= 10.0 * (freq_NVSS*1e9/freq)**(spindex_NVSS)\n PS_ind = NP.sqrt(nvss_majax**2-(0.75/60.0)**2) < 14.0/3.6e3\n count_valid = NP.sum(NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind))\n nvss_fpeak = nvss_fpeak[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]\n freq_catalog = NP.concatenate((freq_catalog, freq_NVSS*1e9 + NP.zeros(count_valid)))\n catlabel = NP.concatenate((catlabel, NP.repeat('NVSS',count_valid)))\n ra_deg = NP.concatenate((ra_deg, ra_deg_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))\n dec_deg = NP.concatenate((dec_deg, dec_deg_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))\n spindex = NP.concatenate((spindex, spindex_NVSS[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))\n majax = NP.concatenate((majax, nvss_majax[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))\n minax = NP.concatenate((minax, nvss_minax[NP.logical_and(NP.logical_and(not_in_SUMSS_ind, bright_source_ind), PS_ind)]))\n fluxes = NP.concatenate((fluxes, nvss_fpeak))\n\n spec_parms = {}\n # spec_parms['name'] = NP.repeat('tanh', ra_deg.size)\n spec_parms['name'] = NP.repeat('power-law', ra_deg.size)\n spec_parms['power-law-index'] = spindex\n # spec_parms['freq-ref'] = freq/1e9 + NP.zeros(ra_deg.size)\n spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)\n spec_parms['flux-scale'] = fluxes\n spec_parms['flux-offset'] = NP.zeros(ra_deg.size)\n spec_parms['freq-width'] = NP.zeros(ra_deg.size)\n\n skymod = SM.SkyModel(catlabel, chans*1e9, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), 'func', spec_parms=spec_parms, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])\n\nelif use_SUMSS:\n SUMSS_file = args['SUMSS_file']\n catalog = NP.loadtxt(SUMSS_file, usecols=(0,1,2,3,4,5,10,12,13,14,15,16))\n ra_deg = 15.0 * (catalog[:,0] + catalog[:,1]/60.0 + catalog[:,2]/3.6e3)\n 
dec_dd = NP.loadtxt(SUMSS_file, usecols=(3,), dtype=\"|S3\")\n    sgn_dec_str = NP.asarray([dec_dd[i][0] for i in range(dec_dd.size)])\n    sgn_dec = 1.0*NP.ones(dec_dd.size)\n    sgn_dec[sgn_dec_str == '-'] = -1.0\n    dec_deg = sgn_dec * (NP.abs(catalog[:,3]) + catalog[:,4]/60.0 + catalog[:,5]/3.6e3)\n    fmajax = catalog[:,7]\n    fminax = catalog[:,8]\n    fpa = catalog[:,9]\n    dmajax = catalog[:,10]\n    dminax = catalog[:,11]\n    PS_ind = NP.logical_and(dmajax == 0.0, dminax == 0.0)\n    ra_deg = ra_deg[PS_ind]\n    dec_deg = dec_deg[PS_ind]\n    fint = catalog[PS_ind,6] * 1e-3\n    fmajax = fmajax[PS_ind]\n    fminax = fminax[PS_ind]\n    fpa = fpa[PS_ind]\n    dmajax = dmajax[PS_ind]\n    dminax = dminax[PS_ind]\n    bright_source_ind = fint >= 1.0\n    ra_deg = ra_deg[bright_source_ind]\n    dec_deg = dec_deg[bright_source_ind]\n    fint = fint[bright_source_ind]\n    fmajax = fmajax[bright_source_ind]\n    fminax = fminax[bright_source_ind]\n    fpa = fpa[bright_source_ind]\n    dmajax = dmajax[bright_source_ind]\n    dminax = dminax[bright_source_ind]\n    valid_ind = NP.logical_and(fmajax > 0.0, fminax > 0.0)\n    ra_deg = ra_deg[valid_ind]\n    dec_deg = dec_deg[valid_ind]\n    fint = fint[valid_ind]\n    fmajax = fmajax[valid_ind]\n    fminax = fminax[valid_ind]\n    fpa = fpa[valid_ind]\n    freq_catalog = 0.843e9 + NP.zeros(fint.size) # in Hz (fix: freq-ref is given in Hz in the other branches)\n    if spindex_seed is None:\n        spindex = -0.83 + spindex_rms * NP.random.randn(fint.size)\n    else:\n        NP.random.seed(spindex_seed)\n        spindex = -0.83 + spindex_rms * NP.random.randn(fint.size)\n\n    fg_str = 'sumss'\n\n    # fix, mirroring the 'csm' branch above: catlabel, fluxes, majax and minax were\n    # previously undefined in this branch and raised a NameError further down\n    catlabel = NP.repeat('SUMSS', fint.size)\n    fluxes = fint + 0.0\n    majax = fmajax/3.6e3\n    minax = fminax/3.6e3\n\n    spec_parms = {}\n    # spec_parms['name'] = NP.repeat('tanh', ra_deg.size)\n    spec_parms['name'] = NP.repeat('power-law', ra_deg.size)\n    spec_parms['power-law-index'] = spindex\n    # spec_parms['freq-ref'] = freq/1e9 + NP.zeros(ra_deg.size)\n    spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)\n    spec_parms['flux-scale'] = fluxes\n    spec_parms['flux-offset'] = NP.zeros(ra_deg.size)\n    spec_parms['freq-width'] = 1.0e-3 + NP.zeros(ra_deg.size)\n\n    skymod = SM.SkyModel(catlabel, chans*1e9, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), 'func', spec_parms=spec_parms, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])\n\nelif use_MSS:\n    pass\nelif use_GLEAM:\n    catalog_file = args['GLEAM_file']\n    catdata = ascii.read(catalog_file, data_start=1, delimiter=',')\n    dec_deg = catdata['DEJ2000']\n    ra_deg = catdata['RAJ2000']\n    fpeak = catdata['S150_fit']\n    ferr = catdata['e_S150_fit']\n    spindex = catdata['Sp+Index']\n    fg_str = 'gleam'\n\n    # fix (assumptions): freq_catalog, fluxes, catlabel, majax and minax were undefined\n    # here; S150_fit is taken to be the fitted 150 MHz flux and sources are assumed unresolved\n    freq_catalog = 150e6 + NP.zeros(ra_deg.size)\n    fluxes = fpeak + 0.0\n    catlabel = NP.repeat('GLEAM', ra_deg.size)\n    majax = NP.zeros(ra_deg.size)\n    minax = NP.zeros(ra_deg.size)\n\n    spec_parms = {}\n    # spec_parms['name'] = NP.repeat('tanh', ra_deg.size)\n    spec_parms['name'] = NP.repeat('power-law', ra_deg.size)\n    spec_parms['power-law-index'] = spindex\n    # spec_parms['freq-ref'] = freq/1e9 + NP.zeros(ra_deg.size)\n    spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)\n    spec_parms['flux-scale'] = fluxes\n    spec_parms['flux-offset'] = NP.zeros(ra_deg.size)\n    spec_parms['freq-width'] = NP.zeros(ra_deg.size)\n\n    skymod = SM.SkyModel(catlabel, chans*1e9, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), 'func', spec_parms=spec_parms, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])\n\nelif use_PS:\n    fg_str = 'point'\n    catalog_file = args['PS_file']\n    catdata = ascii.read(catalog_file, comment='#', header_start=0, data_start=1)\n    ra_deg = catdata['RA'].data\n    dec_deg = catdata['DEC'].data\n    fint = catdata['F_INT'].data\n    spindex = catdata['SPINDEX'].data\n    majax = catdata['MAJAX'].data\n    minax = 
catdata['MINAX'].data\n    pa = catdata['PA'].data\n    freq_PS = 0.185 # in GHz\n    freq_catalog = freq_PS * 1e9 + NP.zeros(fint.size)\n    catlabel = NP.repeat('PS', fint.size)\n    fluxes = fint + 0.0 # fix: 'fluxes' was undefined in this branch\n\n    spec_parms = {}\n    # spec_parms['name'] = NP.repeat('tanh', ra_deg.size)\n    spec_parms['name'] = NP.repeat('power-law', ra_deg.size)\n    spec_parms['power-law-index'] = spindex\n    # spec_parms['freq-ref'] = freq/1e9 + NP.zeros(ra_deg.size)\n    spec_parms['freq-ref'] = freq_catalog + NP.zeros(ra_deg.size)\n    spec_parms['flux-scale'] = fluxes\n    spec_parms['flux-offset'] = NP.zeros(ra_deg.size)\n    spec_parms['freq-width'] = NP.zeros(ra_deg.size)\n\n    skymod = SM.SkyModel(catlabel, chans*1e9, NP.hstack((ra_deg.reshape(-1,1), dec_deg.reshape(-1,1))), 'func', spec_parms=spec_parms, src_shape=NP.hstack((majax.reshape(-1,1),minax.reshape(-1,1),NP.zeros(fluxes.size).reshape(-1,1))), src_shape_units=['degree','degree','degree'])\n\nantpower_Jy = RI.antenna_power(skymod, telescope, pointing_info, freq_scale='Hz')\nantpower_K = antpower_Jy * CNST.Jy / pixres / (2.0* FCNST.k * (1e9*chans.reshape(1,-1))**2 / FCNST.c**2)\n\noutfile = 'antenna_power_'+telescope_str+ground_plane_str+latitude_str+snapshot_type_str+duration_str+'_'+fg_str+'_sprms_{0:.1f}_'.format(spindex_rms)+spindex_seed_str+'nside_{0:0d}_'.format(nside)+'{0}_{1:.1f}_MHz'.format(bandpass_str, freq/1e6)\n\nif plots:\n    fig = PLT.figure(figsize=(6,6))\n    ax = fig.add_subplot(111)\n    if flux_unit == 'Jy':\n        ax.plot(lst/15, antpower_Jy[:,nchan/2], 'k-', lw=2)\n    elif flux_unit == 'K':\n        ax.plot(lst/15, antpower_K[:,nchan/2], 'k-', lw=2)\n    ax.set_xlim(0, 24)\n    ax.set_xlabel('RA [hours]', fontsize=18, weight='medium')\n    ax.set_ylabel(r'$T_\\mathrm{ant}$'+' [ '+flux_unit+' ]', fontsize=16, weight='medium')\n    ax_y2 = ax.twinx()\n    if flux_unit == 'Jy':\n        ax_y2.set_yticks(Jy2K(ax.get_yticks(), chans[nchan/2]*1e9, pixres))\n        ax_y2.set_ylim(Jy2K(NP.asarray(ax.get_ylim()), chans[nchan/2]*1e9, pixres)) # fix: closing parenthesis was misplaced (cf. the K2Jy branch below)\n        ax_y2.set_ylabel(r'$T_\\mathrm{ant}$'+' [ K ]', fontsize=16, weight='medium')\n    elif flux_unit == 'K':\n        ax_y2.set_yticks(K2Jy(ax.get_yticks(), chans[nchan/2]*1e9, pixres))\n        ax_y2.set_ylim(K2Jy(NP.asarray(ax.get_ylim()), chans[nchan/2]*1e9, pixres))\n        ax_y2.set_ylabel(r'$T_\\mathrm{ant}$'+' [ Jy ]', fontsize=16, weight='medium')\n\n    ax.text(0.5, 0.9, '{0:.1f} MHz'.format(chans[nchan/2]*1e3), transform=ax.transAxes, fontsize=12, weight='medium', ha='center', color='black')\n\n    fig.subplots_adjust(right=0.85)\n    fig.subplots_adjust(left=0.15)\n\n    PLT.savefig(rootdir+project_dir+'/figures/'+outfile+'.png', bbox_inches=0)\n\nhdulist = []\nhdulist += [fits.PrimaryHDU()]\nhdulist[0].header['EXTNAME'] = 'PRIMARY'\nhdulist[0].header['telescope_id'] = (telescope_id, 'Telescope ID')\nhdulist[0].header['element_shape'] = (telescope['shape'], 'Antenna element shape')\nhdulist[0].header['element_size'] = (telescope['size'], 'Antenna element size (m)')\nhdulist[0].header['A_eff'] = (A_eff, 'Effective area [m^2]') \nif telescope['ocoords'] is not None:\n    hdulist[0].header['element_ocoords'] = (telescope['ocoords'], 'Antenna element orientation coordinates')\nif telescope['groundplane'] is not None:\n    hdulist[0].header['ground_plane'] = (telescope['groundplane'], 'Antenna element height above ground plane [m]')\nhdulist[0].header['latitude'] = (latitude, 'Latitude of telescope')\nhdulist[0].header['obs_mode'] = (obs_mode, 'Observing mode')\nhdulist[0].header['t_snap'] = (NP.mean(t_snap), 'Average snapshot duration (s)')\nhdulist[0].header['n_snaps'] = (n_snaps, 'Number of 
snapshots')\nhdulist[0].header['center_freq'] = (freq, 'Center Frequency')\nhdulist[0].header['freq_resolution'] = (freq_resolution, 'Frequency Resolution')\nhdulist[0].header['freq_unit'] = ('Hz', 'Frequency Units')\nhdulist[0].header['nchan'] = (nchan, 'Number of frequency channels')\nhdulist[0].header['pointing_coords'] = ('RADEC', 'Pointing coordinate system')\nhdulist[0].header['fgmodel'] = (fg_str, 'Foreground model')\nhdulist[0].header['Temperature_unit'] = ('K', 'Antenna temperature unit')\nhdulist[0].header['Power_unit'] = ('Jy', 'Antenna power unit')\n \nif telescope['orientation'] is not None:\n hdulist += [fits.ImageHDU(telescope['orientation'], name='Antenna element orientation')]\n \nif 'element_locs' in telescope:\n hdulist += [fits.ImageHDU(telescope['element_locs'], name='Antenna element locations')]\n hdulist[0].header['phased_array'] = (telescope['element_locs'].shape[0], 'Number of phased array elements')\n\nhdulist += [fits.ImageHDU(t_snap, name='Snapshot duration')] \nhdulist += [fits.ImageHDU(chans*1e9, name='Frequencies')]\n\ncols = []\ncols += [fits.Column(name='LST', format='D', array=NP.asarray(lst).ravel())]\ncols += [fits.Column(name='pointing_center', format='2D', array=pointings_radec)]\ncolumns = fits.ColDefs(cols, ascii=False)\ntbhdu = fits.new_table(columns)\ntbhdu.header.set('EXTNAME', 'POINTINGS')\nhdulist += [tbhdu]\n\nhdulist += [fits.ImageHDU(antpower_K, name='Antenna Temperature')]\nhdulist += [fits.ImageHDU(antpower_Jy, name='Antenna Power')]\n\nhdu = fits.HDUList(hdulist)\nhdu.writeto(rootdir+project_dir+'/'+outfile+'.fits', clobber=True)\n\nPDB.set_trace()\n\n\n\n" ]
[ [ "numpy.sqrt", "numpy.linspace", "numpy.asarray", "numpy.concatenate", "numpy.mean", "numpy.random.randn", "numpy.where", "numpy.arange", "numpy.copy", "numpy.repeat", "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "numpy.loadtxt", "numpy.logical_and", "numpy.sum", "numpy.abs", "numpy.random.seed", "numpy.ones", "numpy.fromstring", "numpy.vstack" ] ]
shixun404/elegatnRL-marl
[ "89ade546e00fd00fee35e6d0efb6fa6b0239f0e8" ]
[ "elegantrl/env.py" ]
[ "import os\nimport gym # not necessary\nimport numpy as np\nfrom copy import deepcopy\n\n\"\"\"[ElegantRL.2021.09.01](https://github.com/AI4Finance-LLC/ElegantRL)\"\"\"\n\ngym.logger.set_level(40) # Block warning\n\n\nclass PreprocessEnv(gym.Wrapper): # environment wrapper\n def __init__(self, env, if_print=True, if_norm=False):\n \"\"\"Preprocess a standard OpenAI gym environment for training.\n\n `object env` a standard OpenAI gym environment, it has env.reset() and env.step()\n `bool if_print` print the information of environment. Such as env_name, state_dim ...\n `object data_type` convert state (sometimes float64) to data_type (float32).\n \"\"\"\n self.env = gym.make(env) if isinstance(env, str) else env\n super().__init__(self.env)\n\n (self.env_name, self.state_dim, self.action_dim, self.action_max, self.max_step,\n self.if_discrete, self.target_return) = get_gym_env_info(self.env, if_print)\n self.env.env_num = getattr(self.env, 'env_num', 1)\n self.env_num = 1\n\n if if_norm:\n state_avg, state_std = get_avg_std__for_state_norm(self.env_name)\n self.neg_state_avg = -state_avg\n self.div_state_std = 1 / (state_std + 1e-4)\n\n self.reset = self.reset_norm\n self.step = self.step_norm\n else:\n self.reset = self.reset_type\n self.step = self.step_type\n\n def reset_type(self):\n tmp = self.env.reset()\n return [tmp[0].astype(np.float32), tmp[1].astype(np.float32),tmp[2].astype(np.float32)]\n\n def step_type(self, action):\n state, reward, done, info = self.env.step(action * self.action_max)\n return state[0].astype(np.float32), reward, done, info\n\n def reset_norm(self) -> np.ndarray:\n \"\"\" convert the data type of state from float64 to float32\n do normalization on state\n\n return `array state` state.shape==(state_dim, )\n \"\"\"\n state = self.env.reset()\n state = (state + self.neg_state_avg) * self.div_state_std\n return state.astype(np.float32)\n\n def step_norm(self, action: np.ndarray) -> (np.ndarray, float, bool, dict):\n \"\"\"convert the data type of state from float64 to float32,\n adjust action range to (-action_max, +action_max)\n do normalization on state\n\n return `array state` state.shape==(state_dim, )\n return `float reward` reward of one step\n return `bool done` the terminal of an training episode\n return `dict info` the information save in a dict. OpenAI gym standard. Send a `None` is OK\n \"\"\"\n state, reward, done, info = self.env.step(action * self.action_max)\n state = (state + self.neg_state_avg) * self.div_state_std\n return state.astype(np.float32), reward, done, info\n\n\ndef get_gym_env_info(env, if_print) -> (str, int, int, int, int, bool, float):\n \"\"\"get information of a standard OpenAI gym env.\n\n The DRL algorithm AgentXXX need these env information for building networks and training.\n\n `object env` a standard OpenAI gym environment, it has env.reset() and env.step()\n `bool if_print` print the information of environment. Such as env_name, state_dim ...\n return `env_name` the environment name, such as XxxXxx-v0\n return `state_dim` the dimension of state\n return `action_dim` the dimension of continuous action; Or the number of discrete action\n return `action_max` the max action of continuous action; action_max == 1 when it is discrete action space\n return `max_step` the steps in an episode. (from env.reset to done). 
It breaks an episode when it reaches max_step\n    return `if_discrete` Is this env a discrete action space?\n    return `target_return` the target episode return; if the agent reaches this score, it passes this game (env).\n    \"\"\"\n    assert isinstance(env, gym.Env)\n\n    #env_name = getattr(env, 'env_name', None)\n    env_name = 'simple_spread' # hard-coded for the multi-agent particle env; per-agent spaces are indexed below\n    #env_name = env.unwrapped.spec.id if env_name is None else env_name\n\n    state_shape = env.observation_space[0].shape\n    state_dim = state_shape[0] if len(state_shape) == 1 else state_shape # sometimes state_dim is a list\n\n    target_return = getattr(env, 'target_return', None)\n    target_return_default = getattr(env.spec, 'reward_threshold', None)\n    if target_return is None:\n        target_return = target_return_default\n    if target_return is None:\n        target_return = 2 ** 16\n\n    max_step = getattr(env, 'max_step', None)\n    max_step_default = getattr(env, '_max_episode_steps', None)\n    if max_step is None:\n        max_step = max_step_default\n    if max_step is None:\n        max_step = 2 ** 10\n\n    \n    if_discrete = isinstance(env.action_space[0], gym.spaces.Discrete)\n    if if_discrete: # make sure it is discrete action space\n        action_dim = env.action_space[0].n\n        action_max = int(1)\n    elif isinstance(env.action_space, gym.spaces.Box): # make sure it is continuous action space\n        action_dim = env.action_space.shape[0]\n        action_max = float(env.action_space.high[0])\n        assert not any(env.action_space.high + env.action_space.low)\n    else:\n        raise RuntimeError('| Please set these values manually: if_discrete=bool, action_dim=int, action_max=1.0')\n\n    if if_print:\n        print(f\"\\n| env_name: {env_name}, action if_discrete: {if_discrete}\"\n              f\"\\n| state_dim: {state_dim:4}, action_dim: {action_dim}, action_max: {action_max}\"\n              f\"\\n| max_step: {max_step:4}, target_return: {target_return}\")\n    return env_name, state_dim, action_dim, action_max, max_step, if_discrete, target_return\n\n\n\"\"\"Utils\"\"\"\n\n\ndef build_env(env, if_print=False):\n    env_name = getattr(env, 'env_name', env)\n    assert isinstance(env_name, str)\n\n    if env_name in {'LunarLanderContinuous-v2', 'BipedalWalker-v3', 'BipedalWalkerHardcore-v3',\n                    'CartPole-v0', 'LunarLander-v2', }:\n        env = gym.make(env_name)\n        env = PreprocessEnv(env, if_print=if_print)\n    elif env_name in {'ReacherBulletEnv-v0', 'AntBulletEnv-v0',\n                      'HumanoidBulletEnv-v0', 'MinitaurBulletEnv-v0'}:\n        import pybullet_envs\n        dir(pybullet_envs)\n        env = gym.make(env_name)\n        env = PreprocessEnv(env, if_print=if_print)\n    elif env_name == 'Pendulum-v0':\n        env = gym.make('Pendulum-v0')\n        env.target_return = -200\n        env = PreprocessEnv(env=env, if_print=if_print)\n    elif env_name == 'CarRacingFix': # Box2D\n        from elegantrl.envs.CarRacingFix import CarRacingFix\n        env = CarRacingFix()\n    else:\n        assert not isinstance(env, str)\n        env = deepcopy(env)\n        # raise ValueError(f'| build_env_from_env_name: need register: {env_name}')\n    return env\n\n\ndef get_avg_std__for_state_norm(env_name) -> (np.ndarray, np.ndarray):\n    \"\"\"return the state normalization data: neg_avg and div_std\n\n    ReplayBuffer.print_state_norm() will print `neg_avg` and `div_std`\n    You can save these arrays here, and PreprocessEnv will load them automatically.\n    eg. 
`state = (state + self.neg_state_avg) * self.div_state_std` in `PreprocessEnv.step_norm()`\n neg_avg = -states.mean()\n div_std = 1/(states.std()+1e-5) or 6/(states.max()-states.min())\n\n `str env_name` the name of environment that helps to find neg_avg and div_std\n return `array avg` neg_avg.shape=(state_dim)\n return `array std` div_std.shape=(state_dim)\n \"\"\"\n avg = 0\n std = 1\n if env_name == 'LunarLanderContinuous-v2':\n avg = np.array([1.65470898e-02, -1.29684399e-01, 4.26883133e-03, -3.42124557e-02,\n -7.39076972e-03, -7.67103031e-04, 1.12640885e+00, 1.12409466e+00])\n std = np.array([0.15094465, 0.29366297, 0.23490797, 0.25931464, 0.21603736,\n 0.25886878, 0.277233, 0.27771219])\n elif env_name == \"BipedalWalker-v3\":\n avg = np.array([1.42211734e-01, -2.74547996e-03, 1.65104509e-01, -1.33418152e-02,\n -2.43243194e-01, -1.73886203e-02, 4.24114229e-02, -6.57800099e-02,\n 4.53460692e-01, 6.08022244e-01, -8.64884810e-04, -2.08789053e-01,\n -2.92092949e-02, 5.04791247e-01, 3.33571745e-01, 3.37325723e-01,\n 3.49106580e-01, 3.70363115e-01, 4.04074671e-01, 4.55838055e-01,\n 5.36685407e-01, 6.70771701e-01, 8.80356865e-01, 9.97987386e-01])\n std = np.array([0.84419678, 0.06317835, 0.16532085, 0.09356959, 0.486594,\n 0.55477525, 0.44076614, 0.85030824, 0.29159821, 0.48093035,\n 0.50323634, 0.48110776, 0.69684234, 0.29161077, 0.06962932,\n 0.0705558, 0.07322677, 0.07793258, 0.08624322, 0.09846895,\n 0.11752805, 0.14116005, 0.13839757, 0.07760469])\n elif env_name == 'ReacherBulletEnv-v0':\n avg = np.array([0.03149641, 0.0485873, -0.04949671, -0.06938662, -0.14157104,\n 0.02433294, -0.09097818, 0.4405931, 0.10299437], dtype=np.float32)\n std = np.array([0.12277275, 0.1347579, 0.14567468, 0.14747661, 0.51311225,\n 0.5199606, 0.2710207, 0.48395795, 0.40876198], dtype=np.float32)\n elif env_name == 'AntBulletEnv-v0':\n avg = np.array([-1.4400886e-01, -4.5074993e-01, 8.5741436e-01, 4.4249415e-01,\n -3.1593361e-01, -3.4174921e-03, -6.1666980e-02, -4.3752361e-03,\n -8.9226037e-02, 2.5108769e-03, -4.8667483e-02, 7.4835382e-03,\n 3.6160579e-01, 2.6877613e-03, 4.7474738e-02, -5.0628246e-03,\n -2.5761038e-01, 5.9789192e-04, -2.1119279e-01, -6.6801407e-03,\n 2.5196713e-01, 1.6556121e-03, 1.0365561e-01, 1.0219718e-02,\n 5.8209229e-01, 7.7563477e-01, 4.8815918e-01, 4.2498779e-01],\n dtype=np.float32)\n std = np.array([0.04128463, 0.19463477, 0.15422264, 0.16463493, 0.16640785,\n 0.08266512, 0.10606721, 0.07636797, 0.7229637, 0.52585346,\n 0.42947173, 0.20228386, 0.44787514, 0.33257666, 0.6440182,\n 0.38659114, 0.6644085, 0.5352245, 0.45194066, 0.20750992,\n 0.4599643, 0.3846344, 0.651452, 0.39733195, 0.49320385,\n 0.41713253, 0.49984455, 0.4943505], dtype=np.float32)\n elif env_name == 'HumanoidBulletEnv-v0':\n avg = np.array([-1.25880212e-01, -8.51390958e-01, 7.07488894e-01, -5.72232604e-01,\n -8.76260102e-01, -4.07587215e-02, 7.27005303e-04, 1.23370838e+00,\n -3.68912554e+00, -4.75829793e-03, -7.42472351e-01, -8.94218776e-03,\n 1.29535913e+00, 3.16205365e-03, 9.13809776e-01, -6.42679911e-03,\n 8.90435696e-01, -7.92571157e-03, 6.54826105e-01, 1.82383414e-02,\n 1.20868635e+00, 2.90832808e-03, -9.96598601e-03, -1.87555347e-02,\n 1.66691601e+00, 7.45300390e-03, -5.63859344e-01, 5.48619963e-03,\n 1.33900166e+00, 1.05895223e-02, -8.30249667e-01, 1.57017610e-03,\n 1.92912612e-02, 1.55787319e-02, -1.19833803e+00, -8.22103582e-03,\n -6.57119334e-01, -2.40323972e-02, -1.05282271e+00, -1.41856335e-02,\n 8.53593826e-01, -1.73063378e-03, 5.46878874e-01, 5.43514848e-01],\n dtype=np.float32)\n std = 
np.array([0.08138401, 0.41358876, 0.33958328, 0.17817754, 0.17003846,\n 0.15247536, 0.690917, 0.481272, 0.40543965, 0.6078898,\n 0.46960834, 0.4825346, 0.38099176, 0.5156369, 0.6534775,\n 0.45825616, 0.38340876, 0.89671516, 0.14449312, 0.47643778,\n 0.21150663, 0.56597894, 0.56706554, 0.49014297, 0.30507362,\n 0.6868296, 0.25598812, 0.52973163, 0.14948095, 0.49912784,\n 0.42137524, 0.42925757, 0.39722264, 0.54846555, 0.5816031,\n 1.139402, 0.29807225, 0.27311933, 0.34721208, 0.38530213,\n 0.4897849, 1.0748593, 0.30166605, 0.30824476], dtype=np.float32)\n elif env_name == 'MinitaurBulletEnv-v0': # need check\n avg = np.array([0.90172989, 1.54730119, 1.24560906, 1.97365306, 1.9413892,\n 1.03866835, 1.69646277, 1.18655352, -0.45842347, 0.17845232,\n 0.38784456, 0.58572877, 0.91414561, -0.45410697, 0.7591031,\n -0.07008998, 3.43842258, 0.61032482, 0.86689961, -0.33910894,\n 0.47030415, 4.5623528, -2.39108079, 3.03559422, -0.36328256,\n -0.20753499, -0.47758384, 0.86756409])\n std = np.array([0.34192648, 0.51169916, 0.39370621, 0.55568461, 0.46910769,\n 0.28387504, 0.51807949, 0.37723445, 13.16686185, 17.51240024,\n 14.80264211, 16.60461412, 15.72930229, 11.38926597, 15.40598346,\n 13.03124941, 2.47718145, 2.55088804, 2.35964651, 2.51025567,\n 2.66379017, 2.37224904, 2.55892521, 2.41716885, 0.07529733,\n 0.05903034, 0.1314812, 0.0221248])\n return avg, std\n\n\ndef demo_get_video_to_watch_gym_render():\n import cv2 # pip3 install opencv-python\n # import gym # pip3 install gym==0.17 pyglet==1.5.0 # env.render() bug in gym==0.18, pyglet==1.6\n import torch\n\n \"\"\"init env\"\"\"\n env = build_env(env='CarRacingFix')\n\n '''init agent'''\n # agent = None # means use random action\n from elegantrl.agent import AgentPPO\n agent = AgentPPO() # means use the policy network which saved in cwd\n agent_cwd = '/mnt/sdb1/Yonv/code/ElegantRL/AgentPPO_CarRacingFix_3'\n net_dim = 2 ** 8\n state_dim = env.state_dim\n action_dim = env.action_dim\n os.environ['CUDA_VISIBLE_DEVICES'] = '1'\n\n agent.init(net_dim, state_dim, action_dim)\n agent.save_or_load_agent(cwd=agent_cwd, if_save=False)\n device = agent.device\n\n '''initialize evaluete and env.render()'''\n save_frame_dir = '' # means don't save video, just open the env.render()\n # save_frame_dir = 'frames' # means save video in this directory\n if save_frame_dir:\n os.makedirs(save_frame_dir, exist_ok=True)\n\n state = env.reset()\n episode_return = 0\n step = 0\n for i in range(2 ** 10):\n print(i) if i % 128 == 0 else None\n for j in range(1):\n if agent is None:\n action = env.action_space.sample()\n else:\n s_tensor = torch.as_tensor((state,), dtype=torch.float32, device=device)\n a_tensor = agent.act(s_tensor)\n action = a_tensor.detach().cpu().numpy()[0] # if use 'with torch.no_grad()', then '.detach()' not need.\n next_state, reward, done, _ = env.step(action)\n\n episode_return += reward\n step += 1\n\n if done:\n print(f'\\t'\n f'TotalStep {i:>6}, epiStep {step:6.0f}, '\n f'Reward_T {reward:8.3f}, epiReward {episode_return:8.3f}')\n state = env.reset()\n episode_return = 0\n step = 0\n else:\n state = next_state\n\n if save_frame_dir:\n frame = env.render('rgb_array')\n cv2.imwrite(f'{save_frame_dir}/{i:06}.png', frame)\n cv2.imshow('OpenCV Window', frame)\n cv2.waitKey(1)\n else:\n env.render()\n env.close()\n\n '''convert frames png/jpg to video mp4/avi using ffmpeg'''\n if save_frame_dir:\n frame_shape = cv2.imread(f'{save_frame_dir}/{3:06}.png').shape\n print(f\"frame_shape: {frame_shape}\")\n\n save_video = 'gym_render.mp4'\n 
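# NOTE: frame_shape comes from cv2.imread and is (height, width, channels), while\n        # ffmpeg's -s flag expects WIDTHxHEIGHT; for non-square frames the size argument\n        # below would be transposed (kept as in the original).\n        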
os.system(f\"| Convert frames to video using ffmpeg. Save in {save_video}\")\n os.system(f'ffmpeg -r 60 -f image2 -s {frame_shape[0]}x{frame_shape[1]} '\n f'-i ./{save_frame_dir}/%06d.png '\n f'-crf 25 -vb 20M -pix_fmt yuv420p {save_video}')\n" ]
[ [ "numpy.array", "torch.as_tensor" ] ]
OAB-exams/oab-exams
[ "b47d6f318c98fc213c1dc5d0ee7de8d2b98cfbd3" ]
[ "src/retrieval.py" ]
[ "import copy\nimport csv\nimport os\nimport json\nfrom functools import reduce\nimport collections\nfrom lxml import etree\nimport nltk\nimport numpy\nimport networkx\nnltk.download('punkt')\n\n\n\"\"\"\n## examples\n\n# parse OAB exam, return generator of OABQuestion instances\noab = parse_xml('/home/bruno/git/oab-exams/OAB/raw/2010-01.xml')\nquestions = questions_in_tree(oab)\nfirst_q = next(questions)\n\n# parse law XML, return tuple (article-ID, list-of-raw-article-text)\nlei = law_articles_in_file('/home/bruno/git/oab-exams/lexml/lei-8906.xml')\nleis = all_law_articles_in_path('/home/bruno/git/oab-exams/lexml/')\n\n# create an instance of collection of articles, which processes the \n# text in each article, creates a node for each, creates a graph of \n# them, and caches their TF-IDF vectors\nartcol = ArticleCollection(leis, rm_stopwords=True)\nlaws = read_laws_into_artcollection('/home/bruno/git/oab-exams/lexml/', False, True) # see code for arguments\n\n# add first question to graph constructed from the articles in artcol\n# return the shortest path and distance from the question statement\n# to each item\npaths_dict = question_paths_in_graph(artcol, first_q)\n\n# shallow question answering justified questions in justify.txt, using\n# laws in lexml/ and getting the questions at OAB/raw/\nresult = sqa_justified_questions('doc/justify.txt', 'lexml/', 'OAB/raw/', rm_stopwords=True, separate=False)\n\n# shallow question answering non-justified questions in an exam\npaths = sqa_questions_in_exam('/home/bruno/git/oab-exams/OAB/raw/2016-20a.xml', artcol, max_questions=10)\n\n# calculate paths and write them to json\nquestions_in_exams_to_json('exams_path', artcol, max_questions=10)\n\n\n\"\"\"\n\n\n#\n## reading XML\ndef parse_xml(path, parser=etree.XMLParser(remove_blank_text=True)):\n return etree.parse(path)\n\ndef elements_in_tree(tree, element_tag):\n assert isinstance(tree, etree._ElementTree)\n for element in tree.getiterator(tag=element_tag):\n yield element\n\n\n#\n## reading OAB exams\ndef get_exam_id(tree):\n exam_id = tree.getroot()\n return exam_id.get('year')+'-'+exam_id.get('edition')\n\ndef get_statement_text(question):\n return question.find('statement').text\n\ndef get_items(question):\n return question.find('items').getchildren()\n\ndef get_correct_item(question):\n for i in get_items(question):\n if i.get('correct') == \"true\":\n return i.get('letter')\n\ndef make_items_dict(items):\n return dict((i.get('letter'), getattr(i, 'text')) for i in items)\n\nclass OABQuestion():\n def __init__(self, number, exam, valid, statement, items, justification=None):\n self.number = number\n self.exam = exam\n self.valid = valid\n self.statement = statement\n self.items = items\n self.justification = justification\n\n def str_repr(self):\n if self.valid and self.justification:\n return \"OAB:{}|Q{}|ans:{}|just:{}\".format(self.exam, self.number, self.valid, self.justification)\n elif self.valid:\n return \"OAB:{}|Q{}|ans:{}|just:{}\".format(self.exam, self.number, self.valid, \".\")\n else:\n return \"OAB:{}|Q{}|ans:{}\".format(self.exam, self.number, \"NULL\")\n\n def __repr__(self):\n return self.str_repr()\n\ndef questions_in_tree(tree):\n for question in elements_in_tree(tree, 'question'):\n yield OABQuestion(question.get('number'),\n get_exam_id(tree),\n get_correct_item(question),\n get_statement_text(question),\n make_items_dict(get_items(question)))\n\n\n#\n## reading law XML\n\n# lexML namespaces\n\ndef namespace_it(namespace, key, element):\n # namespaced element in 
{namespace}element syntax\n return \"{{{}}}{}\".format(namespace[key], element)\n\ndef lazy_articles_in_tree(tree):\n for artigo in elements_in_tree(tree, namespace_it(tree.getroot().nsmap, None, 'Artigo')):\n yield artigo.get('id'), ''.join(artigo.itertext())\n\ndef articles_in_tree(tree):\n return list(lazy_articles_in_tree(tree))\n\ndef get_urn(law_xml):\n assert isinstance(law_xml, etree._ElementTree)\n # fixme http://lxml.de/xpathxslt.html#namespaces-and-prefixes\n id_element = law_xml.find(namespace_it(law_xml.getroot().nsmap, None, 'Metadado') + '/' + namespace_it(law_xml.getroot().nsmap, None, 'Identificacao'))\n return id_element.get('URN')\n\ndef law_articles_in_file(law_path):\n law_xml = parse_xml(law_path)\n law_urn = get_urn(law_xml)\n return (law_urn, articles_in_tree(law_xml))\n\ndef all_law_articles_in_path(laws_path):\n # reads all .xml files in laws_path to a list of law_articles\n assert os.path.isdir(laws_path)\n laws = []\n for file in os.scandir(laws_path):\n if file.name.endswith(\".xml\"):\n law = law_articles_in_file(file.path)\n laws.append(law)\n return laws\n\n#\n## text processing\n \ndef is_number(token):\n try:\n float(token.replace(',', '.').replace('º', ''))\n except ValueError:\n return False\n return True\n\ndef is_punctuation(token):\n if token in '!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`´{|}~§–':\n return True\n\ndef is_stopword(token, language='portuguese'):\n if token in nltk.corpus.stopwords.words(language):\n return True\n\ndef is_useful(token, rm_stopwords):\n token = token.strip()\n if is_number(token) or is_punctuation(token) or (rm_stopwords and is_stopword(token)):\n return False\n else:\n return True\n\ndef preprocess_text(text, rm_stopwords):\n assert isinstance(rm_stopwords, bool)\n return [token.lower().strip() for token in nltk.tokenize.word_tokenize(text) if is_useful(token, rm_stopwords)]\n\n\n#\n## tf-idf and base graph making\n\ndef cosine_similarity(vec1, vec2):\n denominator = numpy.linalg.norm(vec1) * numpy.linalg.norm(vec2)\n if denominator == 0:\n return 0\n else:\n return numpy.dot(vec1, vec2) / denominator\n\nclass ArticleCollection(nltk.TextCollection):\n # source is [(law-urn [(art-id, raw-art-text)+])+]\n def __init__(self, source, rm_stopwords=False, text_preprocessing_fn=preprocess_text, similarity_fn=cosine_similarity):\n assert isinstance(source, list)\n self.rm_stopwords = rm_stopwords\n self._text_preprocessing_fn = text_preprocessing_fn\n self._similarity_fn = similarity_fn\n # map article id to its index\n self.ids, self.raw_texts = self.make_ids_and_raw_texts(source)\n self.laws = [law[0] for law in source]\n # remove law id\n # so that we have useful methods such as .idf(token)\n nltk.TextCollection.__init__(self, list(map(lambda x: text_preprocessing_fn(x, self.rm_stopwords), self.raw_texts)))\n # index tokens to create TF-IDF vector\n self.token_index_dict = {key:ix for ix, key in enumerate(self.vocab().keys())}\n self.vocab_size = len(self.vocab().keys())\n self.tfidf_vectors = [self.tfidf_vectorize(text) for text in self._texts]\n self.size = len(self._texts)\n # graph w/ only the articles as nodes, no edges\n self.base_graph = self.make_base_graph()\n\n def __repr__(self):\n return \"ArticleCollection: {}\".format(self.laws)\n\n def make_ids_and_raw_texts(self, source):\n ids = {}\n raw_texts = []\n ix = 0\n for law in source:\n law_id = law[0]\n for article in law[1]:\n art_id = article[0]\n art_id = law_id + art_id\n ids[art_id] = ix\n raw_texts.append(article[1])\n ix += 1\n return ids, raw_texts\n\n def 
tf_tokens(self, tokens):\n count = collections.Counter(tokens)\n length = len(tokens)\n return list(map(lambda x: count[x]/length, tokens))\n\n def tfidf_vectorize(self, text):\n # text must be preprocessed first!\n tfidf_vector = numpy.zeros(self.vocab_size)\n tf_vector = self.tf_tokens(text)\n for ix, token in enumerate(text):\n idf = self.idf(token)\n if idf == 0:\n continue\n tfidf_vector[self.token_index_dict[token]] = tf_vector[ix] * idf\n return tfidf_vector\n\n def inverse_similarity(self, vec1, vec2):\n similarity = self._similarity_fn(vec1, vec2)\n if similarity == 0:\n return numpy.Infinity\n else:\n return 1 / similarity\n\n def make_base_graph(self):\n graph = networkx.DiGraph()\n graph.add_nodes_from(self.ids.keys())\n return graph\n\n\n#\n## add questions\n\ndef add_temporary_node(graph, artcol, text, label, to_nodes=True):\n \"\"\"\n article_collection is where graph and tfidf-calculation happen,\n text is raw question statement (which is preprocessed here) and\n label is question number in str.\n to_nodes is the direction of the edges to be built. should be \n from new node to the nodes already present, or from them to the\n node being added?\n \"\"\"\n graph.add_node(label)\n label_tfidf = artcol.tfidf_vectorize(artcol._text_preprocessing_fn(text, artcol.rm_stopwords))\n # to add edges only to the articles, and not every node\n for node_id in artcol.ids.keys():\n node_ix = artcol.ids[node_id]\n if to_nodes:\n graph.add_edge(label, node_id, weight=artcol.inverse_similarity(label_tfidf, artcol.tfidf_vectors[node_ix]))\n else:\n graph.add_edge(node_id, label, weight=artcol.inverse_similarity(label_tfidf, artcol.tfidf_vectors[node_ix]))\n return graph\n\ndef question_paths_in_graph(article_collection, oab_question):\n \"\"\"\n return distance and shortest path from statement to each item in\n oab_question.\n note that '1' (str) means question one.\n \"\"\"\n assert isinstance(article_collection, ArticleCollection)\n assert isinstance(oab_question, OABQuestion)\n # so that base_graph is not changed improperly:\n graph = copy.deepcopy(article_collection.base_graph)\n # add question statement:\n graph = add_temporary_node(graph, article_collection, oab_question.statement, oab_question.number, to_nodes=True)\n paths = {}\n for question_letter, item_text in oab_question.items.items():\n graph = add_temporary_node(graph, article_collection, item_text, question_letter, to_nodes=False)\n paths[question_letter] = networkx.algorithms.shortest_paths.bidirectional_dijkstra(graph, oab_question.number, question_letter, weight='weight')\n return paths\n\n#\n## add justified questions\n\ndef read_laws_into_separate_artcol(laws_path, rm_stopwords):\n laws = {}\n for file in os.scandir(laws_path):\n if file.name.endswith(\".xml\"):\n urn, artigos = law_articles_in_file(file.path)\n artcol = ArticleCollection([(urn, artigos)], rm_stopwords)\n laws[urn] = artcol\n return laws\n\ndef read_laws_into_artcollection(laws_path, separate, rm_stopwords=False):\n # reads all .xml files in laws_path to a dictionary of urn:artcol\n assert os.path.isdir(laws_path)\n if separate:\n laws = read_laws_into_separate_artcol(laws_path, rm_stopwords)\n else:\n laws_list = all_law_articles_in_path(laws_path)\n laws = ArticleCollection(laws_list, rm_stopwords)\n return laws\n\ndef get_law_artcol(laws, urn, separate):\n if separate:\n return laws[urn]\n else:\n return laws\n\ndef find_question(oab_exam, question_nr):\n assert isinstance(oab_exam, etree._ElementTree)\n for question in questions_in_tree(oab_exam):\n if 
question.number == question_nr:\n return question\n\ndef sqa_justified_questions(justification_path, laws_path, exams_path, rm_stopwords=False, separate=True):\n # sqa = shallow question answering\n # justification file must be in the format described in docs.\n assert os.path.isfile(justification_path)\n assert os.path.isdir(exams_path)\n laws = read_laws_into_artcollection(laws_path, separate, rm_stopwords)\n question_paths = {}\n with open(justification_path, 'r') as tsv:\n tsv = csv.reader(tsv, delimiter='\\t')\n for row in tsv:\n # row[0]: OAB exam filename\n exam_path = os.path.join(exams_path, row[0] + '.xml')\n oab_exam = parse_xml(exam_path)\n # row[1]: question number\n question = find_question(oab_exam, row[1])\n # row[3]: justification law URN\n artcol = get_law_artcol(laws, row[3], separate)\n # row[2]: justification article\n question.justification = (row[3], row[2])\n paths = question_paths_in_graph(artcol, question)\n question_paths[question] = paths\n return question_paths\n\ndef get_minimum_paths(question_paths):\n minimum_paths = {}\n for question, item_paths in question_paths.items():\n paths = []\n for item, item_path in item_paths.items():\n paths.append(item_path)\n minimum_path = reduce(lambda x,y: y if x[0]>y[0] else x if x[0] < y[0] else x + (\"can't decide between {} and {}\".format(x[1],y[1]),), paths)\n minimum_paths[question] = minimum_path\n return minimum_paths\n\ndef get_correct_item_paths(question_paths):\n correct_paths = {}\n for question, item_paths in question_paths.items():\n if not question.valid:\n continue\n correct_letter = question.valid\n correct_item_path = item_paths[correct_letter]\n correct_paths[question] = correct_item_path\n return correct_paths\n\ndef check_justification_correct_items(question_paths):\n # return True if justification for the correct article match with\n # the correct justification\n correct_items = {}\n for question, item_paths in question_paths.items():\n correct_letter = question.valid\n correct_item_path = item_paths[correct_letter]\n selected_article = correct_item_path[1][1]\n justification_urn = question.justification[0]\n justification_articles = question.justification[1].split(',')\n justification = list(map(lambda x: justification_urn + x, justification_articles))\n correct_items[question] = (selected_article in justification)\n return correct_items\n\n\n#\n## assign article to question\n\ndef sqa_questions_in_exam(exam_path, artcol, max_questions=-1):\n assert os.path.isfile(exam_path)\n exam = parse_xml(exam_path)\n question_paths = {}\n for ix, question in enumerate(questions_in_tree(exam)):\n if ix == max_questions:\n break\n paths = question_paths_in_graph(artcol, question)\n question_paths[question] = paths\n return question_paths\n\ndef make_paths_printable(question_paths):\n printable_paths = {}\n for question, item_paths in question_paths.items():\n question_str = question.str_repr()\n printable_paths[question_str] = item_paths\n return printable_paths\n\ndef to_json(dictionary, path):\n with open(path, 'w') as f:\n json.dump(dictionary, f, indent=4)\n\ndef questions_in_exams_to_json(exams_path, artcol, max_questions=-1):\n # make this work with all functions later\n assert os.path.isdir(exams_path)\n paths = {}\n for file in os.scandir(exams_path):\n if file.name.endswith(\".xml\"):\n exam_question_paths = sqa_questions_in_exam(file.path, artcol, max_questions=max_questions)\n paths[file.name] = make_paths_printable(exam_question_paths)\n result_path = os.path.join(os.path.dirname(file.path), 
'results.json')\n to_json(paths, result_path)\n" ]
[ [ "numpy.dot", "numpy.zeros", "numpy.linalg.norm" ] ]
INCF/BrainImagingPipelines
[ "39915b5a313d049a0bb3ccd3a82a9a83e1241b11" ]
[ "bips/utils/reportsink/write_report.py" ]
[ "# general class for writing reportlab stuff. \n\nfrom reportlab.platypus import SimpleDocTemplate, Paragraph,\\\n Table, TableStyle, Spacer,\\\n PageBreak, PageTemplate\nfrom reportlab.pdfgen import canvas\nfrom reportlab.lib.units import inch\nfrom reportlab.lib.pagesizes import letter\nfrom reportlab.lib import colors\nimport time\nfrom reportlab.lib.enums import TA_JUSTIFY, TA_RIGHT\nfrom reportlab.platypus import Image as Image2\nfrom reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle\nfrom PIL import Image\n\n\ndef get_and_scale(imagefile,scale=1):\n from reportlab.platypus import Image as Image2\n im1 = scale_im(Image.open(imagefile))\n im = Image2(imagefile, im1.size[0]*scale, im1.size[1]*scale) \n return im \n \ndef scale_im(im):\n from numpy import array \n # scales an image so that it will fit on the page with various margins...\n width, height = letter\n newsize = array(im.size)/(max(array(im.size)/array([width-(1*inch), height-(2*inch)])))\n newsize = tuple(map(lambda x: int(x), tuple(newsize)))\n return im.resize(newsize) \n\nclass report():\n def __init__(self,fname,title):\n self.report = fname\n self.doc = SimpleDocTemplate(self.report, pagesize=letter,\n rightMargin=36,leftMargin=36,\n topMargin=72,bottomMargin=72)\n self.elements = []\n self.styles=getSampleStyleSheet()\n self.styles.add(ParagraphStyle(name='RIGHT', alignment=TA_RIGHT))\n \n formatted_time = time.ctime()\n \n ptext = '<font size=10>%s</font>' % formatted_time \n self.elements.append(Paragraph(ptext, self.styles[\"Normal\"]))\n self.elements.append(Spacer(1, 12)) \n \n ptext = '<font size=22>%s</font>' %(title) \n self.elements.append(Paragraph(ptext, self.styles[\"Normal\"]))\n self.elements.append(Spacer(1, 24))\n \n def add_text(self,text,fontsize=12):\n ptext = '<font size=%s>%s</font>' % (str(fontsize),text) \n self.elements.append(Paragraph(ptext, self.styles[\"Normal\"]))\n self.elements.append(Spacer(1, 12)) \n \n def add_image(self,fname,scale=1):\n im = get_and_scale(fname,scale=scale)\n self.elements.append(im) \n self.elements.append(Spacer(1, 12)) \n \n def add_table(self,data,para=False):\n\n def splitter(txt):\n if len(txt)>95:\n N = len(txt)/95\n parts = []\n\n for n in xrange(N):\n parts.append(txt[95*n:95*n+95])\n parts.append(txt[95*N:])\n txt = ''\n for p in parts:\n txt += p+'\\n'\n return txt\n else:\n return txt\n\n data_para = []\n for dat in data:\n temp = []\n for da in dat:\n if isinstance(da,str):\n if da.endswith('.png'):\n temp.append(get_and_scale(da,0.35))\n else:\n if para:\n temp.append(Paragraph(da,self.styles[\"Normal\"]))\n else:\n if len(da) > 95:\n da = splitter(da)\n temp.append(da)\n else:\n if para:\n temp.append(Paragraph(str(da),self.styles[\"Normal\"]))\n else:\n temp.append(da)\n\n data_para.append(temp)\n\n t=Table(data_para)\n \n t.setStyle(TableStyle([('ALIGN',(0,0), (-1,-1),'LEFT'),\n ('VALIGN',(0,0), (-1,-1), 'TOP'),\n ('INNERGRID', (0,0), (-1,-1), 0.25, colors.black),\n ('BOX', (0,0), (-1,-1), 0.25, colors.black)]))\n t.hAlign='LEFT'\n self.elements.append(t)\n self.elements.append(Spacer(1, 12))\n \n def add_pagebreak(self):\n self.elements.append(PageBreak())\n \n def write(self):\n self.doc.build(self.elements)\n return self.report \n \n" ]
[ [ "numpy.array" ] ]
lei56/snli-ethics
[ "d5b18717ff1a6d135fed52086d2406cf26adaf2f" ]
[ "snli_query.py" ]
[ "#!/usr/bin/env python\n\n\nfrom math import log\nfrom heapq import nlargest\nfrom itertools import product\nfrom functools import partial\nfrom contextlib import contextmanager\nfrom csv import DictWriter\nimport logging\nimport pickle\nimport sys\n\nimport yaml\nimport numpy as np\nfrom scipy.stats import chi2\n\nfrom snli_cooccur import CooccurrenceCounts # noqa\nfrom snli_cooccur import resource_usage_str, mkdirp_parent\n\n\ndef parse_ngram(s):\n '''\n Given a string/unicode representing an ngram as a sequence of\n tokens separated by spaces, return the corresponding tuple\n representation used in the counting/scoring code.\n\n >>> parse_ngram('hello world')\n ('hello', 'world')\n >>> parse_ngram('\\thello world ')\n ('hello', 'world')\n '''\n return tuple(s.strip().split())\n\n\ndef format_ngram(ngram):\n '''\n Given a tuple of string/unicode representing an ngram,\n return a single string/unicode with the tokens of the ngram\n separated by spaces.\n\n >>> format_ngram(('hello', 'world'))\n 'hello world'\n '''\n return ' '.join(ngram)\n\n\ndef g_test_obs_table(counts, x, y):\n '''\n Return 2 x 2 contingency table (array) of observed cooccurrence\n (x, y) counts for G-test. Read observed counts from counts,\n an instance of CooccurrenceCounts.\n\n >>> c = CooccurrenceCounts()\n >>> c.increment('the', 'dog')\n >>> c.increment('good', 'dog')\n >>> c.increment('bad', 'dog')\n >>> c.increment('dog', 'ran')\n >>> c.increment('cat', 'ran')\n >>> c.increment('fish', 'ran')\n >>> g_test_obs_table(c, 'the', 'dog').tolist()\n [[1, 0], [2, 3]]\n '''\n xy_count = counts.xy[(x, y)]\n x_count = counts.x[x]\n y_count = counts.y[y]\n return np.array([\n [xy_count, x_count - xy_count],\n [y_count - xy_count, counts.xy_total - (x_count + y_count - xy_count)]\n ])\n\n\ndef g_test_exp_table(obs_table):\n '''\n Return 2 x 2 contingency table (array) of expected cooccurrence\n (x, y) counts for G-test. Read observed counts from obs_table,\n the corresponding 2 x 2 contingency table of observed counts.\n\n >>> g_test_exp_table(np.array([[3, 2],\n ... 
[1, 10]])).tolist()\n [[1.25, 3.75], [2.75, 8.25]]\n '''\n return np.outer(\n np.sum(obs_table, axis=1), # row sums\n np.sum(obs_table, axis=0) # col sums\n ) / np.sum(obs_table, dtype=np.float)\n\n\ndef g_test_stat(counts, x, y, min_count=1):\n '''\n Return G-test statistic for (x, y) cooccurrence using counts from\n counts (an instance of CooccurrenceCounts).\n\n Return -inf if (x, y) has a count less than min_count.\n '''\n if counts.xy[(x, y)] >= min_count:\n obs_table = g_test_obs_table(counts, x, y)\n exp_table = g_test_exp_table(obs_table)\n return 2 * np.sum(obs_table * (np.log(obs_table) - np.log(exp_table)))\n else:\n return float('-inf')\n\n\ndef g_test_p_value(g):\n '''\n Return the p-value for a given 2 x 2 G-test statistic value.\n\n See http://www.itl.nist.gov/div898/handbook/eda/section3/eda3674.htm\n >>> np.allclose(g_test_p_value(2.706), 0.1, rtol=0.01)\n True\n >>> np.allclose(g_test_p_value(3.841), 0.05, rtol=0.01)\n True\n >>> np.allclose(g_test_p_value(6.635), 0.01, rtol=0.01)\n True\n >>> np.allclose(g_test_p_value(10.828), 0.001, rtol=0.01)\n True\n '''\n return chi2.sf(g, 1)\n\n\ndef pmi(counts, x, y, min_count=1):\n '''\n Return PMI for (x, y) cooccurrence using counts from counts (an\n instance of CooccurrenceCounts).\n\n Return -inf if (x, y) has a count less than min_count.\n '''\n if counts.xy[(x, y)] >= min_count:\n return (\n (log(counts.xy[(x, y)]) - log(counts.xy_total)) - (\n (log(counts.x[x]) - log(counts.xy_total)) +\n (log(counts.y[y]) - log(counts.xy_total))\n )\n )\n else:\n return float('-inf')\n\n\ndef filter_y(counts, x, min_count=1, filter_to_unigrams=False):\n '''\n Return list of y representing (x, y) cooccurrences,\n computed using counts (an instance of CooccurrenceCounts),\n filtered as follows.\n\n Cooccurrences (x, y) whose count is less than min_count are not\n included in the list.\n\n If filter_to_unigrams is True, filter results to unigrams only.\n '''\n return [\n y\n for y in counts.y.keys()\n if counts.xy[(x, y)] >= min_count and not (\n filter_to_unigrams and len(y) > 1\n )\n ]\n\n\ndef top_y(score_func, counts, x, k=10, min_count=1, filter_to_unigrams=False):\n '''\n Return list of top (y, score) pairs where y is hashable\n and score is a float, representing the\n top k (x, y) cooccurrences sorted by score (in descending order)\n computed using counts (an instance of CooccurrenceCounts).\n\n The score is computed by score_func and can be e.g. pmi\n or g_test_stat.\n\n Cooccurrences (x, y) whose count is less than min_count are not\n included in the list. (If there are not enough candidates the\n list will be shorter than k.)\n\n If filter_to_unigrams is True, filter results to unigrams only\n before truncating at k.\n '''\n return nlargest(\n k,\n [\n (y, score_func(counts, x, y))\n for y in filter_y(counts, x, min_count=min_count,\n filter_to_unigrams=filter_to_unigrams)\n ],\n key=lambda t: t[1],\n )\n\n\ndef top_y_batch(score_func, counts_map, x_list, *args, **kwargs):\n '''\n Given counts_map, a dictionary of identifiers (e.g., filenames)\n to CooccurrenceCounts instances, x_list, a list of hashables,\n and any args to top_y, return a list of triples representing\n the top (x, y) pairs by score in each counter, for each x in x_list.\n\n The score is computed by score_func and can be e.g. pmi\n or g_test_stat. 
args and kwargs are passed through to score_func.\n '''\n return [\n (counts_name, x, top_y(score_func, counts, x, *args, **kwargs))\n for ((counts_name, counts), x)\n in product(counts_map.items(), x_list)\n ]\n\n\ndef tex_format_signif(word, stars):\n r'''\n >>> tex_format_signif('foo', '')\n 'foo'\n >>> tex_format_signif('foo', '*')\n 'foo'\n >>> tex_format_signif('foo', '**')\n 'foo$^\\\\dagger$'\n >>> tex_format_signif('foo', '***')\n 'foo$^\\\\ddagger$'\n >>> tex_format_signif('foo', '****')\n 'foo$^\\\\ddagger$'\n '''\n if len(stars) < 2:\n return word\n elif len(stars) == 2:\n return r'%s$^\\dagger$' % word\n else:\n return r'%s$^\\ddagger$' % word\n\n\ndef write_top_y_tex_batch_yaml(score_func, output_file, counts, queries_path,\n *args, **kwargs):\n '''\n Load top-y queries from the YAML specification in the file at\n queries_path and execute them using counts (an instance of\n CooccurrenceCounts), passing score_func, args, and kwargs to top_y,\n writing results in tex friendly format to output_file.\n '''\n with open(queries_path) as f:\n queries = yaml.load(f)\n\n filter_y_kwargs = dict((k, v) for (k, v) in kwargs.items() if k != 'k')\n x_ngram_y_ngram_pairs = []\n for (query_name, query) in queries.items():\n for x in query['x']:\n x_ngram = parse_ngram(x)\n x_ngram_y_ngram_pairs.extend([\n (x_ngram, y_ngram) for y_ngram in\n filter_y(counts, x_ngram, *args, **filter_y_kwargs)\n ])\n p_values = bonferroni_holm_g_test_p_values(\n counts, x_ngram_y_ngram_pairs)\n\n for (query_name, query) in queries.items():\n output_file.write('\\n %% %s\\n' % query_name)\n for x in query['x']:\n output_file.write(r' \\textbf{%s} &' % x)\n\n x_ngram = parse_ngram(x)\n y_ngrams = [y_ngram for (y_ngram, score) in\n top_y(score_func, counts, x_ngram, *args, **kwargs)]\n for y_ngram in y_ngrams:\n p_value = p_values[(x_ngram, y_ngram)]\n stars = p_value_to_stars(p_value)\n output_file.write(\n ' %s' % tex_format_signif(format_ngram(y_ngram), stars))\n output_file.write(' \\\\\\\\\\n')\n\n\ndef write_top_y_csv_batch_yaml(score_func, output_file, counts,\n queries_path, *args, **kwargs):\n '''\n Load top-y queries from the YAML specification in the file at\n queries_path and execute them using counts (an instance of\n CooccurrenceCounts), passing score_func, args, and kwargs to top_y,\n writing query, x, y, score tuples as CSV to output_file.\n '''\n with open(queries_path) as f:\n queries = yaml.load(f)\n\n filter_y_kwargs = dict((k, v) for (k, v) in kwargs.items() if k != 'k')\n x_ngram_y_ngram_pairs = []\n for (query_name, query) in queries.items():\n for x in query['x']:\n x_ngram = parse_ngram(x)\n x_ngram_y_ngram_pairs.extend([\n (x_ngram, y_ngram) for y_ngram in\n filter_y(counts, x_ngram, *args, **filter_y_kwargs)\n ])\n\n writer = DictWriter(output_file, ('query', 'x', 'y', 'score'))\n writer.writeheader()\n for (query_name, query) in queries.items():\n for x in query['x']:\n x_ngram = parse_ngram(x)\n y_ngram_score_pairs = [\n (y_ngram, score)\n for (y_ngram, score)\n in top_y(score_func, counts, x_ngram, *args, **kwargs)\n if score > 0\n ]\n for (y_ngram, score) in y_ngram_score_pairs:\n writer.writerow(dict(\n query=query_name,\n x=x,\n y=format_ngram(y_ngram),\n score=score))\n\n\ndef bonferroni_holm_g_test_p_values(counts, x_ngram_y_ngram_pairs):\n '''\n Compute Bonferroni-Holm adjusted p-values for the G-test statistics\n for (x_ngram, y_ngram) pairs in x_ngram_y_ngram_pairs (an iterable).\n Return dict of adjusted p-values indexed by (x_ngram, y_ngram).\n '''\n xyp_triples = sorted(\n 
[\n (\n x_ngram,\n y_ngram,\n g_test_p_value(g_test_stat(counts, x_ngram, y_ngram))\n )\n for (x_ngram, y_ngram) in x_ngram_y_ngram_pairs\n ],\n key=lambda p: p[2])\n\n min_alpha = None\n group_p_values = dict()\n num_tests = len(xyp_triples)\n for (test_num, (x_ngram, y_ngram, p_value)) in enumerate(xyp_triples):\n # reject at level alpha if p <= alpha / (m + 1 - i) for all i up\n # to k where m is the number of tests and k is the 1-based index\n alpha = p_value * (num_tests - test_num)\n min_alpha = alpha if min_alpha is None else max(alpha, min_alpha)\n group_p_values[(x_ngram, y_ngram)] = min_alpha\n\n return group_p_values\n\n\ndef p_value_to_stars(p_value, alpha=(0.05, 0.01, 0.001)):\n '''\n Return string containing as many stars as the number of significance\n levels in alpha (a tuple of significance levels, order-independent)\n that p_value is less than or equal to.\n\n >>> p_value_to_stars(0.075)\n ''\n >>> p_value_to_stars(0.05)\n '*'\n >>> p_value_to_stars(0.025)\n '*'\n >>> p_value_to_stars(0.0099)\n '**'\n >>> p_value_to_stars(0.005)\n '**'\n >>> p_value_to_stars(0.0025)\n '**'\n >>> p_value_to_stars(0.00099)\n '***'\n '''\n return len([_alpha for _alpha in alpha if p_value <= _alpha]) * '*'\n\n\ndef write_top_y_batch_yaml(score_func, output_file, counts, queries_path,\n *args, **kwargs):\n '''\n Load top-y queries from the YAML specification in the file at\n queries_path and execute them using counts (an instance of\n CooccurrenceCounts), passing score_func, args, and kwargs to top_y,\n writing results to output_file.\n '''\n with open(queries_path) as f:\n queries = yaml.safe_load(f)\n\n filter_y_kwargs = dict((k, v) for (k, v) in kwargs.items() if k != 'k')\n x_ngram_y_ngram_pairs = []\n for (query_name, query) in queries.items():\n for x in query['x']:\n x_ngram = parse_ngram(x)\n x_ngram_y_ngram_pairs.extend([\n (x_ngram, y_ngram) for y_ngram in\n filter_y(counts, x_ngram, *args, **filter_y_kwargs)\n ])\n p_values = bonferroni_holm_g_test_p_values(\n counts, x_ngram_y_ngram_pairs)\n\n for (query_name, query) in queries.items():\n output_file.write(query_name)\n output_file.write('\\n')\n for x in query['x']:\n x_ngram = parse_ngram(x)\n output_file.write('\\t' + x)\n output_file.write('\\n')\n y_ngrams = [y_ngram for (y_ngram, score) in\n top_y(score_func, counts, x_ngram, *args, **kwargs)]\n for y_ngram in y_ngrams:\n _g = g_test_stat(counts, x_ngram, y_ngram)\n _pmi = pmi(counts, x_ngram, y_ngram)\n p_value = p_values[(x_ngram, y_ngram)]\n stars = p_value_to_stars(p_value)\n output_file.write('\\t\\t%20s\\t%9.2f\\t%9.2f%s\\t%7.2g\\t%d' % (\n format_ngram(y_ngram),\n _pmi,\n _g,\n stars,\n p_value,\n counts.xy[(x_ngram, y_ngram)]))\n output_file.write('\\n')\n\n\ndef write_score_batch_yaml(output_file, counts, queries_path,\n min_count=1):\n '''\n Load score queries from the YAML specification in the file at\n queries_path and execute them using counts (an instance of\n CooccurrenceCounts), writing results to output_file.\n\n The score is computed by score_func and can be e.g. 
pmi\n or g_test_stat.\n '''\n with open(queries_path) as f:\n queries = yaml.load(f)\n\n x_ngram_y_ngram_pairs = []\n for (query_name, query) in queries.items():\n for x in query['x']:\n x_ngram = parse_ngram(x)\n for y in query['y']:\n y_ngram = parse_ngram(y)\n x_ngram_y_ngram_pairs.append((x_ngram, y_ngram))\n p_values = bonferroni_holm_g_test_p_values(\n counts, x_ngram_y_ngram_pairs)\n\n for (query_name, query) in queries.items():\n output_file.write(query_name)\n output_file.write('\\n')\n for x in query['x']:\n x_ngram = parse_ngram(x)\n output_file.write('\\t' + x)\n output_file.write('\\n')\n y_ngrams = [parse_ngram(y) for y in query['y']]\n for y_ngram in y_ngrams:\n _g = g_test_stat(counts, x_ngram, y_ngram, min_count=min_count)\n _pmi = pmi(counts, x_ngram, y_ngram, min_count=min_count)\n p_value = p_values[(x_ngram, y_ngram)]\n stars = p_value_to_stars(p_value)\n output_file.write('\\t\\t%20s\\t%5.2f\\t%9.2f%s\\t%7.2g\\t%d' % (\n format_ngram(y_ngram),\n _pmi,\n _g,\n stars,\n p_value,\n counts.xy[(x_ngram, y_ngram)]))\n output_file.write('\\n')\n\n\ndef write_identity_concept_batch_yaml(output_file, counts,\n queries_path, min_count=1):\n '''\n Load identity/concept tests from the YAML specification in the\n file at queries_path and execute them using counts (an instance of\n CooccurrenceCounts), scoring by score_func, writing results to\n output_file.\n\n The score is computed by score_func and can be e.g. pmi\n or g_test_stat.\n '''\n with open(queries_path) as f:\n queries = yaml.load(f)\n\n x_ngram_y_ngram_pairs = []\n for query in queries['experiments']:\n identity_name = query['identity']\n concept_name = query['concept']\n identity = queries['identities'][identity_name]\n concept = queries['concepts'][concept_name]\n concept_ngrams = [parse_ngram(concept_term)\n for concept_term in concept]\n for (id_group_name, id_group) in identity['groups'].items():\n for key in identity['keys']:\n id_term = id_group[key]\n id_ngram = parse_ngram(id_term)\n for concept_ngram in concept_ngrams:\n x_ngram_y_ngram_pairs.append((id_ngram, concept_ngram))\n p_values = bonferroni_holm_g_test_p_values(\n counts, x_ngram_y_ngram_pairs)\n\n for query in queries['experiments']:\n identity_name = query['identity']\n concept_name = query['concept']\n output_file.write('%s + %s' % (identity_name, concept_name))\n output_file.write('\\n')\n identity = queries['identities'][identity_name]\n concept = queries['concepts'][concept_name]\n concept_ngrams = [parse_ngram(concept_term)\n for concept_term in concept]\n for (id_group_name, id_group) in identity['groups'].items():\n output_file.write('\\t%s' % id_group_name)\n output_file.write('\\n')\n for key in identity['keys']:\n id_term = id_group[key]\n id_ngram = parse_ngram(id_term)\n output_file.write('\\t\\t%s' % id_term)\n output_file.write('\\n')\n for concept_ngram in concept_ngrams:\n _g = g_test_stat(counts, id_ngram, concept_ngram,\n min_count=min_count)\n _pmi = pmi(counts, id_ngram, concept_ngram,\n min_count=min_count)\n p_value = p_values[(id_ngram, concept_ngram)]\n stars = p_value_to_stars(p_value)\n output_file.write('\\t\\t%20s\\t%5.2f\\t%9.2f%s\\t%7.2g\\t%d' % (\n format_ngram(concept_ngram),\n _pmi,\n _g,\n stars,\n p_value,\n counts.xy[(id_ngram, concept_ngram)]))\n output_file.write('\\n')\n\n\npmi_top_y = partial(top_y, pmi)\npmi_top_y_batch = partial(top_y_batch, pmi)\nwrite_pmi_top_y_tex_batch_yaml = partial(write_top_y_tex_batch_yaml, pmi)\nwrite_pmi_top_y_csv_batch_yaml = partial(write_top_y_csv_batch_yaml, 
pmi)\nwrite_pmi_top_y_batch_yaml = partial(write_top_y_batch_yaml, pmi)\nwrite_pmi_score_batch_yaml = partial(write_score_batch_yaml, pmi)\nwrite_pmi_identity_concept_batch_yaml = partial(\n write_identity_concept_batch_yaml, pmi)\n\ng_test_stat_top_y = partial(top_y, g_test_stat)\ng_test_stat_top_y_batch = partial(top_y_batch, g_test_stat)\nwrite_g_test_stat_top_y_tex_batch_yaml = partial(\n write_top_y_tex_batch_yaml, g_test_stat)\nwrite_g_test_stat_top_y_csv_batch_yaml = partial(\n write_top_y_csv_batch_yaml, g_test_stat)\nwrite_g_test_stat_top_y_batch_yaml = partial(\n write_top_y_batch_yaml, g_test_stat)\nwrite_g_test_stat_score_batch_yaml = partial(\n write_score_batch_yaml, g_test_stat)\nwrite_g_test_stat_identity_concept_batch_yaml = partial(\n write_identity_concept_batch_yaml, g_test_stat)\n\n\ndef main():\n from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\n from snli_cooccur import configure_logging\n\n parser = ArgumentParser(\n formatter_class=ArgumentDefaultsHelpFormatter,\n description='run SNLI queries from YAML',\n )\n parser.add_argument('pickle_path', type=str,\n help='path to pickled counts')\n parser.add_argument('queries_type', type=str,\n choices=('score',\n 'top-y', 'top-y-tex', 'top-y-csv',\n 'identity-concept'),\n help='type of queries to run')\n parser.add_argument('queries_path', type=str,\n help='path to query YAML spec')\n parser.add_argument('output_path', type=str,\n help='path to output (- for standard output)')\n parser.add_argument('-k', type=int, default=10,\n help='number of items to print for top-y queries')\n parser.add_argument('--min-count', type=int, default=1,\n help='min count to filter to in top-y queries')\n parser.add_argument('--top-y-score-func',\n type=lambda s: {\n 'pmi': pmi,\n 'g-test-stat': g_test_stat\n }[s],\n default='pmi',\n help='name of score function to sort by '\n '(pmi, g-test-stat)')\n parser.add_argument('--filter-to-unigrams', action='store_true',\n help='only output unigrams (filter out other results)')\n\n args = parser.parse_args()\n configure_logging()\n\n if args.output_path == '-':\n @contextmanager\n def _open_output_file():\n yield sys.stdout\n else:\n def _open_output_file():\n mkdirp_parent(args.output_path)\n return open(args.output_path, 'w')\n\n with _open_output_file() as output_file:\n logging.info('loading counts (%s)' % resource_usage_str())\n\n with open(args.pickle_path, 'rb') as f:\n counts = pickle.load(f)\n\n logging.info('counts loaded (%s)' % resource_usage_str())\n\n if args.queries_type == 'top-y':\n logging.info('running top-y queries')\n write_top_y_batch_yaml(\n args.top_y_score_func,\n output_file,\n counts, args.queries_path,\n k=args.k, min_count=args.min_count,\n filter_to_unigrams=args.filter_to_unigrams)\n\n elif args.queries_type == 'top-y-tex':\n logging.info('running top-y queries (tex output)')\n write_top_y_tex_batch_yaml(\n args.top_y_score_func,\n output_file,\n counts, args.queries_path,\n k=args.k, min_count=args.min_count,\n filter_to_unigrams=args.filter_to_unigrams)\n\n elif args.queries_type == 'top-y-csv':\n logging.info('running top-y queries (csv output)')\n write_top_y_csv_batch_yaml(\n args.top_y_score_func,\n output_file,\n counts, args.queries_path,\n k=args.k, min_count=args.min_count,\n filter_to_unigrams=args.filter_to_unigrams)\n\n elif args.queries_type == 'score':\n logging.info('running score queries')\n write_score_batch_yaml(\n output_file,\n counts, args.queries_path,\n min_count=args.min_count)\n\n elif args.queries_type == 
'identity-concept':\n logging.info('running identity-concept queries')\n write_identity_concept_batch_yaml(\n output_file,\n counts,\n args.queries_path,\n min_count=args.min_count)\n\n else:\n raise ValueError('unknown query type %s' % args.queries_type)\n\n logging.info('done')\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.log", "numpy.array", "numpy.sum", "scipy.stats.chi2.sf" ] ]
interactivereport/cellxgene_VIP
[ "084ad2d1bc576801bf75b47d5ee59fcc121cd256" ]
[ "VIPInterface.py" ]
[ "import requests\nimport json\nimport traceback\nimport sqlite3\nimport server.app.decode_fbs as decode_fbs\nimport scanpy as sc\nimport anndata as ad\nimport pandas as pd\nimport numpy as np\nimport diffxpy.api as de\nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nimport matplotlib.patches as mpatches\nfrom matplotlib import rcParams\nimport plotly.graph_objects as go\nimport plotly.io as plotIO\nimport base64\nimport math\nfrom io import BytesIO\nimport sys\nimport time\nimport os\nimport re\nimport glob\nimport subprocess\nstrExePath = os.path.dirname(os.path.abspath(__file__))\n\nimport pprint\nppr = pprint.PrettyPrinter(depth=6)\n\nimport server.compute.diffexp_generic as diffDefault\nimport pickle\nfrom pyarrow import feather\n\nsys.setrecursionlimit(10000)\nsc.settings.verbosity = 2\nrcParams.update({'figure.autolayout': True})\n\napi_version = \"/api/v0.2\"\n\nimport threading\njobLock = threading.Lock()\ndef getLock(lock):\n while not lock.acquire():\n time.sleep(1.0)\ndef freeLock(lock):\n lock.release()\n\ndef route(data,appConfig):\n #ppr.pprint(\"current working dir:%s\"%os.getcwd())\n data = initialization(data,appConfig)\n #ppr.pprint(data)\n try:\n getLock(jobLock)\n taskRes = distributeTask(data[\"method\"])(data)\n freeLock(jobLock)\n return taskRes\n except Exception as e:\n freeLock(jobLock)\n return 'ERROR @server: '+traceback.format_exc() # 'ERROR @server: {}, {}'.format(type(e),str(e))\n #return distributeTask(data[\"method\"])(data)\n\nimport server.app.app as app\n\ndef initialization(data,appConfig):\n # obtain the server host information\n data = json.loads(str(data,encoding='utf-8'))\n\n # update the environment information\n data.update(VIPenv)\n\n # updatting the hosting data information\n if appConfig.is_multi_dataset():\n data[\"url_dataroot\"]=appConfig.server_config.multi_dataset__dataroot['d']['base_url']\n data['h5ad']=os.path.join(appConfig.server_config.multi_dataset__dataroot['d']['dataroot'], data[\"dataset\"])\n else:\n data[\"url_dataroot\"]=None\n data[\"dataset\"]=None\n data['h5ad']=appConfig.server_config.single_dataset__datapath\n\n # setting the plotting options\n if 'figOpt' in data.keys():\n setFigureOpt(data['figOpt'])\n\n # get the var (gene) and obv index\n with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:\n data['obs_index'] = scD.get_schema()[\"annotations\"][\"obs\"][\"index\"]\n data['var_index'] = scD.get_schema()[\"annotations\"][\"var\"][\"index\"]\n return data\n\ndef setFigureOpt(opt):\n sc.set_figure_params(dpi_save=int(opt['dpi']),fontsize= float(opt['fontsize']),vector_friendly=(opt['vectorFriendly'] == 'Yes'),transparent=(opt['transparent'] == 'Yes'),color_map=opt['colorMap'])\n rcParams.update({'savefig.format':opt['img']})\n\ndef getObs(data):\n selC = list(data['cells'].values())\n cNames = [\"cell%d\" %i for i in selC]\n ## obtain the category annotation\n with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:\n selAnno = [data['obs_index']]+data['grp']\n dAnno = list(scD.get_obs_keys())\n anno = []\n sel = list(set(selAnno)&set(dAnno))\n if len(sel)>0:\n tmp = scD.data.obs.loc[selC,sel].astype('str')\n tmp.index = cNames\n anno += [tmp]\n sel = list(set(selAnno)-set(dAnno))\n if len(sel)>0:\n annotations = scD.dataset_config.user_annotations\n if annotations:\n labels = annotations.read_labels(scD)\n tmp = labels.loc[list(scD.data.obs.loc[selC,data['obs_index']]),sel]\n tmp.index = 
cNames\n anno += [tmp]\n obs = pd.concat(anno,axis=1)\n #ppr.pprint(obs)\n ## update the annotation Abbreviation\n combUpdate = cleanAbbr(data)\n if 'abb' in data.keys():\n for i in data['grp']:\n obs[i] = obs[i].map(data['abb'][i])\n return combUpdate, obs\n\ndef getObsNum(data):\n selC = list(data['cells'].values())\n cNames = [\"cell%d\" %i for i in selC]\n ## obtain the category annotation\n obs = pd.DataFrame()\n with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:\n selAnno = data['grpNum']\n dAnno = list(scD.get_obs_keys())\n sel = list(set(selAnno)&set(dAnno))\n if len(sel)>0:\n obs = scD.data.obs.loc[selC,sel]\n obs.index = cNames\n return obs\n\ndef getVar(data):\n ## obtain the gene annotation\n with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:\n gInfo = scD.data.var\n gInfo.index = list(gInfo[data['var_index']])\n gInfo = gInfo.drop([data['var_index']],axis=1)\n return gInfo\n\ndef collapseGeneSet(data,expr,gNames,cNames,fSparse):\n Y = expr\n if 'geneGrpColl' in data.keys() and not data['geneGrpColl']=='No' and 'geneGrp' in data.keys() and len(data['geneGrp'])>0:\n data['grpLoc'] = []\n data['grpID'] = []\n if fSparse:\n Y = pd.DataFrame.sparse.from_spmatrix(Y,columns=gNames,index=cNames)\n for aN in data['geneGrp'].keys():\n if data['geneGrpColl']=='mean':\n Y = pd.concat([Y,Y[data['geneGrp'][aN]].mean(axis=1).rename(aN)],axis=1,sort=False)\n if data['geneGrpColl']=='median':\n Y = pd.concat([Y,Y[data['geneGrp'][aN]].median(axis=1).rename(aN)],axis=1,sort=False)\n for gene in data['geneGrp'][aN]:\n if gene in data['genes']:\n data['genes'].remove(gene)\n data['genes'] += [aN]\n gNames = list(Y.columns)\n return Y,gNames\n\ndef createData(data):\n selC = list(data['cells'].values())\n cNames = [\"cell%d\" %i for i in selC]\n\n ## onbtain the expression matrix\n gNames = []\n expr = []\n fSparse = False\n X = []\n if 'genes' in data.keys():\n with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:\n if not type(scD.data.X) is np.ndarray:\n fSparse = True\n if len(data['genes'])>0:\n fullG = list(scD.data.var[data['var_index']])\n selG = sorted([fullG.index(i) for i in data['genes']]) #when data loaded backed, incremental is required\n X = scD.data.X[:,selG]\n gNames = [fullG[i] for i in selG] #data['genes']\n else:\n X = scD.data.X\n gNames = list(scD.data.var[data['var_index']])\n if 'figOpt' in data.keys() and data['figOpt']['scale'] == 'Yes':\n X = sc.pp.scale(X,zero_center=(data['figOpt']['scaleZero'] == 'Yes'),max_value=(float(data['figOpt']['scaleMax']) if data['figOpt']['clipValue']=='Yes' else None))\n X = X[selC]\n if fSparse:\n expr = X\n else:\n expr = pd.DataFrame(X,columns=gNames,index=cNames)\n\n expr,gNames = collapseGeneSet(data,expr,gNames,cNames,fSparse)\n #ppr.pprint(\"finished expression ...\")\n ## obtain the embedding\n embed = {}\n if 'layout' in data.keys():\n layout = data['layout']\n if isinstance(layout,str):\n layout = [layout]\n if len(layout)>0:\n for one in layout:\n with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:\n embed['X_%s'%one] = pd.DataFrame(scD.data.obsm['X_%s'%one][selC][:,[0,1]],columns=['%s1'%one,'%s2'%one],index=cNames)\n #ppr.pprint(\"finished layout ...\")\n ## obtain the category annotation\n combUpdate, obs = getObs(data)\n\n ## create a custom annotation category and remove cells which are not in the selected annotation\n if combUpdate and len(data['grp'])>1:\n newGrp 
= 'Custom_combine'\n combineGrp = list(data['combine'].keys());\n obs[newGrp] = obs[combineGrp[0]]\n for i in combineGrp:\n if not i==combineGrp[0]:\n obs[newGrp] += \":\"+obs[i]\n selC = ~obs[newGrp].str.contains(\"Other\").to_numpy()\n expr = expr[selC]\n for i in embed.keys():\n embed[i] = embed[i][selC]\n obs = obs[selC].astype('category')\n obs[newGrp].cat.set_categories(data['combineOrder'],inplace=True)\n data['grp'] = [newGrp]\n\n obs = obs.astype('category')\n ## empty selection\n if expr.shape[0]==0 or expr.shape[1]==0:\n return []\n #ppr.pprint(\"finished obv ...\")\n\n return sc.AnnData(expr,obs,var=pd.DataFrame([],index=gNames),obsm={layout:embed[layout].to_numpy() for layout in embed.keys()})\n\ndef cleanAbbr(data):\n updated = False\n if 'abb' in data.keys() and 'combine' in data.keys():\n if len(data['combine'])>0:\n updated = True\n for cate in data['abb'].keys():\n if cate in data['combine'].keys():\n for anName in data['abb'][cate].keys():\n if not anName in data['combine'][cate]:\n data['abb'][cate][anName] = \"Other\";\n else:\n if not data['abb'][cate][anName]==anName:\n data['combineOrder'] = [one.replace(anName,data['abb'][cate][anName]) for one in data['combineOrder']]\n else:\n data['abb'][cate] = {key:\"Other\" for key in data['abb'][cate].keys()}\n return updated\n\ndef errorTask(data):\n raise ValueError('Error task!')\n\ndef distributeTask(aTask):\n return {\n 'SGV':SGV,\n 'SGVcompare':SGVcompare,\n 'PGV':PGV,\n 'VIOdata':VIOdata,\n 'HEATplot':pHeatmap,\n 'HEATdata':HeatData,\n 'GD':GD,\n 'DEG':DEG,\n 'DOT':DOT,\n 'EMBED':EMBED,\n 'TRAK':TRACK,\n 'DUAL':DUAL,\n 'MARK': MARK,\n 'MINX':MINX,\n 'DENS':DENS,\n 'DENS2D':DENS2D,\n 'SANK':SANK,\n 'STACBAR':STACBAR,\n 'HELLO':HELLO,\n 'CLI':CLI,\n 'preDEGname':getPreDEGname,\n 'preDEGvolcano':getPreDEGvolcano,\n 'preDEGmulti':getPreDEGbubble,\n 'mergeMeta': mergeMeta,\n 'isMeta': isMeta,\n 'testVIPready':testVIPready,\n 'Description':getDesp,\n 'GSEAgs':getGSEA,\n\t'SPATIAL':SPATIAL,\n 'saveTest':saveTest,\n 'getBWinfo':getBWinfo,\n 'plotBW':plotBW\n }.get(aTask,errorTask)\n\ndef HELLO(data):\n return 'Hi, connected.'\n\ndef iostreamFig(fig):\n #getLock(iosLock)\n figD = BytesIO()\n #ppr.pprint('io located at %d'%int(str(figD).split(\" \")[3].replace(\">\",\"\"),0))\n fig.savefig(figD,bbox_inches=\"tight\")\n #ppr.pprint(sys.getsizeof(figD))\n #ppr.pprint('io located at %d'%int(str(figD).split(\" \")[3].replace(\">\",\"\"),0))\n imgD = base64.encodebytes(figD.getvalue()).decode(\"utf-8\")\n figD.close()\n #ppr.pprint(\"saved Fig\")\n #freeLock(iosLock)\n if 'matplotlib' in str(type(fig)):\n plt.close(fig)#'all'\n return imgD\n\ndef Msg(msg):\n fig = plt.figure(figsize=(5,2))\n plt.text(0,0.5,msg)\n ax = plt.gca()\n ax.axis('off')\n return iostreamFig(fig)\n\ndef SPATIAL(data):\n with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:\n #ppr.pprint(vars(scD.data.uns[\"spatial\"]))\n spatial=scD.data.uns[\"spatial\"]\n if (data['embedding'] == \"get_spatial_list\"):\n return json.dumps({'list':list(spatial)})\n library_id=list(spatial)[0]\n if (data['embedding'] in list(spatial)):\n library_id=data['embedding']\n\n height, width, depth = spatial[library_id][\"images\"][data['resolution']].shape\n\n embedding = 'X_'+data['embedding']\n spatialxy = scD.data.obsm[embedding]\n tissue_scalef = spatial[library_id]['scalefactors']['tissue_' + data['resolution'] + '_scalef']\n i = data['spots']['spoti_i']\n x = 0\n y = 1\n # from original embedding to (0,1) coordinate system (cellxgene 
embedding)\n scalex = (data['spots']['spot0_x'] - data['spots']['spoti_x']) / (spatialxy[0][x] - spatialxy[i][x])\n scaley = (data['spots']['spot0_y'] - data['spots']['spoti_y']) / (spatialxy[0][y] - spatialxy[i][y])\n\n # image is in (-1,0,1) coordinate system, so multiplied by 2\n translatex = (spatialxy[i][x]*scalex - data['spots']['spoti_x']) * 2\n translatey = (spatialxy[i][y]*scaley - data['spots']['spoti_y']) * 2\n scale = 1/tissue_scalef * scalex * 2\n # Addtional translate in Y due to flipping of the image if needed\n ppr.pprint(scalex)\n ppr.pprint(scaley)\n ppr.pprint(translatex)\n ppr.pprint(translatey)\n\n # from (-1,0,1) (image layer) to (0,1) coordinate system (cellxgene embedding). Overlapping (0,0) origins of both.\n translatex = -(1+translatex)\n if (translatey > -0.1):\n flip = True\n translatey = -(1+translatey) + height*scale\n else:\n flip = False\n translatey = -(1+translatey)\n\n returnD = [{'translatex':translatex,'translatey':translatey,'scale':scale}]\n\n dpi=100\n figsize = width / float(dpi), height / float(dpi)\n fig = plt.figure(figsize=figsize)\n ax = fig.add_axes([0, 0, 1, 1])\n ax.axis('off')\n if (flip):\n ax.imshow(np.flipud(spatial[library_id][\"images\"][data['resolution']]), interpolation='nearest')\n else:\n ax.imshow(spatial[library_id][\"images\"][data['resolution']], interpolation='nearest')\n\n figD = BytesIO()\n plt.savefig(figD, dpi=dpi)\n ppr.pprint(sys.getsizeof(figD))\n imgD = base64.encodebytes(figD.getvalue()).decode(\"utf-8\")\n figD.close()\n plt.close(fig)\n return json.dumps([returnD, imgD])\n\ndef MINX(data):\n with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:\n minV = min(scD.data.X[0])\n return '%.1f'%minV\n\ndef geneFiltering(adata,cutoff,opt):\n ## 1. remove cells if the max expression of all genes is lower than the cutoff\n if opt==1:\n #sT = time.time()\n #ix = adata.to_df().apply(lambda x: max(x)>float(cutoff),axis=1)\n #ppr.pprint(time.time()-sT)\n #sT=time.time()\n df = adata.to_df()\n ix = df[df>float(cutoff)].count(axis=1)>0\n #ppr.pprint(time.time()-sT)\n #sT = time.time()\n #ix = pd.DataFrame((adata.X>float(cutoff)).sum(1)>0,index=list(adata.obs.index)).iloc[:,0]\n #ppr.pprint(time.time()-sT)\n\n adata = adata[ix,]\n ## 2. 
Set all expression level smaller than the cutoff to be NaN not for plotting without removing any cells\n elif opt==2:\n def cutoff(x):\n return x if x>float(cutoff) else None\n X = adata.to_df()\n X=X.applymap(cutoff)\n adata = sc.AnnData(X,adata.obs)\n return adata\n\ndef SGV(data):\n # figure width and heights depends on number of unique categories\n # characters of category names, gene number\n #ppr.pprint(\"SGV: creating data ...\")\n adata = createData(data)\n #ppr.pprint(\"SGV: data created ...\")\n adata = geneFiltering(adata,data['cutoff'],1)\n if len(adata)==0:\n raise ValueError('No cells in the condition!')\n a = list(set(list(adata.obs[data['grp'][0]])))\n ncharA = max([len(x) for x in a])\n w = len(a)/4+1\n h = ncharA/6+2.5\n ro = math.acos(10/max([15,ncharA]))/math.pi*180\n ##\n fig = plt.figure(figsize=[w,h])\n sc.pl.violin(adata,data['genes'],groupby=data['grp'][0],ax=fig.gca(),show=False)\n fig.autofmt_xdate(bottom=0.2,rotation=ro,ha='right')\n return iostreamFig(fig)\n\ndef SGVcompare(data):\n adata = createData(data)\n #adata = geneFiltering(adata,data['cutoff'],1)\n if len(adata)==0:\n raise ValueError('No cells in the condition!')\n\n # plot in R\n strF = ('%s/SGV%f.csv' % (data[\"CLItmp\"],time.time()))\n pd.concat([adata.to_df(),adata.obs[data['grp']]],axis=1,sort=False).to_csv(strF,index=False)\n strCMD = \" \".join([\"%s/Rscript\"%data['Rpath'],strExePath+'/violin.R',strF,str(data['cutoff']),data['figOpt']['img'],str(data['figOpt']['fontsize']),str(data['figOpt']['dpi']),data['Rlib']])\n #ppr.pprint(strCMD)\n res = subprocess.run([strExePath+'/violin.R',strF,str(data['cutoff']),data['figOpt']['img'],str(data['figOpt']['fontsize']),str(data['figOpt']['dpi']),data['Rlib']],capture_output=True)#\n img = res.stdout.decode('utf-8')\n os.remove(strF)\n if 'Error' in res.stderr.decode('utf-8'):\n raise SyntaxError(\"in R: \"+res.stderr.decode('utf-8'))\n\n return img\n\ndef VIOdata(data):\n adata = createData(data)\n adata = geneFiltering(adata,data['cutoff'],1)\n if len(adata)==0:\n raise ValueError('No cells in the condition!')\n return pd.concat([adata.to_df(),adata.obs], axis=1, sort=False).to_csv()\n\ndef unique(seq):\n seen = set()\n seen_add = seen.add\n return [x for x in seq if not (x in seen or seen_add(x))]\ndef updateGene(data):\n grpID = []\n grpLoc=[]\n allG = []\n if 'geneGrp' in data.keys():\n for aN in data['geneGrp'].keys():\n grpLoc += [(len(allG),len(allG)+len(data['geneGrp'][aN])-1)]\n allG += data['geneGrp'][aN]\n grpID += [aN]\n\n data['genes'] = unique(allG+data['genes'])\n data['grpLoc'] = grpLoc\n data['grpID'] = grpID\n\ndef PGV(data):\n # figure width and heights depends on number of unique categories\n # characters of category names, gene number #pecam1 pdpn\n updateGene(data)\n #ppr.pprint(\"PGV: creating data ...\")\n adata = createData(data)\n #ppr.pprint(\"PGV: data created ...\")\n adata = geneFiltering(adata,data['cutoff'],1)\n if adata.shape[0]==0 or adata.shape[1]==0:\n return Msg('No cells in the condition!')\n a = list(set(list(adata.obs[data['grp'][0]])))\n ncharA = max([len(x) for x in a])\n w = max([3,ncharA/8])+len(data['genes'])/2+1.5\n h = len(a)+0.5\n swapAx = False\n ##\n if data['by']=='Columns':\n a = w\n w = h\n h = a\n swapAx = True\n if 'split_show' in data['figOpt']['scanpybranch']: #.dev140+ge9cbc5f\n vp = sc.pl.stacked_violin(adata,data['genes'],groupby=data['grp'][0],return_fig=True,figsize=(w,h),swap_axes=swapAx,var_group_positions=data['grpLoc'],var_group_labels=data['grpID'])\n 
vp.add_totals().style(yticklabels=True, cmap=data['color']).show()\n #vp.add_totals().show()\n fig = vp#plt.gcf()\n else:\n fig = plt.figure(figsize=[w,h])\n axes = sc.pl.stacked_violin(adata,data['genes'],groupby=data['grp'][0],show=False,ax=fig.gca(),swap_axes=swapAx,\n var_group_positions=data['grpLoc'],var_group_labels=data['grpID'])\n return iostreamFig(fig)\n\ndef pHeatmap(data):\n # figure width is depends on the number of categories was choose to show\n # and the character length of each category term\n # if the number of element in a category is smaller than 10, \"Set1\" or \"Set3\" is choosen\n # if the number of element in a category is between 10 and 20, default is choosen\n # if the number of element in a category is larger than 20, husl is choosen\n #Xsep = createData(data,True)\n #adata = sc.AnnData(Xsep['expr'],Xsep['obs'])\n #sT = time.time()\n\n adata = createData(data)\n data['grp'] += data['addGrp']\n #Xdata = pd.concat([adata.to_df(),adata.obs], axis=1, sort=False).to_csv()\n #ppr.pprint('HEAT data reading cost %f seconds' % (time.time()-sT) )\n #sT = time.time()\n exprOrder = True\n if data['order']!=\"Expression\":\n exprOrder = False;\n adata = adata[adata.obs.sort_values(data['order']).index,]\n #s = adata.obs[data['order']]\n #ix = sorted(range(len(s)), key=lambda k: s[k])\n #adata = adata[ix,]\n colCounter = 0\n colName =['Set1','Set3']\n grpCol = list()\n grpLegend = list()\n grpWd = list()\n grpLen = list()\n h = 8\n w = len(data['genes'])/3+0.3\n for gID in data['grp']:\n grp = adata.obs[gID]\n Ugrp = grp.unique()\n if len(Ugrp)<10:\n lut = dict(zip(Ugrp,sns.color_palette(colName[colCounter%2],len(Ugrp)).as_hex()))\n colCounter += 1\n elif len(Ugrp)<20:\n lut = dict(zip(Ugrp,sns.color_palette(n_colors=len(Ugrp)).as_hex()))\n else:\n lut = dict(zip(Ugrp,sns.color_palette(\"husl\",len(Ugrp)).as_hex()))\n grpCol.append(grp.map(lut))\n grpLegend.append([mpatches.Patch(color=v,label=k) for k,v in lut.items()])\n grpWd.append(max([len(x) for x in Ugrp]))#0.02*fW*max([len(x) for x in Ugrp])\n grpLen.append(len(Ugrp)+2)\n\n w += 2\n Zscore=None\n heatCol=data['color']\n heatCenter=None\n colTitle=\"Expression\"\n if data['norm']=='zscore':\n Zscore=1\n #heatCol=\"vlag\"\n heatCenter=0\n colTitle=\"Z-score\"\n #ppr.pprint('HEAT data preparing cost %f seconds' % (time.time()-sT) )\n #sT = time.time()\n\n try:\n g = sns.clustermap(adata.to_df(),\n method=\"ward\",row_cluster=exprOrder,z_score=Zscore,cmap=heatCol,center=heatCenter,\n row_colors=pd.concat(grpCol,axis=1).astype('str'),yticklabels=False,xticklabels=True,\n figsize=(w,h),colors_ratio=0.05,\n cbar_pos=(.3, .95, .55, .02),\n cbar_kws={\"orientation\": \"horizontal\",\"label\": colTitle,\"shrink\": 0.5})\n except Exception as e:\n return 'ERROR: Z score calculation failed for 0 standard diviation. 
'+traceback.format_exc() # 'ERROR @server: {}, {}'.format(type(e),str(e))\n\n\n #ppr.pprint('HEAT plotting cost %f seconds' % (time.time()-sT) )\n #sT = time.time()\n g.ax_col_dendrogram.set_visible(False)\n #g.ax_row_dendrogram.set_visible(False)\n plt.setp(g.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)\n grpW = [1.02]\n grpH = [1.2]\n cumulaN = 0\n cumulaMax = 0\n characterW=1/40 # a character is 1/40 of heatmap width\n characterH=1/40 # a character is 1/40 of heatmap height\n for i in sorted(range(len(grpLen)),key=lambda k:grpLen[k]):#range(5):#\n cumulaN += grpLen[i]\n if cumulaN>(10+1/characterH):\n grpW.append(grpW[-1]+cumulaMax)\n grpH = [1.2]\n cumulaN =0\n cumulaMax=0\n leg = g.ax_heatmap.legend(handles=grpLegend[i],frameon=True,title=data['grp'][i],loc=\"upper left\",\n bbox_to_anchor=(grpW[-1],grpH[-1]),fontsize=5)#grpW[i],0.5,0.3\n #leg = g.ax_heatmap.legend(handles=grpLegend[0],frameon=True,title=data['grp'][0],loc=\"upper left\",\n # bbox_to_anchor=(1.02,1-i*0.25),fontsize=5)#grpW[i],0.5,0.\n cumulaMax = max([cumulaMax,grpWd[i]*characterW])\n grpH.append(grpH[-1]-grpLen[i]*characterH)\n\n leg.get_title().set_fontsize(6)#min(grpSize)+2\n g.ax_heatmap.add_artist(leg)\n #ppr.pprint('HEAT post plotting cost %f seconds' % (time.time()-sT) )\n return iostreamFig(g)#json.dumps([iostreamFig(g),Xdata])#)#\n\ndef HeatData(data):\n adata = createData(data)\n Xdata = pd.concat([adata.to_df(),adata.obs], axis=1, sort=False).to_csv()\n return Xdata\n\ndef GD(data):\n adata = None;\n for one in data['cells'].keys():\n #sT = time.time()\n oneD = data.copy()\n oneD.update({'cells':data['cells'][one],\n 'genes':[],\n 'grp':[]})\n D = createData(oneD)\n #ppr.pprint(\"one grp aquire data cost %f seconds\" % (time.time()-sT))\n D.obs['cellGrp'] = one\n if adata is None:\n adata = D\n else:\n #sT =time.time()\n adata = adata.concatenate(D)\n #ppr.pprint(\"Concatenate data cost %f seconds\" % (time.time()-sT))\n if adata is None:\n return Msg(\"No cells were satisfied the condition!\")\n\n ##\n adata.obs.astype('category')\n cutOff = 'geneN_cutoff'+data['cutoff']\n #sT = time.time()\n #adata.obs[cutOff] = adata.to_df().apply(lambda x: sum(x>float(data['cutoff'])),axis=1)\n #ppr.pprint(time.time()-sT)\n #sT = time.time()\n #df = adata.to_df()\n #adata.obs[cutOff] = df[df>float(data['cutoff'])].count(axis=1)\n #ppr.pprint(time.time()-sT)\n sT = time.time()\n adata.obs[cutOff] = (adata.X >float(data['cutoff'])).sum(1)\n ppr.pprint(time.time()-sT)\n ##\n w = 3\n if len(data['cells'])>1:\n w += 3\n fig = plt.figure(figsize=[w,4])\n sc.pl.violin(adata,cutOff,groupby='cellGrp',ax=fig.gca(),show=False,rotation=0,size=2)\n return iostreamFig(fig)\n\ndef getGSEA(data):\n strGSEA = '%s/gsea/'%strExePath\n return json.dumps(sorted([os.path.basename(i).replace(\".symbols.gmt\",\"\") for i in glob.glob(strGSEA+\"*.symbols.gmt\")]))\n\ndef DEG(data):\n adata = None;\n genes = data['genes']\n data['genes'] = []\n comGrp = 'cellGrp'\n if 'combine' in data.keys():\n if data['DEmethod']=='default':\n combUpdate, obs = getObs(data)\n if combUpdate and len(data['grp'])>1:\n obs[comGrp] = obs[data['grp'][0]]\n for i in data['grp']:\n if i!=data['grp'][0]:\n obs[comGrp] += \":\"+obs[i]\n mask = [obs[comGrp].isin([data['comGrp'][i]]) for i in [0,1]]\n else:\n data['figOpt']['scale'] = 'No'\n adata = createData(data)\n comGrp = data['grp'][0]\n adata = adata[adata.obs[comGrp].isin(data['comGrp'])]\n else:\n mask = [pd.Series(range(data['cellN'])).isin(data['cells'][one].values()) for one in data['comGrp']]\n for 
one in data['comGrp']:\n oneD = data.copy()\n oneD['cells'] = data['cells'][one]\n oneD['genes'] = []\n oneD['grp'] = []\n oneD['figOpt']['scale']='No'\n #oneD = {'cells':data['cells'][one],\n # 'genes':[],\n # 'grp':[],\n # 'figOpt':{'scale':'No'},\n # 'url':data['url']}\n\n D = createData(oneD)\n D.obs[comGrp] = one\n if adata is None:\n adata = D\n else:\n adata = adata.concatenate(D)\n\n if data['DEmethod']=='default':\n if sum(mask[0]==True)<10 or sum(mask[1]==True)<10:\n raise ValueError('Less than 10 cells in a group!')\n with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:\n res = diffDefault.diffexp_ttest(scD,mask[0].to_numpy(),mask[1].to_numpy(),scD.data.shape[1])# shape[cells as rows, genes as columns]\n gNames = list(scD.data.var[data['var_index']])\n deg = pd.DataFrame(res,columns=['gID','log2fc','pval','qval'])\n gName = pd.Series([gNames[i] for i in deg['gID']],name='gene')\n deg = pd.concat([deg,gName],axis=1).loc[:,['gene','log2fc','pval','qval']]\n else:\n if not 'AnnData' in str(type(adata)):\n raise ValueError('No data extracted by user selection')\n adata.obs.astype('category')\n nm = None\n if data['DEmethod']=='wald':\n nm = 'nb'\n if data['DEmethod']=='wald':\n res = de.test.wald(adata,formula_loc=\"~1+\"+comGrp,factor_loc_totest=comGrp)\n elif data['DEmethod']=='t-test':\n res = de.test.t_test(adata,grouping=comGrp)\n elif data['DEmethod']=='rank':\n res = de.test.rank_test(adata,grouping=comGrp)\n else:\n raise ValueError('Unknown DE methods:'+data['DEmethod'])\n #res = de.test.two_sample(adata,comGrp,test=data['DEmethod'],noise_model=nm)\n deg = res.summary()\n deg = deg.sort_values(by=['qval']).loc[:,['gene','log2fc','pval','qval']]\n deg['log2fc'] = -1 * deg['log2fc']\n ## plot in R\n #strF = ('/tmp/DEG%f.csv' % time.time())\n strF = ('%s/DEG%f.csv' % (data[\"CLItmp\"],time.time()))\n deg.to_csv(strF,index=False)\n #ppr.pprint([strExePath+'/volcano.R',strF,'\"%s\"'%';'.join(genes),data['figOpt']['img'],str(data['figOpt']['fontsize']),str(data['figOpt']['dpi']),str(data['logFC']),data['comGrp'][1],data['comGrp'][0]])\n res = subprocess.run([strExePath+'/volcano.R',strF,';'.join(genes),data['figOpt']['img'],str(data['figOpt']['fontsize']),str(data['figOpt']['dpi']),str(data['logFC']),data['comGrp'][1],data['comGrp'][0],str(data['sigFDR']),str(data['sigFC']),data['Rlib']],capture_output=True)#\n if 'Error' in res.stderr.decode('utf-8'):\n raise SyntaxError(\"in volcano.R: \"+res.stderr.decode('utf-8'))\n img = res.stdout.decode('utf-8')\n\n # GSEA\n GSEAimg=\"\"\n GSEAtable=pd.DataFrame()\n if data['gsea']['enable']:\n res = subprocess.run([strExePath+'/fgsea.R',\n strF,\n '%s/gsea/%s.symbols.gmt'%(strExePath,data['gsea']['gs']),\n str(data['gsea']['gsMin']),\n str(data['gsea']['gsMax']),\n str(data['gsea']['padj']),\n data['gsea']['up'],\n data['gsea']['dn'],\n str(data['gsea']['collapse']),\n data['figOpt']['img'],\n str(data['figOpt']['fontsize']),\n str(data['figOpt']['dpi']),\n data['Rlib']],capture_output=True)#\n if 'Error' in res.stderr.decode('utf-8'):\n raise SyntaxError(\"in fgsea.R: \"+res.stderr.decode('utf-8'))\n GSEAimg = res.stdout.decode('utf-8')\n GSEAtable = pd.read_csv(strF)\n GSEAtable['leadingEdge'] = GSEAtable['leadingEdge'].apply(lambda x:'|'.join(x.split('|')[:10]))\n\n os.remove(strF)\n #####\n gInfo = getVar(data)\n deg.index = deg['gene']\n deg = pd.concat([deg,gInfo],axis=1,sort=False)\n #return deg.to_csv()\n\n if not data['topN']=='All':\n deg = deg.iloc[range(int(data['topN'])),]\n 
#deg.loc[:,'log2fc'] = deg.loc[:,'log2fc'].apply(lambda x: '%.2f'%x)\n #deg.loc[:,'pval'] = deg.loc[:,'pval'].apply(lambda x: '%.4E'%x)\n #deg.loc[:,'qval'] = deg.loc[:,'qval'].apply(lambda x: '%.4E'%x)\n #ppr.pprint(GSEAtable)\n #ppr.pprint(GSEAtable.sort_values('pval'))\n return json.dumps([deg.to_csv(index=False),img,GSEAtable.to_csv(index=False),GSEAimg])#json.dumps([deg.values.tolist(),img])\n\ndef DOT(data):\n #ppr.pprint(\"DOT, starting ...\")\n updateGene(data)\n # Dot plot, The dotplot visualization provides a compact way of showing per group, the fraction of cells expressing a gene (dot size) and the mean expression of the gene in those cell (color scale). The use of the dotplot is only meaningful when the counts matrix contains zeros representing no gene counts. dotplot visualization does not work for scaled or corrected matrices in which zero counts had been replaced by other values, see http://scanpy-tutorials.readthedocs.io/en/multiomics/visualizing-marker-genes.html\n data['figOpt']['scale'] = 'No';\n #ppr.pprint(\"DOT: creating data ...\")\n adata = createData(data)\n #ppr.pprint(\"DOT: data created!\")\n if len(adata)==0:\n return Msg('No cells in the condition!')\n #return adata\n grp = adata.obs[data['grp'][0]].unique()\n if len(grp)<10:\n col = np.array(sns.color_palette('Set1',len(grp)).as_hex())\n elif len(grp)<20:\n col = np.array(sns.color_palette(n_colors=len(grp)).as_hex())\n else:\n col = np.array(sns.color_palette(\"husl\",len(grp)).as_hex())\n adata.uns[data['grp'][0]+'_colors'] = col\n #ppr.pprint(sc.__version__)\n if 'split_show' in data['figOpt']['scanpybranch']:#.dev140+ge9cbc5f\n dp = sc.pl.dotplot(adata,data['genes'],groupby=data['grp'][0],expression_cutoff=float(data['cutoff']),mean_only_expressed=(data['mean_only_expressed'] == 'Yes'),\n var_group_positions=data['grpLoc'],var_group_labels=data['grpID'],\n return_fig=True)#\n dp = dp.add_totals(size=1.2).legend(show_size_legend=True,width=float(data['legendW'])).style(cmap=data['color'], dot_edge_color='black', dot_edge_lw=1, size_exponent=1.5)\n dp.show()\n fig = dp.get_axes()['mainplot_ax'].figure\n else:\n sc.pl.dotplot(adata,data['genes'],groupby=data['grp'][0],show=False,expression_cutoff=float(data['cutoff']),mean_only_expressed=(data['mean_only_expressed'] == 'Yes'),var_group_positions=data['grpLoc'],var_group_labels=data['grpID'], color_map=data['color'])\n fig = plt.gcf()\n #ppr.pprint(adata)\n\n return iostreamFig(fig)\n\ndef EMBED(data):\n adata = createData(data)\n if len(data['grpNum'])>0:\n adata.obs = pd.concat([adata.obs,getObsNum(data)],axis=1)\n subSize = 4\n ncol = int(data['ncol'])\n ngrp = len(data['grp'])\n ngrpNum = len(data['grpNum'])\n ngene = len(data['genes'])\n nrow = ngrp+math.ceil(ngrpNum/ncol)+math.ceil(ngene/ncol)\n if 'splitGrp' in data.keys():\n splitName = list(adata.obs[data['splitGrp']].unique())\n nsplitRow = math.ceil(len(splitName)/ncol)\n nrow = ngrp+math.ceil(ngrpNum/ncol)+ngene*nsplitRow\n step =11\n grpCol = {gID:math.ceil(len(list(adata.obs[gID].unique()))/step) for gID in data['grp']}\n\n rcParams['figure.constrained_layout.use'] = False\n fig = plt.figure(figsize=(ncol*subSize,subSize*nrow))\n gs = fig.add_gridspec(nrow,ncol,wspace=0.2)\n for i in range(ngrp):\n grpName = adata.obs[data['grp'][i]].value_counts().to_dict()\n grpPalette = None\n plotOrder = None\n dotSize = None\n if len(grpName)==2 and max(grpName.values())/min(grpName.values())>10:\n grpPalette = {max(grpName,key=grpName.get):'#c0c0c030',min(grpName,key=grpName.get):'#de2d26ff'}\n plotOrder 
= min(grpName,key=grpName.get) #list(grpPalette.keys()) #\n grpPalette = [grpPalette[k] for k in list(adata.obs[data['grp'][i]].cat.categories)]\n dotSize = adata.obs.apply(lambda x: 360000/adata.shape[1] if x['HIVcell']==plotOrder else 120000/adata.shape[1],axis=1).tolist()\n ax = sc.pl.embedding(adata,data['layout'],color=data['grp'][i],ax=fig.add_subplot(gs[i,0]),show=False,palette=grpPalette,groups=plotOrder,size=dotSize)\n if grpCol[data['grp'][i]]>1:\n ax.legend(ncol=grpCol[data['grp'][i]],loc=6,bbox_to_anchor=(1,0.5),frameon=False)\n ax.set_xlabel('%s1'%data['layout'])\n ax.set_ylabel('%s2'%data['layout'])\n\n for i in range(ngrpNum):\n x = int(i/ncol)+ngrp\n y = i % ncol\n ax = sc.pl.embedding(adata,data['layout'],color=data['grpNum'][i],ax=fig.add_subplot(gs[x,y]),show=False)#,wspace=0.25\n ax.set_xlabel('%s1'%data['layout'])\n ax.set_ylabel('%s2'%data['layout'])\n\n if 'splitGrp' in data.keys():\n vMax = adata.to_df().apply(lambda x: max(x))\n vMin = adata.to_df().apply(lambda x: min(x))\n dotSize = 120000 / adata.n_obs\n for i in range(ngene):\n for j in range(len(splitName)):\n x = ngrp + math.ceil(ngrpNum/ncol) + i*nsplitRow+int(j/ncol)\n y = j % ncol\n ax = sc.pl.embedding(adata,data['layout'],ax=fig.add_subplot(gs[x,y]),show=False)#color=data['genes'][i],wspace=0.25,\n ax = sc.pl.embedding(adata[adata.obs[data['splitGrp']]==splitName[j]],data['layout'],color=data['genes'][i],\n vmin=vMin[data['genes'][i]],vmax=vMax[data['genes'][i]],ax=ax,show=False,\n size=dotSize,title='{} in {}'.format(data['genes'][i],splitName[j]))\n ax.set_xlabel('%s1'%data['layout'])\n ax.set_ylabel('%s2'%data['layout'])\n else:\n for i in range(ngene):\n x = int(i/ncol)+ngrp+math.ceil(ngrpNum/ncol)\n y = i % ncol\n ax = sc.pl.embedding(adata,data['layout'],color=data['genes'][i],ax=fig.add_subplot(gs[x,y]),show=False)\n ax.set_xlabel('%s1'%data['layout'])\n ax.set_ylabel('%s2'%data['layout'])\n\n return iostreamFig(fig)\n\ndef TRACK(data):\n updateGene(data)\n adata = createData(data)\n if len(adata)==0:\n return Msg('No cells in the condition!')\n w = math.log2(adata.n_obs)\n h = adata.n_vars/2\n\n ## a bug in scanpy reported: https://github.com/theislab/scanpy/issues/1265, if resolved the following code is not needed\n if len(data['grpLoc'])>0 and data['grpLoc'][len(data['grpLoc'])-1][1] < (len(data['genes'])-1):\n data['grpLoc'] += [(data['grpLoc'][len(data['grpLoc'])-1][1]+1,len(data['genes'])-1)]\n data['grpID'] += ['others']\n ##############\n #ppr.pprint(data['grpLoc'])\n #ppr.pprint(data['grpID'])\n\n ax = sc.pl.tracksplot(adata,data['genes'],groupby=data['grp'][0],figsize=(w,h),\n var_group_positions=data['grpLoc'],var_group_labels=data['grpID'],\n show=False)\n fig=ax['track_axes'][0].figure\n return iostreamFig(fig)\n\ndef cut(x,cutoff,anno):\n iC = x[x>cutoff].count()\n if iC ==0:\n return \"None\"\n elif iC==2:\n return \"Both\"\n elif x[0]>cutoff:\n return anno[0]\n elif x[1]>cutoff:\n return anno[1]\n return \"ERROR\"\ndef dualExp(df,cutoff,anno):\n label = ['None']+list(anno)+['Both']\n a = df.iloc[:,0]>cutoff\n b = df.iloc[:,1]>cutoff\n return pd.Series([label[i] for i in list(a+2*b)],index=df.index,dtype='category')\n\ndef DUAL(data):\n adata = createData(data)\n adata.obs['Expressed'] = dualExp(adata.to_df(),float(data['cutoff']),adata.var_names)\n sT = time.time()\n pCol = {\"None\":\"#AAAAAA44\",\"Both\":\"#EDDF01AA\",data['genes'][0]:\"#1CAF82AA\",data['genes'][1]:\"#FA2202AA\"}\n adata.uns[\"Expressed_colors\"]=[pCol[i] for i in adata.obs['Expressed'].cat.categories]\n\n 
rcParams['figure.figsize'] = 4.5, 4\n fig = sc.pl.embedding(adata,data['layout'],color='Expressed',return_fig=True,show=False,legend_fontsize=\"small\")\n plt.xlabel('%s1'%data['layout'])\n plt.ylabel('%s2'%data['layout'])\n rcParams['figure.figsize'] = 4, 4\n return iostreamFig(fig)\n\ndef MARK(data):\n adata = createData(data)\n if len(adata)==0:\n return Msg('No cells in the condition!')\n ## remove the annotation whose cell counts are smaller than 2 to avoid division by zero\n vCount = adata.obs[data[\"grp\"][0]].value_counts()\n keepG = [key for key,val in vCount.items() if val>2]\n adata = adata[adata.obs[data[\"grp\"][0]].isin(keepG),:]\n\n if len(adata.obs[data['grp'][0]].unique())<3:\n return 'ERROR @server: {}'.format('Less than 3 groups in selected cells! Please use DEG for 2 groups')\n #return json.dumps([[['name','scores'],['None','0']],Msg('Less than 3 groups in selected cells!Please use DEG for 2 groups')])\n\n sc.tl.rank_genes_groups(adata,groupby=data[\"grp\"][0],n_genes=int(data['geneN']),method=data['markMethod'])#\n ppr.pprint(int(data['geneN']))\n sc.pl.rank_genes_groups(adata,n_genes=int(data['geneN']),ncols=min([3,len(adata.obs[data['grp'][0]].unique())]),show=False)\n fig =plt.gcf()\n\n gScore = adata.uns['rank_genes_groups']\n #ppr.pprint(gScore)\n pKeys = [i for i in ['names','scores','logfoldchanges','pvals','pvals_adj'] if i in gScore.keys()]\n scoreM = [pKeys+['Group']]\n for i in gScore['scores'].dtype.names:\n for j in range(len(gScore['scores'][i])):\n one = []\n for k in pKeys:\n if k=='logfoldchanges':\n one += ['%.2f' % gScore[k][i][j]]\n elif k in ['pvals','pvals_adj']:\n one += ['%.4E' % gScore[k][i][j]]\n elif k=='scores':\n one += ['%.4f' % gScore[k][i][j]]\n else:\n one += [gScore[k][i][j]]\n scoreM += [one+[i]]\n return json.dumps([scoreM,iostreamFig(fig)])\n\ndef DENS(data):\n #sT = time.time()\n adata = createData(data)\n #ppr.pprint(\"read data cost: %f seconds\" % (time.time()-sT))\n #sT = time.time()\n adata.obs['None'] = pd.Categorical(['all']*adata.shape[0])\n bw=float(data['bw'])\n sGrp = data['category'][0]\n cGrp = data['category'][1]\n\n defaultFontsize = 16\n if 'figOpt' in data.keys():\n defaultFontsize = float(data['figOpt']['fontsize'])\n subSize = 4\n #split = list(adata.obs[sGrp].unique())\n split = sorted(list(adata.obs[sGrp].cat.categories))\n genes = sorted(list(adata.var.index))\n #colGrp = list(adata.obs[cGrp].unique())\n colGrp = sorted(list(adata.obs[cGrp].cat.categories))\n legendCol = math.ceil(len(colGrp)/(len(split)*11))\n fig = plt.figure(figsize=(len(genes)*subSize,len(split)*(subSize-1)))\n plt.xlabel(\"Expression\",labelpad=20,fontsize=defaultFontsize+1)\n #plt.ylabel(sGrp,labelpad=50,fontsize=defaultFontsize+1)\n plt.xticks([])\n plt.yticks([])\n plt.box(on=None)\n\n #plt.xlabel(\"Expression\")\n #plt.ylabel(sGrp)\n gs = fig.add_gridspec(len(split),len(genes),wspace=0.2)#\n #dataT = 0\n #plotT = 0\n for i in range(len(split)):\n #resT = time.time()\n Dobs = adata[adata.obs[sGrp]==split[i]].obs[cGrp]\n D = adata[adata.obs[sGrp]==split[i]].to_df()\n #dataT += (time.time()-resT)\n for j in range(len(genes)):\n ax = fig.add_subplot(gs[i,j])\n #resT = time.time()\n for one in colGrp:\n if sum(Dobs==one)<1:\n sns.kdeplot([0],label=one)\n else:\n sns.kdeplot(D[Dobs==one][genes[j]].to_numpy(),bw_method=bw,label=one)\n\n ax.set_ylabel(\"\",fontsize=defaultFontsize)\n if i==0:\n ax.set_title(genes[j],fontsize=defaultFontsize+2)\n if j==0:\n ax.set_ylabel(split[i],fontsize=defaultFontsize)\n if i==0 and j==(len(genes)-1):\n 
ax.legend(prop={'size': 10},title = cGrp,loc=2,bbox_to_anchor=(1,1),ncol=legendCol,frameon=False)#\n else:\n leg = ax.get_legend()\n if not leg==None:\n leg.remove()\n #fig.text(0.6,0.09,\"Expression\",ha='center')\n #ppr.pprint(\"plotting data cost: %f seconds\" % dataT)\n #ppr.pprint(\"plotting plot cost: %f seconds\" % plotT)\n #ppr.pprint(\"plotting total cost: %f seconds\" % (time.time()-sT))\n return iostreamFig(fig)\n\ndef SANK(data):\n updateGene(data)\n if len(data['genes'])==0:\n tmp, D = getObs(data)\n D = D.apply(lambda x:x.apply(lambda y:x.name+\":\"+y))\n else:\n adata = createData(data)\n D = pd.concat([adata.obs.apply(lambda x:x.apply(lambda y:x.name+\":\"+y)),\n adata.to_df().apply(lambda x:pd.cut(x,int(data['sankBin'])).apply(lambda y:x.name+\":\"+'%.1f_%.1f'%(y.left,y.right)))],\n axis=1,sort=False)\n D = D.astype('str').astype('category')\n if data['obs_index'] in D.columns:\n del D[data['obs_index']]\n\n colName =['Set1','Set3','viridis']\n labels = []\n cols = []\n colindex = 0\n for gID in D.columns:\n gNames = list(D[gID].unique())\n labels += gNames\n if len(gNames) <10:\n cols += sns.color_palette(colName[colindex%2],len(gNames)).as_hex()\n colindex += 1\n else:\n cols += sns.color_palette(colName[2],len(gNames)).as_hex()\n\n sIDs =[]\n dIDs =[]\n v=[]\n Dnames = data['sankOrder']#list(D.columns)\n #maxGrp = 0\n #ppr.pprint(Dnames)\n for i in range(len(Dnames)-1):\n oneName = Dnames[i:i+2]\n #maxGrp = max(maxGrp,len(D[oneName[0]].unique()))\n summaryOne = D.groupby(oneName).size().reset_index(name='Count')\n summaryOne=summaryOne[summaryOne['Count']>0]\n sIDs += list(summaryOne[oneName[0]].apply(lambda x: labels.index(x)))\n dIDs += list(summaryOne[oneName[1]].apply(lambda x: labels.index(x)))\n v += list(summaryOne['Count'])\n\n data_trace = dict(\n type='sankey',\n domain=dict(x=[0,1],y=[0,1]),\n orientation='h',\n valueformat = \".0f\",\n node = dict(\n pad = 10,\n thickness = 15,\n line = dict(\n color = \"black\",\n width = 0.5\n ),\n label = labels,\n color = cols\n ),\n link = dict(\n source = sIDs,\n target = dIDs,\n value = v\n )\n )\n ## if the image is requested\n if 'imgSave' in data.keys():\n layout = dict(\n font = dict(size=int(data['figOpt']['fontsize'])),\n height= int(data['imgH']),\n width = int(data['imgW'])*D.shape[1]\n )\n fig = go.Figure(data=[go.Sankey(data_trace)],layout=layout)\n img = plotIO.to_image(fig,data['imgSave'])\n return base64.encodebytes(img).decode('utf-8')\n\n layout = dict(\n font = dict(size=int(data['figOpt']['fontsize'])),\n height= int(data['imgH']),\n width = int(data['imgW'])*D.shape[1],\n updatemenus= [\n dict(\n y=0.9,\n buttons=[\n dict(\n label='Thick',\n method='restyle',\n args=['node.thickness', 15]\n ),\n dict(\n label='Thin',\n method='restyle',\n args=['node.thickness', 8]\n )\n ]\n ),\n dict(\n y=0.8,\n buttons=[\n dict(\n label='Small gap',\n method='restyle',\n args=['node.pad', 15]\n ),\n dict(\n label='Large gap',\n method='restyle',\n args=['node.pad', 20]\n )\n ]\n ),\n dict(\n y=0.7,\n buttons=[\n dict(\n label='Snap',\n method='restyle',\n args=['arrangement', 'snap']\n ),\n dict(\n label='Perpendicular',\n method='restyle',\n args=['arrangement', 'perpendicular']\n ),\n dict(\n label='Freeform',\n method='restyle',\n args=['arrangement', 'freeform']\n ),\n dict(\n label='Fixed',\n method='restyle',\n args=['arrangement', 'fixed']\n )\n ]\n ),\n dict(\n y=0.6,\n buttons=[\n dict(\n label='Horizontal',\n method='restyle',\n args=['orientation','h']#{,'height':700,'width':250*D.shape[1]}\n ),\n 
dict(\n label='Vertical',\n method='restyle',\n args=['orientation','v']#{'orientation': 'v','height':250*D.shape[1],'width':700}\n )\n ]\n\n )\n ]\n )\n fig = go.Figure(data=[go.Sankey(data_trace)],layout=layout)\n div = plotIO.to_html(fig)\n return div#[div.find('<div>'):(div.find('</div>')+6)]\n\ndef DENS2D(data):\n adata = createData(data)\n\n ## plot in R\n strF = ('%s/DENS2D%f.csv' % (data[\"CLItmp\"],time.time()))\n adata.to_df().to_csv(strF)#\n res = subprocess.run([strExePath+'/Density2D.R',strF,data['figOpt']['img'],str(data['cutoff']),str(data['bandwidth']),data['figOpt']['colorMap'],str(data['figOpt']['fontsize']),str(data['figOpt']['dpi']),data['Rlib']],capture_output=True)#\n img = res.stdout.decode('utf-8')\n os.remove(strF)\n if 'Error' in res.stderr.decode('utf-8'):\n raise SyntaxError(\"in R: \"+res.stderr.decode('utf-8'))\n\n return img\n\ndef toInt(x):\n if len(x)==0:\n return 0\n return int(x)\n\ndef STACBAR(data):\n if len(data['genes'])==0:\n tmp, D = getObs(data)\n D = D.apply(lambda x:x.apply(lambda y:y))\n else:\n adata = createData(data)\n\n D = pd.concat([adata.obs.apply(lambda x:x.apply(lambda y:y)),\n adata.to_df().apply(lambda x:pd.cut(x,int(data['Nbin'])).apply(lambda y:'%s:%.1f_%.1f'%(x.name,y.left,y.right)))],\n axis=1,sort=False)\n D = D.astype('str').astype('category')\n if data['obs_index'] in D.columns:\n del D[data['obs_index']]\n cellN = D.groupby(list(D.columns)).size().reset_index(name=\"Count\")\n\n strCol = data['colorBy']\n tmp = list(D.columns)\n tmp.remove(strCol)\n strX = tmp[0]\n returnD = [{'name':i,\n 'sales':[{'year':j,#.replace(strX+':',''),\n 'profit':toInt(cellN[(cellN[strCol]==i) & (cellN[strX]==j)]['Count'])}\n for j in cellN[strX].unique()]}\n for i in cellN[strCol].unique()]\n return json.dumps(returnD)\n\ndef CLI(data):\n strPath = data[\"CLItmp\"]+('/CLI%f' % time.time())\n script = data['script']\n del data['script']\n\n adata = createData(data)\n\n strData = strPath + '.h5ad'\n adata.write(strData)\n #with open(strData,'wb') as f:\n #pickle.dump(adata,f)\n ppr.pprint(len(re.findall(r'```',script)))\n if (len(re.findall(r'```',script)) >0):\n strScript = strPath + '.Rmd'\n with open(strScript,'w') as f:\n f.writelines(['---\\noutput:\\n html_document:\\n code_folding: hide\\n---\\n\\n```{r}\\nstrPath <- \"%s\"\\n```\\n\\n'%strPath])\n f.write(script)\n #ppr.pprint(subprocess.run('which Rscript',capture_output=True,shell=True).stdout.decode('utf-8'))\n res = subprocess.run('Rscript -e \\'rmarkdown::render(\"%s\", output_file=\"%s.html\")\\''%(strScript,strPath),capture_output=True,shell=True)\n if (os.path.exists('%s.html'%strPath)):\n with open('%s.html'%strPath,'r') as file:\n html = file.read()\n else:\n html = ''\n ppr.pprint(res.stdout.decode('utf-8'))\n ppr.pprint(res.stderr.decode('utf-8'))\n else:\n strScript = strPath + '.py'\n with open(strScript,'w') as f:\n f.writelines(['%load_ext rpy2.ipython\\n','from anndata import read_h5ad\\n','adata=read_h5ad(\"%s\")\\n'%strData, 'strPath=\"%s\"\\n\\n'%strPath])\n #f.writelines(['%load_ext rpy2.ipython\\n','import pickle\\n','with open(\"%s\",\"rb\") as f:\\n'%strData,' adata=pickle.load(f)\\n','strPath=\"%s\"\\n\\n'%strPath])\n f.writelines(['%%R\\n','strPath=\"%s\"\\n\\n'%strPath])\n f.write(script)\n ppr.pprint(subprocess.run('which Rscript',capture_output=True,shell=True).stdout.decode('utf-8'))\n ppr.pprint(subprocess.run('which pandoc',capture_output=True,shell=True).stdout.decode('utf-8'))\n ppr.pprint(subprocess.run(\"Rscript -e 
'reticulate::py_config()'\",capture_output=True,shell=True).stdout.decode('utf-8'))\n res = subprocess.run('jupytext --to notebook --output - %s | jupyter nbconvert --ExecutePreprocessor.timeout=1800 --to html --execute --stdin --stdout'%strScript,capture_output=True,shell=True)\n html = res.stdout.decode('utf-8')\n h,s,e = html.partition('<div class=\"cell border-box-sizing code_cell rendered\">')\n h1,s,e = e.partition('<div class=\"cell border-box-sizing code_cell rendered\">') ## remove the first cell\n h1,s,e = e.partition('<div class=\"cell border-box-sizing code_cell rendered\">') ## remove the second cell\n html = h+s+e\n if 'Error' in res.stderr.decode('utf-8'):\n html = 'ERROR @server:\\nstderr:\\n' + res.stderr.decode('utf-8') + '\\nstdout:\\n' + res.stdout.decode('utf-8')\n for f in glob.glob(strPath+\"*\"):\n try:\n os.remove(f)\n except:\n continue\n\n return html\n\ndef getDesp(data):\n strF = re.sub(\"h5ad$\",\"txt\",data[\"h5ad\"])\n if not os.path.isfile(strF):\n return \"\"\n txt = \"\"\n with open(strF,'r') as fp:\n for line in fp:\n txt = \"%s<br>%s\"%(txt,line)\n return txt\n\ndef getPreDEGname(data):\n strF = re.sub(\"h5ad$\",\"db\",data[\"h5ad\"])\n if not os.path.isfile(strF):\n #ppr.pprint(strF+\" is NOT found!\")\n return \"\"\n conn = sqlite3.connect(strF)\n df = pd.read_sql_query(\"select DISTINCT contrast,tags from DEG;\", conn)\n conn.close()\n\n return json.dumps(list(df['contrast']+\"::\"+df['tags']))\n\ndef getPreDEGvolcano(data):\n strF = re.sub(\"h5ad$\",\"db\",data[\"h5ad\"])\n comGrp = data[\"compSel\"].split(\"::\")\n\n conn = sqlite3.connect(strF)\n df = pd.read_sql_query(\"select gene,log2fc,pval,qval from DEG where contrast=? and tags=?;\", conn,params=comGrp)\n conn.close()\n deg = df.sort_values(by=['qval'])\n data[\"comGrp\"] = comGrp[0].split(\".vs.\")\n\n ## plot in R\n strF = ('%s/DEG%f.csv' % (data[\"CLItmp\"],time.time()))\n deg.to_csv(strF,index=False)\n #ppr.pprint([strExePath+'/volcano.R',strF,';'.join(genes),data['figOpt']['img'],str(data['figOpt']['fontsize']),str(data['figOpt']['dpi']),str(data['logFC']),data['comGrp'][1],data['comGrp'][0]])\n res = subprocess.run([strExePath+'/volcano.R',strF,';'.join(data['genes']),data['figOpt']['img'],str(data['figOpt']['fontsize']),str(data['figOpt']['dpi']),str(data['logFC']),data['comGrp'][1],data['comGrp'][0],str(data['sigFDR']),str(data['sigFC']),data['Rlib']],capture_output=True)#\n img = res.stdout.decode('utf-8')\n os.remove(strF)\n if 'Error' in res.stderr.decode('utf-8'):\n raise SyntaxError(\"in R: \"+res.stderr.decode('utf-8'))\n #####\n gInfo = getVar(data)\n deg.index = deg['gene']\n deg = pd.concat([deg,gInfo],axis=1,join='inner',sort=False)\n #return deg.to_csv()\n\n if not data['topN']=='All':\n deg = deg.iloc[range(min(deg.shape[0],int(data['topN']))),]\n #deg.loc[:,'log2fc'] = deg.loc[:,'log2fc'].apply(lambda x: '%.2f'%x)\n #deg.loc[:,'pval'] = deg.loc[:,'pval'].apply(lambda x: '%.4E'%x)\n #deg.loc[:,'qval'] = deg.loc[:,'qval'].apply(lambda x: '%.4E'%x)\n\n return json.dumps([deg.to_csv(index=False),img])#json.dumps([deg.values.tolist(),img])\n\ndef getPreDEGbubble(data):\n #data={'compSel':['MS.vs.Control::EN.L4','MS.vs.Control::Endo.cells','MS.vs.Control::EN.PYR'],'genes':['RASGEF1B','SLC26A3','UNC5C','AHI1','CD9']}\n sql = \"select gene,log2fc,pval,qval,contrast || '::' || tags as tag from DEG where tag in ({comp}) and gene in ({gList}) order by case tag {oList} end;\".format(\n comp=','.join(['?']*len(data['compSel'])),\n gList=','.join(['?']*len(data['genes'])),\n oList=' 
'.join(['WHEN ? THEN %d'%i for i in range(len(data['compSel']))]))\n\n strF = re.sub(\"h5ad$\",\"db\",data[\"h5ad\"])\n conn = sqlite3.connect(strF)\n deg = pd.read_sql_query(sql,conn,params=data['compSel']+data['genes']+data['compSel'])\n conn.close()\n if deg.shape[0]==0:\n raise ValueError(\"No data for selected genes (\"+\", \".join(data['genes'])+\") in selected comparison (\"+\", \".join(data['compSel'])+\")!\")\n\n ## add selected genes which is not in the database back to the dataframe as NA\n addG = [[i,np.nan,np.nan,np.nan,data['compSel'][0]] for i in data['genes'] if i not in list(deg.gene.unique())]\n if len(addG)>0:\n deg = pd.concat([deg,pd.DataFrame(addG,columns=deg.columns)])\n ## add selected comparison which is not in the database back to the dataframe as NA\n addComp = [[data['genes'][0],np.nan,np.nan,np.nan,i] for i in data['compSel'] if i not in list(deg.tag.unique())]\n if len(addComp)>0:\n deg = pd.concat([deg,pd.DataFrame(addComp,columns=deg.columns)])\n #ppr.pprint(deg)\n ## plot in R\n strF = ('%s/DEG%f.csv' % (data[\"CLItmp\"],time.time()))\n deg.to_csv(strF,index=False)\n #ppr.pprint(' '.join([strExePath+'/bubbleMap.R',strF,data['figOpt']['img'],str(data['figOpt']['fontsize']),str(data['figOpt']['dpi']),data['scale'],data['Rlib']]))\n res = subprocess.run([strExePath+'/bubbleMap.R',strF,data['figOpt']['img'],str(data['figOpt']['fontsize']),str(data['figOpt']['dpi']),data['scale'],data['Rlib']],capture_output=True)#\n img = res.stdout.decode('utf-8')\n os.remove(strF)\n if 'Error' in res.stderr.decode('utf-8'):\n raise SyntaxError(\"in R: \"+res.stderr.decode('utf-8'))\n\n #RASGEF1B SLC26A3 UNC5C AHI1 CD9\n return img\n\ndef getEnv():\n config = {'CLItmp':'/tmp','Rpath':'','Rlib':'','METAtmp':'/tmp','METAurl':'','METAmax':1e4}\n strEnv = '%s/vip.env'%strExePath\n if os.path.isfile(strEnv):\n with open(strEnv,'r') as fp:\n for line in fp:\n one = line.strip().replace(\"\\t\", \"\").replace(\" \", \"\").split(\"=\")\n if not len(one)==2:\n continue\n config[one[0]]=one[1]\n #ppr.pprint(config)\n if len(config['Rpath'])>3:\n os.stat(\"%s/Rscript\"%config['Rpath'])\n os.environ['PATH'] = config['Rpath']+os.pathsep+os.environ['PATH']\n return config\ntry:\n VIPenv = getEnv()\nexcept Exception as e:\n ppr.pprint(\"The specified R path is incorrect, please check or remove from vip.env!\")\n raise e\n\ndef mergeMeta(data):\n selC = list(data['cells'].values())\n ## obtain the category annotation\n with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:\n if not 'cellN' in scD.data.obs:\n raise ValueError('This is not a metacell data!')\n obs = scD.data.obs.loc[selC,[data['obs_index'],'cellN']]\n ppr.pprint(obs)\n ppr.pprint(obs['cellN'].sum())\n if obs['cellN'].sum() > int(data['METAmax']):\n raise ValueError('The selected meta cells include more than maximum %d cells!'% int(data['METAmax']))\n strPath = re.sub(\".h5ad$\",\"\",data[\"h5ad\"])\n selCells = []\n for i in obs[data['obs_index']]:\n strOne = strPath+\"/\"+i+\".h5ad\"\n if os.path.exists(strOne):\n selCells += [ad.read(strOne)]\n strOut = data['METAtmp']+\"/\"+os.path.basename(strPath)+\"_\"+data['metaPostfix']+\".h5ad\"\n ad.concat(selCells).write(strOut)\n return data['METAurl']+\"/d/\"+os.path.basename(strOut)+\"/\"\n\ndef isMeta(data):\n with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:\n if not 'cellN' in scD.data.obs:\n return \"FALSE\"\n strPath = re.sub(\".h5ad$\",\"\",data[\"h5ad\"])\n if not os.path.exists(strPath):\n 
return \"FALSE\"\n return \"TRUE\"\n\ndef getBWinfo(data):\n BWinfo = {\"BWfile\":[],\"BWannotation\":[],\"BWlink\":[],\"BWpeak\":[],\"BWcluster\":[]}\n strD = re.sub(\".h5ad$\",\"/\",data[\"h5ad\"])\n if os.path.isdir(strD):\n for one in os.listdir(strD):\n if not re.search(\"bw$\",one)==None:\n BWinfo[\"BWfile\"].append(one)\n elif one==\"annotation.rds\":\n BWinfo[\"BWannotation\"]=\"annotation.rds\"\n elif one==\"peaks.rds\":\n BWinfo[\"BWpeak\"]=\"peaks.rds\"\n elif one==\"links.rds\":\n BWinfo[\"BWlink\"]=\"links.rds\"\n elif one==\"bw.cluster\":\n BWinfo[\"BWcluster\"]=\"bw.cluster\"\n return json.dumps(BWinfo)\n\ndef plotBW(data):\n strD = re.sub(\".h5ad$\",\"/\",data[\"h5ad\"])\n strCSV = ('%s/BW%f.csv' % (data[\"CLItmp\"],time.time()))\n ## select all cells\n strType = strD + 'bw.cluster'\n grpFlag = False\n if os.path.isfile(strType) and len(data['genes'])>0:\n with open(strType,\"r\") as f:\n grp = f.readline().strip()\n with app.get_data_adaptor(url_dataroot=data['url_dataroot'],dataset=data['dataset']) as scD:\n dAnno = list(scD.get_obs_keys())\n if grp in dAnno:\n grpFlag = True\n if grpFlag:\n data['grp'] = [grp]\n adata = createData(data)\n if len(adata)==0:\n grpFlag = False\n else:\n cluster = pd.read_csv(strType,sep=\"\\t\",header=None,index_col=1,skiprows=1)#delimiter=\"\\n\",\n adata = adata[adata.obs[grp].isin(list(cluster.index)),:]\n obsCluster = pd.DataFrame(list(cluster.loc[adata.obs[grp],:][0]),index=adata.obs.index,columns=[grp])\n pd.concat([obsCluster,adata.to_df()], axis=1, sort=False).to_csv(strCSV)\n ## plot in R\n #strCMD = ' '.join([strExePath+'/browserPlot.R',strD,data['region'],str(data['exUP']),str(data['exDN']),strCSV,str(data['cutoff']),data['figOpt']['img'],str(data['figOpt']['fontsize']),str(data['figOpt']['dpi']),data['Rlib']])\n #ppr.pprint(strCMD)\n res = subprocess.run([strExePath+'/browserPlot.R',strD,data['region'],str(data['exUP']),str(data['exDN']),strCSV,str(data['cutoff']),data['figOpt']['img'],str(data['figOpt']['fontsize']),str(data['figOpt']['dpi']),data['Rlib']],capture_output=True)#\n img = res.stdout.decode('utf-8')\n if grpFlag:\n os.remove(strCSV)\n if 'Error' in res.stderr.decode('utf-8'):\n raise SyntaxError(\"in R: \"+res.stderr.decode('utf-8'))\n\n return img\n\n#make sure the h5ad file full name is listed in vip.env as a variable 'testVIP';\ndef testVIPready(data):\n strH5ad = os.path.basename(data[\"h5ad\"])\n if 'testVIP' in data and strH5ad==data[\"testVIP\"]:\n both = True\n for one in [re.sub(\"h5ad$\",\"info.txt\",strH5ad),re.sub(\"h5ad$\",\"img.txt\",strH5ad)]:\n both = both and os.path.exists(strExePath+\"/../common/web/static/testVIP/\"+one)\n if both:\n return \"SHOW\"\n else:\n return \"TRUE\"\n return \"FALSE\"\n\ndef saveTest(data):\n strPath = strExePath+\"/../common/web/static/testVIP/\"\n if not os.path.exists(strPath):\n os.makedirs(strPath)\n strH5ad = os.path.basename(data[\"h5ad\"])\n\n if len(data['info'])>100:\n #ppr.pprint(strPath+re.sub(\"h5ad$\",\"info.txt\",strH5ad))\n with open(strPath+re.sub(\"h5ad$\",\"info.txt\",strH5ad),'w') as f:\n f.write(data['info'])\n if len(data['img'])>100:\n with open(strPath+re.sub(\"h5ad$\",\"img.txt\",strH5ad),'w') as f:\n f.write(data['img'])\n return 'success'\n" ]
[ [ "pandas.Series", "numpy.flipud", "pandas.DataFrame", "pandas.DataFrame.sparse.from_spmatrix", "matplotlib.pyplot.gca", "matplotlib.patches.Patch", "pandas.read_csv", "matplotlib.pyplot.gcf", "matplotlib.pyplot.close", "matplotlib.pyplot.text", "matplotlib.pyplot.figure", "pandas.concat", "matplotlib.pyplot.box", "pandas.Categorical", "matplotlib.pyplot.savefig", "matplotlib.rcParams.update", "matplotlib.pyplot.xticks", "matplotlib.pyplot.ylabel", "pandas.read_sql_query", "matplotlib.use", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.yticks" ] ]
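Every plotting endpoint in the record above returns its matplotlib figure through an iostreamFig() helper whose definition sits outside this excerpt. A minimal sketch of what such a helper plausibly looks like, assuming PNG output encoded as base64 (the same base64.encodebytes pattern SANK uses for its Plotly image); the name fig_to_base64 is hypothetical:

import base64
import io

import matplotlib
matplotlib.use("Agg")  # headless backend for server-side rendering
import matplotlib.pyplot as plt


def fig_to_base64(fig):
    # Serialize the figure to an in-memory PNG, then base64-encode it
    # so it can be shipped back in a JSON/HTTP response.
    buf = io.BytesIO()
    fig.savefig(buf, format="png", bbox_inches="tight")
    plt.close(fig)
    buf.seek(0)
    return base64.encodebytes(buf.read()).decode("utf-8")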
jaisenc/qutil
[ "1485fe486f4f2141f236d25addcdc34eceb3727d" ]
[ "qutil/format/number.py" ]
[ "import numpy as np\n\n\ndef fmtp(number, decimals=2):\n \"\"\"\n Formatting helper - percent: 0.52 -> 52.00%\n \"\"\"\n if np.isnan(number):\n return '-'\n return format(number, '.{}%'.format(decimals))\n\n\ndef fmtpn(number, decimals=2):\n \"\"\"\n Formatting helper - percent no % sign: 0.523 -> 52.30\n \"\"\"\n if np.isnan(number):\n return '-'\n return format(number * 100, '.{}f'.format(decimals))\n\n\ndef fmtph(number, decimals=2):\n \"\"\"\n Formatting helper - percent no % sign: 52.30 -> 52.30%\n \"\"\"\n if np.isnan(number):\n return '-'\n return format(number / 100, '.{}%'.format(decimals))\n\n\ndef fmtn(number, decimals=2):\n \"\"\"\n Formatting helper - float\n \"\"\"\n if np.isnan(number):\n return '-'\n return format(number, '.{}f'.format(decimals))\n\n\ndef fmti(number):\n \"\"\"\n Formatting helper - int\n :param number:\n :return:\n \"\"\"\n if np.isnan(number):\n return '-'\n return format(number, '.0f')\n\n\ndef fmtth(x, decimal=0):\n if np.isnan(x):\n return '-'\n return '{0:.{1}f}'.format(x, decimal)\n\n\ndef fmtpx(x):\n return fmtth(x, decimal=2)\n\n\ndef fmtl(x):\n \"\"\"\n Formatting helper - large number\n\n :param x:\n :return:\n \"\"\"\n if np.isnan(x):\n return '-'\n elif abs(x) >= 1000000000:\n return '{:,.0f} Bln'.format(x / 1000000000)\n elif abs(x) >= 1000000:\n return '{:,.0f} Mln'.format(x / 1000000)\n elif abs(x) >= 1000:\n return '{:,.0f} k'.format(x / 1000)\n else:\n return '{:,.0f}'.format(x)\n" ]
[ [ "numpy.isnan" ] ]
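All of the formatters in the record above share one shape: guard with np.isnan, then delegate to a format() spec. A self-contained distillation of that pattern (fmt is a hypothetical name, not part of the record):

import numpy as np


def fmt(number, spec):
    # NaN becomes a '-' placeholder; everything else goes through format().
    return '-' if np.isnan(number) else format(number, spec)


print(fmt(0.5234, '.2%'))        # 52.34%
print(fmt(float('nan'), '.2f'))  # -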
timtonthat/batch8_ceebios
[ "224debcd4325f5f44b334b2e98594208cf83fe1c" ]
[ "pokedex/search/search_gbif.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 25 14:08:30 2020\n\nModule de recherche sur la base gbif https://www.gbif.org/fr/\n\nhttp://tecfa.unige.ch/perso/lombardf/calvin/teaching/mammiferes-fr-latin.html\n\n@author: CHRISTIAN\n\"\"\"\n\n\nimport os\nimport time\nimport pprint\nimport json\nimport re\nimport pandas as pd\npd.set_option('display.max_rows', 500)\npd.set_option('display.max_columns', 500)\npd.set_option('display.width', 1000)\npd.set_option('max_colwidth', 1000)\n\nfrom nltk.probability import FreqDist\n\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n\nfrom pygbif import species\nfrom scipy import sparse\nimport pickle\nfrom scipy.spatial import distance\n\n\nif False: # local desktop working\n os.chdir('D:/ecomdataforgoodfr/Ceebios/batch8_ceebios/lib')\n print(os.getcwd())\n\n\n\ndef test_species(): \n data = species.name_suggest(q='Puma concolor')\n for x in data:\n print(len(x))\n \n data2 = species.name_suggest()\n for x in data2:\n print(len(x))\n \n nb_species = 0\n for i in range(0, 10):\n tab_rep = species.name_suggest(q='vespa', offset=i)\n print(len(tab_rep))\n for doc in tab_rep:\n if 'species' in doc:\n nb_species += 1\n print(doc['species'])\n \n \n print(nb_species) \n\ndef collect_all_species(fname = \"../data/tfidf/data_gbif.json\"):\n ''' collect all species '''\n \n last_read = 1\n offset = 0\n ts = time.time()\n dct = {}\n nb_species = 0\n data = []\n while last_read>0:\n tab_rep = species.name_suggest(offset=offset)\n offset += 1 \n # if time.time()-ts>60:\n # break\n last_read = len(tab_rep)\n for x in tab_rep:\n nb_species += 1\n data.append(x)\n for k in x.keys():\n if k not in dct:\n dct[k] = 1\n else:\n dct[k] += 1 \n \n pp = pprint.PrettyPrinter(2) \n print('nb species', nb_species)\n print('Dict:', len(dct))\n pp.pprint(dct)\n \n with open(fname,\"w\", encoding='utf-8') as jsonfile:\n json.dump(data, jsonfile,ensure_ascii=False)\n print(time.time()-ts) \n \n \ndef read_collect_bgif(fname = \"../data/tfidf/data_gbif.json\"):\n datain = []\n with open(fname,\"r\", encoding='utf-8') as jsonfile:\n datain = json.load(jsonfile)\n return datain\n\n\ndef create_Tf_matrix(corpus, \\\n filename_npz='../data/tfidf/data_tf.npz', \\\n filename_features=\"../data/tfidf/data_feature_names.pkl\"):\n ''' creation d'une matrice TF '''\n \n vectorizer = CountVectorizer(max_features=len(corpus))\n X = vectorizer.fit_transform(corpus)\n print('-Vectorized matrix, ', X.toarray().shape)\n print(' first line:')\n print(X.toarray()[0])\n print('- Nombre de features :'+str(len(vectorizer.get_feature_names())))\n print(vectorizer.get_feature_names()[0:10], ' ...')\n \n data = pd.DataFrame(vectorizer.get_feature_names())\n data.to_pickle(filename_features) \n print('tf feature names - saved')\n sparse.save_npz(filename_npz, X)\n print('tf matrix:', filename_npz,' - saved')\n\ndef create_TfIdf(corpus, \\\n filename_npz='../data/tfidf/data_tfidf.npz', \\\n filename_features=\"../data/tfidf/data_tfidf_feature_names.pkl\",\n vectorizer_path=\"../data/tfidf/tfidf_vectorizer.pickle\"):\n ''' \n Création de la matrice pondérée TF-IDF (Term Frequency times Inverse Document Frequency)\n La matrice TF-IDF est une mesure qui permet de faire de la ségrégation entre documents.\n Si un mot a une très haute fréquence dans une question, mais une fréquence basse dans les autres questions du corpus,\n c’est qu’il s’agit d’un mot important pour caractériser le document en question.\n '''\n vectorizer_model = 
TfidfVectorizer(max_features=len(corpus))\n X = vectorizer_model.fit_transform(corpus)\n print('-Tfidf matrix, ', X.toarray().shape)\n print(' first line:')\n print(X.toarray()[0])\n \n print('- Nombre de features :'+str(len(vectorizer_model.get_feature_names())))\n print(vectorizer_model.get_feature_names()[0:10], ' ...')\n \n data = pd.DataFrame(vectorizer_model.get_feature_names())\n data.to_pickle(filename_features) \n print('tfidf feature names - saved')\n sparse.save_npz(filename_npz, X)\n print('tfidf matrix:', filename_npz,' - saved')\n \n pickle.dump(vectorizer_model, open(vectorizer_path, \"wb\"))\n print('vectorizer model:', vectorizer_path,' - saved')\n\n\ndef read_clean_dataset(fname, disp_first=True):\n ''' load dataset '''\n \n datain = read_collect_bgif(fname)\n if disp_first==True:\n print('- We suppress key fields, and convert to minus')\n \n tab = []\n for doc in datain:\n terms = []\n for k, v in doc.items():\n if k.endswith('Key')==False:\n if type(v)!=str:\n continue\n v = v.lower()\n v = re.sub(r\"[^a-z]+\", ' ', v)\n terms.append(v)\n tab.append({'terms': terms})\n \n if disp_first==True:\n pp = pprint.PrettyPrinter(2)\n print('First doc, before')\n pp.pprint(datain[0])\n print('then')\n print(tab[0])\n \n dataset = pd.DataFrame(tab) \n dataset['d'] = dataset.index\n \n return dataset\n\ndef load_clean_and_generate_tf_idf(fname=\"../data/tfidf/data_gbif.json\"):\n ''' clean dataset and create tf and tfidf '''\n \n print(\" load dataset, and clean\")\n dataset = read_clean_dataset(fname)\n print('- Verification:')\n display(dataset.head())\n \n dataset['freq'] = dataset['terms'].apply(lambda x: FreqDist(x))\n \n #fdist1 : frequence dans le tableau de mots\n print(\"Calcul de la frequence des mots\")\n fdist = dataset['freq'].sum()\n print('Words list created, size:', len(fdist))\n \n \n print('- mots les plus fréquents:')\n d_list = pd.DataFrame(fdist.most_common(100))\n print(d_list)\n # c = sorted(d_list[0])\n dataset['line'] = dataset['terms'].apply(lambda x: ' '.join(x))\n corpus = dataset['line'].to_list()\n print('corpus len:', len(corpus))\n \n print('- create tf matrix')\n create_Tf_matrix(corpus)\n print('- create tfidf matrix')\n create_TfIdf(corpus)\n\ndef load_tfidf(filename_npz='../data/tfidf/data_tfidf.npz', \\\n filename_features=\"../data/tfidf/data_tfidf_feature_names.pkl\",\\\n vectorizer_path=\"../data/tfidf/tfidf_vectorizer.pickle\",\\\n transform_example=True):\n ''' Load tfidf weights function '''\n ''' Input filenames: tfidf, feature names, vectorizer \n Output: \n - data names, X tfidf weight, vectorizer\n '''\n \n data_names = pd.read_pickle(filename_features)\n X = sparse.load_npz(filename_npz)\n vectorizer_model = pickle.load(open(vectorizer_path,'rb'))\n \n if transform_example==True:\n print('-Example of transform names, with query', )\n query = ['zygnematophyceae zygomycota']\n print('-Example of transform names, with query', query)\n print(' result vector:')\n x_request = vectorizer_model.transform(query)\n x0 = x_request.toarray()\n print(x0.shape)\n print(x0)\n \n return data_names, X, vectorizer_model\n \n\ndef transform_query(vectorizer_model, query):\n ''' transform query in vector '''\n x_request = vectorizer_model.transform(query)\n x0 = x_request.toarray()\n return x0\n \ndef test_dist():\n data_names, X, vectorizer_model = load_tfidf()\n \n print(\" load dataset, and clean\")\n fname = \"../data/tfidf/data_gbif.json\"\n dataset = read_clean_dataset(fname)\n \n print('Data_names:', data_names)\n \n print('- Verification:')\n 
print('head:')\n display(dataset['terms'].head(20))\n print('tail:')\n print(dataset['terms'].tail())\n\n query = ['zygnematophyceae zygomycota']\n x0 = transform_query(vectorizer_model, query)\n \n kind = ['braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', \\\n 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',\\\n 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']\n for metric in kind:\n d = distance.cdist(x0, X.toarray(), metric)\n print('metric', metric)\n print(d)\n \n # \n print('Distance chebyshev Versus distance standard euclidian')\n print('-----------------------------------------------------')\n \n metric_lst = ['chebyshev','seuclidean']\n # The Chebyshev distance between two n-vectors u and v \\\n # is the maximum norm-1 distance between their respective elements.\n # seuclidean = Computes the standardized Euclidean distance\n \n test_lst = ['anthocerotophyta',\n 'archaea kingdom accepted',\n 'chromista ochrophyta thalassiosirales']\n for i, test in enumerate(test_lst):\n x0 = transform_query(vectorizer_model, [test])\n for metric in metric_lst:\n d = distance.cdist(x0, X.toarray(), metric)[0]\n print('query:',i+1, '\"', test, '\"', ', metric:', metric)\n # print('metric', metric)\n # print('distances: ', d) \n index_lst = sorted(range(len(d)), key=lambda k: d[k])\n dataset['d'] = d\n # df = dataset.sort_values(by=['d'], ascending=True) \n print(dataset['terms'][index_lst[0:10]])\n print()\n \n \n " ]
[ [ "pandas.read_pickle", "scipy.sparse.load_npz", "pandas.DataFrame", "pandas.set_option", "scipy.sparse.save_npz" ] ]
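The heart of test_dist() in the record above is: vectorize a corpus with TF-IDF, transform a query, and rank documents by a scipy distance. A minimal, self-contained sketch of that loop, reusing test strings from the record as a three-document corpus:

from scipy.spatial import distance
from sklearn.feature_extraction.text import TfidfVectorizer

corpus = [
    "archaea kingdom accepted",
    "chromista ochrophyta thalassiosirales",
    "puma concolor species",
]
vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform(corpus)

query = vectorizer.transform(["archaea kingdom"]).toarray()
d = distance.cdist(query, X.toarray(), "cosine")[0]
ranking = sorted(range(len(d)), key=lambda k: d[k])
print([corpus[i] for i in ranking])  # closest document first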
deepest-stack/backend
[ "e3b9e1f73b5b5160c0529d2a507daf7b87089f16" ]
[ "dl/python/dl/gnn/full_gcn.py" ]
[ "#!/usr/local/greenplum-db-6.10.0/ext/python/bin/python\n# coding=utf-8\n\n\"\"\"class for GCN implementation\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport re\nimport graphlearn as gl\nimport tensorflow as tf\nfrom .base_gcn import BaseGCN\nfrom .utils import edge_ori\nimport warnings\n\n\nclass FullGCN(BaseGCN):\n \"\"\"\n Args:\n graph: Initialized gl.Graph object.\n output_dim: Output dimension.\n features_num: dict, format as {node_type: features_num}\n node_type: target node type\n sample_paths: list of sample paths, format as [\"(node_type)-[edge_type]->(node_type)\", \"(node_type)<-[edge_type]-(node_type)\"].\n For every target node, there should be at least 1 complete path for each sample path\n categorical_attrs_desc: A dict indicates discrete features, with the format\n {node_type: {feature_column_index : [name, discrete_features_count, embedding_dimension]}}.\n hidden_dim: Hidden dimension.\n hidden_act: action function for hidden layers\n in_drop_rate: Dropout ratio for input data.\n need_dense: whether use dense layer for feature encoder\n dense_act: action function of dense layer in feature encoder\n \"\"\"\n\n def __init__(self,\n graph,\n output_dim,\n features_num,\n node_type,\n sample_paths,\n categorical_attrs_desc=None,\n hidden_dim=16,\n hidden_act='relu',\n in_drop_rate=.0,\n use_input_bn=True,\n need_dense=True,\n dense_act=None,\n mode=\"train\"):\n super(FullGCN, self).__init__(\n graph=graph,\n output_dim=output_dim,\n features_num=features_num,\n node_type=node_type,\n sample_paths=sample_paths,\n neighs_num=None,\n categorical_attrs_desc=categorical_attrs_desc,\n hidden_dim=hidden_dim,\n hidden_act=hidden_act,\n in_drop_rate=in_drop_rate,\n use_input_bn=use_input_bn,\n need_dense=need_dense,\n dense_act=dense_act,\n mode=mode\n )\n warnings.warn(\"Every target node must have at least 1 complete path of each sample path,\"\n \" otherwise use `SampledGCN` instead\")\n\n def receptive_fn(self, t, node_ids):\n alias_list = [['v_%d_%d' % (j+1, i+1) for i in range(self._hops_num)] for j in range(len(self._sample_paths))]\n\n pt = re.compile(\"(<?-)\\[(.+?)\\](->?)\")\n params_list = [[(group[1], edge_ori(group)) for group in pt.findall(sample_path)] for sample_path in self._sample_paths]\n\n sample_func = lambda v, params: \\\n v.outV(params[0]).sample().by('full') if params[1] == 1 \\\n else v.inV(params[0]).sample().by('full')\n\n src, layers = self.graph.V(t, feed=node_ids).alias('v').each(\n lambda v: [\n v.repeat(\n sample_func,\n self._hops_num,\n params_list=params_list[idx],\n alias_list=alias_list[idx]\n ) for idx in range(len(self._sample_paths))]\n ).emit(lambda x: (x[\"v\"], [gl.Layer(nodes=x[name]) for alias in alias_list for name in alias]))\n\n return gl.EgoGraph(src, layers)\n\n\ndef train():\n global g, sample_paths, categorical_attrs_desc\n gcn = FullGCN(\n graph=g,\n output_dim=2,\n features_num={\"1\": 3, \"2\": 2, \"3\": 1},\n node_type=\"1\",\n sample_paths=sample_paths,\n categorical_attrs_desc=categorical_attrs_desc,\n hidden_dim=16,\n hidden_act=tf.nn.relu,\n in_drop_rate=0.1,\n use_input_bn=True,\n need_dense=True,\n dense_act=None\n )\n est = tf.estimator.Estimator(\n model_fn=gcn.model_fn,\n model_dir=\"/gpload/model_dir/full_gcn\"\n )\n\n epochs = 1\n train_sample_seed = lambda: gcn.graph.V(\"1\").shuffle(traverse=True).batch(64).values()\n est.train(\n input_fn=lambda: gcn.input_fn(sample_seed=train_sample_seed, epochs=epochs)\n )\n\n\nif __name__ == 
\"__main__\":\n from .utils import load_graph\n\n g = load_graph()\n sample_paths = [\n \"(1)-[1]->(1)-[1]->(1)-[1]->(1)\",\n \"(1)-[2]->(2)<-[2]-(1)-[2]->(2)\",\n \"(1)-[3]->(3)<-[3]-(1)-[3]->(3)\"\n ]\n\n categorical_attrs_desc = {\n \"1\": {0: [\"card_level\", 3, 16]},\n \"2\": {0: [\"ip_type\", 4, 16], 1: [\"ip_city\", 10, 16]},\n \"3\": {0: [\"device_type\", 3, 16]}\n }\n train()\n" ]
[ [ "tensorflow.estimator.Estimator" ] ]
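The only framework call the apis list above records is tf.estimator.Estimator, which the record wires to a custom model_fn. A toy, generic sketch of that estimator-era pattern (a dense classifier on random data — not the GCN itself, with all graphlearn sampling omitted; paths and sizes are made up, and this assumes a TensorFlow version that still ships tf.estimator):

import numpy as np
import tensorflow as tf


def model_fn(features, labels, mode):
    # Minimal TRAIN-mode spec: logits, loss, and one optimizer step.
    logits = tf.compat.v1.layers.dense(features, 2)
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))
    train_op = tf.compat.v1.train.AdamOptimizer(1e-3).minimize(
        loss, global_step=tf.compat.v1.train.get_or_create_global_step())
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)


def input_fn():
    x = np.random.rand(256, 8).astype(np.float32)
    y = np.random.randint(0, 2, 256).astype(np.int64)
    return tf.data.Dataset.from_tensor_slices((x, y)).batch(32)


est = tf.estimator.Estimator(model_fn=model_fn, model_dir="/tmp/toy_estimator")
est.train(input_fn=input_fn)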
kirchhausenlab/incasem
[ "ee9e007c5c04571e547e2fb5af5e800bd2d2b435" ]
[ "incasem/gunpowder/unsqueeze.py" ]
[ "import copy\nfrom typing import List\nimport logging\n\nimport numpy as np\nimport gunpowder as gp\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\nclass Unsqueeze(gp.BatchFilter):\n \"\"\"Unsqueeze a batch at a given axis\n\n Args:\n arrays (List[gp.ArrayKey]): ArrayKeys to unsqueeze.\n axis: Position where the new axis is placed, defaults to 0.\n \"\"\"\n\n def __init__(self, arrays: List[gp.ArrayKey], axis: int = 0):\n self.arrays = arrays\n self.axis = axis\n\n if self.axis != 0:\n raise NotImplementedError(\n 'Unsqueeze only supported for leading dimension')\n\n def setup(self):\n self.enable_autoskip()\n for array in self.arrays:\n self.updates(array, self.spec[array].copy())\n\n def prepare(self, request):\n deps = gp.BatchRequest()\n for array in self.arrays:\n deps[array] = request[array].copy()\n return deps\n\n def process(self, batch, request):\n outputs = gp.Batch()\n for array in self.arrays:\n outputs[array] = copy.deepcopy(batch[array])\n outputs[array].data = np.expand_dims(batch[array].data, self.axis)\n return outputs\n" ]
[ [ "numpy.expand_dims" ] ]
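The Unsqueeze filter above is a request-aware wrapper around a single NumPy call; the underlying array operation it applies in process() is just:

import numpy as np

raw = np.zeros((64, 64, 64))        # e.g. a z-y-x volume
expanded = np.expand_dims(raw, 0)   # axis=0, the only axis the filter allows
print(expanded.shape)               # (1, 64, 64, 64)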
eisber/sarplus
[ "ebc19a0a2297565c41e24413a0d33fbfab93aef3" ]
[ "python/tests/test_pyspark_sar.py" ]
[ "import calendar\nimport datetime\nimport math\nimport numpy as np\nimport pandas as pd\nimport pytest\nimport os\nfrom sklearn.model_selection import train_test_split\n\nfrom pyspark.sql import SparkSession\n\nfrom pysarplus import SARPlus, SARModel\n\ndef assert_compare(expected_id, expected_score, actual_prediction):\n assert expected_id == actual_prediction.id\n assert math.isclose(expected_score, actual_prediction.score, rel_tol=1e-3, abs_tol=1e-3)\n\[email protected](scope=\"module\")\ndef spark(app_name=\"Sample\", url=\"local[*]\", memory=\"1G\"):\n \"\"\"Start Spark if not started\n Args:\n app_name (str): sets name of the application\n url (str): url for spark master\n memory (str): size of memory for spark driver\n \"\"\"\n\n spark = (\n SparkSession.builder.appName(app_name)\n .master(url)\n .config(\"spark.jars\", os.path.dirname(__file__) + \"/../../scala/target/scala-2.11/sarplus_2.11-0.2.5.jar\")\n .config(\"spark.driver.memory\", memory)\n .config(\"spark.sql.shuffle.partitions\", \"1\")\n .config(\"spark.default.parallelism\", \"1\")\n .config(\"spark.sql.crossJoin.enabled\", True)\n .config(\"spark.ui.enabled\", False)\n # .config(\"spark.eventLog.enabled\", True) # only for local debugging, breaks on build server\n .getOrCreate()\n )\n\n return spark\n\[email protected](scope=\"module\")\ndef sample_cache(spark):\n df = spark.read.csv(\"tests/sample-input.txt\", header=True, inferSchema=True)\n\n path = \"tests/sample-output.sar\"\n\n df.coalesce(1)\\\n .write.format(\"eisber.sarplus\")\\\n .mode(\"overwrite\")\\\n .save(path)\n\n return path\n\[email protected](scope=\"module\")\ndef header():\n header = {\n \"col_user\": \"UserId\",\n \"col_item\": \"MovieId\",\n \"col_rating\": \"Rating\",\n \"col_timestamp\": \"Timestamp\",\n }\n return header\n\[email protected](scope=\"module\")\ndef pandas_dummy_dataset(header):\n \"\"\"Load sample dataset in pandas for testing; can be used to create a Spark dataframe\n Returns:\n single Pandas dataframe\n \"\"\"\n ratings_dict = {\n header[\"col_user\"]: [1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3],\n header[\"col_item\"]: [1, 2, 3, 4, 1, 2, 7, 8, 9, 10, 1, 2],\n header[\"col_rating\"]: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n }\n return pd.DataFrame(ratings_dict)\n\[email protected]\ndef test_good(spark, sample_cache):\n model = SARModel(sample_cache)\n y = model.predict([0, 1], [10, 20], top_k=10, remove_seen=False)\n\n assert_compare(0, 5, y[0])\n assert_compare(1, 44, y[1])\n assert_compare(2, 64, y[2])\n\[email protected]\ndef test_good_less(spark, sample_cache):\n model = SARModel(sample_cache)\n y = model.predict([0, 2], [10, 3], top_k=5, remove_seen=False)\n\n assert_compare(0, 1, y[0])\n assert_compare(1, 11.6, y[1])\n assert_compare(2, 12.3, y[2])\n\[email protected]\ndef test_good_require_sort(spark, sample_cache):\n model = SARModel(sample_cache)\n y = model.predict([1, 0], [20, 10], top_k=10, remove_seen=False)\n\n assert_compare(0, 5, y[0])\n assert_compare(1, 44, y[1])\n assert_compare(2, 64, y[2])\n\n assert 3 == len(y)\n\[email protected]\ndef test_good_require_sort_remove_seen(spark, sample_cache):\n model = SARModel(sample_cache)\n y = model.predict([1, 0], [20, 10], top_k=10, remove_seen=True)\n\n assert_compare(2, 64, y[0])\n assert 1 == len(y)\n\[email protected]\ndef test_pandas(spark, sample_cache):\n item_scores = pd.DataFrame([(0, 2.3), (1, 3.1)], columns=[\"itemID\", \"score\"])\n\n model = SARModel(sample_cache)\n y = model.predict(item_scores[\"itemID\"].values, item_scores[\"score\"].values, top_k=10, 
remove_seen=False)\n\n assert_compare(0, 0.85, y[0])\n assert_compare(1, 6.9699, y[1])\n assert_compare(2, 9.92, y[2])\n\[email protected]\ndef test_e2e(spark, pandas_dummy_dataset, header):\n sar = SARPlus(spark, **header)\n \n df = spark.createDataFrame(pandas_dummy_dataset)\n sar.fit(df) \n\n # assert 4*4 + 32 == sar.item_similarity.count()\n\n # print(sar.item_similarity\n # .toPandas()\n # .pivot_table(index='i1', columns='i2', values='value'))\n\n test_df = spark.createDataFrame(pd.DataFrame({\n header['col_user']: [3],\n header['col_item']: [2]\n }))\n\n r1 = sar.recommend_k_items_slow(test_df, top_k=3, remove_seen=False)\\\n .toPandas()\\\n .sort_values([header['col_user'], header['col_item']])\\\n .reset_index(drop=True)\n\n r2 = sar.recommend_k_items(test_df, \"tests/test_e2e_cache\", top_k=3, n_user_prediction_partitions=2, remove_seen=False)\\\n .toPandas()\\\n .sort_values([header['col_user'], header['col_item']])\\\n .reset_index(drop=True)\n\n assert (r1.iloc[:,:2] == r2.iloc[:,:2]).all().all()\n assert np.allclose(\n r1.score.values,\n r2.score.values,\n 1e-3\n )\n\[email protected](scope=\"module\")\ndef pandas_dummy(header):\n ratings_dict = {\n header[\"col_user\"]: [1, 1, 1, 1, 2, 2, 2, 2, 2, 2],\n header[\"col_item\"]: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n header[\"col_rating\"]: [1, 2, 3, 4, 5, 1, 2, 3, 4, 5],\n }\n df = pd.DataFrame(ratings_dict)\n return df\n\n\[email protected](scope=\"module\")\ndef pandas_dummy_timestamp(pandas_dummy, header):\n time = 1535133442\n time_series = [time + 20 * i for i in range(10)]\n df = pandas_dummy\n df[header[\"col_timestamp\"]] = time_series\n return df\n\n\[email protected](scope=\"module\")\ndef train_test_dummy_timestamp(pandas_dummy_timestamp):\n return train_test_split(pandas_dummy_timestamp, test_size=0.2, random_state=0)\n\n\[email protected](scope=\"module\")\ndef demo_usage_data(header, sar_settings):\n # load the data\n data = pd.read_csv(sar_settings[\"FILE_DIR\"] + \"demoUsage.csv\")\n data[\"rating\"] = pd.Series([1] * data.shape[0])\n data = data.rename(\n columns={\n \"userId\": header[\"col_user\"],\n \"productId\": header[\"col_item\"],\n \"rating\": header[\"col_rating\"],\n \"timestamp\": header[\"col_timestamp\"],\n }\n )\n\n # convert timestamp\n data[header[\"col_timestamp\"]] = data[header[\"col_timestamp\"]].apply(\n lambda s: float(\n calendar.timegm(\n datetime.datetime.strptime(s, \"%Y/%m/%dT%H:%M:%S\").timetuple()\n )\n )\n )\n\n return data\n\n\[email protected](scope=\"module\")\ndef demo_usage_data_spark(spark, demo_usage_data, header):\n data_local = demo_usage_data[[x[1] for x in header.items()]]\n # TODO: install pyArrow in DS VM\n # spark.conf.set(\"spark.sql.execution.arrow.enabled\", \"true\")\n data = spark.createDataFrame(data_local)\n return data\n\n\[email protected](scope=\"module\")\ndef sar_settings():\n return {\n # absolute tolerance parameter for matrix equivalence in SAR tests\n \"ATOL\": 1e-8,\n # directory of the current file - used to link unit test data\n \"FILE_DIR\": \"http://recodatasets.blob.core.windows.net/sarunittest/\",\n # user ID used in the test files (they are designed for this user ID, this is part of the test)\n \"TEST_USER_ID\": \"0003000098E85347\",\n }\n\n\[email protected](\n \"similarity_type, timedecay_formula\", [(\"jaccard\", False), (\"lift\", True)]\n)\ndef test_fit(spark, similarity_type, timedecay_formula, train_test_dummy_timestamp, header):\n model = SARPlus(spark, **header)\n \n trainset, testset = train_test_dummy_timestamp\n\n df = 
spark.createDataFrame(trainset)\n df.write.mode(\"overwrite\").saveAsTable(\"trainset\")\n\n df = spark.table(\"trainset\")\n\n model.fit(df, \n timedecay_formula=timedecay_formula,\n similarity_type=similarity_type)\n\n\n\"\"\"\nMain SAR tests are below - load test files which are used for both Scala SAR and Python reference implementations\n\"\"\"\n\n# Tests 1-6\[email protected](\n \"threshold,similarity_type,file\",\n [\n (1, \"cooccurrence\", \"count\"),\n (1, \"jaccard\", \"jac\"),\n (1, \"lift\", \"lift\"),\n (3, \"cooccurrence\", \"count\"),\n (3, \"jaccard\", \"jac\"),\n (3, \"lift\", \"lift\"),\n ],\n)\ndef test_sar_item_similarity(\n spark, threshold, similarity_type, file, demo_usage_data, sar_settings, header\n):\n\n model = SARPlus(spark, **header)\n\n df = spark.createDataFrame(demo_usage_data)\n model.fit(df, \n timedecay_formula=False,\n time_decay_coefficient=30,\n time_now=None,\n threshold=threshold,\n similarity_type=similarity_type)\n\n # reference\n item_similarity_ref = pd.read_csv(sar_settings[\"FILE_DIR\"] + \"sim_\" + file + str(threshold) + \".csv\")\n\n item_similarity_ref = pd.melt(item_similarity_ref,\n item_similarity_ref.columns[0],\n item_similarity_ref.columns[1:],\n 'i2',\n 'value')\n item_similarity_ref.columns = ['i1', 'i2', 'value']\n\n item_similarity_ref = item_similarity_ref[item_similarity_ref.value > 0]\\\n .sort_values(['i1', 'i2'])\\\n .reset_index(drop=True)\\\n\n # actual\n item_similarity = model.item_similarity\\\n .toPandas()\\\n .sort_values(['i1', 'i2'])\\\n .reset_index(drop=True)\n\n if similarity_type is \"cooccurrence\":\n assert((item_similarity_ref == item_similarity).all().all())\n else:\n assert((item_similarity.iloc[:,:1] == item_similarity_ref.iloc[:,:1]).all().all())\n\n assert np.allclose(\n item_similarity.value.values,\n item_similarity_ref.value.values\n )\n\n# Test 7\ndef test_user_affinity(spark, demo_usage_data, sar_settings, header):\n time_now = demo_usage_data[header[\"col_timestamp\"]].max()\n\n model = SARPlus(spark, **header)\n\n df = spark.createDataFrame(demo_usage_data)\n model.fit(df, \n timedecay_formula=True,\n time_decay_coefficient=30,\n time_now=time_now,\n similarity_type=\"cooccurrence\")\n\n user_affinity_ref = pd.read_csv(sar_settings[\"FILE_DIR\"] + \"user_aff.csv\")\n user_affinity_ref = pd.melt(user_affinity_ref, user_affinity_ref.columns[0], user_affinity_ref.columns[1:], 'ItemId', 'Rating')\n user_affinity_ref = user_affinity_ref[user_affinity_ref.Rating > 0]\\\n .reset_index(drop=True)\n\n # construct dataframe with test user id we'd like to get the affinity for\n df_test = spark.createDataFrame(pd.DataFrame({header['col_user']:[sar_settings[\"TEST_USER_ID\"]]}))\n user_affinity = model.get_user_affinity(df_test).toPandas().reset_index(drop=True)\n\n # verify the that item ids are the same\n assert (user_affinity[header['col_item']] == user_affinity_ref.ItemId).all()\n\n assert np.allclose(\n user_affinity_ref[header['col_rating']].values,\n user_affinity['Rating'].values,\n atol=sar_settings[\"ATOL\"]\n )\n\n\n# Tests 8-10\[email protected](\n \"threshold,similarity_type,file\",\n [(3, \"cooccurrence\", \"count\"), (3, \"jaccard\", \"jac\"), (3, \"lift\", \"lift\")],\n)\ndef test_userpred(\n spark, threshold, similarity_type, file, header, sar_settings, demo_usage_data\n):\n time_now = demo_usage_data[header[\"col_timestamp\"]].max()\n\n test_id = '{0}_{1}_{2}'.format(threshold, similarity_type, file)\n\n model = SARPlus(spark, **header, table_prefix=test_id)\n\n df = 
spark.createDataFrame(demo_usage_data)\n model.fit(df, \n timedecay_formula=True,\n time_decay_coefficient=30,\n time_now=time_now,\n threshold=threshold,\n similarity_type=similarity_type)\n\n url = (sar_settings[\"FILE_DIR\"]\n + \"userpred_\"\n + file\n + str(threshold)\n + \"_userid_only.csv\")\n\n pred_ref = pd.read_csv(url)\n pred_ref = pd.wide_to_long(pred_ref, ['rec','score'], 'user', 'idx')\\\n .sort_values('score', ascending=False)\\\n .reset_index(drop=True)\n\n # Note: it's important to have a separate cache_path for each run as they're interferring with each other\n pred = model.recommend_k_items(\n spark.createDataFrame(demo_usage_data[\n demo_usage_data[header[\"col_user\"]] == sar_settings[\"TEST_USER_ID\"]\n ]),\n cache_path='test_userpred-' + test_id,\n top_k=10,\n n_user_prediction_partitions=1)\n\n pred = pred.toPandas()\\\n .sort_values('score', ascending=False)\\\n .reset_index(drop=True)\n\n assert (pred.MovieId.values == pred_ref.rec.values).all()\n assert np.allclose(pred.score.values, pred_ref.score.values, atol=sar_settings[\"ATOL\"])" ]
[ [ "pandas.read_csv", "pandas.Series", "numpy.allclose", "pandas.wide_to_long", "sklearn.model_selection.train_test_split", "pandas.DataFrame", "pandas.melt" ] ]
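Several tests in the record above reshape a wide reference CSV into long form with pd.melt before comparing it against the model output. A minimal illustration of that wide-to-long step on made-up similarity values:

import pandas as pd

wide = pd.DataFrame({"i1": [1, 2], "a": [0.5, 0.0], "b": [0.1, 0.9]})
long = pd.melt(wide, "i1", ["a", "b"], "i2", "value")
print(long)
#    i1 i2  value
# 0   1  a    0.5
# 1   2  a    0.0
# 2   1  b    0.1
# 3   2  b    0.9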
moeyensj/atm
[ "0523600cf44423a1ef72ca40fff29bbfbe1281a8", "0523600cf44423a1ef72ca40fff29bbfbe1281a8" ]
[ "atm/functions/tests/test_blackbody.py", "atm/functions/tests/test_optical.py" ]
[ "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\nimport numpy as np\nfrom astropy import constants as C\nfrom astropy import units as u\nfrom astropy.modeling.blackbody import blackbody_lambda\nfrom astropy.modeling.blackbody import blackbody_nu\n\nfrom ..blackbody import calcPlanckLambda\nfrom ..blackbody import calcPlanckNu\n\n# Test calcPlanckLambda against Astropy\ndef test_calcPlanckLambda():\n lambd = np.linspace(400*10**-9, 700*10**-9, 1000)\n astropyPlanck = blackbody_lambda(lambd*u.m, 5778*u.K).to(u.W*u.rad**-2*u.m**-3).value\n np.testing.assert_allclose(calcPlanckLambda(lambd, 5778),\n astropyPlanck,\n rtol=1e-5)\n\n# Test calcPlanckNu against Astropy\ndef test_calcPlanckNu():\n nu = np.linspace(C.c.value/(400*10**-9), C.c.value/(700*10**-9), 1000)\n astropyPlanck = blackbody_nu(nu*u.s**-1, 5778*u.K).to(u.W*u.rad**-2*u.m**-2*u.s).value\n np.testing.assert_allclose(calcPlanckNu(nu, 5778),\n astropyPlanck, \n rtol=1e-5)", "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\nimport numpy as np\nfrom numpy import testing as test\n\nfrom ..optical import calcH\nfrom ..optical import calcD\nfrom ..optical import calcPv\n\n# Test calcH\ndef test_calcH():\n # Data taken from:\n # Reference\n # ---------\n # Alan W. Harris and Alan W. Harris, 1997: On the Revision of Radiometric Albedos and Diameters of Asteroids\n # https://www.sciencedirect.com/science/article/pii/S001910359695664X?via%3Dihub\n D = np.array([99.66e3, 19.7e3, 11.19e3, 101.60e3, 5.09e3])\n p_v = np.array([0.167, 0.220, 0.354, 0.062, 0.098])\n H = np.array([7.57, 10.79, 11.50, 8.60, 14.60])\n\n test.assert_allclose(H, calcH(D, p_v), rtol=0.01)\n\n# Test calcD\ndef test_calcD():\n # Data taken from:\n # Reference\n # ---------\n # Alan W. Harris and Alan W. Harris, 1997: On the Revision of Radiometric Albedos and Diameters of Asteroids\n # https://www.sciencedirect.com/science/article/pii/S001910359695664X?via%3Dihub\n D = np.array([99.66e3, 19.7e3, 11.19e3, 101.60e3, 5.09e3])\n p_v = np.array([0.167, 0.220, 0.354, 0.062, 0.098])\n H = np.array([7.57, 10.79, 11.50, 8.60, 14.60])\n\n test.assert_allclose(D, calcD(H, p_v), rtol=0.01)\n\n# Test calcPv\ndef test_calcPv():\n # Data taken from:\n # Reference\n # ---------\n # Alan W. Harris and Alan W. Harris, 1997: On the Revision of Radiometric Albedos and Diameters of Asteroids\n # https://www.sciencedirect.com/science/article/pii/S001910359695664X?via%3Dihub\n D = np.array([99.66e3, 19.7e3, 11.19e3, 101.60e3, 5.09e3])\n p_v = np.array([0.167, 0.220, 0.354, 0.062, 0.098])\n H = np.array([7.57, 10.79, 11.50, 8.60, 14.60])\n\n test.assert_allclose(p_v, calcPv(D, H), rtol=0.01)\n\n \n" ]
[ [ "numpy.linspace" ], [ "numpy.array" ] ]
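test_calcPlanckLambda in the record above checks the package against astropy's blackbody_lambda. For reference, the law being validated, written from first principles with scipy constants (planck_lambda is a hypothetical stand-in; the package's calcPlanckLambda is assumed to implement the same formula):

import numpy as np
from scipy.constants import c, h, k


def planck_lambda(lambd, T):
    # B_lambda(T) = 2 h c^2 / lambda^5 / (exp(h c / (lambda k T)) - 1)
    return 2.0 * h * c**2 / lambd**5 / np.expm1(h * c / (lambd * k * T))


print(planck_lambda(500e-9, 5778))  # ~2.6e13 W m^-3 sr^-1, near the solar peak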
iamacityzen/Project---Data-handling-with-Pandas.
[ "123f0427486a0f0807f6e53f1115ae07437add17" ]
[ "code.py" ]
[ "# --------------\n#Importing header files\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n#Path of the file\r\npath\r\ndata = pd.read_csv(path)\r\n#Code starts here\r\ndata.rename(columns = {'Total' : 'Total_Medals'}, inplace = True)\r\ndata.head(10)\r\n\r\n\r\n\n\n\n# --------------\n\r\n\r\n\r\n\r\n\r\ndata['Better_Event'] = np.where(data['Total_Summer']>data['Total_Winter'], 'Summer','Winter')\r\ndata['Better_Event'] = np.where(data['Total_Summer']==data['Total_Winter'], 'Both',data['Better_Event'])\r\n\r\n#Code starts here\r\nprint(data['Better_Event'])\r\n\r\nbetter_event = 'Summer'\n\n\n# --------------\n#Code starts here\r\n\r\n\r\n\r\n\r\ntop_countries = data[['Country_Name','Total_Summer', 'Total_Winter','Total_Medals']]\r\ntop_countries.drop(top_countries.tail(1).index,inplace = True)\r\ndef top_ten(top_countries,parameters):\r\n return list(top_countries.nlargest(10,parameters)['Country_Name'])\r\ntop_10_summer = top_ten(top_countries,'Total_Summer') \r\ntop_10_winter = top_ten(top_countries,'Total_Winter') \r\ntop_10 = top_ten(top_countries,'Total_Medals')\r\ncommon = list(set(top_10_summer).intersection(set(top_10_winter)).intersection(set\r\n(top_10))) \r\n\r\n\n\n\n# --------------\n#Code starts here\r\nsummer_df = data[data['Country_Name'].isin(top_10_summer)]\r\nwinter_df = data[data['Country_Name'].isin(top_10_winter)]\r\ntop_df = data[data['Country_Name'].isin(top_10)]\r\n\n\n\n# --------------\n#Code starts here\r\n\r\n\r\n\r\n\r\n\r\n\r\nsummer_df['Golden_Ratio']=summer_df['Gold_Summer']/summer_df['Total_Summer']\r\nsummer_max_ratio=max(summer_df['Golden_Ratio'])\r\nsummer_country_gold=summer_df.loc[summer_df['Golden_Ratio'].idxmax(),'Country_Name']\r\nwinter_df['Golden_Ratio']=winter_df['Gold_Winter']/summer_df['Total_Winter']\r\nwinter_max_ratio=0.40\r\nwinter_country_gold=summer_df.loc[winter_df['Golden_Ratio'].idxmax(),'Country_Name']\r\ntop_df['Golden_Ratio']=summer_df['Gold_Total']/summer_df['Total_Medals']\r\ntop_max_ratio=max(top_df['Golden_Ratio'])\r\ntop_country_gold=top_df.loc[top_df['Golden_Ratio'].idxmax(),'Country_Name']\r\n\r\n\r\n\r\n\r\n\n\n\n# --------------\n\r\n\r\n\r\n\r\n#Code starts here\r\ndata_1 = data[:-1]\r\ndata_1['Total_Points'] = 3*data_1['Gold_Total'] + 2*data_1['Silver_Total'] + data_1['Bronze_Total']\r\nmost_points=max(data_1['Total_Points'])\r\nbest_country = data_1.loc[data_1['Total_Points'].idxmax(),'Country_Name']\n\n\n# --------------\n#Code starts here\r\nbest = data[data['Country_Name'] == best_country]\r\nbest = best[['Gold_Total','Silver_Total','Bronze_Total']]\r\nbest.plot.bar()\r\n\n\n\n" ]
[ [ "pandas.read_csv", "numpy.where" ] ]
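The 'Better_Event' step in the exercise above chains two np.where calls to produce a three-way label; here is that pattern in isolation on toy data:

import numpy as np
import pandas as pd

df = pd.DataFrame({"Total_Summer": [3, 1, 2], "Total_Winter": [1, 1, 5]})
better = np.where(df["Total_Summer"] > df["Total_Winter"], "Summer", "Winter")
better = np.where(df["Total_Summer"] == df["Total_Winter"], "Both", better)
print(better)  # ['Summer' 'Both' 'Winter']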
KalinNonchev/kipoiseq
[ "38d1134885e401198acd3883286dc55627cf12a6" ]
[ "kipoiseq/transforms/transforms.py" ]
[ "from __future__ import division\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport numpy as np\nfrom kipoiseq.transforms import functional as F\nfrom kipoiseq.utils import DNA, parse_alphabet, parse_dtype\n\n\nclass Compose(object):\n \"\"\"Composes several transforms together.\n\n # Arguments\n\n transforms (list of ``Transform`` objects): list of transforms to compose.\n\n Example:\n >>> transforms.Compose([\n >>> transforms.CenterCrop(10),\n >>> transforms.ToTensor(),\n >>> ])\n \"\"\"\n\n def __init__(self, transforms):\n self.transforms = transforms\n\n def __call__(self, img):\n for t in self.transforms:\n img = t(img)\n return img\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n for t in self.transforms:\n format_string += '\\n'\n format_string += ' {0}'.format(t)\n format_string += '\\n)'\n return format_string\n\n\n# numpy wrappers\n\nclass DummyAxis(object):\n \"\"\"np.expand_dims wrapper - Insert a dummy axis (calls np.expand_dims)\n \"\"\"\n\n def __init__(self, axis=None):\n self.axis = axis\n\n def __call__(self, x):\n if self.axis is not None:\n return np.expand_dims(x, self.axis)\n else:\n return x\n\n\nclass SwapAxes(object):\n \"\"\"np.swapaxes wrapper\n\n If any if the axis is None, do nothing.\n \"\"\"\n\n def __init__(self, axis1=None, axis2=None):\n self.axis1 = axis1\n self.axis2 = axis2\n\n def __call__(self, x):\n if self.axis1 is None or self.axis2 is None:\n return x\n else:\n return np.swapaxes(x, self.axis1, self.axis2)\n\n# Intervals\n\n\nclass ResizeInterval(object):\n \"\"\"Resize the interval\n \"\"\"\n\n def __init__(self, width, anchor='center'):\n self.width = width\n self.anchor = anchor\n\n def __call__(self, interval):\n return F.resize_interval(interval, self.width, self.anchor)\n\n\n# Sequences\n\nclass OneHot(object):\n \"\"\"One-hot encode the sequence\n\n # Arguments\n alphabet: alphabet to use for the one-hot encoding. This defines the order of the one-hot encoding.\n Can either be a list or a string: 'ACGT' or ['A, 'C', 'G', 'T']\n neutral_alphabet: which element to use\n neutral_value: value of the neutral element\n dtype: defines the numpy dtype of the returned array.\n alphabet_axis: axis along which the alphabet runs (e.g. A,C,G,T for DNA)\n dummy_axis: defines in which dimension a dummy axis should be added. None if no dummy axis is required.\n \"\"\"\n\n def __init__(self, alphabet=DNA, neutral_alphabet='N', neutral_value=0.25, dtype=None):\n self.alphabet = alphabet\n if isinstance(neutral_alphabet, str):\n neutral_alphabet = [neutral_alphabet]\n self.neutral_alphabet = neutral_alphabet\n self.neutral_value = neutral_value\n self.dtype = dtype\n\n def __call__(self, seq):\n if self.alphabet == DNA and self.neutral_alphabet == ['N'] and self.neutral_value == 0.25:\n return F.one_hot_dna(seq, self.dtype)\n else:\n return F.one_hot(seq,\n alphabet=self.alphabet,\n neutral_alphabet=self.neutral_alphabet,\n neutral_value=self.neutral_value,\n dtype=self.dtype)\n\n\nclass ReorderedOneHot(object):\n \"\"\"Flexible one-hot encoding class that can account for\n many different one-hot encoding formats.\n\n # Arguments\n alphabet: alphabet to use for the one-hot encoding. 
This defines the order of the one-hot encoding.\n Can either be a list or a string: 'ACGT' or ['A', 'C', 'G', 'T']\n neutral_alphabet: (single string character) the neutral element\n neutral_value: value of the neutral element\n dtype: defines the numpy dtype of the returned array.\n alphabet_axis: axis along which the alphabet runs (e.g. A,C,G,T for DNA)\n dummy_axis: defines in which dimension a dummy axis should be added. None if no dummy axis is required.\n\n Examples (`None` = sequence axis):\n - `(None, 4)`: default\n - `(4, None)`: `alphabet_axis=0`\n - `(4, 1, None)`: `alphabet_axis=0, dummy_axis=1`\n \"\"\"\n\n def __init__(self,\n alphabet=DNA,\n neutral_alphabet='N',\n neutral_value=0.25,\n dtype=None,\n alphabet_axis=1,\n dummy_axis=None):\n # make sure the alphabet axis and the dummy axis are valid:\n if dummy_axis is not None:\n if alphabet_axis == dummy_axis:\n raise ValueError(\"alphabet_axis can't be the same as dummy_axis\")\n if not (dummy_axis >= 0 and dummy_axis <= 2):\n raise ValueError(\"dummy_axis must be either 0, 1 or 2\")\n assert alphabet_axis >= 0 and (alphabet_axis < 2 or (\n alphabet_axis <= 2 and dummy_axis is not None))\n\n self.alphabet_axis = alphabet_axis\n self.dummy_axis = dummy_axis\n self.alphabet = parse_alphabet(alphabet)\n self.dtype = parse_dtype(dtype)\n self.neutral_alphabet = neutral_alphabet\n self.neutral_value = neutral_value\n\n # set the transform parameters correctly\n if dummy_axis is not None and dummy_axis < 2:\n # dummy axis is added somewhere in the middle, so the alphabet axis is at the end now\n existing_alphabet_axis = 2\n else:\n # alphabet axis stayed the same\n existing_alphabet_axis = 1\n\n # check if no swapping needed\n if existing_alphabet_axis == self.alphabet_axis:\n self.alphabet_axis = None\n\n # how to transform the input\n self.transform = Compose([\n OneHot(self.alphabet,\n neutral_alphabet=self.neutral_alphabet,\n neutral_value=self.neutral_value,\n dtype=self.dtype), # one-hot-encode\n DummyAxis(self.dummy_axis), # optionally inject the dummy axis\n # put the alphabet axis elsewhere\n SwapAxes(existing_alphabet_axis, self.alphabet_axis),\n ])\n\n def __call__(self, seq):\n return self.transform(seq)\n\n def get_output_shape(self, seqlen=None):\n \"\"\"Compute the output shape\n \"\"\"\n if self.dummy_axis is not None and self.alphabet_axis == self.dummy_axis:\n raise ValueError(\"alphabet_axis can't be the same as dummy_axis\")\n\n # default\n output_shape = (seqlen, len(self.alphabet))\n alphabet_axis = self.alphabet_axis\n\n if self.dummy_axis is not None and self.dummy_axis < 2:\n # dummy axis is added somewhere in the middle, so the alphabet axis is at the end now\n existing_alphabet_axis = 2\n else:\n existing_alphabet_axis = 1\n\n if existing_alphabet_axis == alphabet_axis:\n alphabet_axis = None\n\n # inject the dummy axis\n if self.dummy_axis is not None:\n output_shape = output_shape[:self.dummy_axis] + \\\n (1,) + output_shape[self.dummy_axis:]\n\n # swap axes\n if alphabet_axis is not None:\n sh = list(output_shape)\n sh[alphabet_axis], sh[existing_alphabet_axis] = sh[existing_alphabet_axis], sh[alphabet_axis]\n output_shape = tuple(sh)\n\n return output_shape\n\n\n# Splicing\n\nclass SplitSplicingSeq(object):\n \"\"\"Split returned splice sequence (exon with flanking intron) to required format.\n It splits into ['intron5prime', 'acceptor', 'exon', 'donor', 'intron3prime'].\n 'intron5prime' is the intron 5' of the exon, while 'intron3prime' is from the 3'.\n\n # Arguments\n exon_cut_l: when extracting the exon 
feature, how many base pairs to cut off at the beginning of the exon\n exon_cut_r: when extracting the exon feature, how many base pairs to cut off at the end of the exon\n (cut out the part that is considered as the acceptor or donor site)\n intron5prime_cut: how many bp to cut out at the end of the acceptor intron that is considered as the acceptor site\n intron3prime_cut: how many bp to cut out at the end of the donor intron that is considered as the donor site\n acceptor_intron_len: what length in acceptor intron to consider for acceptor site model\n acceptor_exon_len: what length in acceptor exon to consider for acceptor site model\n donor_intron_len: what length in donor intron to consider for donor site model\n donor_exon_len: what length in donor exon to consider for donor site model\n \"\"\"\n\n def __init__(self,\n exon_cut_l=0,\n exon_cut_r=0,\n intron5prime_cut=6,\n intron3prime_cut=6,\n acceptor_intron_len=50,\n acceptor_exon_len=3,\n donor_exon_len=5,\n donor_intron_len=13\n ):\n\n self.exon_cut_l = exon_cut_l\n self.exon_cut_r = exon_cut_r\n self.intron5prime_cut = intron5prime_cut\n self.intron3prime_cut = intron3prime_cut\n self.acceptor_intron_len = acceptor_intron_len\n self.acceptor_exon_len = acceptor_exon_len\n self.donor_exon_len = donor_exon_len\n self.donor_intron_len = donor_intron_len\n\n def __call__(self,\n x,\n intron5prime_len,\n intron3prime_len\n ):\n \"\"\"\n # Arguments\n x: a sequence to split\n intron5prime_len: 5' intronic sequence length to take.\n intron3prime_len: 3' intronic sequence length to take.\n \"\"\"\n lackl = self.acceptor_intron_len - \\\n intron5prime_len # pad with N if the left sequence is not long enough\n if lackl >= 0:\n x = \"N\" * (lackl + 1) + x\n intron5prime_len += lackl + 1\n lackr = self.donor_intron_len - intron3prime_len\n if lackr >= 0:\n x = x + \"N\" * (lackr + 1)\n intron3prime_len += lackr + 1\n\n intron5prime = x[:intron5prime_len - self.intron5prime_cut]\n acceptor = x[(intron5prime_len - self.acceptor_intron_len)\n :(intron5prime_len + self.acceptor_exon_len)]\n exon = x[(intron5prime_len + self.exon_cut_l)\n :(-intron3prime_len - self.exon_cut_r)]\n donor = x[(-intron3prime_len - self.donor_exon_len)\n :(-intron3prime_len + self.donor_intron_len)]\n intron3prime = x[-intron3prime_len + self.intron3prime_cut:]\n\n import warnings\n if donor[self.donor_exon_len:self.donor_exon_len + 2] != \"GT\":\n warnings.warn(\"Non-GT donor\", UserWarning)\n if acceptor[self.acceptor_intron_len - 2:self.acceptor_intron_len] != \"AG\":\n warnings.warn(\"Non-AG acceptor\", UserWarning)\n if len(exon) == 0:\n exon = 'N'\n\n return {\n \"intron5prime\": intron5prime,\n \"acceptor\": acceptor,\n \"exon\": exon,\n \"donor\": donor,\n \"intron3prime\": intron3prime\n }\n" ]
[ [ "numpy.swapaxes", "numpy.expand_dims" ] ]
acdh-oeaw/thunau-old
[ "a3023885470e80f7312e43561028398bffd713e0" ]
[ "importLegacyData.py" ]
[ "\n# coding: utf-8\n\n# In[33]:\n\n\nimport pandas as pd\nfrom dateutil import parser\n\n\n# In[34]:\n\n\nfile = \"data/thunau_export_20170626.csv\"\n\n\n# In[35]:\n\n\ndf = pd.read_csv(file)\n\n\n# In[37]:\n\n\ntroubles = []\nfor index, row in df.iterrows():\n doc, _ = Document.objects.get_or_create(legacy_id=row['ID'])\n doc.filename = row['Dateiname']\n doc.entry_order = row['Ordnungskriterium/Eingabe']\n vocabs_media, _ = SkosConcept.objects.get_or_create(pref_label=row['Medium'])\n vocabs_media_scheme, _ = SkosConceptScheme.objects.get_or_create(dc_title='Medium')\n vocabs_media.scheme.set([vocabs_media_scheme])\n vocabs_media.save()\n doc.medium = vocabs_media\n vocabs_analogformat, _ = SkosConcept.objects.get_or_create(pref_label=row['Analoges Format'])\n vocabs_analogformat_scheme, _ = SkosConceptScheme.objects.get_or_create(dc_title='Analoges Format')\n vocabs_analogformat.scheme.set([vocabs_analogformat_scheme])\n vocabs_analogformat.save()\n doc.analogue_format = vocabs_analogformat\n try:\n names = row['Autor'].split(';')\n except:\n names = row['Autor']\n try:\n for x in names:\n name = x.split(' ')[-1]\n forename = x.split(' ')[-2]\n author, _ = Person.objects.get_or_create(name=name, forename=forename)\n doc.author.add(author)\n doc.save()\n except:\n troubles.append({'id': row['ID'], 'troublefield': 'Autor', 'value': row['Autor']})\n \n institution, _ = Institution.objects.get_or_create(name=row['Institution'])\n doc.institution.add(institution)\n \n doc.date_analogue = row['Analoges Datum']\n try:\n doc.date_digitization = parser.parse(row['Datum der Digitalisierung'])\n except:\n troubles.append({'id': row['ID'], 'troublefield': row['Datum der Digitalisierung']})\n vocabs_digitalformat, _ = SkosConcept.objects.get_or_create(pref_label=row['Speicherformat'])\n vocabs_digitalformat_scheme, _ = SkosConceptScheme.objects.get_or_create(dc_title='Ordnungskriterium/Eingabe')\n vocabs_digitalformat.scheme.set = vocabs_digitalformat_scheme\n vocabs_digitalformat.save()\n doc.digital_format = vocabs_digitalformat\n doc.note = row['Anmerkung']\n doc.content = row['Inhalt']\n vocabs_group, _ = SkosConcept.objects.get_or_create(pref_label=row['Gruppe'])\n vocabs_group_scheme, _ = SkosConceptScheme.objects.get_or_create(dc_title='Gruppe')\n vocabs_group.scheme.set([vocabs_group_scheme])\n vocabs_group.save()\n doc.topic_group = vocabs_group\n doc.combination = row['Kombination']\n doc.location_id = row['Fundnummer in FDB']\n temp_place, _ = Place.objects.get_or_create(name=row['KG/Areal'])\n doc.place = temp_place\n doc.location_digitized_object = row['Aufbewahrung Datei']\n doc.location_analogue = row['Standort analog']\n names = row['Bearbeiter Digitalisierung'].split(',')\n temp_curator, _ = Person.objects.get_or_create(\n name=names[0].split(' ')[1],\n forename=names[0].split(' ')[0]\n )\n temp_inst_a, _ = Institution.objects.get_or_create(name=names[1])\n temp_curator.institution = temp_inst_a\n temp_curator.save()\n doc.curator = temp_curator\n doc.filesize = row['Dateigröße KB']\n temp_dig_inst, _ = Institution.objects.get_or_create(name=row['Ort der Digitalisierung'])\n doc.place_digizization = temp_dig_inst\n doc.path = row['OREA_Doku_Plattform Thunau am Kamp_Dateipfad']\n doc.amendments = row['Ergänzungen']\n doc.save()\n doc.path = row['OREA_Doku_Plattform Thunau am Kamp_Dateipfad']\n\n\n# In[39]:\n\n\nimport json\n\n\n# In[41]:\n\n\nwith open('troubles.json', 'w') as fp:\n json.dump(troubles, fp)\n\n\n# In[ ]:\n\n\n\n\n" ]
[ [ "pandas.read_csv" ] ]
GalBenZvi/niworkflows
[ "14c54533a0300ee760b285f7e4d0a88593b4c075" ]
[ "niworkflows/interfaces/surf.py" ]
[ "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n#\n# Copyright 2021 The NiPreps Developers <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# We support and encourage derived works from this project, please read\n# about our expectations at\n#\n# https://www.nipreps.org/community/licensing/\n#\n\"\"\"Handling surfaces.\"\"\"\nimport os\nimport re\nfrom pathlib import Path\nfrom collections import defaultdict\n\nimport numpy as np\nimport nibabel as nb\n\nfrom nipype.utils.filemanip import fname_presuffix\nfrom nipype.interfaces.base import (\n BaseInterfaceInputSpec,\n TraitedSpec,\n DynamicTraitedSpec,\n SimpleInterface,\n CommandLine,\n CommandLineInputSpec,\n File,\n traits,\n isdefined,\n InputMultiPath,\n OutputMultiPath,\n Undefined,\n)\n\n\nSECONDARY_ANAT_STRUC = {\n \"smoothwm\": \"GrayWhite\",\n \"pial\": \"Pial\",\n \"midthickness\": \"GrayMid\",\n}\n\n\nclass _NormalizeSurfInputSpec(BaseInterfaceInputSpec):\n in_file = File(mandatory=True, exists=True, desc=\"Freesurfer-generated GIFTI file\")\n transform_file = File(exists=True, desc=\"FSL or LTA affine transform file\")\n\n\nclass _NormalizeSurfOutputSpec(TraitedSpec):\n out_file = File(desc=\"output file with re-centered GIFTI coordinates\")\n\n\nclass NormalizeSurf(SimpleInterface):\n \"\"\"\n Normalize a FreeSurfer-generated GIFTI image.\n\n FreeSurfer includes an offset to the center of the brain volume that is not\n respected by all software packages.\n Normalization involves adding this offset to the coordinates of all\n vertices, and zeroing out that offset, to ensure consistent behavior\n across software packages.\n In particular, this normalization is consistent with the Human Connectome\n Project pipeline (see `AlgorithmSurfaceApplyAffine`_ and\n `FreeSurfer2CaretConvertAndRegisterNonlinear`_), although the the HCP\n may not zero out the offset.\n\n GIFTI files with ``midthickness``/``graymid`` in the name are also updated\n to include the following metadata entries::\n\n {\n AnatomicalStructureSecondary: MidThickness,\n GeometricType: Anatomical\n }\n\n This interface is intended to be applied uniformly to GIFTI surface files\n generated from the ``?h.white``/``?h.smoothwm`` and ``?h.pial`` surfaces,\n as well as externally-generated ``?h.midthickness``/``?h.graymid`` files.\n In principle, this should apply safely to any other surface, although it is\n less relevant to surfaces that don't describe an anatomical structure.\n\n .. _AlgorithmSurfaceApplyAffine: https://github.com/Washington-University/workbench\\\n/blob/1b79e56/src/Algorithms/AlgorithmSurfaceApplyAffine.cxx#L73-L91\n\n .. 
_FreeSurfer2CaretConvertAndRegisterNonlinear: https://github.com/Washington-University/\\\nPipelines/blob/ae69b9a/PostFreeSurfer/scripts/FreeSurfer2CaretConvertAndRegisterNonlinear.sh\\\n#L147-154\n\n \"\"\"\n\n input_spec = _NormalizeSurfInputSpec\n output_spec = _NormalizeSurfOutputSpec\n\n def _run_interface(self, runtime):\n transform_file = self.inputs.transform_file\n if not isdefined(transform_file):\n transform_file = None\n self._results[\"out_file\"] = normalize_surfs(\n self.inputs.in_file, transform_file, newpath=runtime.cwd\n )\n return runtime\n\n\nclass _Path2BIDSInputSpec(BaseInterfaceInputSpec):\n in_file = File(mandatory=True, desc=\"input GIFTI file\")\n\n\nclass _Path2BIDSOutputSpec(DynamicTraitedSpec):\n extension = traits.Str()\n\n\nclass Path2BIDS(SimpleInterface):\n \"\"\"\n Extract BIDS entities from paths using a pattern.\n\n Default pattern is given for Gifti surfaces.\n\n >>> Path2BIDS(in_file='_fix_surfs0/rh.pial.surf.gii').run().outputs\n <BLANKLINE>\n extension = .surf.gii\n hemi = R\n suffix = pial\n <BLANKLINE>\n\n >>> Path2BIDS(in_file='_fix_surfs0/rh.pial.gii').run().outputs\n <BLANKLINE>\n extension = .gii\n hemi = R\n suffix = pial\n <BLANKLINE>\n\n >>> Path2BIDS(in_file='_fix_surfs0/rh.smoothwm_converted.gii').run().outputs\n <BLANKLINE>\n extension = .gii\n hemi = R\n suffix = smoothwm\n <BLANKLINE>\n\n >>> Path2BIDS(in_file='_fix_surfs0/rh.smoothwm_converted.func.gii').run().outputs\n <BLANKLINE>\n extension = .func.gii\n hemi = R\n suffix = smoothwm\n <BLANKLINE>\n\n \"\"\"\n\n input_spec = _Path2BIDSInputSpec\n output_spec = _Path2BIDSOutputSpec\n _pattern = re.compile(\n r\"(?P<hemi>[lr])h.(?P<suffix>(wm|smoothwm|pial|midthickness|\"\n r\"inflated|vinflated|sphere|flat))[\\w\\d_-]*(?P<extprefix>\\.\\w+)?\"\n )\n _excluded = (\"extprefix\",)\n\n def __init__(self, pattern=None, **inputs):\n \"\"\"Initialize the interface.\"\"\"\n super().__init__(**inputs)\n if pattern:\n self._pattern = re.compile(pattern)\n\n def _outputs(self):\n outputs = self.output_spec()\n outputs.trait_set(\n trait_change_notify=False,\n **{\n entity: Undefined\n for entity in self._pattern.groupindex.keys()\n if entity not in self._excluded\n },\n )\n return outputs\n\n def _run_interface(self, runtime):\n in_file = Path(self.inputs.in_file)\n extension = \"\".join(in_file.suffixes[-((in_file.suffixes[-1] == \".gz\") + 1):])\n info = self._pattern.match(in_file.name[: -len(extension)]).groupdict()\n self._results[\"extension\"] = f\"{info.pop('extprefix', None) or ''}{extension}\"\n self._results.update(info)\n if \"hemi\" in self._results:\n self._results[\"hemi\"] = self._results[\"hemi\"].upper()\n return runtime\n\n\nclass _GiftiNameSourceInputSpec(BaseInterfaceInputSpec):\n in_file = File(mandatory=True, exists=True, desc=\"input GIFTI file\")\n pattern = traits.Str(\n mandatory=True, desc='input file name pattern (must capture named group \"LR\")'\n )\n template = traits.Str(mandatory=True, desc=\"output file name template\")\n template_kwargs = traits.Dict(desc=\"additional template keyword value pairs\")\n\n\nclass _GiftiNameSourceOutputSpec(TraitedSpec):\n out_name = traits.Str(desc=\"(partial) filename formatted according to template\")\n\n\nclass GiftiNameSource(SimpleInterface):\n r\"\"\"\n Construct a new filename for a GIFTI file.\n\n Construct a new filename based on an input filename, a matching pattern,\n and a related template, with optionally additional keywords.\n\n This interface is intended for use with GIFTI files, to generate names\n conforming to 
Section 9.0 of the `GIFTI Standard`_.\n\n Patterns are expected to have named groups, including one named \"LR\" that\n matches \"l\" or \"r\".\n These groups must correspond to named format elements in the template.\n\n .. testsetup::\n\n >>> open('lh.pial.gii', 'w').close()\n >>> open('rh.fsaverage.gii', 'w').close()\n\n Examples\n --------\n >>> surf_namer = GiftiNameSource()\n >>> surf_namer.inputs.pattern = r'(?P<LR>[lr])h.(?P<surf>\\w+).gii'\n >>> surf_namer.inputs.template = r'{surf}.{LR}.surf'\n >>> surf_namer.inputs.in_file = 'lh.pial.gii'\n >>> res = surf_namer.run()\n >>> res.outputs.out_name\n 'pial.L.surf'\n\n >>> func_namer = GiftiNameSource()\n >>> func_namer.inputs.pattern = r'(?P<LR>[lr])h.(?P<space>\\w+).gii'\n >>> func_namer.inputs.template = r'space-{space}.{LR}.func'\n >>> func_namer.inputs.in_file = 'rh.fsaverage.gii'\n >>> res = func_namer.run()\n >>> res.outputs.out_name\n 'space-fsaverage.R.func'\n\n >>> namer = GiftiNameSource()\n >>> namer.inputs.pattern = r'(?P<LR>[lr])h.(?P<space>\\w+).gii'\n >>> namer.inputs.template = r'space-{space}_density-{density}_hemi-{LR}.func'\n >>> namer.inputs.in_file = 'rh.fsaverage.gii'\n >>> namer.inputs.template_kwargs = {'density': '10k'}\n >>> res = namer.run()\n >>> res.outputs.out_name\n 'space-fsaverage_density-10k_hemi-R.func'\n\n .. testcleanup::\n\n >>> import os\n >>> os.unlink('lh.pial.gii')\n >>> os.unlink('rh.fsaverage.gii')\n\n .. _GIFTI Standard: https://www.nitrc.org/frs/download.php/2871/GIFTI_Surface_Format.pdf\n \"\"\"\n input_spec = _GiftiNameSourceInputSpec\n output_spec = _GiftiNameSourceOutputSpec\n\n def _run_interface(self, runtime):\n in_format = re.compile(self.inputs.pattern)\n in_file = os.path.basename(self.inputs.in_file)\n info = in_format.match(in_file).groupdict()\n info[\"LR\"] = info[\"LR\"].upper()\n if self.inputs.template_kwargs:\n info.update(self.inputs.template_kwargs)\n filefmt = self.inputs.template\n self._results[\"out_name\"] = filefmt.format(**info)\n return runtime\n\n\nclass _GiftiSetAnatomicalStructureInputSpec(BaseInterfaceInputSpec):\n in_file = File(\n mandatory=True, exists=True, desc='GIFTI file beginning with \"lh.\" or \"rh.\"'\n )\n\n\nclass _GiftiSetAnatomicalStructureOutputSpec(TraitedSpec):\n out_file = File(desc=\"output file with updated AnatomicalStructurePrimary entry\")\n\n\nclass GiftiSetAnatomicalStructure(SimpleInterface):\n \"\"\"\n Set AnatomicalStructurePrimary attribute of GIFTI image based on filename.\n\n For files that begin with ``lh.`` or ``rh.``, update the metadata to\n include::\n\n {\n AnatomicalStructurePrimary: (CortexLeft | CortexRight),\n }\n\n If ``AnatomicalStructurePrimary`` is already set, this function has no\n effect.\n\n \"\"\"\n\n input_spec = _GiftiSetAnatomicalStructureInputSpec\n output_spec = _GiftiSetAnatomicalStructureOutputSpec\n\n def _run_interface(self, runtime):\n img = nb.load(self.inputs.in_file)\n if any(nvpair.name == \"AnatomicalStructurePrimary\" for nvpair in img.meta.data):\n out_file = self.inputs.in_file\n else:\n fname = os.path.basename(self.inputs.in_file)\n if fname[:3] in (\"lh.\", \"rh.\"):\n asp = \"CortexLeft\" if fname[0] == \"l\" else \"CortexRight\"\n else:\n raise ValueError(\n \"AnatomicalStructurePrimary cannot be derived from filename\"\n )\n img.meta.data.insert(\n 0, nb.gifti.GiftiNVPairs(\"AnatomicalStructurePrimary\", asp)\n )\n out_file = os.path.join(runtime.cwd, fname)\n img.to_filename(out_file)\n self._results[\"out_file\"] = out_file\n return runtime\n\n\nclass 
_GiftiToCSVInputSpec(BaseInterfaceInputSpec):\n in_file = File(mandatory=True, exists=True, desc=\"GIFTI file\")\n itk_lps = traits.Bool(False, usedefault=True, desc=\"flip XY axes\")\n\n\nclass _GiftiToCSVOutputSpec(TraitedSpec):\n out_file = File(desc=\"output csv file\")\n\n\nclass GiftiToCSV(SimpleInterface):\n \"\"\"Converts GIfTI files to CSV to make them amenable to use with\n ``antsApplyTransformsToPoints``.\"\"\"\n\n input_spec = _GiftiToCSVInputSpec\n output_spec = _GiftiToCSVOutputSpec\n\n def _run_interface(self, runtime):\n gii = nb.load(self.inputs.in_file)\n data = gii.darrays[0].data\n\n if self.inputs.itk_lps: # ITK: flip X and Y around 0\n data[:, :2] *= -1\n\n # antsApplyTransformsToPoints requires 5 cols with headers\n csvdata = np.hstack((data, np.zeros((data.shape[0], 3))))\n\n out_file = fname_presuffix(\n self.inputs.in_file, newpath=runtime.cwd, use_ext=False, suffix=\"points.csv\"\n )\n np.savetxt(\n out_file,\n csvdata,\n delimiter=\",\",\n header=\"x,y,z,t,label,comment\",\n fmt=[\"%.5f\"] * 4 + [\"%d\"] * 2,\n )\n self._results[\"out_file\"] = out_file\n return runtime\n\n\nclass _CSVToGiftiInputSpec(BaseInterfaceInputSpec):\n in_file = File(mandatory=True, exists=True, desc=\"CSV file\")\n gii_file = File(mandatory=True, exists=True, desc=\"reference GIfTI file\")\n itk_lps = traits.Bool(False, usedefault=True, desc=\"flip XY axes\")\n\n\nclass _CSVToGiftiOutputSpec(TraitedSpec):\n out_file = File(desc=\"output GIfTI file\")\n\n\nclass CSVToGifti(SimpleInterface):\n \"\"\"Converts CSV files back to GIfTI, after moving vertices with\n ``antsApplyTransformsToPoints``.\"\"\"\n\n input_spec = _CSVToGiftiInputSpec\n output_spec = _CSVToGiftiOutputSpec\n\n def _run_interface(self, runtime):\n gii = nb.load(self.inputs.gii_file)\n data = np.loadtxt(\n self.inputs.in_file, delimiter=\",\", skiprows=1, usecols=(0, 1, 2)\n )\n\n if self.inputs.itk_lps: # ITK: flip X and Y around 0\n data[:, :2] *= -1\n\n gii.darrays[0].data = data[:, :3].astype(gii.darrays[0].data.dtype)\n out_file = fname_presuffix(\n self.inputs.gii_file, newpath=runtime.cwd, suffix=\".transformed\"\n )\n gii.to_filename(out_file)\n self._results[\"out_file\"] = out_file\n return runtime\n\n\nclass _SurfacesToPointCloudInputSpec(BaseInterfaceInputSpec):\n in_files = InputMultiPath(\n File(exists=True), mandatory=True, desc=\"input GIfTI files\"\n )\n out_file = File(\"pointcloud.ply\", usedefault=True, desc=\"output file name\")\n\n\nclass _SurfacesToPointCloudOutputSpec(TraitedSpec):\n out_file = File(desc=\"output pointcloud in PLY format\")\n\n\nclass SurfacesToPointCloud(SimpleInterface):\n \"\"\"Converts multiple surfaces into a pointcloud with corresponding normals\n to then apply Poisson reconstruction\"\"\"\n\n input_spec = _SurfacesToPointCloudInputSpec\n output_spec = _SurfacesToPointCloudOutputSpec\n\n def _run_interface(self, runtime):\n from pathlib import Path\n\n giis = [nb.load(g) for g in self.inputs.in_files]\n vertices = np.vstack([g.darrays[0].data for g in giis])\n norms = np.vstack(\n [vertex_normals(g.darrays[0].data, g.darrays[1].data) for g in giis]\n )\n out_file = Path(self.inputs.out_file).resolve()\n pointcloud2ply(vertices, norms, out_file=out_file)\n self._results[\"out_file\"] = str(out_file)\n return runtime\n\n\nclass _PoissonReconInputSpec(CommandLineInputSpec):\n in_file = File(\n exists=True,\n mandatory=True,\n argstr=\"--in %s\",\n desc=\"input PLY pointcloud (vertices + normals)\",\n )\n out_file = File(\n argstr=\"--out %s\",\n keep_extension=True,\n 
name_source=[\"in_file\"],\n name_template=\"%s_avg\",\n desc=\"output PLY triangular mesh\",\n )\n\n\nclass _PoissonReconOutputSpec(TraitedSpec):\n out_file = File(exists=True, desc=\"output PLY triangular mesh\")\n\n\nclass PoissonRecon(CommandLine):\n \"\"\"Runs Poisson Reconstruction on a cloud of points + normals\n given in PLY format.\n See https://github.com/mkazhdan/PoissonRecon\n \"\"\"\n\n input_spec = _PoissonReconInputSpec\n output_spec = _PoissonReconOutputSpec\n _cmd = \"PoissonRecon\"\n\n\nclass _PLYtoGiftiInputSpec(BaseInterfaceInputSpec):\n in_file = File(exists=True, mandatory=True, desc=\"input PLY file\")\n surf_key = traits.Str(mandatory=True, desc=\"reference GIfTI file\")\n\n\nclass _PLYtoGiftiOutputSpec(TraitedSpec):\n out_file = File(desc=\"output GIfTI file\")\n\n\nclass PLYtoGifti(SimpleInterface):\n \"\"\"Convert surfaces from PLY to GIfTI\"\"\"\n\n input_spec = _PLYtoGiftiInputSpec\n output_spec = _PLYtoGiftiOutputSpec\n\n def _run_interface(self, runtime):\n from pathlib import Path\n\n meta = {\n \"GeometricType\": \"Anatomical\",\n \"VolGeomWidth\": \"256\",\n \"VolGeomHeight\": \"256\",\n \"VolGeomDepth\": \"256\",\n \"VolGeomXsize\": \"1.0\",\n \"VolGeomYsize\": \"1.0\",\n \"VolGeomZsize\": \"1.0\",\n \"VolGeomX_R\": \"-1.0\",\n \"VolGeomX_A\": \"0.0\",\n \"VolGeomX_S\": \"0.0\",\n \"VolGeomY_R\": \"0.0\",\n \"VolGeomY_A\": \"0.0\",\n \"VolGeomY_S\": \"-1.0\",\n \"VolGeomZ_R\": \"0.0\",\n \"VolGeomZ_A\": \"1.0\",\n \"VolGeomZ_S\": \"0.0\",\n \"VolGeomC_R\": \"0.0\",\n \"VolGeomC_A\": \"0.0\",\n \"VolGeomC_S\": \"0.0\",\n }\n meta[\"AnatomicalStructurePrimary\"] = \"Cortex%s\" % (\n \"Left\" if self.inputs.surf_key.startswith(\"lh\") else \"Right\"\n )\n meta[\"AnatomicalStructureSecondary\"] = SECONDARY_ANAT_STRUC[\n self.inputs.surf_key.split(\".\")[-1]\n ]\n meta[\"Name\"] = \"%s_average.gii\" % self.inputs.surf_key\n\n out_file = Path(runtime.cwd) / meta[\"Name\"]\n out_file = ply2gii(self.inputs.in_file, meta, out_file=out_file)\n self._results[\"out_file\"] = str(out_file)\n return runtime\n\n\nclass _UnzipJoinedSurfacesInputSpec(BaseInterfaceInputSpec):\n in_files = traits.List(\n InputMultiPath(File(exists=True), mandatory=True, desc=\"input GIfTI files\")\n )\n\n\nclass _UnzipJoinedSurfacesOutputSpec(TraitedSpec):\n out_files = traits.List(\n OutputMultiPath(File(exists=True), desc=\"output pointcloud in PLY format\")\n )\n surf_keys = traits.List(traits.Str, desc=\"surface identifier keys\")\n\n\nclass UnzipJoinedSurfaces(SimpleInterface):\n \"\"\"Unpack surfaces by identifier keys\"\"\"\n\n input_spec = _UnzipJoinedSurfacesInputSpec\n output_spec = _UnzipJoinedSurfacesOutputSpec\n\n def _run_interface(self, runtime):\n from pathlib import Path\n\n groups = defaultdict(list)\n in_files = [it for items in self.inputs.in_files for it in items]\n\n for f in in_files:\n bname = Path(f).name\n groups[bname.split(\"_\")[0]].append(f)\n\n self._results[\"out_files\"] = [sorted(els) for els in groups.values()]\n self._results[\"surf_keys\"] = list(groups.keys())\n\n return runtime\n\n\ndef normalize_surfs(in_file, transform_file, newpath=None):\n \"\"\"\n Re-center GIFTI coordinates to fit align to native T1w space.\n\n For midthickness surfaces, add MidThickness metadata\n\n Coordinate update based on:\n https://github.com/Washington-University/workbench/blob/1b79e56/src/Algorithms/AlgorithmSurfaceApplyAffine.cxx#L73-L91\n and\n 
https://github.com/Washington-University/Pipelines/blob/ae69b9a/PostFreeSurfer/scripts/FreeSurfer2CaretConvertAndRegisterNonlinear.sh#L147\n \"\"\"\n\n img = nb.load(in_file)\n transform = load_transform(transform_file)\n pointset = img.get_arrays_from_intent(\"NIFTI_INTENT_POINTSET\")[0]\n coords = pointset.data.T\n c_ras_keys = (\"VolGeomC_R\", \"VolGeomC_A\", \"VolGeomC_S\")\n ras = np.array([[float(pointset.metadata[key])] for key in c_ras_keys])\n ones = np.ones((1, coords.shape[1]), dtype=coords.dtype)\n # Apply C_RAS translation to coordinates, then transform\n pointset.data = transform.dot(np.vstack((coords + ras, ones)))[:3].T.astype(\n coords.dtype\n )\n\n secondary = nb.gifti.GiftiNVPairs(\"AnatomicalStructureSecondary\", \"MidThickness\")\n geom_type = nb.gifti.GiftiNVPairs(\"GeometricType\", \"Anatomical\")\n has_ass = has_geo = False\n for nvpair in pointset.meta.data:\n # Remove C_RAS translation from metadata to avoid double-dipping in FreeSurfer\n if nvpair.name in c_ras_keys:\n nvpair.value = \"0.000000\"\n # Check for missing metadata\n elif nvpair.name == secondary.name:\n has_ass = True\n elif nvpair.name == geom_type.name:\n has_geo = True\n fname = os.path.basename(in_file)\n # Update metadata for MidThickness/graymid surfaces\n if \"midthickness\" in fname.lower() or \"graymid\" in fname.lower():\n if not has_ass:\n pointset.meta.data.insert(1, secondary)\n if not has_geo:\n pointset.meta.data.insert(2, geom_type)\n\n if newpath is None:\n newpath = os.getcwd()\n out_file = os.path.join(newpath, fname)\n img.to_filename(out_file)\n return out_file\n\n\ndef load_transform(fname):\n \"\"\"Load affine transform from file\n\n Parameters\n ----------\n fname : str or None\n Filename of an LTA or FSL-style MAT transform file.\n If ``None``, return an identity transform\n\n Returns\n -------\n affine : (4, 4) numpy.ndarray\n \"\"\"\n if fname is None:\n return np.eye(4)\n\n if fname.endswith(\".mat\"):\n return np.loadtxt(fname)\n elif fname.endswith(\".lta\"):\n with open(fname, \"rb\") as fobj:\n for line in fobj:\n if line.startswith(b\"1 4 4\"):\n break\n lines = fobj.readlines()[:4]\n return np.genfromtxt(lines)\n\n raise ValueError(\"Unknown transform type; pass FSL (.mat) or LTA (.lta)\")\n\n\ndef vertex_normals(vertices, faces):\n \"\"\"Calculates the normals of a triangular mesh\"\"\"\n\n def normalize_v3(arr):\n \"\"\" Normalize a numpy array of 3 component vectors shape=(n,3) \"\"\"\n lens = np.sqrt(arr[:, 0] ** 2 + arr[:, 1] ** 2 + arr[:, 2] ** 2)\n arr /= lens[:, np.newaxis]\n\n tris = vertices[faces]\n facenorms = np.cross(tris[::, 1] - tris[::, 0], tris[::, 2] - tris[::, 0])\n normalize_v3(facenorms)\n\n norm = np.zeros(vertices.shape, dtype=vertices.dtype)\n norm[faces[:, 0]] += facenorms\n norm[faces[:, 1]] += facenorms\n norm[faces[:, 2]] += facenorms\n normalize_v3(norm)\n return norm\n\n\ndef pointcloud2ply(vertices, normals, out_file=None):\n \"\"\"Converts the file to PLY format\"\"\"\n from pathlib import Path\n import pandas as pd\n from pyntcloud import PyntCloud\n\n df = pd.DataFrame(np.hstack((vertices, normals)))\n df.columns = [\"x\", \"y\", \"z\", \"nx\", \"ny\", \"nz\"]\n cloud = PyntCloud(df)\n\n if out_file is None:\n out_file = Path(\"pointcloud.ply\").resolve()\n\n cloud.to_file(str(out_file))\n return out_file\n\n\ndef ply2gii(in_file, metadata, out_file=None):\n \"\"\"Convert from ply to GIfTI\"\"\"\n from pathlib import Path\n from numpy import eye\n from nibabel.gifti import (\n GiftiMetaData,\n GiftiCoordSystem,\n GiftiImage,\n 
GiftiDataArray,\n )\n from pyntcloud import PyntCloud\n\n in_file = Path(in_file)\n surf = PyntCloud.from_file(str(in_file))\n\n # Update centroid metadata\n metadata.update(\n zip(\n (\"SurfaceCenterX\", \"SurfaceCenterY\", \"SurfaceCenterZ\"),\n [\"%.4f\" % c for c in surf.centroid],\n )\n )\n\n # Prepare data arrays\n da = (\n GiftiDataArray(\n data=surf.xyz.astype(\"float32\"),\n datatype=\"NIFTI_TYPE_FLOAT32\",\n intent=\"NIFTI_INTENT_POINTSET\",\n meta=GiftiMetaData.from_dict(metadata),\n coordsys=GiftiCoordSystem(xform=eye(4), xformspace=3),\n ),\n GiftiDataArray(\n data=surf.mesh.values,\n datatype=\"NIFTI_TYPE_INT32\",\n intent=\"NIFTI_INTENT_TRIANGLE\",\n coordsys=None,\n ),\n )\n surfgii = GiftiImage(darrays=da)\n\n if out_file is None:\n out_file = fname_presuffix(\n in_file.name, suffix=\".gii\", use_ext=False, newpath=str(Path.cwd())\n )\n\n surfgii.to_filename(str(out_file))\n return out_file\n\n\ndef get_gii_meta(in_file):\n from nibabel import load\n\n if isinstance(in_file, list):\n in_file = in_file[0]\n gii = load(in_file)\n return gii.darrays[0].meta.metadata\n" ]
[ [ "numpy.hstack", "numpy.sqrt", "numpy.vstack", "numpy.eye", "numpy.ones", "numpy.genfromtxt", "numpy.cross", "numpy.savetxt", "numpy.zeros", "numpy.loadtxt" ] ]
vlomonaco/continual-learning
[ "39301f9454a40304d43fa9444b6e94f4f13e328f" ]
[ "vae_models.py" ]
[ "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nimport excitability_modules as eM\nimport utils\nimport linear_nets\nfrom replayer import Replayer\n\n\nclass AutoEncoder(Replayer):\n \"\"\"Class for variational auto-encoder (VAE) models.\"\"\"\n\n def __init__(self, image_size, image_channels, classes,\n fc_layers=3, fc_units=1000, fc_drop=0, fc_bn=True, fc_nl=\"relu\", z_dim=20):\n\n # Set configurations\n super().__init__()\n self.label = \"VAE\"\n self.image_size = image_size\n self.image_channels = image_channels\n self.classes = classes\n self.fc_layers = fc_layers\n self.z_dim = z_dim\n self.fc_units = fc_units\n\n # Training related components that should be set before training\n # -criterion for reconstruction\n self.recon_criterion = None\n # -weigths of different components of the loss function\n self.lamda_rcl = 1.\n self.lamda_vl = 1.\n self.lamda_pl = 0. # --> when used as \"classifier with feedback-connections\", this should be set to 1.\n\n # Check whether there is at least 1 fc-layer\n if fc_layers<1:\n raise ValueError(\"VAE cannot have 0 fully-connected layers!\")\n\n\n ######------SPECIFY MODEL------######\n\n # encoder: flatten image to 2D-tensor\n self.flatten = utils.Flatten()\n # encoder: fully connected hidden layers\n self.fcE = linear_nets.MLP(\n input_size=image_channels*image_size**2, output_size=fc_units, layers=fc_layers-1, hid_size=fc_units,\n drop=fc_drop, batch_norm=fc_bn, nl=fc_nl, final_nl=True,\n )\n enc_mlp_output_size = fc_units if fc_layers>1 else image_channels*image_size**2\n\n # classifier (from final hidden layer of encoder)\n self.classifier = nn.Sequential(nn.Dropout(fc_drop),\n eM.LinearExcitability(enc_mlp_output_size, classes))\n\n # reparametrization (\"to Z and back\")\n out_nl = True if fc_layers>1 else False\n dec_mlp_input_size = fc_units if fc_layers>1 else image_channels*image_size**2\n self.toZ = nn.Linear(enc_mlp_output_size, z_dim) # estimating mean\n self.toZlogvar = nn.Linear(enc_mlp_output_size, z_dim) # estimating log(SD**2)\n self.fromZ = linear_nets.fc_layer(z_dim, dec_mlp_input_size, batch_norm=(out_nl and fc_bn),\n nl=fc_nl if out_nl else \"none\")\n\n # decoder: fully connected hidden layers (with no non-linearity or batchnorm in final layer!)\n self.fcD = linear_nets.MLP(\n input_size=fc_units, output_size=image_channels*image_size**2, layers=fc_layers-1, hid_size=fc_units,\n drop=fc_drop, batch_norm=fc_bn, nl=fc_nl, final_nl=False,\n )\n # decoder: reshape to image\n self.reshapeD = utils.ToImage(image_channels=image_channels)\n\n @property\n def name(self):\n fc_label = \"{}--\".format(self.fcE.name) if self.fc_layers>1 else \"\"\n hid_label = \"{}{}-\".format(\"i\", self.image_channels*self.image_size**2) if self.fc_layers==1 else \"\"\n z_label = \"z{}\".format(self.z_dim)\n return \"{}({}{}{}-c{})\".format(self.label, fc_label, hid_label, z_label, self.classes)\n\n\n def encode(self, x):\n '''Pass input through feed-forward connections, to get [hE], [z_mean] and [z_logvar].'''\n # extract final hidden features (forward-pass)\n hE = self.fcE(self.flatten(x))\n # get parameters for reparametrization\n z_mean = self.toZ(hE)\n z_logvar = self.toZlogvar(hE)\n return z_mean, z_logvar, hE\n\n def classify(self, x):\n '''For input [x], return all predicted \"scores\"/\"logits\".'''\n hE = self.fcE(self.flatten(x))\n y_hat = self.classifier(hE)\n return y_hat\n\n def reparameterize(self, mu, logvar):\n '''Perform \"reparametrization trick\" to make these stochastic variables differentiable.'''\n 
std = logvar.mul(0.5).exp_()\n eps = std.new(std.size()).normal_()\n return eps.mul(std).add_(mu)\n\n def decode(self, z):\n '''Pass latent variable activations through feedback connections, to give reconstructed image [image_recon].'''\n hD = self.fromZ(z)\n image_features = self.fcD(hD)\n image_recon = self.reshapeD(image_features)\n return image_recon\n\n def forward(self, x, full=False):\n '''Forward function to propagate [x] through the encoder, reparametrization and decoder.\n\n Input: - [x] <4D-tensor> of shape [batch_size]x[channels]x[image_size]x[image_size]\n\n If [full] is True, output should be a <tuple> consisting of:\n - [x_recon] <4D-tensor> reconstructed image (features) in same shape as [x]\n - [y_hat] <2D-tensor> with predicted logits for each class\n - [mu] <2D-tensor> with either [z] or the estimated mean of [z]\n - [logvar] None or <2D-tensor> estimated log(SD^2) of [z]\n - [z] <2D-tensor> reparameterized [z] used for reconstruction\n If [full] is False, output is simply the predicted logits (i.e., [y_hat]).'''\n if full:\n # encode (forward), reparameterize and decode (backward)\n mu, logvar, hE = self.encode(x)\n z = self.reparameterize(mu, logvar) if self.training else mu\n x_recon = self.decode(z)\n # classify\n y_hat = self.classifier(hE)\n # return\n return (x_recon, y_hat, mu, logvar, z)\n else:\n return self.classify(x) # -> if [full]=False, only forward pass for prediction\n\n\n def sample(self, size, allowed_predictions=None, return_scores=False):\n '''Generate [size] samples from the model. Outputs are tensors (not \"requiring grad\"), on same device as <self>.\n\n INPUT: - [allowed_predictions] <list> of [class_ids] which are allowed to be predicted\n - [return_scores] <bool>; if True, [y_hat] is also returned\n\n OUTPUT: - [X] <4D-tensor> generated images\n - [y] <1D-tensor> predicted corresponding labels\n - [y_hat] <2D-tensor> predicted \"logits\"/\"scores\" for all [allowed_predictions]'''\n\n # set model to eval()-mode\n mode = self.training\n self.eval()\n\n # sample z\n z = torch.randn(size, self.z_dim)\n z = z.to(self._device())\n\n # decode z into image X\n with torch.no_grad():\n X = self.decode(z)\n\n # use forward model to predict scores of generated images\n with torch.no_grad():\n y_hat = self.classify(X)\n y_hat = y_hat[:, allowed_predictions] if (allowed_predictions is not None) else y_hat\n # get \"predicted\" class-labels (indexed according to each class' position in [allowed_predictions]!)\n _, y = torch.max(y_hat, dim=1)\n\n # set model back to its initial mode\n self.train(mode=mode)\n\n # return samples as [batch_size]x[channels]x[image_size]x[image_size] tensor, plus classes-labels\n return (X, y, y_hat) if return_scores else (X, y)\n\n\n def loss_function(self, recon_x, x, y_hat=None, y_target=None, scores=None, mu=None, logvar=None):\n '''Calculate and return various losses that could be used for training and/or evaluating the model.\n\n INPUT: - [x_recon] <4D-tensor> reconstructed image in same shape as [x]\n - [x] <4D-tensor> original image\n - [y_hat] <2D-tensor> with predicted \"logits\" for each class\n - [y_target] <1D-tensor> with target-classes (as integers)\n - [scores] <2D-tensor> with target \"logits\" for each class\n - [mu] <2D-tensor> with either [z] or the estimated mean of [z]\n - [logvar] None or <2D-tensor> with estimated log(SD^2) of [z]\n\n OUTPUT: - [reconL] reconstruction loss indicating how well [x] and [x_recon] match\n - [variatL] variational (KL-divergence) loss \"indicating how normally distributed [z] 
is\"\n - [predL] prediction loss indicating how well targets [y] are predicted\n - [distilL] knowledge distillation (KD) loss indicating how well the predicted \"logits\" ([y_hat])\n match the target \"logits\" ([scores])'''\n\n batch_size = x.size(0)\n\n ###-----Reconstruction loss-----###\n reconL = self.recon_criterion(recon_x.view(batch_size, -1), x.view(batch_size, -1))\n\n ###-----Variational loss-----###\n if logvar is not None:\n #---- see Appendix B from: Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014 ----#\n variatL = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) / batch_size\n # -normalise by same number of elements as in reconstruction\n variatL /= (self.image_channels * self.image_size ** 2)\n # --> because self.recon_criterion averages over batch-size but also over all pixels/elements in recon!!\n else:\n variatL = torch.tensor(0., device=self._device())\n\n ###-----Prediction loss-----###\n if y_target is not None:\n predL = F.cross_entropy(y_hat, y_target, size_average=True)\n else:\n predL = torch.tensor(0., device=self._device())\n\n ###-----Distilliation loss-----###\n if scores is not None:\n n_classes_to_consider = y_hat.size(1) #--> zeroes will be added to [scores] to make its size match [y_hat]\n distilL = utils.loss_fn_kd(scores=y_hat[:, :n_classes_to_consider], target_scores=scores, T=self.KD_temp)\n else:\n distilL = torch.tensor(0., device=self._device())\n\n # Return a tuple of the calculated losses\n return reconL, variatL, predL, distilL\n\n\n\n def train_a_batch(self, x, y, x_=None, y_=None, scores_=None, rnt=0.5, active_classes=None, task=1):\n '''Train model for one batch ([x],[y]), possibly supplemented with replayed data ([x_],[y_]).\n\n [x] <tensor> batch of inputs (could be None, in which case only 'replayed' data is used)\n [y] <tensor> batch of corresponding labels\n [x_] None or (<list> of) <tensor> batch of replayed inputs\n [y_] None or (<list> of) <tensor> batch of corresponding \"replayed\" labels\n [scores_] None or (<list> of) <tensor> 2Dtensor:[batch]x[classes] predicted \"scores\"/\"logits\" for [x_]\n [rnt] <number> in [0,1], relative importance of new task\n [active_classes] None or (<list> of) <list> with \"active\" classes'''\n\n # Set model to training-mode\n self.train()\n\n ##--(1)-- CURRENT DATA --##\n precision = 0.\n if x is not None:\n # Run the model\n recon_batch, y_hat, mu, logvar, z = self(x, full=True)\n # If needed (e.g., incremental or multihead set-up), remove predictions for classes not in current task\n if active_classes is not None:\n y_hat = y_hat[:, active_classes[-1]] if type(active_classes[0])==list else y_hat[:, active_classes]\n # Calculate all losses\n reconL, variatL, predL, _ = self.loss_function(recon_x=recon_batch, x=x, y_hat=y_hat,\n y_target=y, mu=mu, logvar=logvar)\n # Weigh losses as requested\n loss = self.lamda_rcl*reconL + self.lamda_vl*variatL + self.lamda_pl*predL\n\n # Calculate training-precision\n if y is not None:\n _, predicted = y_hat.max(1)\n precision = (y == predicted).sum().item() / x.size(0)\n\n ##--(2)-- REPLAYED DATA --##\n if x_ is not None:\n # If [x_] is a list, perform separate replay for each entry\n n_replays = len(x_) if type(x_)==list else 1\n if not type(x_)==list:\n x_ = [x_]\n y_ = [y_]\n scores_ = [scores_]\n if active_classes is not None:\n active_classes = [active_classes]\n\n # Prepare lists to store losses for each replay\n loss_replay = [None]*n_replays\n reconL_r = [None]*n_replays\n variatL_r = [None]*n_replays\n predL_r = [None]*n_replays\n 
distilL_r = [None]*n_replays\n\n # Loop to perform each replay\n for replay_id in range(n_replays):\n # Run the model\n recon_batch, y_hat, mu, logvar, z = self(x_[replay_id], full=True)\n # If needed (e.g., incremental or multihead set-up), remove predictions for classes not in replayed task\n if active_classes is not None:\n y_hat = y_hat[:, active_classes[replay_id]]\n # Calculate all losses\n reconL_r[replay_id], variatL_r[replay_id], predL_r[replay_id], distilL_r[replay_id] = self.loss_function(\n recon_x=recon_batch, x=x_[replay_id], y_hat=y_hat,\n y_target=y_[replay_id] if (y_ is not None) else None,\n scores=scores_[replay_id] if (scores_ is not None) else None, mu=mu, logvar=logvar,\n )\n # Weigh losses as requested\n loss_replay[replay_id] = self.lamda_rcl*reconL_r[replay_id] + self.lamda_vl*variatL_r[replay_id]\n if self.replay_targets==\"hard\":\n loss_replay[replay_id] += self.lamda_pl*predL_r[replay_id]\n elif self.replay_targets==\"soft\":\n loss_replay[replay_id] += self.lamda_pl*distilL_r[replay_id]\n\n # Calculate total loss\n if x is not None:\n loss_total = rnt*loss + (1-rnt)*sum(loss_replay)/n_replays\n else:\n loss_total = sum(loss_replay)/n_replays\n else:\n loss_total = loss\n\n\n # Reset optimizer\n self.optimizer.zero_grad()\n # Backpropagate errors\n loss_total.backward()\n # Take optimization-step\n self.optimizer.step()\n\n\n # Return the dictionary with different training-loss split in categories\n return {\n 'loss_total': loss_total.item(), 'precision': precision,\n 'recon': reconL.item() if x is not None else 0,\n 'variat': variatL.item() if x is not None else 0,\n 'pred': predL.item() if x is not None else 0,\n 'recon_r': sum(reconL_r).item()/n_replays if x_ is not None else 0,\n 'variat_r': sum(variatL_r).item()/n_replays if x_ is not None else 0,\n 'pred_r': sum(predL_r).item()/n_replays if (x_ is not None and predL_r[0] is not None) else 0,\n 'distil_r': sum(distilL_r).item()/n_replays if (x_ is not None and distilL_r[0] is not None) else 0,\n }\n\n\n\n" ]
[ [ "torch.nn.Dropout", "torch.max", "torch.randn", "torch.nn.functional.cross_entropy", "torch.nn.Linear", "torch.no_grad" ] ]
fjbriones/emotalkingface
[ "d3d838be705ea74d4165891720739d749aaf38a5" ]
[ "data_prep/utils.py" ]
[ "# Written by Sefik Emre Eskimez, May 29 2018 - Aug 17 2018 #\r\n\r\nimport matplotlib as mpl\r\nmpl.use('Agg')\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits import mplot3d\r\nimport matplotlib.animation as manimation\r\nimport matplotlib.lines as mlines\r\nfrom matplotlib import transforms\r\nimport numpy as np\r\nimport os\r\nfrom tqdm import tqdm\r\nimport subprocess\r\nimport librosa\r\nimport cv2 \r\nimport scipy\r\n\r\nfont = {'size' : 18}\r\nmpl.rc('font', **font)\r\n\r\nMouth = [[48, 49], [49, 50], [50, 51], [51, 52], [52, 53], [53, 54], [54, 55], [55, 56], [56, 57], \\\r\n [57, 58], [58, 59], [59, 48], [60, 61], [61, 62], [62, 63], [63, 64], [64, 65], [65, 66], \\\r\n [66, 67], [67, 60]]\r\n\r\nNose = [[27, 28], [28, 29], [29, 30], [30, 31], [30, 35], [31, 32], [32, 33], \\\r\n [33, 34], [34, 35], [27, 31], [27, 35]]\r\n\r\nleftBrow = [[17, 18], [18, 19], [19, 20], [20, 21]]\r\nrightBrow = [[22, 23], [23, 24], [24, 25], [25, 26]]\r\n\r\nleftEye = [[36, 37], [37, 38], [38, 39], [39, 40], [40, 41], [36, 41]]\r\nrightEye = [[42, 43], [43, 44], [44, 45], [45, 46], [46, 47], [42, 47]]\r\n\r\nother = [[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6], \\\r\n [6, 7], [7, 8], [8, 9], [9, 10], [10, 11], [11, 12], \\\r\n [12, 13], [13, 14], [14, 15], [15, 16]]\r\n\r\nfaceLmarkLookup = Mouth + Nose + leftBrow + rightBrow + leftEye + rightEye + other\r\n\r\ndef write_video_cv(frames, speech, fs, path, fname, fps):\r\n # fname = os.path.splitext(fname)[0]\r\n print(os.path.join(path, fname))\r\n # exit()\r\n out = cv2.VideoWriter(os.path.join(path, fname), cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), fps, (frames.shape[1], frames.shape[2]), (True if len(frames.shape) == 4 else False))\r\n # exit()\r\n if out.isOpened():\r\n for i in range(frames.shape[0]):\r\n out.write(frames[i, ...])\r\n out.release()\r\n # exit()\r\n # print(speech.shape)\r\n # scipy.io.wavfile.write(os.path.join(path, fname+'.wav'), fs, speech)\r\n librosa.output.write_wav(os.path.join(path, fname+'.wav'), speech, fs)\r\n\r\n cmd = 'ffmpeg -i '+os.path.join(path, fname)+' -i '+os.path.join(path, fname)+'.wav -c:v copy -c:a aac -strict experimental -map 0:v:0 -map 1:a:0 '+os.path.join(path, fname)+'_.mp4'\r\n subprocess.call(cmd, shell=True) \r\n print('Muxing Done')\r\n\r\n os.remove(os.path.join(path, fname))\r\n os.remove(os.path.join(path, fname+'.wav'))\r\n\r\ndef easy_show(data, lab, figsize=(10, 10)):\r\n plt.figure(figsize=figsize)\r\n plt.imshow(data)\r\n plt.savefig(lab, dpi = 300, bbox_inches='tight')\r\n plt.clf()\r\n plt.close()\r\n\r\ndef easy_show_FLM(data, lmarks, lab, figsize=(10, 10)):\r\n plt.figure(figsize=figsize)\r\n plt.imshow(data)\r\n plt.plot(lmarks[:, 0], lmarks[:, 1], 'r*')\r\n plt.savefig(lab, dpi = 300, bbox_inches='tight')\r\n plt.clf()\r\n plt.close()\r\n\r\ndef STFT(speech, sr, winsize, hopsize):\r\n cnst = 1+(int(int(sr*winsize))/2)\r\n res_stft =librosa.stft(speech,\r\n win_length = int(sr*winsize),\r\n hop_length = int(sr*hopsize),\r\n n_fft = int(sr*winsize))\r\n \r\n stft_mag = np.abs(res_stft)/cnst\r\n stft_phase = np.angle(res_stft)\r\n\r\n return stft_mag, stft_phase\r\n\r\ndef plot(data, label, min_val=None, max_val=None):\r\n if not min_val:\r\n min_val = np.min(data)\r\n max_val = np.max(data)\r\n fig = plt.figure(figsize=(10, 10))\r\n im = plt.imshow(data, cmap=plt.get_cmap('jet'), origin='lower', vmin=min_val, vmax=max_val)\r\n # fig.colorbar(im)\r\n plt.axis('off')\r\n plt.savefig(label, bbox_inches='tight')\r\n plt.clf()\r\n plt.close()\r\n\r\ndef subplot(data1, data2, 
label):\r\n fig = plt.figure(figsize=(10, 10))\r\n plt.subplot(211)\r\n im = plt.imshow(data1, cmap=plt.get_cmap('jet'), origin='lower', vmin=np.min(data1), vmax=np.max(data1))\r\n plt.axis('off')\r\n\r\n plt.subplot(212)\r\n im = plt.imshow(data2, cmap=plt.get_cmap('jet'), origin='lower', vmin=np.min(data1), vmax=np.max(data1))\r\n plt.axis('off')\r\n\r\n plt.savefig(label, bbox_inches='tight')\r\n plt.clf()\r\n plt.close()\r\n\r\ndef write_video(frames, sound, fs, path, fname, fps, cmap='jet'):\r\n try:\r\n os.remove(os.path.join(path, fname+'.mp4'))\r\n os.remove(os.path.join(path, fname+'.wav'))\r\n os.remove(os.path.join(path, fname+'_ws.mp4'))\r\n except:\r\n print ('Exp')\r\n\r\n FFMpegWriter = manimation.writers['ffmpeg']\r\n metadata = dict(title='Movie Test', artist='Matplotlib',\r\n comment='Movie support!')\r\n writer = FFMpegWriter(fps=fps, metadata=metadata)\r\n\r\n fig = plt.figure(figsize=(10, 10))\r\n l = plt.imshow(frames[0, :, :], cmap=cmap)\r\n\r\n librosa.output.write_wav(os.path.join(path, fname+'.wav'), sound, fs)\r\n\r\n with writer.saving(fig, os.path.join(path, fname+'.mp4'), 150):\r\n # plt.gca().invert_yaxis()\r\n plt.axis('off')\r\n for i in tqdm(range(frames.shape[0])):\r\n l.set_data(frames[i, :, :])\r\n cnt = 0\r\n writer.grab_frame()\r\n\r\n cmd = 'ffmpeg -i '+os.path.join(path, fname)+'.mp4 -i '+os.path.join(path, fname)+'.wav -c:v copy -c:a aac -strict experimental -map 0:v:0 -map 1:a:0 '+os.path.join(path, fname)+'_.mp4'\r\n subprocess.call(cmd, shell=True) \r\n print('Muxing Done')\r\n\r\n os.remove(os.path.join(path, fname+'.mp4'))\r\n os.remove(os.path.join(path, fname+'.wav'))\r\n\r\ndef write_video_FLM(frames, sound, fs, path, fname, xLim, yLim, fps=29.97):\r\n try:\r\n os.remove(os.path.join(path, fname+'.mp4'))\r\n os.remove(os.path.join(path, fname+'.wav'))\r\n os.remove(os.path.join(path, fname+'_ws.mp4'))\r\n except:\r\n print ('Exp')\r\n\r\n if len(frames.shape) < 3:\r\n frames = np.reshape(frames, (frames.shape[0], frames.shape[1]//2, 2))\r\n # print frames.shape\r\n\r\n FFMpegWriter = manimation.writers['ffmpeg']\r\n metadata = dict(title='Movie Test', artist='Matplotlib',\r\n comment='Movie support!')\r\n writer = FFMpegWriter(fps=fps, metadata=metadata)\r\n\r\n fig = plt.figure(figsize=(10, 10))\r\n l, = plt.plot([], [], 'ko', ms=4)\r\n\r\n\r\n plt.xlim(xLim)\r\n plt.ylim(yLim)\r\n\r\n librosa.output.write_wav(os.path.join(path, fname+'.wav'), sound, fs)\r\n\r\n if frames.shape[1] == 20:\r\n lookup = [[x[0] - 48, x[1] - 48] for x in Mouth]\r\n # print lookup\r\n else:\r\n lookup = faceLmarkLookup\r\n\r\n lines = [plt.plot([], [], 'k')[0] for _ in range(3*len(lookup))]\r\n\r\n with writer.saving(fig, os.path.join(path, fname+'.mp4'), 150):\r\n plt.gca().invert_yaxis()\r\n for i in tqdm(range(frames.shape[0])):\r\n l.set_data(frames[i,:,0], frames[i,:,1])\r\n cnt = 0\r\n for refpts in lookup:\r\n lines[cnt].set_data([frames[i,refpts[1], 0], frames[i,refpts[0], 0]], [frames[i, refpts[1], 1], frames[i,refpts[0], 1]])\r\n cnt+=1\r\n writer.grab_frame()\r\n\r\n cmd = 'ffmpeg -i '+os.path.join(path, fname)+'.mp4 -i '+os.path.join(path, fname)+'.wav -c:v copy -c:a aac -strict experimental '+os.path.join(path, fname)+'_.mp4'\r\n subprocess.call(cmd, shell=True) \r\n print('Muxing Done')\r\n\r\n os.remove(os.path.join(path, fname+'.mp4'))\r\n os.remove(os.path.join(path, fname+'.wav'))\r\n\r\ndef main():\r\n return\r\n\r\nif __name__ == \"__main__\":\r\n main()" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.get_cmap", "matplotlib.pyplot.plot", "numpy.max", "matplotlib.pyplot.gca", "numpy.reshape", "matplotlib.pyplot.subplot", "matplotlib.pyplot.close", "matplotlib.pyplot.axis", "matplotlib.pyplot.figure", "numpy.min", "matplotlib.pyplot.ylim", "matplotlib.pyplot.savefig", "matplotlib.rc", "numpy.abs", "matplotlib.use", "matplotlib.pyplot.xlim", "matplotlib.pyplot.clf", "numpy.angle" ] ]
axelmagn/metrics
[ "63b047d4f0481614597f25a1e4c03cad35ff89eb" ]
[ "ai-metrics/aimetrics/estimator.py" ]
[ "import json\nimport logging\nimport numpy as np\nfrom sklearn.base import BaseEstimator, ClassifierMixin\nfrom sklearn.utils.validation import NotFittedError\nfrom tornado import gen\nfrom tornado.httpclient import AsyncHTTPClient, HTTPError, HTTPClient\nfrom tornado.ioloop import IOLoop\nfrom urllib.parse import urljoin\n\nfrom .conf import get_conf\n\n\n_conf = get_conf()\n\n# one day in seconds\nDEFAULT_TIMEOUT = 86400\n\n\nclass RemoteBSTClassifier(BaseEstimator, ClassifierMixin):\n \"\"\" RemoteClassifier is a classifier that relies on a remote BST service to\n make predictions.\n\n This relies on the the REST interface laid out by the bst.ai server for\n interaction, so it is not generalizable to other servers.\n\n Parameters\n ----------\n base_url : str\n The base URL of the remote API.\n\n model_type : str\n The model type to use on the remote API. Refer to the bst.ai project\n for available options.\n\n model_params : dict (default: {})\n Any model parameters for the remote classifier. Refer to the bst.ai\n project for available options.\n\n Attributes\n ----------\n model_id : str or None\n The ID of the remote model, or None if the remote model has not been\n instantiated.\n\n training_error : float or None\n The training error of the trained classifier, or None if the classifier\n has not been trained yet.\n\n \"\"\"\n\n def __init__(self, base_url, model_type, model_params=None):\n self._conf = _conf['aimetrics']['estimators']['RemoteBSTClassifier']\n self.model_type = model_type\n if model_params is None:\n model_params = {}\n self.model_params = model_params\n self.base_url = base_url\n self.model_id = None\n self.training_error = None\n\n @gen.coroutine\n def _create_model(self):\n \"\"\"Create a new model and return the ID\n\n This does not set the self.model_id attribute.\n\n \"\"\"\n http_client = AsyncHTTPClient()\n # assemble request parameters\n create_url_suffix = self._conf['create']['url_suffix'].format(\n model_type = self.model_type)\n create_url = urljoin(self.base_url, create_url_suffix)\n create_method = self._conf['create']['method']\n create_params = json.dumps(self.model_params)\n headers = {'content-type': 'application/json'}\n # send async create request\n response = yield http_client.fetch(create_url, method=create_method,\n body=create_params, headers=headers,\n connect_timeout=DEFAULT_TIMEOUT,\n request_timeout=DEFAULT_TIMEOUT)\n return json.loads(response.body.decode('utf-8'))['id']\n\n @gen.coroutine\n def destroy_model(self):\n \"\"\"Destroy a model\"\"\"\n http_client = AsyncHTTPClient()\n # assemble request parameters\n destroy_url_suffix = self._conf['destroy']['url_suffix'].format(\n model_id = self.model_id)\n destroy_url = urljoin(self.base_url, destroy_url_suffix)\n destroy_method = self._conf['destroy']['method']\n headers = {'content-type': 'application/json'}\n # send async destroy request\n response = yield http_client.fetch(destroy_url, method=destroy_method,\n headers=headers, connect_timeout=DEFAULT_TIMEOUT,\n request_timeout=DEFAULT_TIMEOUT)\n self.model_id = None\n return json.loads(response.body.decode('utf-8'))\n\n @gen.coroutine\n def _train_model(self, training_set, training_params=None):\n \"\"\"Train the classifier's remote model\"\"\"\n if training_params is None:\n training_params = {}\n http_client = AsyncHTTPClient()\n # assemble request parameters\n train_url_suffix = self._conf['train']['url_suffix'].format(\n model_id=self.model_id)\n train_url = urljoin(self.base_url, train_url_suffix)\n train_cmd = { 
\"trainingSet\": training_set,\n \"params\": training_params }\n train_method = self._conf['train']['method']\n headers = {'content-type': 'application/json'}\n response = yield http_client.fetch(train_url, method=train_method,\n body=json.dumps(train_cmd), headers=headers,\n connect_timeout=DEFAULT_TIMEOUT,\n request_timeout=DEFAULT_TIMEOUT)\n return json.loads(response.body.decode('utf-8'))\n\n @gen.coroutine\n def _predict_model(self, prediction_set):\n \"\"\"Predict the classes for a set of records using a remote model\"\"\"\n http_client = AsyncHTTPClient()\n # assemble request parameters\n predict_url_suffix = self._conf['predict']['url_suffix'].format(\n model_id=self.model_id)\n predict_url = urljoin(self.base_url, predict_url_suffix)\n predict_method = self._conf['predict']['method']\n headers = {'content-type': 'application/json'}\n try:\n response = yield http_client.fetch(predict_url,\n method=predict_method, body=json.dumps(prediction_set),\n headers=headers, connect_timeout=DEFAULT_TIMEOUT,\n request_timeout=DEFAULT_TIMEOUT)\n except HTTPError as e:\n if e.response and e.response.body:\n logging.error(e.response.body.decode('utf-8'))\n raise e\n\n return json.loads(response.body.decode('utf-8'))\n\n @gen.coroutine\n def get_model(self):\n \"\"\"Get the JSON representation of the model from the server\"\"\"\n http_client = AsyncHTTPClient()\n # assemble request parameters\n url_suffix = self._conf['get']['url_suffix'].format(\n model_id=self.model_id)\n url = urljoin(self.base_url, url_suffix)\n method = self._conf['get']['method']\n headers = {'content-type': 'application/json'}\n response = yield http_client.fetch(url, method=method, headers=headers,\n request_timeout=DEFAULT_TIMEOUT)\n return json.loads(response.body.decode('utf-8'))\n\n\n @gen.coroutine\n def async_fit(self, X, y):\n \"\"\" Asynchronously fit the remote classifier.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = [n_samples, n_features]\n Training vectors, where n_samples is the number of samples and\n n_features is the number of features\n y : array-like, shape = [n_samples] or [n_samples, n_outputs]\n Target values\n\n \"\"\"\n # import ipdb; ipdb.set_trace() # DEBUG\n if self.model_id is None:\n self.model_id = yield self._create_model()\n # create training set\n if isinstance(X, np.ndarray):\n X = X.tolist()\n elif not isinstance(X, list):\n X = list(X)\n if isinstance(y, np.ndarray):\n y = y.tolist()\n elif not isinstance(y, list):\n y = list(y)\n training_set = [{'input': x_row, 'output': y_row}\n for x_row, y_row in zip(X,y)]\n # perform training\n train_results = yield self._train_model(training_set)\n # record and return error\n self.training_error = train_results['error']\n return self.training_error\n\n def fit(self, X, y):\n \"\"\"Fit the remote classifier.\n NOT YET IMPLEMENTED\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = [n_samples, n_features]\n Training vectors, where n_samples is the number of samples and\n n_features is the number of features\n y : array-like, shape = [n_samples] or [n_samples, n_outputs]\n Target values\n\n \"\"\"\n # TODO: Bugfix RemoteBSTClassifier synchronous functions\n \"\"\"\n def fit_wrapper():\n self.async_fit(X, y)\n IOLoop.instance().run_sync(fit_wrapper)\n \"\"\"\n raise NotImplementedError\n\n @gen.coroutine\n def async_predict_proba(self, X):\n \"\"\"Predict class labels for samples in X\"\"\"\n # make sure we have trained a model\n if self.model_id is None:\n raise NotFittedError(\"This BST Model is not fitted 
yet.\")\n # clean prediction set\n if isinstance(X, np.ndarray):\n X = X.tolist()\n elif not isinstance(X, list):\n X = list(X)\n # perform and return prediction\n results = yield self._predict_model(X)\n return np.asarray(results)\n" ]
[ [ "numpy.asarray", "sklearn.utils.validation.NotFittedError" ] ]
satpal82bhandari/Hands-On-Markov-Models-with-Python
[ "9c38aab4225806e25c3878a6c5b137710bbd4fa0" ]
[ "Chapter06/parse_data.py" ]
[ "\"\"\"\nUsage: parse_data.py --company=<company>\n\"\"\"\nimport os\nimport tarfile\nimport pandas as pd\nfrom pandas import errors as pd_errors\nfrom functools import reduce\nfrom docopt import docopt\n\nargs = docopt(doc=__doc__, argv=None, help=True,\n version=None, options_first=False)\n\nyears = [2015, 2016, 2017]\ncompany = args['--company']\n\n# Getting the data files list\ndata_files_list = []\nfor year in years:\n year_directory = 'data/{year}'.format(year=year)\n for file in os.listdir(year_directory):\n data_files_list.append(\n '{year_directory}/{file}'.format(year_directory=year_directory, file=file))\n\n\ndef parse_data(file_name, company_symbol):\n \"\"\"\n Returns data for the corresponding company\n\n :param file_name: name of the tar file\n :param company_symbol: company symbol\n :type file_name: str\n :type company_symbol: str\n :return: dataframe for the corresponding company data\n :rtype: pd.DataFrame\n \"\"\"\n tar = tarfile.open(file_name)\n try:\n price_report = pd.read_csv(tar.extractfile('prices.csv'))\n company_price_data = price_report[price_report['symbol']\n == company_symbol]\n return company_price_data\n except (KeyError, pd_errors.EmptyDataError):\n return pd.DataFrame()\n\n\n# Getting the complete data for a given company\ncompany_data = reduce(lambda df, file_name: df.append(parse_data(file_name, company)),\n data_files_list,\n pd.DataFrame())\ncompany_data = company_data.sort_values(by=['date'])\n\n# Create folder for company data if does not exists\nif not os.path.exists('data/company_data'):\n os.makedirs('data/company_data')\n\n# Write data to a CSV file\ncompany_data.to_csv('data/company_data/{company}.csv'.format(company=company),\n columns=['date', 'open', 'high', 'low',\n 'close', 'volume', 'adj_close'],\n index=False)\n" ]
[ [ "pandas.DataFrame" ] ]
mphoward/relentless
[ "5f7e8eb62696f45df28a948202b324563805a7f5" ]
[ "relentless/optimize/criteria.py" ]
[ "\"\"\"\nConvergence criteria\n====================\n\nA convergence test determines if an objective function has converged to the\ndesired minimum, subject to design constraints.\n\nThe following convergence tests have been implemented:\n\n.. autosummary::\n :nosignatures:\n\n AllTest\n AndTest\n AnyTest\n GradientTest\n OrTest\n ValueTest\n\n.. rubric:: Developer notes\n\nTo implement your own convergence test, create a class that derives from either of\nthe two abstract base classes below, and define the required properties and methods.\nIt may be helpful for the class to be composed having a :class:`Tolerance`.\n\n.. autosummary::\n :nosignatures:\n\n ConvergenceTest\n LogicTest\n Tolerance\n\n.. autoclass:: ConvergenceTest\n :member-order: bysource\n :members: converged\n\n.. autoclass:: Tolerance\n :member-order: bysource\n :members: absolute,\n relative,\n isclose\n\n.. autoclass:: GradientTest\n :member-order: bysource\n :members: tolerance,\n converged\n\n.. autoclass:: ValueTest\n :member-order: bysource\n :members: absolute,\n relative,\n value,\n converged\n\n.. autoclass:: LogicTest\n :member-order: bysource\n :members: converged\n\n.. autoclass:: AllTest\n :member-order: bysource\n :members: converged\n\n.. autoclass:: AnyTest\n :member-order: bysource\n :members: converged\n\n.. autoclass:: AndTest\n :member-order: bysource\n\n.. autoclass:: OrTest\n :member-order: bysource\n\n\"\"\"\nimport abc\n\nimport numpy\n\nfrom relentless import collections\nfrom relentless import variable\n\nclass ConvergenceTest(abc.ABC):\n r\"\"\"Abstract base class for optimization convergence tests.\n\n A :class:`ConvergenceTest` defines a test to determine if an\n :class:`~relentless.optimize.objective.ObjectiveFunction` :math:`f\\left(\\mathbf{x}\\right)`,\n defined on a set of design variables :math:`\\mathbf{x}=\\left[x_1,\\cdots,x_n\\right]`,\n has converged to a desired point.\n\n \"\"\"\n @abc.abstractmethod\n def converged(self, result):\n \"\"\"Check if the function is converged.\n\n Parameters\n ----------\n result : :class:`~relentless.optimize.objective.ObjectiveFunctionResult`\n The result to check for convergence.\n\n Returns\n -------\n bool\n True if the result is converged.\n \"\"\"\n pass\n\nclass Tolerance:\n r\"\"\"Tolerance for convergence tests.\n\n A tolerance can be used to check if one value is close to another in either\n an absolute or a relative sense. The test for closeness is based on the\n NumPy method :func:`numpy.isclose`, which can use both an absolute tolerance\n :math:`\\varepsilon_{\\rm a}` and a relative tolerance :math:`\\varepsilon_{\\rm r}`.\n\n A value :math:`a` is close to a value :math:`b` if and only if:\n\n .. math::\n\n \\lvert a-b\\rvert\\le\\varepsilon_{\\rm a}+\\varepsilon_{\\rm r}\\lvert b\\rvert\n\n An absolute tolerance can be any non-negative numerical value. A relative\n tolerance must be a non-negative numerical value between 0 and 1. 
By setting\n    :math:`\\varepsilon_{\\rm r}=0`, only an absolute tolerance test is performed.\n    Similarly, setting :math:`\\varepsilon_{\\rm a}=0` will result in the performance\n    of only a relative tolerance test.\n\n    Parameters\n    ----------\n    absolute : float\n        The default absolute tolerance.\n    relative : float\n        The default relative tolerance.\n\n    \"\"\"\n    def __init__(self, absolute, relative):\n        self._absolute = collections.DefaultDict(absolute)\n        self._relative = collections.DefaultDict(relative)\n\n    @property\n    def absolute(self):\n        \"\"\":class:`~relentless.collections.DefaultDict`: The absolute tolerance(s).\n        Must be non-negative.\"\"\"\n        return self._absolute\n\n    @property\n    def relative(self):\n        \"\"\":class:`~relentless.collections.DefaultDict`: The relative tolerance(s).\n        Must be between 0 and 1.\"\"\"\n        return self._relative\n\n    def isclose(self, a, b, key=None):\n        \"\"\"Check if the two values are equivalent within a tolerance.\n\n        The test is performed using :func:`numpy.isclose`.\n\n        The default :attr:`absolute` and :attr:`relative` tolerances can be overridden based on a ``key``,\n        which can be any valid dictionary key other than ``None``. When testing for closeness, if a ``key``\n        is given, the keyed tolerance will be used if it has been specified; otherwise, the default tolerance is used.\n\n        Parameters\n        ----------\n        a : float\n            The first value to compare.\n        b : float\n            The second value to compare.\n        key : object\n            The key to use for determining the tolerance (defaults to ``None``).\n\n        Returns\n        -------\n        bool\n            ``True`` if values are close.\n\n        Raises\n        ------\n        ValueError\n            If the absolute tolerance is not non-negative.\n        ValueError\n            If the relative tolerance is not between 0 and 1.\n\n        \"\"\"\n        if self.absolute[key] < 0:\n            raise ValueError('Absolute tolerances must be non-negative.')\n        if self.relative[key] < 0 or self.relative[key] > 1:\n            raise ValueError('Relative tolerances must be between 0 and 1.')\n        return numpy.isclose(a, b, atol=self.absolute[key], rtol=self.relative[key])\n\nclass GradientTest(ConvergenceTest):\n    r\"\"\"Gradient test for convergence using absolute tolerance.\n\n    This test is useful for finding minima / maxima where the gradient should be zero.\n    This is implemented using an absolute tolerance, :math:`\\varepsilon_{\\rm a}`, which\n    can be any non-negative numerical value. One ``tolerance`` must be initially specified\n    for all variables, but a different tolerance can be set for each variable:\n\n    .. code::\n\n        test = GradientTest(1.e-3)\n        test.tolerance[x] = 1.e-2\n\n    The result is converged with respect to an unconstrained design variable\n    :math:`x_i` (i.e., having :class:`~relentless.variable.DesignVariable.State` ``FREE``)\n    if and only if:\n\n    .. math::\n\n        \\left\\lvert\\frac{\\partial f}{\\partial x_i}\\right\\rvert < t\n\n    If an upper-bound constraint is active on :math:`x_i` (i.e. it has\n    :class:`~relentless.variable.DesignVariable.State` ``HIGH``), the result\n    is converged with respect to :math:`x_i` if and only if:\n\n    .. math::\n\n        -\\frac{\\partial f}{\\partial x_i} > -t\n\n    If a lower-bound constraint is active on :math:`x_i` (i.e. it has\n    :class:`~relentless.variable.DesignVariable.State` ``LOW``), the result\n    is converged with respect to :math:`x_i` if and only if:\n\n    .. 
math::\n\n -\\frac{\\partial f}{\\partial x_i} < t\n\n A result is converged if and only if the result is converged with respect to\n all design variables.\n\n Parameters\n ----------\n tolerance : float\n The default absolute tolerance.\n variables : :class:`~relentless.variable.Variable` or tuple\n Variable(s) to test convergence for in gradient.\n\n \"\"\"\n def __init__(self, tolerance, variables):\n self._tolerance = Tolerance(absolute=tolerance, relative=0)\n self.variables = variable.graph.check_variables_and_types(variables, variable.Variable)\n\n @property\n def tolerance(self):\n \"\"\":class:`~relentless.collections.DefaultDict`: The absolute tolerance(s).\"\"\"\n return self._tolerance.absolute\n\n def converged(self, result):\n \"\"\"Check if the function is converged using the absolute gradient test.\n\n Parameters\n ----------\n result : :class:`~relentless.optimize.objective.ObjectiveFunctionResult`\n The location of the function at which to check for convergence.\n\n Returns\n -------\n bool\n True if the function is converged.\n\n Raises\n ------\n KeyError\n If the requested variable is not in the gradient of the result.\n\n \"\"\"\n converged = True\n for x in self.variables:\n if x not in result.gradient:\n raise KeyError('Design variable not in result')\n grad = result.gradient[x]\n tol = self.tolerance[x]\n if x.athigh() and -grad < -tol:\n converged = False\n break\n elif x.atlow() and -grad > tol:\n converged = False\n break\n elif x.isfree() and not self._tolerance.isclose(grad, 0, key=x):\n converged = False\n break\n\n return converged\n\nclass ValueTest(ConvergenceTest):\n r\"\"\"Value test for convergence.\n\n The result is converged if and only if the value of the function :math:`f`\n is close to the ``value`` according to :meth:`Tolerance.isclose`. 
Absolute\n    and/or relative tolerances may be used.\n\n    Parameters\n    ----------\n    value : float\n        The value to check.\n    absolute : float\n        The default absolute tolerance (defaults to ``1e-8``).\n    relative : float\n        The default relative tolerance (defaults to ``1e-5``).\n\n    \"\"\"\n    def __init__(self, value, absolute=1e-8, relative=1e-5):\n        self._tolerance = Tolerance(absolute=absolute, relative=relative)\n        self.value = value\n\n    @property\n    def value(self):\n        \"\"\"float: The value(s) to check.\"\"\"\n        return self._value\n\n    @value.setter\n    def value(self, value):\n        self._value = value\n\n    @property\n    def absolute(self):\n        \"\"\"float: The absolute tolerance.\"\"\"\n        return self._tolerance.absolute.default\n\n    @absolute.setter\n    def absolute(self, value):\n        self._tolerance.absolute.default = value\n\n    @property\n    def relative(self):\n        \"\"\"float: The relative tolerance.\"\"\"\n        return self._tolerance.relative.default\n\n    @relative.setter\n    def relative(self, value):\n        self._tolerance.relative.default = value\n\n    def converged(self, result):\n        \"\"\"Check if the function is converged using the value test.\n\n        Determines if the value of a result is close to the specified value\n        using :meth:`Tolerance.isclose()`.\n\n        Parameters\n        ----------\n        result : :class:`~relentless.optimize.objective.ObjectiveFunctionResult`\n            The result to check for convergence.\n\n        Returns\n        -------\n        bool\n            True if the function is converged.\n\n        \"\"\"\n        return self._tolerance.isclose(result.value, self.value)\n\nclass LogicTest(ConvergenceTest):\n    \"\"\"Abstract base class for logical convergence tests.\n\n    Parameters\n    ----------\n    tests : args\n        The :class:`ConvergenceTest`\\s to be used.\n\n    Raises\n    ------\n    TypeError\n        If all inputs are not :class:`ConvergenceTest`\\s.\n\n    \"\"\"\n    def __init__(self, *tests):\n        if not all([isinstance(t, ConvergenceTest) for t in tests]):\n            raise TypeError('All inputs to a LogicTest must be ConvergenceTests.')\n        self.tests = tests\n\nclass AnyTest(LogicTest):\n    \"\"\"Logic test if any specified test returns convergence.\n\n    Check if the function is determined to be converged by any of the specified\n    convergence tests.\n\n    Parameters\n    ----------\n    tests : args\n        The :class:`ConvergenceTest`\\s to be used.\n\n    \"\"\"\n    def converged(self, result):\n        \"\"\"Check if the function has converged by any of the specified tests.\n\n        Parameters\n        ----------\n        result : :class:`~relentless.optimize.objective.ObjectiveFunctionResult`\n            The result to check for convergence.\n\n        Returns\n        -------\n        bool\n            True if the function is converged by any test.\n\n        \"\"\"\n        return any(t.converged(result) for t in self.tests)\n\nclass AllTest(LogicTest):\n    \"\"\"Logic test if all specified tests return convergence.\n\n    Check if the function is determined to be converged by all of the specified\n    convergence tests.\n\n    Parameters\n    ----------\n    tests : args\n        The :class:`ConvergenceTest`\\s to be used.\n\n    \"\"\"\n    def converged(self, result):\n        \"\"\"Check if the function is converged by all of the specified tests.\n\n        Parameters\n        ----------\n        result : :class:`~relentless.optimize.objective.ObjectiveFunctionResult`\n            The result to check for convergence.\n\n        Returns\n        -------\n        bool\n            True if the function is converged by all tests.\n\n        \"\"\"\n        return all(t.converged(result) for t in self.tests)\n\nclass OrTest(AnyTest):\n    \"\"\"Logic test if either of the specified tests returns convergence.\n\n    Check if the function is determined to be converged by either of the specified\n    convergence tests.\n\n    
Parameters\n ----------\n a : :class:`ConvergenceTest`\n The first convergence test to use.\n b : :class:`ConvergenceTest`\n The second convergence test to use.\n\n \"\"\"\n def __init__(self, a, b):\n super().__init__(a, b)\n\nclass AndTest(AllTest):\n \"\"\"Logic test if both specified tests return convergence.\n\n Check if the function is determined to be converged by both of the specified\n convergence tests.\n\n Parameters\n ----------\n a : :class:`ConvergenceTest`\n The first convergence test to use.\n b : :class:`ConvergenceTest`\n The second convergence test to use.\n\n \"\"\"\n def __init__(self, a, b):\n super().__init__(a, b)\n" ]
[ [ "numpy.isclose" ] ]
jmaslek/OpenBBTerminal
[ "919ca99f80809b2b9fe828dc3dd201c813d12d6d" ]
[ "openbb_terminal/forex/forex_helper.py" ]
[ "from datetime import datetime\nfrom typing import List, Optional, Dict, Iterable\nimport os\nimport argparse\nimport logging\nimport re\n\nimport yfinance as yf\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nimport mplfinance as mpf\n\nfrom openbb_terminal.forex import av_model, polygon_model\nfrom openbb_terminal.rich_config import console\nfrom openbb_terminal.decorators import log_start_end\nfrom openbb_terminal.helper_funcs import plot_autoscale, is_valid_axes_count\nfrom openbb_terminal.config_terminal import theme\n\n\nFOREX_SOURCES: Dict = {\n \"yf\": \"YahooFinance\",\n \"av\": \"AlphaAdvantage\",\n \"oanda\": \"Oanda\",\n \"polygon\": \"Polygon\",\n}\n\nSOURCES_INTERVALS: Dict = {\n \"yf\": [\n \"1min\",\n \"2min\",\n \"5min\",\n \"15min\",\n \"30min\",\n \"60min\",\n \"90min\",\n \"1hour\",\n \"1day\",\n \"5day\",\n \"1week\",\n \"1month\",\n \"3month\",\n ],\n \"av\": [\"1min\", \"5min\", \"15min\", \"30min\", \"60min\"],\n}\n\nINTERVAL_MAPS: Dict = {\n \"yf\": {\n \"1min\": \"1m\",\n \"2min\": \"2m\",\n \"5min\": \"5m\",\n \"15min\": \"15m\",\n \"30min\": \"30m\",\n \"60min\": \"60m\",\n \"90min\": \"90m\",\n \"1hour\": \"1h\",\n \"1day\": \"1d\",\n \"5day\": \"5d\",\n \"1week\": \"1wk\",\n \"1month\": \"1mo\",\n \"3month\": \"3mo\",\n },\n \"av\": {\"1min\": 1, \"5min\": 5, \"15min\": 15, \"30min\": 30, \"60min\": 60},\n}\n\nlogger = logging.getLogger(__name__)\n\n\n@log_start_end(log=logger)\ndef load(\n to_symbol: str,\n from_symbol: str,\n resolution: str,\n interval: str,\n start_date: str,\n source: str = \"yf\",\n):\n if source in [\"yf\", \"av\"]:\n interval_map = INTERVAL_MAPS[source]\n\n if interval not in interval_map.keys():\n console.print(\n f\"Interval not supported by {FOREX_SOURCES[source]}. 
Need to be one of the following options\",\n interval_map.keys(),\n )\n return pd.DataFrame()\n\n if source == \"av\":\n\n df = av_model.get_historical(\n to_symbol=to_symbol,\n from_symbol=from_symbol,\n resolution=resolution,\n interval=interval_map[interval],\n start_date=start_date,\n )\n\n if source == \"yf\":\n\n df = yf.download(\n f\"{from_symbol}{to_symbol}=X\",\n end=datetime.now(),\n start=datetime.strptime(start_date, \"%Y-%m-%d\"),\n interval=interval_map[interval],\n progress=False,\n )\n\n if source == \"polygon\":\n # Interval for polygon gets broken into mulltiplier and timeframe\n temp = re.split(r\"(\\d+)\", interval)\n multiplier = int(temp[1])\n timeframe = temp[2]\n if timeframe == \"min\":\n timeframe = \"minute\"\n df = polygon_model.get_historical(\n f\"{from_symbol}{to_symbol}\",\n multiplier=multiplier,\n timespan=timeframe,\n from_date=start_date,\n )\n\n return df\n\n\n@log_start_end(log=logger)\ndef get_yf_currency_list() -> List:\n \"\"\"Load YF list of forex pair a local file.\"\"\"\n path = os.path.join(os.path.dirname(__file__), \"data/yahoofinance_forex.json\")\n\n return sorted(list(set(pd.read_json(path)[\"from_symbol\"])))\n\n\nYF_CURRENCY_LIST = get_yf_currency_list()\n\n\n@log_start_end(log=logger)\ndef check_valid_yf_forex_currency(fx_symbol: str) -> str:\n \"\"\"Check if given symbol is supported on Yahoo Finance\n\n Parameters\n ----------\n fx_symbol : str\n Symbol to check\n\n Returns\n -------\n str\n Currency symbol\n\n Raises\n ------\n argparse.ArgumentTypeError\n Symbol not valid on YahooFinance\n \"\"\"\n if fx_symbol.upper() in get_yf_currency_list():\n return fx_symbol.upper()\n\n raise argparse.ArgumentTypeError(\n f\"{fx_symbol.upper()} not found in YahooFinance supported currency codes. \"\n )\n\n\n@log_start_end(log=logger)\ndef display_candle(\n data: pd.DataFrame,\n to_symbol: str,\n from_symbol: str,\n ma: Optional[Iterable[int]] = None,\n external_axes: Optional[List[plt.Axes]] = None,\n):\n \"\"\"Show candle plot for fx data.\n\n Parameters\n ----------\n data : pd.DataFrame\n Loaded fx historical data\n to_symbol : str\n To forex symbol\n from_symbol : str\n From forex symbol\n external_axes: Optional[List[plt.Axes]]\n External axes (1 axis is expected in the list), by default None\n \"\"\"\n candle_chart_kwargs = {\n \"type\": \"candle\",\n \"style\": theme.mpf_style,\n \"volume\": False,\n \"xrotation\": theme.xticks_rotation,\n \"scale_padding\": {\"left\": 0.3, \"right\": 1, \"top\": 0.8, \"bottom\": 0.8},\n \"update_width_config\": {\n \"candle_linewidth\": 0.6,\n \"candle_width\": 0.8,\n \"volume_linewidth\": 0.8,\n \"volume_width\": 0.8,\n },\n \"warn_too_much_data\": 20000,\n }\n if ma:\n candle_chart_kwargs[\"mav\"] = ma\n # This plot has 1 axis\n if not external_axes:\n candle_chart_kwargs[\"returnfig\"] = True\n candle_chart_kwargs[\"figratio\"] = (10, 7)\n candle_chart_kwargs[\"figscale\"] = 1.10\n candle_chart_kwargs[\"figsize\"] = plot_autoscale()\n fig, ax = mpf.plot(data, **candle_chart_kwargs)\n fig.suptitle(\n f\"{from_symbol}/{to_symbol}\",\n x=0.055,\n y=0.965,\n horizontalalignment=\"left\",\n )\n if ma:\n # Manually construct the chart legend\n colors = []\n\n for i, _ in enumerate(ma):\n colors.append(theme.get_colors()[i])\n\n lines = [Line2D([0], [0], color=c) for c in colors]\n labels = [\"MA \" + str(label) for label in ma]\n ax[0].legend(lines, labels)\n theme.visualize_output(force_tight_layout=False)\n\n elif is_valid_axes_count(external_axes, 1):\n (ax1,) = external_axes\n 
candle_chart_kwargs[\"ax\"] = ax1\n mpf.plot(data, **candle_chart_kwargs)\n else:\n return\n\n\n@log_start_end(log=logger)\ndef parse_forex_symbol(input_symbol):\n \"\"\"Parses potential forex symbols\"\"\"\n for potential_split in [\"-\", \"/\"]:\n if potential_split in input_symbol:\n symbol = input_symbol.replace(potential_split, \"\")\n return symbol\n if len(input_symbol) != 6:\n raise argparse.ArgumentTypeError(\"Input symbol should be 6 characters.\\n \")\n return input_symbol.upper()\n" ]
[ [ "matplotlib.lines.Line2D", "pandas.read_json", "pandas.DataFrame" ] ]
wang3702/barlowtwins
[ "6d1dc9d31f8f3c87fa4148b7dada0fe9e34805d1" ]
[ "ops/LARS.py" ]
[ "import torch\nfrom torch import nn\n\n\nclass SGD_LARC(object):\n \"\"\"\n :class:`LARC` is a pytorch implementation of both the scaling and clipping variants of LARC,\n in which the ratio between gradient and parameter magnitudes is used to calculate an adaptive\n local learning rate for each individual parameter. The algorithm is designed to improve\n convergence of large batch training.\n\n See https://arxiv.org/abs/1708.03888 for calculation of the local learning rate.\n In practice it modifies the gradients of parameters as a proxy for modifying the learning rate\n of the parameters. This design allows it to be used as a wrapper around any torch.optim Optimizer.\n ```\n model = ...\n optim = torch.optim.Adam(model.parameters(), lr=...)\n optim = LARC(optim)\n ```\n It can even be used in conjunction with apex.fp16_utils.FP16_optimizer.\n ```\n model = ...\n optim = torch.optim.Adam(model.parameters(), lr=...)\n optim = LARC(optim)\n optim = apex.fp16_utils.FP16_Optimizer(optim)\n ```\n Args:\n optimizer: Pytorch optimizer to wrap and modify learning rate for.\n trust_coefficient: Trust coefficient for calculating the lr. See https://arxiv.org/abs/1708.03888\n clip: Decides between clipping or scaling mode of LARC. If `clip=True` the learning rate is set to `min(optimizer_lr, local_lr)` for each parameter. If `clip=False` the learning rate is set to `local_lr*optimizer_lr`.\n eps: epsilon kludge to help with numerical stability while calculating adaptive_lr\n \"\"\"\n\n def __init__(self, optimizer, trust_coefficient=0.02, clip=True, eps=1e-8):\n self.optim = optimizer\n self.trust_coefficient = trust_coefficient\n self.eps = eps\n self.clip = clip\n\n def __getstate__(self):\n return self.optim.__getstate__()\n\n def __setstate__(self, state):\n self.optim.__setstate__(state)\n\n @property\n def state(self):\n return self.optim.state\n\n def __repr__(self):\n return self.optim.__repr__()\n\n @property\n def param_groups(self):\n return self.optim.param_groups\n\n @param_groups.setter\n def param_groups(self, value):\n self.optim.param_groups = value\n\n def state_dict(self):\n return self.optim.state_dict()\n\n def load_state_dict(self, state_dict):\n self.optim.load_state_dict(state_dict)\n\n def zero_grad(self):\n self.optim.zero_grad()\n\n def add_param_group(self, param_group):\n self.optim.add_param_group(param_group)\n\n def step(self):\n with torch.no_grad():\n weight_decays = []\n for group in self.optim.param_groups:\n # absorb weight decay control from optimizer\n weight_decay = group['weight_decay'] if 'weight_decay' in group else 0\n weight_decays.append(weight_decay)\n group['weight_decay'] = 0\n #SIMCLR skip batchnorm or not\n\n if 'skip_lars' in group and group['skip_lars']:\n continue\n\n for p in group['params']:\n if p.grad is None:\n continue\n param_norm = torch.norm(p.data)\n grad_norm = torch.norm(p.grad.data)\n\n if param_norm != 0 and grad_norm != 0:\n # calculate adaptive lr + weight decay\n adaptive_lr = self.trust_coefficient * (param_norm) / (\n grad_norm + param_norm * weight_decay + self.eps)\n\n # clip learning rate for LARC\n if self.clip:\n # calculation of adaptive_lr so that when multiplied by lr it equals `min(adaptive_lr, lr)`\n adaptive_lr = min(adaptive_lr / group['lr'], 1)\n\n p.grad.data += weight_decay * p.data\n p.grad.data *= adaptive_lr\n\n self.optim.step()\n # return weight decay control to optimizer\n for i, group in enumerate(self.optim.param_groups):\n group['weight_decay'] = weight_decays[i]" ]
[ [ "torch.norm", "torch.no_grad" ] ]
LenzDu/Kaggle-Competition-Sberbank
[ "ef8ba67c0a17183cac7ab693a42f4e5dbbac6bb3" ]
[ "lightGBM.py" ]
[ "# author: vrtjso\r\nimport numpy as np\r\nimport pandas as pd\r\nimport lightgbm as lgb\r\nimport gc\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom Utils import CreateOutput\r\n\r\ntrainDf = pd.read_csv('train_featured.csv')\r\nXtrain = trainDf.drop(['price_doc','w'],1)\r\nw = trainDf.w.values\r\nYtrain = trainDf.price_doc\r\n# scaler = StandardScaler().fit(Ytrain)\r\n# Ytrain = scaler.transform(Ytrain)\r\nYtrain = Ytrain * 0.0000001\r\ntrain = lgb.Dataset(Xtrain, Ytrain, weight=w)\r\ndel Xtrain, Ytrain; gc.collect()\r\n\r\n#min CV on 0.1(new): 2350 (num_leave:15, min_data:30)\r\n#min CV 0.1 normalized: 0.511 (num_leave:15, min_data:30)\r\nparams = {'objective':'regression','metric':'rmse',\r\n 'learning_rate':0.1,'max_depth':-1,'sub_feature':0.7,'sub_row':1,\r\n 'num_leaves':15,'min_data':30,'max_bin':20,\r\n 'bagging_fraction':0.9,'bagging_freq':40,'verbosity':-1}\r\nlgbcv = lgb.cv(params, train, 10000, nfold=6, early_stopping_rounds=50,\r\n verbose_eval=50, show_stdv=False)['rmse-mean']\r\nprint('The final CV score:', lgbcv[-1])\r\n\r\n# best_round = len(lgbcv)\r\n# bst = lgb.train(params, train, best_round)\r\n# fs = bst.feature_importance()\r\n# f_name = bst.feature_name()\r\n# f = dict()\r\n# for i in range(0,len(fs)):\r\n# f[f_name[i]]=fs[i]\r\n\r\n# Xtest = pd.read_csv('test_featured.csv')\r\n# prediction = bst.predict(Xtest) * 10000000\r\n# # prediction = scaler.inverse_transform(prediction)\r\n# output = pd.read_csv('test.csv')\r\n# output = output[['id']]\r\n# output['price_doc'] = prediction\r\n# output.to_csv(r'Ensemble\\Submission_lgb.csv',index=False)\r\n" ]
[ [ "pandas.read_csv" ] ]
DNPLab/DNPLab
[ "78999a4e8320b6476a5aa55d9884c49d74149edc" ]
[ "dnplab/dnpIO/tnmr.py" ]
[ "from .. import dnpdata\nimport numpy as np\nimport struct\nimport re\n\n\ndef import_tnmr(path):\n \"\"\"\n Import tnmr data and return dnpdata object\n\n Args:\n path (str) : Path to .jdf file\n\n Returns:\n tnmr_data (object) : dnpdata object containing tnmr data\n \"\"\"\n\n attrs = import_tnmr_pars(path)\n values, coords, dims = import_tnmr_data(path)\n\n tnmr_data = dnpdata(values, coords, dims, attrs)\n\n return tnmr_data\n\n\ndef import_tnmr_pars(path):\n \"\"\"\n Import parameter fields of tnmr data\n\n Args:\n path (str) : Path to .tnt file\n\n Returns:\n params (dict) : dictionary of parameter fields and values\n \"\"\"\n\n params = {}\n\n with open(path, \"rb\") as f:\n params[\"version\"] = f.read(8).decode(\"utf-8\")\n\n return params\n\n\ndef import_tnmr_data(path):\n \"\"\"\n Import spectrum or spectra of tnmr data\n\n Args:\n path (str) : Path to .tnt file\n\n Returns:\n data (ndarray) : spectrum or spectra if >1D\n abscissa (list) : coordinates of axes\n dims (list) : axes names\n \"\"\"\n\n with open(path, \"rb\") as f:\n\n version = f.read(8).decode(\"utf-8\")\n\n section = None\n\n while section != \"\":\n\n section = f.read(4).decode(\"utf-8\")\n section = str(section)\n\n if section == \"TMAG\":\n flag = bool(f.read(4))\n if flag:\n bytes_to_read = f.read(4)\n bytes_to_read = struct.unpack(\"<i\", bytes_to_read)[0]\n\n header = f.read(bytes_to_read)\n\n ### Deal With Header Here ###\n\n elif section == \"DATA\":\n flag = bool(f.read(4))\n if flag:\n bytes_to_read = f.read(4)\n bytes_to_read = struct.unpack(\"<i\", bytes_to_read)[0]\n\n raw_data = f.read(bytes_to_read)\n\n raw_data = struct.unpack(\"%if\" % (bytes_to_read / 4), raw_data)\n\n raw_data = np.array(raw_data)\n\n data = raw_data[::2] + 1j * raw_data[1::2]\n\n else:\n flag = bool(f.read(4))\n if flag:\n bytes_to_read = f.read(4)\n bytes_to_read = struct.unpack(\"<i\", bytes_to_read)[0]\n\n unsupported_bytes = f.read(bytes_to_read)\n\n abscissa = np.array(range(0, len(data)))\n\n dims = [\"t2\"]\n\n return data, list(abscissa), dims\n" ]
[ [ "numpy.array" ] ]
mariajmolina/ML-extremes-mcs
[ "b3783b00132dd59247947cc9d1492dfb18c2415e" ]
[ "ML-extremes-mcs/running_mcs_stats_flextrkrID3H.py" ]
[ "import numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport cartopy.crs as ccrs\nimport cartopy.feature as cf\nfrom visualize import create_STATES\nfrom id_selector import IDSelector\nimport mcs_stats\nfrom config import main_path_era, savefig_path, us_states_location\n\n######################################################################\n######################################################################\n##############\n############## Generate MCS stats and figs\n##############\n######################################################################\n######################################################################\n\ndef iterate_mcs_stats(month, years, dictpath, mcspath, ens_num='era5', msk_var='cloudtracknumber'):\n \"\"\"\n Iterate the MCS stats.\n \n Args:\n month (int): Select one month.\n years (int): List of years to iterate over. \n dictpath (str): Path to location of dictionary.\n mcspath (str): Path to location of MCSs.\n ens_num (str): The dataset ensemble choice. Defaults to ``era5``.\n msk_var (str): Mask file variable. \n \n \"\"\"\n # create blank lists\n a = []; b = []; c = []; d = []; e = []\n \n # first loop return lat lon\n return_coords = True\n \n # loop over years\n for i, yr in enumerate(years):\n \n # instantiate id selector class\n ids_ = IDSelector(main_path = dictpath, \n start_year = 2004, \n end_year = 2016, \n month_only = [month], year_only = [yr], mcs_only = False, \n percent_train = 0.7, ens_num = ens_num)\n \n # grab ids for corresponding month and year and other settings\n IDlist = ids_.generate_IDarray(pre_dict=True, dict_freq='3H', \n start_str=None, end_str=None, dictsave=None)\n \n # instantiate MCS stats class object\n mcsstat = mcs_stats.MCSstats(IDlist=IDlist, mcs_path=mcspath, msk_var=msk_var)\n \n # return masks (and lat lon once)\n if not return_coords:\n \n masks = mcsstat.open_masks(return_coords=return_coords)\n \n if return_coords:\n \n masks, lat, lon = mcsstat.open_masks(return_coords=return_coords)\n \n # go thru mcs stat options\n a.append(mcsstat.nontracked_total_grid(masks))\n b.append(mcsstat.tracked_total_grid(masks))\n c.append(mcsstat.tracked_total(masks))\n d.append(mcsstat.total_with_mcs(masks))\n e.append(mcsstat.percentage_with_mcs(d[i]))\n \n # remove request to return lat lon\n return_coords = False\n \n return a, b, c, d, e, lat, lon\n\n\ndef create_mcs_stat_figure(data, STATES, lat, lon, vmin=0, vmax=10, cmap='BuPu', \n nrows=4, ncols=3, titles=None, suptitle=None, savefig=None):\n \"\"\"\n Function to plot MCS stats on maps.\n \n ** Works with nontracked_total_grid and tracked_total_grid.\n \n Args:\n data (list of 2d numpy array): The files containing stats.\n STATES (cartopy feature): US STATES file.\n lat (1d or 2d numpy array): Latitude.\n lon (1d or 2d numpy array): Longitude.\n vmin (int): Minimum value for plot. Defaults to ``0``.\n vmax (int): Maximum value for plot. Defaults to ``10``.\n cmap (str): Colormap name option from matplotlib. Defaults to ``BuPU``.\n nrows (int): Number of rows for subplots. Defaults to ``4``.\n ncols (int): Number of columns for subplots. Defaults to ``3``.\n titles (float, int, or str): List of titles for plot. Defaults to ``None``.\n suptitle (str): Title for full figure. Defaults to ``None``.\n savefig (str): Directory and name of figure for saving. 
Defaults to ``None``.\n \n \"\"\"\n if not titles:\n \n titles = np.zeros(nrows*ncols)\n \n # create fig\n fig, axes = plt.subplots(figsize=(7.,6.5), nrows=nrows, ncols=ncols, sharex=True, sharey=True, \n subplot_kw={'projection': ccrs.PlateCarree()})\n \n # create plots\n if suptitle:\n \n fig.suptitle(suptitle, fontsize=12, y=0.95)\n \n for i, (ax, title) in enumerate(zip(axes.flat, titles)):\n \n if title:\n \n ax.set_title(title, fontsize=10)\n \n ax.pcolormesh(lon, lat, data[i], transform=ccrs.PlateCarree(), vmin=vmin, vmax=vmax, cmap=cmap)\n ax.add_feature(STATES, facecolor='none', edgecolor='k', zorder=30)\n ax.add_feature(cf.BORDERS)\n ax.margins(x=0,y=0)\n ax.coastlines()\n \n # cbar\n cbar_ax = fig.add_axes([0.345, 0.1, 0.3, 0.0125])\n bounds = [vmin,vmax*0.5,vmax]\n newnorm=mpl.colors.Normalize(vmin=vmin, vmax=vmax)\n cbar = mpl.colorbar.ColorbarBase(cbar_ax, cmap=plt.cm.get_cmap(cmap),\n norm=newnorm,\n ticks=bounds,\n orientation='horizontal')\n cbar.set_label('Frequency', fontsize=12) \n cbar.ax.tick_params(labelsize=12)\n \n # save fig\n if savefig:\n \n plt.savefig(savefig, bbox_inches='tight', dpi=200)\n \n return plt.show()\n \n if not savefig:\n \n return plt.show()\n\n \ndef main():\n \"\"\"\n Run statistics and generate associated plots for MCSs.\n \"\"\"\n ############## set variables\n dictpath = main_path_era\n mcspath = f'{main_path_era}/dl_files/3H/'\n STATES = create_STATES(us_states_location)\n theyears = [2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015]\n \n ############## APRIL\n a, b, c, d, e, lat, lon = iterate_mcs_stats(4, theyears, dictpath=dictpath, mcspath=mcspath)\n create_mcs_stat_figure(b, STATES, lat, lon, vmin=0, vmax=15, cmap='BuPu', suptitle='April', titles=theyears,\n savefig=f'{savefig_path}/april_mcs_flextrkr2.png')\n print(f\"Percent of month with MCS: {np.array(e)}\")\n plt.bar(theyears, c); plt.title('April'); \n plt.savefig(f'{savefig_path}/april_mcs_bar.png', bbox_inches='tight', dpi=200); plt.show()\n \n ############## MAY\n a, b, c, d, e, lat, lon = iterate_mcs_stats(5, theyears, dictpath=dictpath, mcspath=mcspath)\n create_mcs_stat_figure(b, STATES, lat, lon, vmin=0, vmax=15, cmap='BuPu', suptitle='May', titles=theyears,\n savefig=f'{savefig_path}/may_mcs_flextrkr2.png')\n print(f\"Percent of month with MCS: {np.array(e)}\")\n plt.bar(theyears, c); plt.title('May'); \n plt.savefig(f'{savefig_path}/may_mcs_bar.png', bbox_inches='tight', dpi=200); plt.show()\n \n ############## JUNE\n a, b, c, d, e, lat, lon = iterate_mcs_stats(6, theyears, dictpath=dictpath, mcspath=mcspath)\n create_mcs_stat_figure(b, STATES, lat, lon, vmin=0, vmax=15, cmap='BuPu', suptitle='June', titles=theyears,\n savefig=f'{savefig_path}/june_mcs_flextrkr2.png')\n print(f\"Percent of month with MCS: {np.array(e)}\")\n plt.bar(theyears, c); plt.title('June'); \n plt.savefig(f'{savefig_path}/june_mcs_bar.png', bbox_inches='tight', dpi=200); plt.show()\n \n ############## JULY\n a, b, c, d, e, lat, lon = iterate_mcs_stats(7, theyears, dictpath=dictpath, mcspath=mcspath)\n create_mcs_stat_figure(b, STATES, lat, lon, vmin=0, vmax=15, cmap='BuPu', suptitle='July', titles=theyears,\n savefig=f'{savefig_path}/july_mcs_flextrkr2.png')\n print(f\"Percent of month with MCS: {np.array(e)}\")\n plt.bar(theyears, c); plt.title('July'); \n plt.savefig(f'{savefig_path}/july_mcs_bar.png', bbox_inches='tight', dpi=200); plt.show()\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "matplotlib.pyplot.title", "matplotlib.pyplot.cm.get_cmap", "matplotlib.colors.Normalize", "matplotlib.pyplot.savefig", "matplotlib.pyplot.bar", "numpy.array", "matplotlib.pyplot.show", "numpy.zeros" ] ]
Bennygmate/Deep-Learning
[ "a7236f15829009bcadfdee884c78182ebdfb42f7" ]
[ "deep learning/Mnist/train.py" ]
[ "import tensorflow as tf\nimport hw1 as qfns\nimport time\nfrom datetime import datetime\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n#options: \"onelayer\", \"twolayer\", \"conv\"\n#network = \"none\"\n#network = \"onelayer\"\n#network = \"twolayer\"\nnetwork = \"conv\"\n\n\ndef accuracy(sess, dataset, batch_size, X, Y, accuracy_op):\n # compute number of batches for given batch_size\n num_test_batches = dataset.num_examples // batch_size\n\n overall_accuracy = 0.0\n for i in range(num_test_batches):\n batch = mnist.test.next_batch(batch_size)\n accuracy_batch = \\\n sess.run(accuracy_op, feed_dict={X: batch[0], Y: batch[1]})\n overall_accuracy += accuracy_batch\n\n return overall_accuracy/num_test_batches\n\ndef variable_summaries(var, name):\n \"\"\"Attach a lot of summaries to a Tensor (for TensorBoard visualization).\"\"\"\n with tf.name_scope(name+'_summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)\n\ndef train(sess, mnist, n_training_epochs, batch_size,\n summaries_op, accuracy_summary_op, train_writer, test_writer,\n X, Y, train_op, loss_op, accuracy_op):\n # compute number of batches for given batch_size\n num_train_batches = mnist.train.num_examples // batch_size\n\n # record starting time\n train_start = time.time()\n\n # Run through the entire dataset n_training_epochs times\n for i in range(n_training_epochs):\n # Initialise statistics\n training_loss = 0\n epoch_start = time.time()\n\n # Run the SGD train op for each minibatch\n for _ in range(num_train_batches):\n batch = mnist.train.next_batch(batch_size)\n trainstep_result, batch_loss, summary = \\\n qfns.train_step(sess, batch, X, Y, train_op, loss_op, summaries_op)\n train_writer.add_summary(summary, i)\n training_loss += batch_loss\n\n # Timing and statistics\n epoch_duration = round(time.time() - epoch_start, 2)\n ave_train_loss = training_loss / num_train_batches\n\n # Get accuracy\n train_accuracy = \\\n accuracy(sess, mnist.train, batch_size, X, Y, accuracy_op)\n test_accuracy = \\\n accuracy(sess, mnist.test, batch_size, X, Y, accuracy_op)\n\n # log accuracy at the current epoch on training and test sets\n train_acc_summary = sess.run(accuracy_summary_op,\n feed_dict={accuracy_placeholder: train_accuracy})\n train_writer.add_summary(train_acc_summary, i)\n test_acc_summary = sess.run(accuracy_summary_op,\n feed_dict={accuracy_placeholder: test_accuracy})\n test_writer.add_summary(test_acc_summary, i)\n [writer.flush() for writer in [train_writer, test_writer]]\n\n train_duration = round(time.time() - train_start, 2)\n # Output to montior training\n print('Epoch {0}, Training Loss: {1}, Test accuracy: {2}, \\\ntime: {3}s, total time: {4}s'.format(i, ave_train_loss,\n test_accuracy, epoch_duration,\n train_duration))\n print('Total training time: {0}s'.format(train_duration))\n print('Confusion Matrix:')\n true_class=tf.argmax(Y, 1)\n predicted_class=tf.argmax(preds_op, 1)\n cm=tf.confusion_matrix(predicted_class,true_class)\n print(sess.run(cm, feed_dict={X: mnist.test.images,\n Y: mnist.test.labels}))\n\ndef get_accuracy_op(preds_op, Y):\n with tf.name_scope('accuracy_ops'):\n correct_preds_op = tf.equal(tf.argmax(preds_op, 1), tf.argmax(Y, 1))\n # the tf.cast sets True to 1.0, and False to 0.0. 
With N predictions, of\n # which M are correct, the mean will be M/N, i.e. the accuracy\n accuracy_op = tf.reduce_mean(tf.cast(correct_preds_op, tf.float32))\n return accuracy_op\n\n\nif __name__ == \"__main__\":\n # hyperparameters\n learning_rate = 0.001\n batch_size = 128\n n_training_epochs = 20\n\n # load data\n mnist = input_data.read_data_sets('data/mnist', one_hot=True)\n\n # Input (X) and Target (Y) placeholders, they will be fed with a batch of\n # input and target values resepectively, from the training and test sets\n X = qfns.input_placeholder()\n Y = qfns.target_placeholder()\n\n # Create the tensorflow computational graph for our model\n if network == \"onelayer\":\n w, b, logits_op, preds_op, xentropy_op, loss_op = qfns.onelayer(X, Y)\n [variable_summaries(v, name) for (v, name) in zip((w, b), (\"w\", \"b\"))]\n tf.summary.histogram('pre_activations', logits_op)\n elif network == \"twolayer\":\n w1, b1, w2, b2, logits_op, preds_op, xentropy_op, loss_op = \\\n qfns.twolayer(X, Y)\n [variable_summaries(v, name) for (v, name) in\n zip((w1, b1, w2, b2), (\"w1\", \"b1\", \"w2\", \"b2\"))]\n tf.summary.histogram('pre_activations', logits_op)\n elif network == \"conv\":\n # standard conv layers\n conv1out, conv2out, w, b, logits_op, preds_op, xentropy_op, loss_op = \\\n qfns.convnet(tf.reshape(X, [-1, 28, 28, 1]), Y)\n [variable_summaries(v, name) for (v, name) in ((w,\"w\"), (b,\"b\"))]\n tf.summary.histogram('pre_activations', logits_op)\n #elif network == \"rollyourown\":\n ## You can define your own conv net here and play around with it\n else:\n raise ValueError(\"Incorrect network string in line 7\")\n\n # The training op performs a step of stochastic gradient descent on a minibatch\n # optimizer = tf.train.GradientDescentOptimizer # vanilla SGD\n # optimizer = tf.train.MomentumOptimizer # SGD with momentum\n optimizer = tf.train.AdamOptimizer # ADAM - widely used optimiser (ref: http://arxiv.org/abs/1412.6980)\n train_op = optimizer(learning_rate).minimize(loss_op)\n\n # Prediction and accuracy ops\n accuracy_op = get_accuracy_op(preds_op, Y)\n\n # TensorBoard for visualisation\n # Merge all the summaries and write them out to /tmp/mnist_logs (by default)\n summaries_op = tf.summary.merge_all()\n\n # Separate accuracy summary so we can use train and test sets\n accuracy_placeholder = tf.placeholder(shape=[], dtype=tf.float32)\n accuracy_summary_op = tf.summary.scalar(\"accuracy\", accuracy_placeholder)\n\n # When run, the init_op initialises any tensorflow variables\n # hint: weights and biases in our case\n init_op = tf.global_variables_initializer()\n\n # Get started\n sess = tf.Session()\n sess.run(init_op)\n\n # Initialise TensorBoard Summary writers\n dtstr = \"{:%b_%d_%H-%M-%S}\".format(datetime.now())\n train_writer = tf.summary.FileWriter('./summaries/'+dtstr+'/train', sess.graph)\n test_writer = tf.summary.FileWriter('./summaries/'+dtstr+'/test')\n\n # Train\n print('Starting Training...')\n train(sess, mnist, n_training_epochs, batch_size,\n summaries_op, accuracy_summary_op, train_writer, test_writer,\n X, Y, train_op, loss_op, accuracy_op)\n print('Training Complete')\n\n # Clean up\n sess.close()\n" ]
[ [ "tensorflow.confusion_matrix", "tensorflow.reduce_max", "tensorflow.summary.FileWriter", "tensorflow.reduce_mean", "tensorflow.cast", "tensorflow.reshape", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.reduce_min", "tensorflow.summary.merge_all", "tensorflow.name_scope", "tensorflow.Session", "tensorflow.square", "tensorflow.argmax", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "tensorflow.summary.scalar", "tensorflow.summary.histogram" ] ]
jbr-ai-labs/NeurIPS2020-Flatland-Competition-Solution
[ "bd3c169ffa39063d6bb2b170bca93fa785b7bf71" ]
[ "networks/DQN.py" ]
[ "import torch\nimport torch.nn as nn\n\n\nclass DQNNet(nn.Module):\n def __init__(self, state_sz, action_sz, layers_sz, device):\n super(DQNNet, self).__init__()\n self.layers_sz = layers_sz\n self.device = device\n \n layers = self.create_linear_layers(state_sz, action_sz, layers_sz)\n\n self.seq = nn.Sequential(*layers)\n\n def create_linear_layers(self, state_sz, action_sz, layers_sz):\n layers = list()\n in_sz = state_sz\n for sz in layers_sz:\n layers += [nn.Linear(in_sz, sz), nn.ReLU(inplace=True)]\n in_sz = sz\n layers.append(nn.Linear(in_sz, action_sz))\n return layers\n\n def forward(self, x):\n return self.seq(x)\n" ]
[ [ "torch.nn.Linear", "torch.nn.Sequential", "torch.nn.ReLU" ] ]
harmsm/gpvolve
[ "94dd71e5fbee29cc50b82d282ef33c850fb33575", "94dd71e5fbee29cc50b82d282ef33c850fb33575" ]
[ "gpvolve/cluster/opt.py", "gpvolve/simulate/wright_fisher/engine/wright_fisher_engine_python.py" ]
[ "from gpvolve.cluster.criteria import *\nimport numpy as np\n\n\ndef optimize(T, criterion=\"Spectral gap\", z=None):\n \"\"\"\n Finding optimal number of clusters to use based on\n given criterion.\n\n Since the number of clusters nc is unknown in advance,\n the cluster algorithm is ran several times with different\n input parameters for nc, and then \"best\" solution is\n chosen based on chosen criteria (options are spectral\n gap, in\n\n Parameters\n ----------\n T : probability transition matrix calculated based on how many\n times a starting node i ended in j.\n criterion : criterion used to determine if clustering is optimal\n By default criterion is spectral gap.\n Options: spectral gap, optimality, and minchi\n z : maximum number of clusters to try. By default it's calculated\n as half of the size of transition matrix (i.e. half of the\n genotypes).\n\n Returns\n -------\n nc : optimal number of clusters according to given criteria\n \"\"\"\n if z:\n pass\n else:\n z = int(np.shape(T)[0]/2)\n\n # Check criterion is a string of text\n assert isinstance(criterion,str)\n\n # Determine optimal number of clusters according to given criteria\n # Check if criterion chosen is spectral gap\n if criterion == \"Spectral gap\" or criterion == \"spectral gap\" or criterion == \"Spectral Gap\":\n nc = spectral_gap(T,z)\n # Check if criterion chosen is optimality\n elif criterion == \"Optimality\" or criterion == \"optimality\":\n nc = optimality(T,z)\n # Default to spectral gap is all else fails, but alert user\n else:\n print(\"Chosen criterion was not found, using spectral gap instead.\")\n nc = spectral_gap(T,z)\n\n return nc\n", "__description__ = \\\n\"\"\"\nPython implementation of Wright Fisher simulation.\n\"\"\"\n__author__ = \"Michael J. Harms\"\n__date__ = \"2021-09-15\"\n\nimport numpy as np\n\ndef wf_engine_python(pops,\n mutation_rate,\n fitness,\n neighbor_slicer,\n neighbors):\n \"\"\"\n A python implementation of the Wright Fisher engine.\n\n This function should not be called directly. Instead, use wf_engine\n wrapper. 
Wrapper has argument docs and does argument sanity checking.\n    \"\"\"\n\n    # If zero population, don't bother with simulation\n    if np.sum(pops[0,:]) == 0:\n        return pops\n\n    # Get number of genotypes, population size, and expected number of mutations\n    # each generation\n    num_genotypes = len(fitness)\n    population_size = sum(pops[0,:])\n    expected_num_mutations = mutation_rate*population_size\n    num_generations = len(pops)\n\n    indexes = np.arange(num_genotypes,dtype=int)\n    for i in range(1,num_generations):\n\n        # Look at non-zero genotypes\n        mask = indexes[pops[i-1,:] != 0]\n        local_fitness = fitness[mask]\n        local_pop = pops[i-1,mask]\n\n        # If all fitness are 0 for the populated genotypes, probability of\n        # reproducing depends only on how often each genotype occurs.\n        if np.sum(local_fitness) == 0:\n            prob = local_pop\n\n        # In most cases, reproduction probability is given by how many of each\n        # genotype times its fitness\n        else:\n            prob = local_pop*local_fitness\n\n        # Normalize prob\n        prob = prob/np.sum(prob)\n\n        # New population selected based on relative fitness\n        new_pop = np.random.choice(mask,size=population_size,p=prob,replace=True)\n\n        # Introduce mutations\n        num_to_mutate = np.random.poisson(expected_num_mutations)\n\n        # If we have a ridiculously high mutation rate, do not mutate each\n        # genotype more than once.\n        if num_to_mutate > population_size:\n            num_to_mutate = population_size\n\n        for j in range(num_to_mutate):\n            k = new_pop[j]\n\n            # If neighbor_slicer[k,0] == -1, this genotype *has* no neighbors.\n            # Mutation should lead to self.\n            if neighbor_slicer[k,0] != -1:\n                a = neighbors[neighbor_slicer[k,0]:neighbor_slicer[k,1]]\n                new_pop[j] = np.random.choice(a,size=1)[0]\n\n        # Count how often each genotype occurs and store in pops array\n        idx, counts = np.unique(new_pop,return_counts=True)\n        pops[i,idx] = counts\n\n    return pops\n" ]
[ [ "numpy.shape" ], [ "numpy.unique", "numpy.random.choice", "numpy.arange", "numpy.random.poisson", "numpy.sum" ] ]
rknaebel/textgame-project
[ "53b035639ce4502fcaaec12f250db4fa11503963" ]
[ "train2.py" ]
[ "#!/usr/bin/python\n#\n# author: rknaebel\n#\n# description:\n#\nimport numpy as np\nimport random\n\n# GYM: Environment lib\nimport gym\nimport gym_textgame\n\nfrom models import HistoryQLearner\nfrom replay_buffer import PrioritizedReplayBuffer\nfrom preprocess import sent2seq, initHist, addHistoryState\n\nfrom arguments import getArguments\nfrom utils import initDB, sendDocDB, sendModelDB\n\nif __name__ == \"__main__\":\n args = getArguments()\n\n es = initDB()\n\n # layer sizes\n epsilon = args.epsilon_start\n epsilon_step = (args.epsilon_start-args.epsilon_end)/args.epsilon_anneal_steps\n\n env = gym.make(args.env)\n env_eval = gym.make(args.env)\n num_actions = env.num_actions\n num_objects = env.num_objects\n vocab_size = env.vocab_space\n seq_len = env.seq_length\n hist_size = args.history_size\n\n model = HistoryQLearner(seq_len,vocab_size,args.embd_size,hist_size,\n args.hidden1,args.hidden2,\n num_actions,num_objects,\n args.alpha,args.gamma,args.exp_id)\n # Initialize replay memory\n replay_buffer = PrioritizedReplayBuffer(args.buffer_size, args.random_seed)\n\n sendModelDB(es,args,args.exp_id)\n #sendWeigthsDB(es,model)\n step_ctr = 0\n for epoch in range(args.max_epochs):\n scores = []\n ep_lens = []\n invalids = []\n quests_complete = []\n deaths = []\n #\n # TRAIN Phase\n #\n for episode in range(args.episodes_per_epoch):\n loss = 0.\n plan = []\n cnt_invalid_actions = 0\n ep_reward = 0.\n # get initial input\n init_s_text = env.reset()\n s = sent2seq(init_s_text, seq_len)\n h = initHist(s,hist_size,False)\n #\n for j in xrange(args.max_ep_steps):\n step_ctr += 1\n # show textual input if so\n #if args.render: env.render()\n # choose action\n if np.random.rand() <= epsilon:\n a = model.randomAction()\n else:\n a = model.predictAction(h)\n plan.append(env.get_action(a))\n # anneal epsilon\n epsilon = max(0.2, epsilon-epsilon_step)\n # apply action, get rewards and new state s2\n s2_text, r, terminal, info = env.step(a)\n s2 = sent2seq(s2_text, seq_len)\n h2 = addHistoryState(h,s2)\n # add current exp to buffer\n replay_buffer.add(h, a, r, terminal, h2)\n # Keep adding experience to the memory until\n # there are at least minibatch size samples\n if ((replay_buffer.size() > args.batch_size) and\n (step_ctr % args.rounds_per_learn == 0)):\n h_batch, a_batch, r_batch, t_batch, h2_batch = \\\n replay_buffer.sample_batch(args.batch_size)\n # Update the networks each given the new target values\n l = model.trainOnBatch(h_batch, a_batch, r_batch, t_batch, h2_batch)\n loss += l\n step_ctr = 0\n\n s = s2\n h = h2\n ep_reward += r\n cnt_invalid_actions += 1 if r == -0.1 else 0\n\n if terminal: break\n\n ep_lens.append(j+1)\n invalids.append(cnt_invalid_actions)\n quests_complete.append(int(terminal and r >= 1))\n deaths.append(int(terminal and r <= -1))\n scores.append(ep_reward)\n\n sendDocDB(es, { \"epoch\" : epoch+1, \"episode\" : episode+1,\n \"length\" : ep_lens[-1], \"invalids\" : invalids[-1],\n \"epsilon\" : epsilon, \"reward\" : scores[-1],\n \"quest_complete\" : quests_complete[-1],\n \"death\" : deaths[-1], \"mode\" : \"train\",\n \"init_state\" : init_s_text, \"plan\" : plan}, args.exp_id)\n print(\"> Training {:03d} | len {: 4.2f} | inval {: 4.2f} | quests {:02.2f} | deaths {:.2f} | r {: .2f} \".format(\n epoch+1, np.mean(ep_lens),\n np.mean(invalids),\n np.mean(quests_complete),\n np.mean(deaths),\n np.mean(scores)))\n\n #\n # EVAL Phase\n #\n scores = []\n ep_lens = []\n invalids = []\n quests_complete = []\n for episode in range(args.episodes_per_epoch):\n plan = 
[]\n ep_reward = 0.\n cnt_invalid_actions = 0\n # get initial input\n seed = random.random()\n env_eval.seed(seed)\n init_s_text = env_eval.reset()\n s = sent2seq(init_s_text, seq_len)\n h = initHist(s,hist_size,False)\n #\n for j in xrange(args.max_ep_steps):\n # show textual input if so\n if args.render: env_eval.render()\n # choose action\n if np.random.rand() <= 0.05:\n a = model.randomAction()\n else:\n a = model.predictAction(h)\n plan.append(env_eval.get_action(a))\n # apply action, get rewards and new state s2\n s2_text, r, terminal, info = env_eval.step(a)\n s2 = sent2seq(s2_text, seq_len)\n h2 = addHistoryState(h,s2)\n\n s = s2\n h = h2\n ep_reward += r\n cnt_invalid_actions += 1 if r == -0.1 else 0\n\n if terminal: break\n\n\n ep_lens.append(j+1)\n invalids.append(cnt_invalid_actions)\n quests_complete.append(int(terminal and r >= 1))\n deaths.append(int(terminal and r <= -1))\n scores.append(ep_reward)\n sendDocDB(es, { \"epoch\" : epoch+1, \"episode\" : episode+1,\n \"length\" : ep_lens[-1], \"invalids\" : invalids[-1],\n \"epsilon\" : 0.05, \"reward\" : scores[-1],\n \"quest_complete\" : quests_complete[-1],\n \"death\" : deaths[-1], \"mode\" : \"eval\",\n \"init_state\" : init_s_text, \"plan\" : plan}, args.exp_id)\n print(\"> Evaluation {:03d} | len {:4.2f} | inval {:4.2f} | quests {:.2f} | deaths {:.2f} | r {: .2f} \".format(\n epoch+1, np.mean(ep_lens),\n np.mean(invalids),\n np.mean(quests_complete),\n np.mean(deaths),\n np.mean(scores)))\n # Save trained model weights and architecture, this will be used by the visualization code\n model.save(\"model.h5\", overwrite=True)\n #with open(\"model.json\", \"w\") as outfile:\n # json.dump(model.to_json(), outfile)\n" ]
[ [ "numpy.mean", "numpy.random.rand" ] ]
trislaz/useful_wsi
[ "8bad92832545eaa85cfc3df9100f2325a71c710b" ]
[ "useful_wsi/patch_sampling.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCode for sampling from WSI\n\n\"\"\"\n\nimport itertools\nimport numpy as np\n\nfrom .tissue_segmentation import roi_binary_mask\nfrom .utils import (find_square, get_size, get_whole_image, pj_slice,\n get_x_y, get_x_y_from_0, mask_percentage, open_image)\n\n\ndef sample_patch_from_wsi(slide, mask=None, mask_level=None, \n patch_size=(512, 512), analyse_level=0,\n list_func=[]):\n \"\"\"\n Samples one tile from a slide where mask is 1\n\n Args:\n slide : String or open_slide object. The slide from which you wish to sample.\n mask : None by default or binary numpy array, where positive pixels correspond to tissue area and\n negative pixels to background areas in the slide. \n mask_level : Integer or None. Level to which apply mask_function to the rgb \n image of the slide at that resolution. mask_function(slide[mask_level])\n will return the binary image corresponding to the tissue.\n patch_size : Tuple of integers or None. If none the default tile size will (512, 512).\n analyse_level : Integer. Level resolution to use for extracting the tiles.\n list_func : None or list of functions to apply to the tiles. Useful to filter the tiles\n that are part of the tissue mask. Very useful if the tissue mask is bad and \n samples many white background tiles, in this case it is interesting to add \n a function to eliminate tiles that are too white, like the function white_percentage.\n Returns:\n A list of 5 parameters corresponding to: [x, y, size_x_level, size_y_level, level]\n \"\"\" \n slide = open_image(slide)\n if mask_level is None:\n mask_level = slide.level_count - 1\n slide_png = get_whole_image(slide, mask_level, numpy=True)\n size_l = get_size(slide, patch_size, analyse_level, mask_level)\n size_l = np.array(size_l)\n if mask is None:\n mask = np.ones_like(slide_png)[:, :, 0]\n x_mask, y_mask = np.where(mask)\n indice = np.random.randint(len(x_mask))\n point = np.array([x_mask[indice], y_mask[indice]])\n sub_img = pj_slice(slide_png, point - size_l // 2, point + size_l // 2)\n criterias = []\n for function in list_func:\n criterias.append(function(sub_img))\n if all(criterias):\n para = find_square(slide, point, mask_level, analyse_level, patch_size)\n else:\n para = None\n return para\n\n\ndef remove_sample_from_mask(slide, para, mask, mask_level):\n \"\"\"\n Given a square patch and a mask, removes the patch from mask.\n So that it can't be choosen again for instance...\n Args:\n slide : String or open_slide object. The slide from which you wish to sample.\n para : A list of 5 integer parameters corresponding to: [x, y, size_x_level, size_y_level, level]\n mask : None by default or binary numpy array, where positive pixels correspond to tissue area and\n negative pixels to background areas in the slide. \n mask_level : Integer or None. Level to which apply mask_function to the rgb \n image of the slide at that resolution. mask_function(slide[mask_level])\n will return the binary image corresponding to the tissue. 
Returns: \n        An updated version of mask where the tile given by para and mask_level\n        is removed.\n    \"\"\"\n    if para is not None:\n        point_0 = (para[1], para[0])\n        size_l = (para[2], para[3])\n        analyse_level = para[4]\n        point_mask_res = np.array(get_x_y_from_0(slide, point_0, mask_level))\n        size_mask_res = np.array(get_size(slide, size_l, analyse_level, mask_level))\n        start_point = np.array([point_mask_res - size_mask_res // 2, (0, 0)]).max(axis=0)\n        end_point = start_point + size_mask_res\n        mask[start_point[0]:end_point[0], start_point[1]:end_point[1]] = 0\n\n    return mask\n\n\ndef random_wsi_sampling(n_samples, slide, mask=None,\n                        mask_level=None, patch_size=(512, 512),\n                        analyse_level=0, with_replacement=False,\n                        list_func=[]):\n    \"\"\"\n    Randomly generate patches from slide.\n\n    Args:\n        n_samples : Integer, number of tiles to extract from the slide with the \n                    sampling method \"random_sampling\".\n        slide : String or open_slide object. The slide from which you wish to sample.\n        mask : None by default or binary numpy array, where positive pixels correspond to tissue area and\n               negative pixels to background areas in the slide. \n        mask_level : Integer or None. Level to which apply mask_function to the rgb \n                     image of the slide at that resolution. mask_function(slide[mask_level])\n                     will return the binary image corresponding to the tissue.\n        patch_size : Tuple of integers or None. If none the default tile size will (512, 512).\n        analyse_level : Integer. Level resolution to use for extracting the tiles.\n        with_replacement : Bool, default to False. Whether or not you can sample with replacement. \n                           Here, if True, we would remove the previous patches from the original \n                           mask at each iteration.\n        list_func : None or list of functions to apply to the tiles. Useful to filter the tiles\n                    that are part of the tissue mask. Very useful if the tissue mask is bad and \n                    samples many white background tiles, in this case it is interesting to add \n                    a function to eliminate tiles that are too white, like the function white_percentage.\n\n    Returns:\n        A list of 5 parameters corresponding to: [x, y, size_x_level, size_y_level, level]\n    \"\"\" \n    list_para = []\n    if mask_level is None:\n        mask_level = slide.level_count - 1\n    slide_png = get_whole_image(slide, mask_level, numpy=True)\n    if mask is None:\n        mask = np.ones_like(slide_png)[:, :, 0]\n    mask = mask.astype('bool')\n\n    for _ in range(n_samples):\n        para = sample_patch_from_wsi(slide, mask, mask_level, patch_size,\n                                     analyse_level, list_func)\n        if para is not None:\n            list_para.append(para)\n        if not with_replacement:\n            mask = remove_sample_from_mask(slide, para, mask, mask_level)\n        if mask.sum() == 0:\n            break\n    return list_para\n\n\ndef grid_blob(slide, point_start, point_end, patch_size,\n              analyse_level):\n    \"\"\"\n    Forms a uniform grid starting from the top left point point_start\n    and finishing at point point_end of size patch_size at level analyse_level\n    for the given slide.\n    Args:\n        slide : String or open_slide object. \n        point_start : Tuple like object of integers of size 2.\n        point_end : Tuple like object of integers of size 2.\n        patch_size : Tuple like object of integers of size 2.\n        analyse_level : Integer. 
Level resolution to use for extracting the tiles.\n Returns:\n List of coordinates of grid.\n \"\"\"\n if analyse_level == 0:\n patch_size_0 = patch_size\n else:\n patch_size_0 = get_size(slide, patch_size, analyse_level, 0)\n size_x, size_y = patch_size_0\n list_x = range(point_start[0], point_end[0], size_x)\n list_y = range(point_start[1], point_end[1], size_y)\n return list(itertools.product(list_x, list_y))\n\n\ndef correct_patch(coord, slide, analyse_level, patch_size):\n \"\"\"\n Correct patch by shifting so that the whole square patch can fit.\n Args:\n coord : A tuple like object of size 2 with integers.\n slide : String or open_slide object.\n analyse_level : Integer. Level resolution to use for extracting the tiles.\n patch_size : Tuple of integers or None.\n Returns:\n A numpy array of size 2 corresponding to the corrected or not\n orignal coord.\n \"\"\"\n max_dimensions = np.array(slide.dimensions)[::-1]\n\n if analyse_level != 0:\n patch_size_0 = get_size(slide, patch_size, analyse_level, 0)\n else:\n patch_size_0 = patch_size\n coord = np.array([coord, max_dimensions - 1 - patch_size_0]).min(axis=0)\n return coord\n\ndef check_patch(slide, mask, coord_grid_0, mask_level, \n patch_size, analyse_level,\n list_func, mask_tolerance=0.5,\n allow_overlapping=False,\n margin=0):\n \"\"\"\n Filters a list of possible coordinates with a set of filtering parameters.\n\n Args:\n slide : String or open_slide object. The slide from which you wish to sample.\n mask : Binary numpy array, where positive pixels correspond to tissue area and\n negative pixels to background areas in the slide.\n coord_grid_0 : List of list of two elements where each (sub) list can be described as \n possible coordinates for a possible tile at analyse_level.\n mask_level : Integer or None. Level to which apply mask_function to the rgb \n image of the slide at that resolution. mask_function(slide[mask_level])\n will return the binary image corresponding to the tissue.\n patch_size : Tuple of integers or None. If none the default tile size will (512 + margin, 512 + margin).\n analyse_level : Integer. Level resolution to use for extracting the tiles.\n list_func : None or list of functions to apply to the tiles. Useful to filter the tiles\n that are part of the tissue mask. Very useful if the tissue mask is bad and \n samples many white background tiles, in this case it is interesting to add \n a function to eliminate tiles that are too white, like the function white_percentage.\n mask_tolerance : Float between 0 and 1. A tile will be accepted if pixel_in_mask / total_pixel > value.\n So if mask_tolerance = 1, only tiles that are completly within the mask are accepted.\n allow_overlapping : Bool. False by default, this parameter does not influence the 'overlapping' parameter\n above. This parameter only influences the tiles on that reach the border. In particular\n if the tile extends out of the boundaries of the slide, in this case if allow_overlapping\n is set to true, it will correct the extracted tile by allowing it to overlappe with it's\n neighbouring tile.\n margin : Integer. By default set to 0, number of pixels at resolution 0 to add\n to patch_size on each side. 
(different to overlapping as this is at resolution 0)\n Returns:\n List of parameters where each parameter is a list of 5 elements\n [x, y, size_x_level, size_y_level, level]\n \"\"\"\n slide_png = get_whole_image(slide, level=mask_level, numpy=True)\n assert slide_png.shape[0:2] == mask.shape[0:2], \"Raise value, mask not of the right shape {}\".format(mask.shape[0:2])\n shape_mask = np.array(mask.shape[0:2])\n parameters = []\n patch_size_l = get_size(slide, patch_size, analyse_level, mask_level)\n radius = np.array([max(el // 2, 1) for el in patch_size_l])\n for coord_0 in coord_grid_0:\n coord_l = get_x_y_from_0(slide, coord_0, mask_level)\n # coord_0 = np.array(coord_0)[::-1]\n # coord_l = np.array(coord_l)[::-1]\n point_cent_l = [coord_l + radius, shape_mask - 1 - radius]\n point_cent_l = np.array(point_cent_l).min(axis=0)\n if mask_percentage(mask, point_cent_l, radius, mask_tolerance): ## only checking center point\n criterias = []\n sub_img = pj_slice(slide_png, point_cent_l - radius, point_cent_l + radius)\n for function in list_func:\n criterias.append(function(sub_img))\n if all(criterias):\n still_add = True\n if ((coord_l + radius) != point_cent_l).any():\n # If the patch is going off the border\n still_add = False\n if allow_overlapping:\n coord_0 = correct_patch(coord_0, slide, analyse_level, patch_size)\n still_add = True\n if still_add:\n sub_param = [coord_0[1] - margin, coord_0[0] - margin, \\\n patch_size[0] + 2 * margin, patch_size[1] + 2 * margin, \\\n analyse_level]\n parameters.append(sub_param)\n return parameters\n\n\ndef patch_sampling(slide, seed=None, mask_level=None,\n mask_function=roi_binary_mask, sampling_method=None,\n analyse_level=0, patch_size=None, overlapping=0,\n list_func=None, mask_tolerance=0.5, allow_overlapping=False,\n n_samples=10, with_replacement=False):\n \"\"\"\n Returns a list of tiles from slide given a mask generating method\n and a sampling method\n Args:\n slide : String or open_slide object. The slide from which you wish to sample.\n seed : Integer or None. Seed value to use for setting numpy randomness.\n mask_level : Integer or None. Level to which apply mask_function to the rgb \n image of the slide at that resolution. mask_function(slide[mask_level])\n will return the binary image corresponding to the tissue.\n mask_function : Function that returns a binary image of same size as input. \n Mask_function is applied in order to determine the tissue areas on \n the slide.\n sampling_method : String. Possible values are 'grid', 'random_patches' for the \n patch sampling method.\n If grid, we apply a grid on the tissue and extra all the tiles that\n overlap on the tissue mask.\n If random_patches, the tiles will be sampled at random from the tissue \n mask until no more available tissue or that n_samples has been reached.\n analyse_level : Integer. Level resolution to use for extracting the tiles.\n patch_size : Tuple of integers or None. If none the default tile size will (512, 512).\n overlapping : Integer. By default set to 0, number of pixels at analyse level to add\n to patch_size on each side.\n list_func : None or list of functions to apply to the tiles. Useful to filter the tiles\n that are part of the tissue mask. Very useful if the tissue mask is bad and \n samples many white background tiles, in this case it is interesting to add \n a function to eliminate tiles that are too white, like the function white_percentage.\n mask_tolerance : Float between 0 and 1. 
A tile will be accepted if pixel_in_mask / total_pixel > value.\n So if mask_tolerance = 1, only tiles that are completly within the mask are accepted.\n allow_overlapping : Bool. False by default, this parameter does not influence the 'overlapping' parameter\n above. This parameter only influences the tiles on that reach the border. In particular\n if the tile extends out of the boundaries of the slide, in this case if allow_overlapping\n is set to true, it will correct the extracted tile by allowing it to overlappe with it's\n neighbouring tile. Only taken into account for the method \"grid\".\n n_samples : Integer, default to 10, number of tiles to extract from the slide with the \n sampling method \"random_sampling\".\n with_replacement : Bool, default to False. Wether or not you can sample with replacement in the case\n of random sampling.\n\n Returns:\n List of parameters where each parameter is a list of 5 elements\n [x, y, size_x_level, size_y_level, level]\n \"\"\"\n np.random.seed(seed)\n slide = open_image(slide)\n\n if patch_size is None:\n patch_size = (512, 512)\n if list_func is None:\n list_func = list()\n if mask_level is None:\n mask_level = slide.level_count - 1\n\n wsi_tissue = get_whole_image(slide, level=mask_level, numpy=True)\n wsi_mask = mask_function(wsi_tissue)\n\n if sampling_method == 'grid': # grid is just grid_etienne with marge = 0\n min_row, min_col, max_row, max_col = 0, 0, *wsi_mask.shape\n point_start_l = min_row, min_col\n point_end_l = max_row, max_col\n point_start_0 = get_x_y(slide, point_start_l, mask_level)\n point_end_0 = get_x_y(slide, point_end_l, mask_level)\n grid_coord = grid_blob(slide, point_start_0, point_end_0, patch_size,\n analyse_level)\n\n margin_mask_level = get_size(slide, (overlapping, 0),\n 0, analyse_level)[0]\n parameter = check_patch(slide, wsi_mask, grid_coord,\n mask_level, patch_size, analyse_level,\n list_func, mask_tolerance=mask_tolerance,\n allow_overlapping=allow_overlapping,\n margin=margin_mask_level)\n return_list = parameter\n elif sampling_method == \"random_patches\":\n return_list = random_wsi_sampling(n_samples, slide, wsi_mask,\n mask_level, patch_size, analyse_level,\n with_replacement=with_replacement,\n list_func=list_func)\n elif sampling_method == \"random_patches_with_border\":\n raise NameError('sampling method random_patches_with_border is not yet implemented...')\n else:\n raise NameError('sampling method is unknown...')\n return return_list\n" ]
[ [ "numpy.ones_like", "numpy.array", "numpy.where", "numpy.random.seed" ] ]
pashu123/torch-mlir
[ "7c3ba25238ac73850fcdd698be1fb084f8a58e49" ]
[ "e2e_testing/torchscript/elementwise.py" ]
[ "# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.\n# See https://llvm.org/LICENSE.txt for license information.\n# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n# Also available under a BSD-style license. See LICENSE.\n\nimport torch\n\nfrom torch_mlir_e2e_test.torchscript.framework import TestUtils\nfrom torch_mlir_e2e_test.torchscript.registry import register_test_case\nfrom torch_mlir_e2e_test.torchscript.annotations import annotate_args, export\n\n# TODO: Support scalar !torch.int/!torch.float variants. Add support to\n# ReduceOpVariants to implement them in terms of the tensor-only variants +\n# torch.prim.NumToTensor.\n\n# TODO: This is pretty verbose. Can we have a helper to reduce\n# the boilerplate?\n\n# ==============================================================================\n\nclass ElementwiseUnaryModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, a):\n return torch.tanh(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseUnaryModule())\ndef ElementwiseUnaryModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\nclass ElementwiseBinaryModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ([-1], torch.float32, True),\n ])\n def forward(self, a, b):\n return a * b\n\n\n@register_test_case(module_factory=lambda: ElementwiseBinaryModule())\ndef ElementwiseBinaryModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4), tu.rand(4))\n\n# ==============================================================================\n\nclass ElementwiseBinaryStaticShapeModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([5, 4, 3, 3, 1], torch.float32, True),\n ([4, 3, 1, 2], torch.float32, True),\n ])\n def forward(self, a, b):\n return a * b\n\n\n@register_test_case(\n module_factory=lambda: ElementwiseBinaryStaticShapeModule())\ndef ElementwiseBinaryStaticShapeModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(5, 4, 3, 3, 1), tu.rand(4, 3, 1, 2))\n\n# ==============================================================================\n\nclass ElementwiseTernaryModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1, -1], torch.float32, True),\n ([-1, -1], torch.float32, True),\n ([-1], torch.float32, True),\n ])\n def forward(self, a, b, c):\n return torch.lerp(a, b, c)\n\n\n@register_test_case(module_factory=lambda: ElementwiseTernaryModule())\ndef ElementwiseTernaryModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4, 5), tu.rand(4, 5), tu.rand(5))\n\n# ==============================================================================\n\nclass ElementwiseWhereSelfModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1, -1], torch.float32, True),\n ([-1, -1], torch.float32, True),\n ([-1], torch.float32, True),\n ])\n def forward(self, a, b, c):\n return torch.where(a > 0.5, b, c)\n\n\n@register_test_case(module_factory=lambda: ElementwiseWhereSelfModule())\ndef ElementwiseWhereSelfModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4, 5), tu.rand(4, 5), tu.rand(5))\n\n# 
==============================================================================\n\n# Addition is an interesting special case of a binary op, because under the hood\n# it carries a third scalar \"alpha\" parameter, which needs special handling.\nclass ElementwiseAddModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1], torch.float32, True),\n ([], torch.float32, True),\n ])\n def forward(self, a, b):\n return a + b\n\n\n@register_test_case(module_factory=lambda: ElementwiseAddModule())\ndef ElementwiseAddModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(4), tu.rand())\n\n# ==============================================================================\n\nclass ElementwiseUnsqueezeBroadcastModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1], torch.float32, True),\n ([], torch.float32, True),\n ])\n def forward(self, a, b):\n return a * b.unsqueeze(0)\n\n\n@register_test_case(\n module_factory=lambda: ElementwiseUnsqueezeBroadcastModule())\ndef ElementwiseUnsqueezeBroadcastModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(4), tu.rand())\n\n# ==============================================================================\n\nclass ElementwiseUnsqueezeNegDimsModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, a):\n # As mentioned in `unsqueeze` docstring,\n # valid dim values are [-input.dim()-1, input.dim()+1).\n # This tests the lower bound\n return torch.unsqueeze(a, -3)\n\n\n@register_test_case(module_factory=lambda: ElementwiseUnsqueezeNegDimsModule())\ndef ElementwiseUnsqueezeNegDimsModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(4, 3))\n\n# ==============================================================================\n\nclass ElementwiseFlattenBroadcastModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1], torch.float32, True),\n ([], torch.float32, True),\n ])\n def forward(self, a, b):\n return a * b.flatten(-1, -1)\n\n\n@register_test_case(module_factory=lambda: ElementwiseFlattenBroadcastModule())\ndef ElementwiseFlattenBroadcastModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(6), tu.rand())\n\n# ==============================================================================\n\nclass ElementwiseReluModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, x):\n return torch.relu(x)\n\n\n@register_test_case(module_factory=lambda: ElementwiseReluModule())\ndef ElementwiseReluModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(4, 2) - 0.5)\n\n# ==============================================================================\n\nclass ElementwiseLeakyReluModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, x):\n return torch.ops.aten.leaky_relu(x, negative_slope=0.1)\n\n\n@register_test_case(module_factory=lambda: ElementwiseLeakyReluModule())\ndef ElementwiseLeakyReluModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(4, 2) - 0.5)\n\n# ==============================================================================\n\nclass ElementwiseGeluModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n 
self.gelu = torch.nn.GELU()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, x):\n return self.gelu(x)\n\n\n@register_test_case(module_factory=lambda: ElementwiseGeluModule())\ndef ElementwiseGeluModule_basic(module, tu: TestUtils):\n module.forward(2 * tu.rand(5, 3) - 0.5)\n\n# ==============================================================================\n\nclass ElementwiseSigmoidModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, x):\n return torch.sigmoid(x)\n\n\n@register_test_case(module_factory=lambda: ElementwiseSigmoidModule())\ndef ElementwiseSigmoidModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 5))\n\n# ==============================================================================\n\nclass ElementwiseMinimumModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, x, y):\n return torch.ops.aten.minimum(x, y)\n\n\n@register_test_case(module_factory=lambda: ElementwiseMinimumModule())\ndef ElementwiseMinimumModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 5), tu.rand(3, 5))\n\n# ==============================================================================\n\nclass ElementwiseMinimumIntModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.int64, True),\n ([-1, -1], torch.int64, True),\n ])\n def forward(self, x, y):\n return torch.ops.aten.minimum(x, y)\n\n\n@register_test_case(module_factory=lambda: ElementwiseMinimumIntModule())\ndef ElementwiseMinimumIntModule_basic(module, tu: TestUtils):\n module.forward(torch.randint(10, (3, 5)), torch.randint(10, (3, 5)))\n\n# ==============================================================================\n\nclass ElementwiseMaximumModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, x, y):\n return torch.ops.aten.maximum(x, y)\n\n\n@register_test_case(module_factory=lambda: ElementwiseMaximumModule())\ndef ElementwiseMaximumModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 5), tu.rand(3, 5))\n\n# ==============================================================================\n\nclass ElementwiseMaximumIntModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.int64, True),\n ([-1, -1], torch.int64, True),\n ])\n def forward(self, x, y):\n return torch.ops.aten.maximum(x, y)\n\n\n@register_test_case(module_factory=lambda: ElementwiseMaximumIntModule())\ndef ElementwiseMaximumIntModule_basic(module, tu: TestUtils):\n module.forward(torch.randint(10, (3, 5)), torch.randint(10, (3, 5)))\n\n# ==============================================================================\n\nclass ElementwiseClampModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, x):\n # TODO: It would be great to return all of these, so they get checked\n # individually, but RefBackend doesn't support multiple returns.\n # Instead, multiply them together, which has some chance of propagating\n # all the values.\n float_min = 
torch.clamp(x, min=-2.0)\n int_min = torch.clamp(x, min=-3)\n float_max = torch.clamp(x, max=2.0)\n int_max = torch.clamp(x, max=3)\n both = torch.clamp(x, min=-5, max=5)\n return float_min * int_min * float_max * int_max * both\n\n\n@register_test_case(module_factory=lambda: ElementwiseClampModule())\ndef ElementwiseClampModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 5, low=-10, high=10))\n\n# ==============================================================================\n\nclass RsubModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, x):\n return torch.rsub(x, 3.0, alpha=1.0)\n\n\n@register_test_case(module_factory=lambda: RsubModule())\ndef RsubModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\nclass RsubModule_noalpha(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, x):\n return torch.rsub(x, 2.0)\n\n\n@register_test_case(module_factory=lambda: RsubModule_noalpha())\ndef RsubModule_noalpha_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\nclass ElementwiseMulScalarIntModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.int64, True),\n ])\n def forward(self, x):\n return torch.mul(x, 4)\n\n\n@register_test_case(module_factory=lambda: ElementwiseMulScalarIntModule())\ndef ElementwiseMulScalarModule_int(module, tu: TestUtils):\n module.forward(torch.randint(10, (3, 4)))\n\n# ==============================================================================\n\nclass ElementwiseMulScalarFloatModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, x):\n return torch.mul(x, 100.0)\n\n\n@register_test_case(module_factory=lambda: ElementwiseMulScalarFloatModule())\ndef ElementwiseMulScalarModule_float(module, tu: TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\nclass ElementwiseMulScalarModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.int32, True),\n ])\n def forward(self, x):\n return torch.mul(x, 8.0)\n\n\n@register_test_case(module_factory=lambda: ElementwiseMulScalarModule())\ndef ElementwiseMulScalarModule_basic(module, tu: TestUtils):\n module.forward(torch.randint(10, (3, 4), dtype=torch.int32))\n\n# ==============================================================================\n\nclass ElementwiseMulTensorFloatModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1], torch.float32, True),\n ([-1], torch.float64, True),\n ])\n def forward(self, a, b):\n return torch.mul(a, b)\n\n\n@register_test_case(module_factory=lambda: ElementwiseMulTensorFloatModule())\ndef ElementwiseMulTensorFloatModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(4), tu.rand(4).type(torch.float64))\n\n# ==============================================================================\n\nclass ElementwiseMulTensorIntModule(torch.nn.Module):\n def __init__(self):\n 
super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1], torch.int32, True),\n ([-1], torch.int64, True),\n ])\n def forward(self, a, b):\n return torch.mul(a, b)\n\n\n@register_test_case(module_factory=lambda: ElementwiseMulTensorIntModule())\ndef ElementwiseMulTensorIntModule_basic(module, tu: TestUtils):\n module.forward(\n torch.randint(10, [4]).type(torch.int32), torch.randint(10, [4]))\n\n# ==============================================================================\n\nclass ElementwiseLogModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, a):\n return torch.log(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseLogModule())\ndef ElementwiseLogModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\nclass ElementwiseErfModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, a):\n return torch.ops.aten.erf(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseErfModule())\ndef ElementwiseErfModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\n\nclass ElementwiseSqrtModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n\n def forward(self, a):\n return torch.sqrt(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseSqrtModule())\ndef ElementwiseSqrtModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\nclass ElementwiseFloorModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n\n def forward(self, a):\n return torch.floor(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseFloorModule())\ndef ElementwiseFloorModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\nclass ElementwiseCeilModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n\n def forward(self, a):\n return torch.ceil(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseCeilModule())\ndef ElementwiseCeilModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\nclass ElementwisePowModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n\n def forward(self, a):\n return torch.pow(a, 2.0)\n\n\n@register_test_case(module_factory=lambda: ElementwisePowModule())\ndef ElementwisePowModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\nclass ElementwiseToDtypeF32ToI64Module(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True)\n ])\n def forward(self, x):\n return 
x.to(torch.int64)\n\n\n@register_test_case(module_factory=lambda: ElementwiseToDtypeF32ToI64Module())\ndef ElementwiseToDtypeF32ToI64Module_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 5))\n\n# ==============================================================================\n\nclass ElementwiseToDtypeIdentityModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True)\n ])\n def forward(self, x):\n return x.to(torch.float32, False, False)\n\n\n@register_test_case(module_factory=lambda: ElementwiseToDtypeIdentityModule())\ndef ElementwiseToDtypeIdentityModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 5))\n\n# ==============================================================================\n\nclass ElementwiseLog2Module(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, a):\n return torch.log2(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseLog2Module())\ndef ElementwiseLog2Module_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\nclass ElementwiseRsqrtModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n\n def forward(self, a):\n return torch.rsqrt(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseRsqrtModule())\ndef ElementwiseRsqrtModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\nclass ElementwiseAbsModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n @export\n @annotate_args([\n None,\n ([-1, -1, -1], torch.float32, True),\n ])\n\n def forward(self, a):\n return torch.abs(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseAbsModule())\ndef ElementwiseAbsModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4, 5, low=-1.0, high=1.0))\n\n# ==============================================================================\n\nclass ElementwiseReciprocalModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n @export\n @annotate_args([\n None,\n ([-1], torch.float32, True),\n ])\n\n def forward(self, a):\n return torch.reciprocal(a)\n\n\n@register_test_case(module_factory=lambda: ElementwiseReciprocalModule())\ndef ElementwiseReciprocalModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(4))\n\n# ==============================================================================\n\nclass ElementwiseDivScalarModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, x):\n return torch.div(x, 10.0)\n\n\n@register_test_case(module_factory=lambda: ElementwiseDivScalarModule())\ndef ElementwiseDivScalarModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\nclass ElementwiseDivTensorFloatModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1], torch.float32, True),\n ([-1], torch.float64, True),\n ])\n def forward(self, a, b):\n return torch.div(a, b)\n\n\n@register_test_case(module_factory=lambda: ElementwiseDivTensorFloatModule())\ndef 
ElementwiseDivTensorFloatModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(4), tu.rand(4).type(torch.float64))\n\n# ==============================================================================\n\nclass ElementwiseAndIntegerModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.int32, True),\n ([-1, -1], torch.int64, True),\n ])\n def forward(self, x, y):\n return torch.bitwise_and(x, y)\n\n\n@register_test_case(module_factory=lambda: ElementwiseAndIntegerModule())\ndef ElementwiseAndIntegerModule_basic(module, tu: TestUtils):\n module.forward(\n torch.randint(-10, 10, (3, 4)).to(torch.int32),\n torch.randint(-10, 10, (3, 4)))\n\n# ==============================================================================\n\nclass ElementwiseSubScalarIntModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.int32, True),\n ])\n def forward(self, x):\n return torch.sub(x, 2.1, alpha=2)\n\n\n@register_test_case(module_factory=lambda: ElementwiseSubScalarIntModule())\ndef ElementwiseSubScalarIntModule_basic(module, tu: TestUtils):\n module.forward(torch.randint(10, (3, 4), dtype=torch.int32))\n\n# ==============================================================================\n\nclass ElementwiseSubScalarFloatModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, x):\n return torch.sub(x, 2.1)\n\n\n@register_test_case(module_factory=lambda: ElementwiseSubScalarFloatModule())\ndef ElementwiseSubScalarFloatModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\nclass ElementwiseAddScalarInt64Module(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.int64, True),\n ])\n def forward(self, x):\n return torch.add(x, 3.0)\n\n\n@register_test_case(module_factory=lambda: ElementwiseAddScalarInt64Module())\ndef ElementwiseAddScalarInt64Module_basic(module, tu: TestUtils):\n module.forward(torch.randint(10, (3, 4)))\n\n# ==============================================================================\n\nclass ElementwiseAddScalarIntModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.int32, True),\n ])\n def forward(self, x):\n return torch.add(x, 3.0)\n\n\n@register_test_case(module_factory=lambda: ElementwiseAddScalarIntModule())\ndef ElementwiseAddScalarIntModule_basic(module, tu: TestUtils):\n module.forward(torch.randint(10, (2, 3), dtype=torch.int32))\n\n# ==============================================================================\n\nclass ElementwiseAddScalarFloatModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1], torch.float32, True),\n ])\n def forward(self, x):\n return torch.add(x, 3.0, alpha=2)\n\n\n@register_test_case(module_factory=lambda: ElementwiseAddScalarFloatModule())\ndef ElementwiseAddScalarFloatModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(3, 4))\n\n# ==============================================================================\n\nclass ElementwiseCloneModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1, -1], torch.float32, True),\n ])\n def 
forward(self, x):\n return torch.clone(x)\n\n\n@register_test_case(module_factory=lambda: ElementwiseCloneModule())\ndef ElementwiseCloneModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(2, 3, 4))\n\n# ==============================================================================\n\nclass ElementwiseCloneContiguousModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n @export\n @annotate_args([\n None,\n ([-1, -1, -1], torch.float32, True),\n ])\n def forward(self, x):\n return torch.clone(x, memory_format=torch.contiguous_format)\n\n\n@register_test_case(module_factory=lambda: ElementwiseCloneContiguousModule())\ndef ElementwiseCloneContiguousModule_basic(module, tu: TestUtils):\n module.forward(tu.rand(2, 3, 4))\n\n" ]
[ [ "torch.abs", "torch.randint", "torch.ops.aten.erf", "torch.ops.aten.maximum", "torch.tanh", "torch.rsqrt", "torch.where", "torch.pow", "torch.add", "torch.sqrt", "torch.clone", "torch.log2", "torch.relu", "torch.mul", "torch.reciprocal", "torch.rsub", "torch.ops.aten.leaky_relu", "torch.div", "torch.lerp", "torch.sigmoid", "torch.ops.aten.minimum", "torch.floor", "torch.unsqueeze", "torch.log", "torch.bitwise_and", "torch.nn.GELU", "torch.ceil", "torch.sub", "torch.clamp" ] ]
rbavery/sprnca_gedi
[ "2c89cb9b84d255bf03b50bf7bf29efa93b932604" ]
[ "pygedi/clean_shps.py" ]
[ "import geopandas as gpd\nimport pandas as pd\nfrom datetime import datetime\nfrom astral.sun import sun\nfrom astral import LocationInfo\nimport pytz\n\ndef convert_float_to_datetime(timestamp):\n delta_time_reference = \"Jan 1 00:00 2018\" # from the data dictionary\n\n utc_time = pd.to_datetime(timestamp, unit=\"s\", origin=pd.to_datetime(delta_time_reference))\n \n return (utc_time- pd.Timedelta(hours=7)).tz_localize(\"America/Phoenix\")\n\ndef preprocess_gedi_gdf(path):\n \"\"\"\n Takes a pathlib Path to a subsetted GEDI file that was processed by \n gedi_to_vector into shapefile format. Returns a geodataframe that has been \n quality filtered and with quality flags for sundown, beam type, and any other \n variables that were used with gedi_to_vector.py\n \"\"\"\n gdf = gpd.read_file(path)\n gdf['name'] = path.name\n power_test = lambda x: x in [\"BEAM0101\", \"BEAM0110\", \"BEAM1000\", \"BEAM1011\"]\n gdf[\"is_power_beam\"] = gdf['BEAM'].apply(power_test)\n gdf['delta_time'] = gdf['delta_time'].apply(convert_float_to_datetime)# UTC is 7 hours ahead of Arizona\n gdf = gdf.set_index(\"delta_time\")\n gdf = gdf.rename({\"longitude_\":\"longitude\", \"latitude_b\":\"latitude\"}, axis=1)\n gdf = gdf[(gdf[\"l2a_qualit\"]==1) & (gdf[\"l2b_qualit\"]==1)]\n # it's suggested in the GEDI L2B product doc to use nightime samples to reduce solar illumination bias. We add a flag here based\n # on local sunrise and sunset for the first sample in each track (the study area is small enough for this)\n city = LocationInfo(\"Phoenix\", \"Arizona\", timezone = pytz.timezone(\"America/Phoenix\"), latitude = gdf.latitude[0], longitude = gdf.longitude[0])\n s = sun(city.observer, date=datetime(gdf.index[0].year, gdf.index[0].month, gdf.index[0].day), tzinfo=pytz.timezone(\"America/Phoenix\"))\n gdf[\"is_sundown\"] = (gdf.index < s['sunrise']) & (gdf.index > s['sunset'])\n return gdf" ]
[ [ "pandas.to_datetime", "pandas.Timedelta" ] ]
Taarak9/Handwritten-DIgit-Recognition
[ "6ae4c5c64872a6ee1550b073d926fa1855d941ef" ]
[ "src/Handwritten_Digit_Recognition/hdr_fnn.py" ]
[ "import numpy as np\nfrom matplotlib import pyplot as plt\nfrom mnist_loader import load_data_wrapper \nfrom customdl import FNN\n\n# MNIST data split\ntraining_data, validation_data, test_data = load_data_wrapper()\n\n# handwritten digit recognizer \n# Loss function: Cross Entropy\nhdr = FNN(784, \"ce\")\nhdr.add_layer(80, \"sigmoid\")\nhdr.add_layer(10, \"sigmoid\")\n\nhdr.compile(training_data, test_data)\n\ndef display_image(input_vector) :\n image = np.reshape(input_vector, (28, 28))\n\n plt.imshow(image, cmap='gray')\n plt.show()\n\n# test results for few samples\nfor i in np.arange(0, 19):\n display_image(test_data[i][0])\n print(\"HDR output: \", np.argmax(hdr.feedforward(test_data[i][0])))\n" ]
[ [ "numpy.reshape", "numpy.arange", "matplotlib.pyplot.imshow", "matplotlib.pyplot.show" ] ]
julianblanco/MAVFleetControl
[ "70ca504288e7123a906150beb8f1fc7141a18fbd" ]
[ "precision_land.py" ]
[ "import asyncio\nimport time\nimport numpy as np\n\nfrom mavfleetcontrol.craft import Craft\nfrom mavfleetcontrol.actions.point import FlyToPoint\nfrom mavfleetcontrol.actions.percision_land import PercisionLand\nfrom mavfleetcontrol.actions.land import land\n\nif __name__ == \"__main__\":\n\n # loop = asyncio.get_event_loop()\n\n drone = Craft(\"drone1\", \"udp://:14540\")\n # loop.run_until_complete(drone.arm(coordinate=[0.0,0.0,0.0],attitude=[0.0,0.0,0.0]))\n drone.start()\n drone.add_action(FlyToPoint(np.array([1, 1, -20]), tolerance=0.5))\n drone.add_action( PercisionLand( 1.0, np.array([1, 1]) ) )\n drone.add_action(land)\n # drone.override_action(land)\n drone.close_conn() # will run after FLYTOPOINT IS DONE)\n drone.join()\n" ]
[ [ "numpy.array" ] ]
tyang816/CoClsGraph
[ "c31634d39685e6192a5018f9d1a113ad5fc80407" ]
[ "backend/flask-app/server.py" ]
[ "# -*- coding: utf-8 -*-\n\nfrom flask import Flask, request, jsonify\nfrom db import *\nimport json\nimport pandas as pd\nimport os\n\n# 跨域支持\ndef after_request(response):\n response.headers['Access-Control-Allow-Origin'] = '*'\n response.headers['Access-Control-Allow-Methods'] = 'PUT,GET,POST,DELETE'\n response.headers['Access-Control-Allow-Headers'] = 'Content-Type,Authorization'\n return response\n\napp = Flask(__name__)\napp.after_request(after_request)\n\n\[email protected](\"/login\", methods=['POST'])\ndef login():\n res = {'code':'500'}\n if request.method == 'POST':\n data = request.data.decode()\n name = data['name']\n password = data['password']\n user = User.query.filter_by(name=name, password=password).first()\n if user:\n res = {'msg':'true'}\n return res\n\[email protected](\"/token\", methods=['POST'])\ndef token():\n res = {\"code\":\"500\"}\n if request.method == 'POST':\n data = json.loads(request.data.decode())\n name = data['username']\n password = data['password']\n user = User.query.filter_by(name=name, password=password).first()\n\n if user:\n res = {\n \"code\":\"200\", \n \"data\":{\n \"token\": \"user.Auth\", \n \"userInfo\":{\"userId\": user.id, \"userName\": user.name, \"dashboard\": \"1\",\n \"userSex\": user.sex, \"userIntro\": user.intro, \"userLocate\": user.locate,\n \"userSchool\": user.school, \"userAvatar\": user.avatar}\n }\n }\n return jsonify(res)\n\n\n\n\[email protected](\"/base\", methods=['POST'])\ndef basemethod():\n res = {\"code\":\"500\"}\n if request.method == 'POST':\n data = json.loads(request.data.decode())\n base_id = data['base_id']\n base = Base.query.filter_by(id=base_id).first()\n if base:\n res = {\n \"code\": \"200\",\n \"data\": {\n \"method\": base.method, \"summary\": base.summary, \n \"method_token\": base.method_token, \"summary_token\": base.summary_token\n }\n }\n\n return jsonify(res)\n\n\n\[email protected](\"/class2base\", methods=['POST'])\ndef class2base():\n res = {\"code\":\"500\"}\n if request.method == 'POST':\n data = json.loads(request.data.decode())\n base_id = data['base_id']\n class2base = Class2Base.query.filter_by(base_id=base_id).all()\n if class2base[0]:\n res = {\n \"code\": \"200\",\n \"data\": [ \n {\"method\": class2base_i.method, \"method_token\": class2base_i.method_token} for class2base_i in class2base\n ]\n }\n\n return jsonify(res)\n\n\[email protected](\"/relateshow\", methods=[\"POST\"])\ndef relateshow():\n res = {\"code\":\"500\"}\n if request.method == 'POST':\n data = json.loads(request.data.decode())\n nodes = []\n links = []\n categories = [{\"name\": \"repository\"}, {\"name\": \"sourcedir\"}, {\"name\": \"package\"}, {\"name\": \"java\"},\n {\"name\": \"class\"}, {\"name\": \"import\"}, {\"name\": \"inherit\"}, {\"name\": \"method\"}]\n rep_id = data['rep_id']\n \n rep = Repository.query.filter_by(id=rep_id).first()\n nodes.append({\"id\": 0, \"name\":rep.name, \"value\": 5, \"category\": 1})\n srcdir = SourceDir.query.filter_by(repository_id=rep_id).limit(5).all()\n srcdir_idx = 1\n pkgs = []\n for src in srcdir:\n nodes.append({\"id\": srcdir_idx, \"name\": src.path, \"value\": 1, \"category\": 2})\n links.append({\"source\": 0,\"target\": srcdir_idx})\n srcdir_idx = srcdir_idx + 1\n pkg = Package.query.filter_by(source_dir_id=src.id).limit(3).all()\n for p in pkg:\n pkgs.append(p)\n \n pkg_idx = srcdir_idx\n javas = []\n for i in range(len(pkgs)):\n nodes.append({\"id\": pkg_idx, \"name\": pkgs[i].name, \"value\": 1, \"category\": 3})\n links.append({\"source\": 
2+int(i/3),\"target\": pkg_idx})\n pkg_idx = pkg_idx + 1\n java = Java.query.filter_by(package_id=pkgs[i].id).limit(2).all()\n for j in java:\n javas.append(j)\n \n java_idx = pkg_idx\n clazzs = []\n for i in range(len(javas)):\n nodes.append({\"id\": java_idx, \"name\": javas[i].name, \"value\": 1, \"category\": 4})\n links.append({\"source\": srcdir_idx+int(i/2),\"target\": java_idx})\n java_idx = java_idx + 1\n clazz = Clazz.query.filter_by(java_id=javas[i].id).limit(1).all()\n for c in clazz:\n clazzs.append(c)\n \n clazz_idx = java_idx\n imports = []\n inherits = []\n methods = []\n for i in range(len(clazzs)):\n nodes.append({\"id\": clazz_idx, \"name\": clazzs[i].name, \"value\": 1, \"category\": 5})\n links.append({\"source\": pkg_idx + i,\"target\": clazz_idx})\n clazz_idx = clazz_idx + 1\n import_ = Import.query.filter_by(import_clazz_id=clazzs[i].id).limit(2).all()\n for im in import_:\n imports.append(im)\n inherit = Inherit.query.filter_by(super_clazz_id=clazzs[i].id).limit(2).all()\n for inh in inherit:\n inherits.append(inh)\n method = Method.query.filter_by(clazz_id=clazzs[i].id).limit(2).all()\n for m in method:\n methods.append(m)\n \n import_idx = clazz_idx\n for i in range(len(imports)):\n nodes.append({\"id\": import_idx, \"name\": imports[i].import_clazz_id, \"value\": 1, \"category\": 7})\n links.append({\"source\": java_idx + int(i/2),\"target\": import_idx})\n import_idx = import_idx + 1\n \n inherit_idx = import_idx\n for i in range(len(inherits)):\n nodes.append({\"id\": inherit_idx, \"name\": inherits[i].super_clazz_id, \"value\": 1, \"category\": 8})\n links.append({\"source\": java_idx + int(i/2),\"target\": inherit_idx})\n inherit_idx = inherit_idx + 1\n \n method_idx = inherit_idx\n for i in range(len(methods)):\n nodes.append({\"id\": method_idx, \"name\": methods[i].signature, \"value\": 1, \"category\": 9})\n links.append({\"source\": java_idx + int(i/2),\"target\": method_idx})\n method_idx = method_idx + 1\n \n res = {\"code\": \"200\", \"type\": \"force\", \"categories\": categories,\n \"nodes\": nodes, \"links\": links}\n \n return jsonify(res)\n\[email protected](\"/jielong\", methods=[\"POST\"])\ndef jielong():\n res = {\"code\":\"500\"}\n if request.method == 'POST':\n data = json.loads(request.data.decode())\n lines = data['content'].split('\\n')\n clazz = data['clazz']\n \n names = []\n for line in lines:\n name = line.split()[1]\n if name not in names:\n names.append(name)\n\n df = pd.read_excel('./data/信息学院18级在校学生名单.xlsx'+total_xlsx[0])\n names_all = df[df['班级'].isin([clazz])]['姓名'].tolist()\n not_jielong = set(names_all) - set(names)\n \n ans1 = \"\"\n ans2 = \"\"\n for name in list(not_jielong):\n tel = df[df['姓名'].isin([name])]['电话'].tolist()[0]\n ans1 = ans1 + \"@\" + name + \" \"\n ans2 = ans2 + str(list(not_jielong).index(name)+1) + '.' 
+ name + \"未回复+%s\"%tel + '\\n'\n\n res = {\n \"code\": \"200\", \"ans1\": ans1, \"ans2\": ans2\n }\n return jsonify(res)\n\[email protected](\"/jielong5\", methods=[\"POST\"])\ndef jielong5():\n res = {\"code\":\"500\"}\n if request.method == 'POST':\n data = json.loads(request.data.decode())\n lines = data['content'].split('\\n')\n rooms = []\n for line in lines:\n room = line.split()[1][:3]\n if room not in rooms:\n rooms.append(room)\n df = pd.read_excel('./data/23舍5层核酸分组.xlsx')\n rooms_list = df['宿舍'].tolist()\n rooms_all = []\n for room in rooms_list:\n rooms_all.append(room[7:])\n not_jielong = set(rooms_all) - set(rooms)\n \n ans1 = \"\"\n for room in list(not_jielong):\n ans1 = ans1 + room + \" \"\n\n res = {\n \"code\": \"200\", \"ans1\": ans1\n }\n return jsonify(res)\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n" ]
[ [ "pandas.read_excel" ] ]
HyeongJu916/video_SuperResolution_ESRGAN
[ "c1a01c99287a6212a3dc76ac17baafcf1c9f3013" ]
[ "esrgan_pytorch/calculate_niqe.py" ]
[ "# Copyright 2020 Dakewe Biotech Corporation. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport math\nimport os\n\nimport numpy as np\nimport scipy\nimport scipy.io\nimport scipy.ndimage\nimport scipy.special\nfrom PIL import Image\n\n__all__ = [\n \"aggd_features\", \"cal_niqe\", \"compute_image_mscn_transform\", \"extract_on_patches\", \"get_patches_test_features\",\n \"gen_gauss_window\", \"ggd_features\", \"paired_product\"\n]\n\n# Define some parameters\ngamma_range = np.arange(0.2, 10, 0.001)\na = scipy.special.gamma(2.0 / gamma_range)\na *= a\nb = scipy.special.gamma(1.0 / gamma_range)\nc = scipy.special.gamma(3.0 / gamma_range)\nprec_gammas = a / (b * c)\n\n\ndef aggd_features(imdata):\n # flatten imdata\n imdata.shape = (len(imdata.flat),)\n imdata2 = imdata * imdata\n left_data = imdata2[imdata < 0]\n right_data = imdata2[imdata >= 0]\n left_mean_sqrt = 0\n right_mean_sqrt = 0\n if len(left_data) > 0:\n left_mean_sqrt = np.sqrt(np.average(left_data))\n if len(right_data) > 0:\n right_mean_sqrt = np.sqrt(np.average(right_data))\n\n if right_mean_sqrt != 0:\n gamma_hat = left_mean_sqrt / right_mean_sqrt\n else:\n gamma_hat = np.inf\n # solve r-hat norm\n\n imdata2_mean = np.mean(imdata2)\n if imdata2_mean != 0:\n r_hat = (np.average(np.abs(imdata)) ** 2) / (np.average(imdata2))\n else:\n r_hat = np.inf\n rhat_norm = r_hat * (((math.pow(gamma_hat, 3) + 1) * (gamma_hat + 1)) / math.pow(math.pow(gamma_hat, 2) + 1, 2))\n\n # solve alpha by guessing values that minimize ro\n pos = np.argmin((prec_gammas - rhat_norm) ** 2)\n alpha = gamma_range[pos]\n\n gam1 = scipy.special.gamma(1.0 / alpha)\n gam2 = scipy.special.gamma(2.0 / alpha)\n gam3 = scipy.special.gamma(3.0 / alpha)\n\n aggdratio = np.sqrt(gam1) / np.sqrt(gam3)\n bl = aggdratio * left_mean_sqrt\n br = aggdratio * right_mean_sqrt\n\n # mean parameter\n N = (br - bl) * (gam2 / gam1) # *aggdratio\n return alpha, N, bl, br, left_mean_sqrt, right_mean_sqrt\n\n\ndef cal_niqe(filename):\n r\"\"\"Assessment of natural image quality\n\n Args:\n filename (str): Image path to be evaluated.\n\n Returns:\n Niqe score.\n\n \"\"\"\n patch_size = 32\n params = scipy.io.loadmat(os.path.join(\"data\", \"niqe_image_params.mat\"))\n pop_mu = np.ravel(params[\"pop_mu\"])\n pop_cov = params[\"pop_cov\"]\n\n img = np.array(Image.open(filename).convert(\"LA\"))[:, :, 0]\n\n M, N = img.shape\n\n # assert C == 1, \"niqe called with videos containing %d channels.\n # Please supply only the luminance channel\" % (C,)\n assert M > (patch_size * 2 + 1), \"niqe called with small frame size, requires > 192x192 resolution video \" \\\n \"using current training parameters\"\n assert N > (patch_size * 2 + 1), \"niqe called with small frame size, requires > 192x192 resolution video \" \\\n \"using current training parameters\"\n\n feats = get_patches_test_features(img, patch_size)\n sample_mu = np.mean(feats, axis=0)\n sample_cov = np.cov(feats.T)\n\n X = 
sample_mu - pop_mu\n covmat = ((pop_cov + sample_cov) / 2.0)\n pinvmat = scipy.linalg.pinv(covmat)\n niqe_score = np.sqrt(np.dot(np.dot(X, pinvmat), X))\n\n return niqe_score\n\n\ndef compute_image_mscn_transform(image, C=1, avg_window=None, extend_mode=\"constant\"):\n if avg_window is None:\n avg_window = gen_gauss_window(3, 7.0 / 6.0)\n assert len(np.shape(image)) == 2\n h, w = np.shape(image)\n mu_image = np.zeros((h, w), dtype=np.float32)\n var_image = np.zeros((h, w), dtype=np.float32)\n image = np.array(image).astype(\"float32\")\n scipy.ndimage.correlate1d(image, avg_window, 0, mu_image, mode=extend_mode)\n scipy.ndimage.correlate1d(mu_image, avg_window, 1, mu_image, mode=extend_mode)\n scipy.ndimage.correlate1d(image ** 2, avg_window, 0, var_image, mode=extend_mode)\n scipy.ndimage.correlate1d(var_image, avg_window, 1, var_image, mode=extend_mode)\n var_image = np.sqrt(np.abs(var_image - mu_image ** 2))\n return (image - mu_image) / (var_image + C), var_image, mu_image\n\n\ndef extract_on_patches(img, patch_size):\n r\"\"\"The feature parameters are extracted from the trained general features\n\n Args:\n img (np.array): Image data read by opencv\n patch_size (int): Number of image patch\n\n Returns:\n Feature parameters.\n\n \"\"\"\n h, w = img.shape\n patch_size = np.int(patch_size)\n patches = []\n for j in range(0, h - patch_size + 1, patch_size):\n for i in range(0, w - patch_size + 1, patch_size):\n patch = img[j:j + patch_size, i:i + patch_size]\n patches.append(patch)\n\n patches = np.array(patches)\n\n patch_features = []\n for p in patches:\n alpha_m, N, bl, br, lsq, rsq = aggd_features(p.copy())\n pps1, pps2, pps3, pps4 = paired_product(p)\n alpha1, N1, bl1, br1, lsq1, rsq1 = aggd_features(pps1)\n alpha2, N2, bl2, br2, lsq2, rsq2 = aggd_features(pps2)\n alpha3, N3, bl3, br3, lsq3, rsq3 = aggd_features(pps3)\n alpha4, N4, bl4, br4, lsq4, rsq4 = aggd_features(pps4)\n extract_subband_features = np.array([alpha_m, (bl + br) / 2.0,\n alpha1, N1, bl1, br1, # (V)\n alpha2, N2, bl2, br2, # (H)\n alpha3, N3, bl3, bl3, # (D1)\n alpha4, N4, bl4, bl4, # (D2)\n ])\n patch_features.append(extract_subband_features)\n patch_features = np.array(patch_features)\n\n return patch_features\n\n\ndef get_patches_test_features(img, patch_size):\n r\"\"\"Get the general features in each block of the test image\n\n Args:\n img (np.array): Image data read by opencv\n patch_size (int): Number of image patch\n\n Returns:\n General test features\n\n \"\"\"\n h, w = np.shape(img)\n if h < patch_size or w < patch_size:\n print(\"Input image is too small\")\n exit(0)\n\n # ensure that the patch divides evenly into img\n hoffset = (h % patch_size)\n woffset = (w % patch_size)\n\n if hoffset > 0:\n img = img[:-hoffset, :]\n if woffset > 0:\n img = img[:, :-woffset]\n\n img = img.astype(np.float32)\n img2 = Image.fromarray(img).resize(size=(h // 2, w // 2), resample=Image.BICUBIC)\n\n mscn1, var, mu = compute_image_mscn_transform(img)\n mscn1 = mscn1.astype(np.float32)\n\n mscn2, _, _ = compute_image_mscn_transform(img2)\n mscn2 = mscn2.astype(np.float32)\n\n feats_lvl1 = extract_on_patches(mscn1, patch_size)\n feats_lvl2 = extract_on_patches(mscn2, patch_size // 2)\n\n features = np.hstack((feats_lvl1, feats_lvl2)) # feats_lvl3))\n return features\n\n\ndef gen_gauss_window(lw, sigma):\n sd = np.float32(sigma)\n lw = int(lw)\n weights = [0.0] * (2 * lw + 1)\n weights[lw] = 1.0\n sum = 1.0\n sd *= sd\n for ii in range(1, lw + 1):\n tmp = np.exp(-0.5 * np.float32(ii * ii) / sd)\n weights[lw + ii] = tmp\n 
weights[lw - ii] = tmp\n sum += 2.0 * tmp\n for ii in range(2 * lw + 1):\n weights[ii] /= sum\n return weights\n\n\ndef ggd_features(imdata):\n nr_gam = 1 / prec_gammas\n sigma_sq = np.var(imdata)\n E = np.mean(np.abs(imdata))\n rho = sigma_sq / E ** 2\n pos = np.argmin(np.abs(nr_gam - rho))\n return gamma_range[pos], sigma_sq\n\n\ndef paired_product(new_im):\n shift1 = np.roll(new_im.copy(), 1, axis=1)\n shift2 = np.roll(new_im.copy(), 1, axis=0)\n shift3 = np.roll(np.roll(new_im.copy(), 1, axis=0), 1, axis=1)\n shift4 = np.roll(np.roll(new_im.copy(), 1, axis=0), -1, axis=1)\n\n H_img = shift1 * new_im\n V_img = shift2 * new_im\n D1_img = shift3 * new_im\n D2_img = shift4 * new_im\n\n return H_img, V_img, D1_img, D2_img\n" ]
[ [ "numpy.dot", "scipy.linalg.pinv", "numpy.sqrt", "numpy.int", "numpy.mean", "numpy.argmin", "numpy.var", "numpy.hstack", "numpy.arange", "numpy.float32", "numpy.ravel", "numpy.zeros", "scipy.special.gamma", "numpy.cov", "scipy.ndimage.correlate1d", "numpy.array", "numpy.abs", "numpy.shape", "numpy.average" ] ]
deeplearningforfun/torch-tools
[ "e3dc040dcfe33aec247a3139e72426bca73cda96" ]
[ "torchtoolbox/data/sampler.py" ]
[ "__all__ = ['RepeatedAugmentSampler']\n\nimport math\n\nimport torch\nfrom torch import distributed as dist\nfrom torch.utils.data import Sampler\n\n\nclass RepeatedAugmentSampler(Sampler):\n \"\"\"Sampler that restricts data loading to a subset of the dataset for distributed,\n with repeated augmentation.\n It ensures that different each augmented version of a sample will be visible to a\n different process (GPU)\n Heavily based on torch.utils.data.DistributedSampler\n \"\"\"\n def __init__(self, dataset, m=3, num_replicas=None, rank=None, shuffle=True):\n if num_replicas is None:\n if not dist.is_available():\n raise RuntimeError(\"Requires distributed package to be available\")\n num_replicas = dist.get_world_size()\n if rank is None:\n if not dist.is_available():\n raise RuntimeError(\"Requires distributed package to be available\")\n rank = dist.get_rank()\n self.dataset = dataset\n self.num_replicas = num_replicas\n self.rank = rank\n self.epoch = 0\n self.m = m\n self.num_samples = int(math.ceil(len(self.dataset) * m / self.num_replicas))\n self.total_size = self.num_samples * self.num_replicas\n # self.num_selected_samples = int(math.ceil(len(self.dataset) / self.num_replicas))\n self.num_selected_samples = int(math.floor(len(self.dataset) // 256 * 256 / self.num_replicas))\n self.shuffle = shuffle\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n if self.shuffle:\n indices = torch.randperm(len(self.dataset), generator=g).tolist()\n else:\n indices = list(range(len(self.dataset)))\n\n # add extra samples to make it evenly divisible\n indices = [ele for ele in indices for i in range(self.m)]\n indices += indices[:(self.total_size - len(indices))]\n assert len(indices) == self.total_size\n\n # subsample\n indices = indices[self.rank:self.total_size:self.num_replicas]\n assert len(indices) == self.num_samples\n\n return iter(indices[:self.num_selected_samples])\n\n def __len__(self):\n return self.num_selected_samples\n\n def set_epoch(self, epoch):\n self.epoch = epoch\n" ]
[ [ "torch.Generator", "torch.distributed.get_world_size", "torch.distributed.is_available", "torch.distributed.get_rank" ] ]
GeniusHTX/EIDIG-2021-09-30-server
[ "af3ca2c8edd1ba052b8b7c9a0ecf6494dc9b36b6" ]
[ "_htx/retraining_htx.py" ]
[ "\"\"\"\nThis python file retrains the original models with an augmented training set.\n\"\"\"\n\nimport os\nimport pickle\nimport sys\n\nimport joblib\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow import keras\n\nfrom _htx.EIDIG_htx import DisInstanceResult\n\nsys.path.append(\"..\")\nsys.path.extend([os.path.join(root, name) for root, dirs, _ in os.walk(\"../\") for name in dirs])\nfrom preprocessing import pre_census_income\nfrom preprocessing import pre_german_credit\nfrom preprocessing import pre_bank_marketing\nfrom training import train_census_income\nfrom training import train_german_credit\nfrom training import train_bank_marketing\n\nfrom _htx import pre_census_income_htx\n\n\ndef remove_informal_dis_ins(dataset_name, num_attribs, benchmark, round_now):\n ensemble_clf = joblib.load('../models/ensemble_models/' + dataset_name + '_ensemble.pkl')\n protected_attribs = pre_census_income_htx.protected_attribs\n\n ids_C_a_EIDIG_5_pkl = '../logging_data_htx/generated_dis_instances_back_up/C-a_ids_EIDIG_5_1.pkl'\n ids_C_g_EIDIG_5_pkl = '../logging_data_htx/generated_dis_instances_back_up/C-g_ids_EIDIG_5_1.pkl'\n ids_C_r_EIDIG_5_pkl = '../logging_data_htx/generated_dis_instances_back_up/C-r_ids_EIDIG_5_1.pkl'\n\n ids_benchmark_EIDIG_5 = ''\n if benchmark == 'C-a':\n ids_benchmark_EIDIG_5 = ids_C_a_EIDIG_5_pkl\n if benchmark == 'C-g':\n ids_benchmark_EIDIG_5 = ids_C_g_EIDIG_5_pkl\n if benchmark == 'C-r':\n ids_benchmark_EIDIG_5 = ids_C_r_EIDIG_5_pkl\n\n file = open(ids_benchmark_EIDIG_5, 'rb')\n R = pickle.load(file)\n X = pre_census_income_htx.get_all_data()\n N_R = []\n for r in R:\n seed = r.seed[0]\n # produce seed's label\n for x in X:\n x_without_label = x[:-1]\n if (x_without_label == seed).all():\n seed_label = x[-1]\n print(\"seed label: \" + str(seed_label))\n\n flag_1 = False # records whether the global instance is identical to the seed\n flag_2 = False # records whether the labels of the global or local discriminatory instances match the seed's\n # produce the global discriminatory instances' label\n g_dis_ins = r.g_dis_ins\n num_g_dis_ins = len(g_dis_ins)\n if num_g_dis_ins > 0:\n if (g_dis_ins[0] == seed).all():\n # exclude the case where the global instance equals the seed\n flag_1 = True\n else:\n g_dis_ins_label_vote = ensemble_clf.predict(np.delete(g_dis_ins, protected_attribs, axis=1))\n if g_dis_ins_label_vote[0] != seed_label:\n flag_2 = True\n if flag_1 or flag_2:\n continue\n\n new_l_dis_ins = np.empty(shape=(0, num_attribs)) # stores the local discriminatory instances whose labels match the seed label\n # produce the local discriminatory instances' label\n l_dis_ins = r.l_dis_ins\n num_l_dis_ins = len(l_dis_ins)\n if num_l_dis_ins > 0:\n l_dis_ins_label_vote = ensemble_clf.predict(np.delete(l_dis_ins, protected_attribs, axis=1))\n for i in range(num_l_dis_ins):\n if l_dis_ins_label_vote[i] != seed_label:\n continue\n new_l_dis_ins = np.append(new_l_dis_ins, [l_dis_ins[i]], axis=0)\n\n new_r = DisInstanceResult(num_attribs, r.seed.copy(), r.g_dis_ins.copy())\n new_r.set_l_dis_ins(new_l_dis_ins)\n N_R.append(new_r)\n # end for R\n\n file.close()\n\n result_directory = '../logging_data_htx/generated_dis_instances_back_up/'\n if not os.path.exists(result_directory):\n os.makedirs(result_directory)\n filename = result_directory + benchmark + '_ids_EIDIG_5_' + str(round_now) + '_formal.pkl'\n filehandler = open(filename, 'wb')\n pickle.dump(N_R, filehandler)\n filehandler.close()\n\n\ndef patch_remove_informal_dis_ins(dataset_name):\n for benchmark, protected_attribs in [('C-a', [0]), ('C-r', [6]), ('C-g', [7])]:\n print('\\n', benchmark, ':\\n')\n 
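# the round index is only used to name the output file\n 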
ROUND = 1\n num_attribs = 12\n remove_informal_dis_ins(dataset_name, num_attribs, benchmark, ROUND)\n\n\ndef retraining_without_majority_voting(dataset_name, approach_name, percent):\n print(\"Entered the retraining_without_majority_voting function\")\n if dataset_name == 'adult':\n X_train = pre_census_income.X_train_all\n y_train = pre_census_income.y_train_all\n X_test = pre_census_income.X_test\n y_test = pre_census_income.y_test\n model = train_census_income.model\n elif dataset_name == 'german':\n X_train = pre_german_credit.X_train\n y_train = pre_german_credit.y_train\n X_test = pre_german_credit.X_test\n y_test = pre_german_credit.y_test\n model = train_german_credit.model\n elif dataset_name == 'bank':\n X_train = pre_bank_marketing.X_train_all\n y_train = pre_bank_marketing.y_train_all\n X_test = pre_bank_marketing.X_test\n y_test = pre_bank_marketing.y_test\n model = train_bank_marketing.model\n\n # census income\n dataset_name = 'adult'\n num_attribs = 0\n if dataset_name == 'adult':\n num_attribs = 12\n if dataset_name == 'german':\n num_attribs = 25\n if dataset_name == 'bank':\n num_attribs = 16\n\n X = pre_census_income_htx.get_all_data()\n\n all_dis_ins = np.empty(shape=(0, num_attribs)) # stores all discriminatory instances\n all_dis_ins_label = np.empty(shape=(0,)) # stores the labels of all discriminatory instances\n print(all_dis_ins_label.shape)\n\n for benchmark, protected_attribs in [('C-a', [0]), ('C-r', [6]), ('C-g', [7])]:\n print('\\n', benchmark, ':\\n')\n ids_benchmark_EIDIG_5_pkl = '../logging_data_htx/generated_dis_instances_back_up/' + \\\n benchmark + \"_ids_EIDIG_5_1.pkl\"\n file = open(ids_benchmark_EIDIG_5_pkl, 'rb')\n R = pickle.load(file)\n for r in R:\n # step 1: find the seed's label\n seed = r.seed[0]\n seed_label = 0\n for x in X:\n x_without_label = x[:-1]\n if (x_without_label == seed).all():\n seed_label = x[-1]\n\n # step 2: set the labels of the global and local discriminatory instances to the seed's label\n dis_ins_label = seed_label\n\n if len(r.g_dis_ins) > 0:\n if (r.g_dis_ins[0] == seed).all():\n # exclude the case where the global instance equals the seed\n continue\n\n all_dis_ins = np.append(all_dis_ins, r.g_dis_ins, axis=0)\n for i in range(len(r.g_dis_ins)):\n all_dis_ins_label = np.append(all_dis_ins_label, [dis_ins_label], axis=0)\n\n if len(r.l_dis_ins) > 0:\n all_dis_ins = np.append(all_dis_ins, r.l_dis_ins, axis=0)\n for j in range(len(r.l_dis_ins)):\n all_dis_ins_label = np.append(all_dis_ins_label, [dis_ins_label], axis=0)\n file.close()\n # end for\n num_of_ids = len(all_dis_ins_label)\n print(\"the number of all formal discriminatory instances: \" + str(num_of_ids))\n num_percent = num_of_ids * percent\n print(\"the number of discriminatory instances used for retraining: \" + str(num_percent))\n num_aug = int(num_percent)\n\n ids_aug = np.empty(shape=(0, len(X_train[0])))\n ids_aug_label = np.empty(shape=(0,))\n for _ in range(num_aug):\n rand_index = np.random.randint(len(all_dis_ins))\n ids_aug = np.append(ids_aug, [all_dis_ins[rand_index]], axis=0)\n ids_aug_label = np.append(ids_aug_label, [all_dis_ins_label[rand_index]], axis=0)\n\n X_train = np.append(X_train, ids_aug, axis=0)\n y_train = np.append(y_train, ids_aug_label, axis=0)\n X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=42)\n history = model.fit(X_train, y_train, epochs=100, validation_data=(X_val, y_val),\n callbacks=[keras.callbacks.EarlyStopping(patience=10)])\n model.evaluate(X_test, y_test)\n\n model_name = dataset_name + '_' + approach_name + '_retrained_model_no_majority_vote_' + str(percent) + '.h5'\n model_path = 
'../models_htx/retrained_models/' + model_name\n model.save(model_path)\n\n\ndef retraining(dataset_name, approach_name, ids):\n # randomly sample 5% of individual discriminatory instances generated for data augmentation\n # then retrain the original models\n\n ensemble_clf = joblib.load('../models/ensemble_models/' + dataset_name + '_ensemble.pkl')\n if dataset_name == 'adult':\n protected_attribs = pre_census_income.protected_attribs\n X_train = pre_census_income.X_train_all\n y_train = pre_census_income.y_train_all\n X_test = pre_census_income.X_test\n y_test = pre_census_income.y_test\n model = train_census_income.model\n elif dataset_name == 'german':\n protected_attribs = pre_german_credit.protected_attribs\n X_train = pre_german_credit.X_train\n y_train = pre_german_credit.y_train\n X_test = pre_german_credit.X_test\n y_test = pre_german_credit.y_test\n model = train_german_credit.model\n elif dataset_name == 'bank':\n protected_attribs = pre_bank_marketing.protected_attribs\n X_train = pre_bank_marketing.X_train_all\n y_train = pre_bank_marketing.y_train_all\n X_test = pre_bank_marketing.X_test\n y_test = pre_bank_marketing.y_test\n model = train_bank_marketing.model\n ids_aug = np.empty(shape=(0, len(X_train[0])))\n num_of_ids = len(ids)\n print(\"the number of all discriminatory instances: \" + str(num_of_ids))\n num_percent_5 = num_of_ids * 0.05\n num_aug = int(num_percent_5)\n print(\"the number of augmented samples from discriminatory instances (5%): \" + str(num_aug))\n for _ in range(num_aug):\n rand_index = np.random.randint(len(ids))\n ids_aug = np.append(ids_aug, [ids[rand_index]], axis=0)\n label_vote = ensemble_clf.predict(np.delete(ids_aug, protected_attribs, axis=1))\n X_train = np.append(X_train, ids_aug, axis=0)\n y_train = np.append(y_train, label_vote, axis=0)\n X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=42)\n history = model.fit(X_train, y_train, epochs=100, validation_data=(X_val, y_val),\n callbacks=[keras.callbacks.EarlyStopping(patience=10)])\n model.evaluate(X_test, y_test)\n model.save('../models_htx/retrained_models/' + dataset_name + '_' + approach_name + '_retrained_model.h5')\n\n\ndef retraining_modify(dataset_name, approach_name, ids):\n # randomly sample 5% of individual discriminatory instances generated for data augmentation\n # then retrain the original models\n\n ensemble_clf = joblib.load('../models/ensemble_models/' + dataset_name + '_ensemble.pkl')\n if dataset_name == 'adult':\n protected_attribs = pre_census_income.protected_attribs\n X_train = pre_census_income.X_train_all\n y_train = pre_census_income.y_train_all\n X_test = pre_census_income.X_test\n y_test = pre_census_income.y_test\n model = train_census_income.model\n elif dataset_name == 'german':\n protected_attribs = pre_german_credit.protected_attribs\n X_train = pre_german_credit.X_train\n y_train = pre_german_credit.y_train\n X_test = pre_german_credit.X_test\n y_test = pre_german_credit.y_test\n model = train_german_credit.model\n elif dataset_name == 'bank':\n protected_attribs = pre_bank_marketing.protected_attribs\n X_train = pre_bank_marketing.X_train_all\n y_train = pre_bank_marketing.y_train_all\n X_test = pre_bank_marketing.X_test\n y_test = pre_bank_marketing.y_test\n model = train_bank_marketing.model\n\n ids_aug = np.empty(shape=(0, len(X_train[0])))\n\n '''\n Before the modification:\n num_aug = int(len(ids) * 0.05) # note: the 5% is not 5% per protected attribute; the discriminatory instances of all three attributes are merged first, and then 5% are sampled at random\n '''\n\n # after the modification\n num_of_ids = len(ids)\n print(\"the number of 
all formal discriminatory instances: \" + str(num_of_ids))\n num_percent_5 = num_of_ids * 0.05\n print(\"the number of 5% formal discriminatory instances: \" + str(num_percent_5))\n num_aug = int(max(num_percent_5, 18102)) # cast to int so it can be passed to range()\n print(\"the number of augmented samples from discriminatory instances (5%): \" + str(num_aug)) # 18102\n\n for _ in range(num_aug):\n rand_index = np.random.randint(len(ids))\n ids_aug = np.append(ids_aug, [ids[rand_index]], axis=0)\n label_vote = ensemble_clf.predict(np.delete(ids_aug, protected_attribs, axis=1)) # the protected attributes were already removed during training\n\n X_train = np.append(X_train, ids_aug, axis=0)\n y_train = np.append(y_train, label_vote, axis=0)\n X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=42)\n history = model.fit(X_train, y_train, epochs=100,\n validation_data=(X_val, y_val),\n callbacks=[keras.callbacks.EarlyStopping(patience=10)])\n model.evaluate(X_test, y_test)\n model.save('../models_htx/retrained_models/' + dataset_name + '_' +\n approach_name + '_retrained_model.h5')\n\n\ndef retraining_with_formal_dis_ins():\n # census income\n dataset_name = 'adult'\n num_attribs = 0\n if dataset_name == 'adult':\n num_attribs = 12\n if dataset_name == 'german':\n num_attribs = 25\n if dataset_name == 'bank':\n num_attribs = 16\n\n approach_name = 'EIDIG_5'\n\n all_dis_ins = np.empty(shape=(0, num_attribs))\n ids_benchmark_EIDIG_5_formal_pkl = ''\n for benchmark, protected_attribs in [('C-a', [0]), ('C-r', [6]), ('C-g', [7])]:\n print('\\n', benchmark, ':\\n')\n ids_benchmark_EIDIG_5_formal_pkl = '../logging_data_htx/generated_dis_instances_back_up/' + \\\n benchmark + \"_ids_EIDIG_5_1_formal.pkl\"\n file = open(ids_benchmark_EIDIG_5_formal_pkl, 'rb')\n R = pickle.load(file)\n for r in R:\n if len(r.g_dis_ins) > 0:\n all_dis_ins = np.append(all_dis_ins, r.g_dis_ins, axis=0)\n if len(r.l_dis_ins) > 0:\n all_dis_ins = np.append(all_dis_ins, r.l_dis_ins, axis=0)\n file.close()\n # retrain the original models\n print(\"Entering the retraining function, preparing to retrain the model\")\n retraining(dataset_name, approach_name, all_dis_ins)\n\n\ndef main(argv=None):\n dataset_name = 'adult'\n approach_name = 'EIDIG_5'\n patch_remove_informal_dis_ins(dataset_name)\n retraining_with_formal_dis_ins() # open question: what is the difference between retraining_with_formal and the commented-out call below?\n percent = 0.02 # 0.02 == 2%, 0.05 == 5%, 0.1 == 10%\n # retraining_without_majority_voting(dataset_name, approach_name, percent)\n\n # reproduction\n # ids_C_a_EIDIG_5 = np.load('../logging_data/generated_dis_instances_back_up/C-a_ids_EIDIG_5.npy')\n # ids_C_r_EIDIG_5 = np.load('../logging_data/generated_dis_instances_back_up/C-r_ids_EIDIG_5.npy')\n # ids_C_g_EIDIG_5 = np.load('../logging_data/generated_dis_instances_back_up/C-g_ids_EIDIG_5.npy')\n # C_ids_EIDIG_5 = np.concatenate((ids_C_a_EIDIG_5, ids_C_r_EIDIG_5, ids_C_g_EIDIG_5),axis=0)\n # retraining(dataset_name, approach_name, C_ids_EIDIG_5)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "sklearn.model_selection.train_test_split", "numpy.append", "numpy.delete", "tensorflow.keras.callbacks.EarlyStopping", "numpy.empty" ] ]
countywest/SAUM
[ "9dc2215db95039ea43452a7cc626e58755363219" ]
[ "test_self_consistency.py" ]
[ "import csv\nimport importlib\nimport numpy as np\nimport os\nimport tensorflow as tf\nimport time\nimport datetime\nimport sys\nimport h5py\nfrom utils.io_util import read_pcd, save_pcd\nfrom utils.tf_util import chamfer, earth_mover, dist_to_nearest\nfrom utils.visu_util import plot_pcd_three_views\nfrom utils.args import testSelfConsistencyArguments\nfrom termcolor import colored\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(BASE_DIR)\nsys.path.append(os.path.join(BASE_DIR, 'models'))\nsys.path.append(os.path.join(BASE_DIR, 'utils'))\n\ndef test_vanilla(config):\n test_config = config['test_setting']\n data_config = config['dataset']\n \n # Data\n inputs = tf.placeholder(tf.float32, (1, None, 3))\n npts = tf.placeholder(tf.int32, (1,))\n gt = tf.placeholder(tf.float32, (1, data_config['num_gt_points'], 3))\n output = tf.placeholder(tf.float32, (1, data_config['num_gt_points'], 3))\n\n # Model\n model_module = importlib.import_module(config['model']['decoder']['type'])\n model = model_module.model(config, inputs, npts, gt, False)\n\n # Loss\n cd_op = chamfer(output, gt)\n emd_op = earth_mover(output, gt)\n nearest_dist_op = dist_to_nearest(output, gt)\n\n # make results directory\n if os.path.exists(config['results_dir']):\n delete_key = input(colored('%s exists. Delete? [y (or enter)/N]'\n % config['results_dir'], 'white', 'on_red'))\n if delete_key == 'y' or delete_key == \"\":\n os.system('rm -rf %s/*' % config['results_dir'])\n else:\n os.makedirs(os.path.join(config['results_dir']))\n\n os.system('cp test_self_consistency.py %s' % config['results_dir'])\n\n # TF Config\n config_proto = tf.ConfigProto()\n config_proto.gpu_options.allow_growth = True\n config_proto.allow_soft_placement = True\n sess = tf.Session(config=config_proto)\n saver = tf.train.Saver()\n saver.restore(sess, tf.train.latest_checkpoint(config['checkpoint']))\n\n # Test\n test_start = time.time()\n print(colored(\"Testing...\", 'grey', 'on_green'))\n\n with open(config['list_path']) as file:\n model_list = file.read().splitlines()\n\n total_time = 0\n total_cd = 0\n total_emd = 0\n total_f1_score = 0\n cd_per_cat = {}\n emd_per_cat = {}\n f1_score_per_cat = {}\n os.makedirs(config['results_dir'], exist_ok=True)\n csv_file = open(os.path.join(config['results_dir'], 'results.csv'), 'w')\n writer = csv.writer(csv_file, delimiter=',', quotechar='\"')\n writer.writerow(['id', 'cd', 'emd', 'f1_score'])\n\n for i, model_id in enumerate(model_list):\n start = time.time()\n\n # data\n if data_config['type'] == 'pcn' or data_config['type'] == 'car':\n gt_complete = read_pcd(os.path.join(data_config['dir'], 'complete', '%s.pcd' % model_id))\n gt_complete_npts = gt_complete.shape[0]\n elif data_config['type'] == 'topnet':\n with h5py.File(os.path.join(data_config['dir'], 'gt', '%s.h5' % model_id), 'r') as f:\n gt_complete = f['data'][()].astype(np.float32) # the .value accessor was removed in h5py 3.0\n gt_complete_npts = gt_complete.shape[0]\n else:\n raise NotImplementedError\n\n # inference\n completion = sess.run(model.completion, feed_dict={inputs: [gt_complete], npts: [gt_complete_npts]})\n\n # cd, emd\n cd = sess.run(cd_op, feed_dict={output: completion, gt: [gt_complete]})\n emd = sess.run(emd_op, feed_dict={output: completion, gt: [gt_complete]})\n total_cd += cd\n total_emd += emd\n\n # f1_score\n nn_dists1, nn_dists2 = sess.run(nearest_dist_op,\n feed_dict={output: completion, gt: [gt_complete]})\n P = len(nn_dists1[nn_dists1 < test_config['threshold']]) / data_config['num_gt_points']\n R = len(nn_dists2[nn_dists2 < 
test_config['threshold']]) / data_config['num_gt_points']\n f1_score = 2 * P * R / (P + R)\n total_f1_score += f1_score\n\n total_time += time.time() - start\n\n writer.writerow([model_id, cd, emd, f1_score])\n csv_file.flush()\n\n synset_id, model_id = model_id.split('/')\n if not cd_per_cat.get(synset_id):\n cd_per_cat[synset_id] = []\n if not emd_per_cat.get(synset_id):\n emd_per_cat[synset_id] = []\n if not f1_score_per_cat.get(synset_id):\n f1_score_per_cat[synset_id] = []\n\n cd_per_cat[synset_id].append(cd)\n emd_per_cat[synset_id].append(emd)\n f1_score_per_cat[synset_id].append(f1_score)\n\n # visualize\n if i % test_config['plot_freq'] == 0:\n os.makedirs(os.path.join(config['results_dir'], 'plots', synset_id), exist_ok=True)\n plot_path = os.path.join(config['results_dir'], 'plots', synset_id, '%s.png' % model_id)\n plot_pcd_three_views(plot_path, [gt_complete, completion[0], gt_complete],\n model.visualize_titles, None,\n 'CD %.4f EMD %.4f f1_score %.4f' %\n (cd, emd, f1_score)\n )\n if test_config['save_pcd']:\n os.makedirs(os.path.join(config['results_dir'], 'pcds', synset_id), exist_ok=True)\n save_pcd(os.path.join(config['results_dir'], 'pcds', synset_id, '%s.pcd' % model_id), completion[0])\n\n writer.writerow([\"average\",\n total_cd / len(model_list),\n total_emd / len(model_list),\n total_f1_score / len(model_list)])\n\n for synset_id in cd_per_cat.keys():\n writer.writerow([synset_id,\n np.mean(cd_per_cat[synset_id]),\n np.mean(emd_per_cat[synset_id]),\n np.mean(f1_score_per_cat[synset_id])]\n )\n\n with open(os.path.join(config['results_dir'], 'results_summary.txt'), 'w') as log:\n log.write('Average Chamfer distance: %.8f \\n' % (total_cd / len(model_list)))\n log.write('Average Earth mover distance: %.8f \\n' % (total_emd / len(model_list)))\n log.write('Average f1_score(threshold: %.4f): %.8f \\n' % (test_config['threshold'], total_f1_score / len(model_list)))\n log.write('## Summary for each category ## \\n')\n log.write('ID CD EMD f1_score \\n')\n for synset_id in cd_per_cat.keys():\n log.write('%s %.8f %.8f %.8f\\n' % (synset_id,\n np.mean(cd_per_cat[synset_id]),\n np.mean(emd_per_cat[synset_id]),\n np.mean(f1_score_per_cat[synset_id])\n )\n )\n\n # print results\n print('Average time: %f' % (total_time / len(model_list)))\n print('Average Chamfer distance: %f' % (total_cd / len(model_list)))\n print('Average Earth mover distance: %f' % (total_emd / len(model_list)))\n print('Average f1_score(threshold: %.4f): %f' % (test_config['threshold'], total_f1_score / len(model_list)))\n print('Chamfer distance per category')\n for synset_id in cd_per_cat.keys():\n print(synset_id, '%f' % np.mean(cd_per_cat[synset_id]))\n print('Earth mover distance per category')\n for synset_id in emd_per_cat.keys():\n print(synset_id, '%f' % np.mean(emd_per_cat[synset_id]))\n print('f1_score per category')\n for synset_id in f1_score_per_cat.keys():\n print(synset_id, '%f' % np.mean(f1_score_per_cat[synset_id]))\n csv_file.close()\n sess.close()\n print(colored(\"Test ended!\", 'grey', 'on_green'))\n print('Total testing time', datetime.timedelta(seconds=time.time() - test_start))\n\ndef test_saum(config):\n test_config = config['test_setting']\n data_config = config['dataset']\n \n # Data\n inputs = tf.placeholder(tf.float32, (1, None, 3))\n npts = tf.placeholder(tf.int32, (1,))\n gt = tf.placeholder(tf.float32, (1, data_config['num_gt_points'], 3))\n output = tf.placeholder(tf.float32, (1, None, 3))\n sampled_output = tf.placeholder(tf.float32, (1, data_config['num_gt_points'], 
3))\n\n # Model\n model_module = importlib.import_module(config['model']['decoder']['type'])\n model = model_module.model(config, inputs, npts, gt, False)\n\n # Loss\n cd_op = chamfer(output, gt)\n emd_op = earth_mover(sampled_output, gt)\n nearest_dist_op = dist_to_nearest(output, gt)\n\n # make results directory\n if os.path.exists(config['results_dir']):\n delete_key = input(colored('%s exists. Delete? [y (or enter)/N]'\n % config['results_dir'], 'white', 'on_red'))\n if delete_key == 'y' or delete_key == \"\":\n os.system('rm -rf %s/*' % config['results_dir'])\n else:\n os.makedirs(os.path.join(config['results_dir']))\n\n os.system('cp test_self_consistency.py %s' % config['results_dir'])\n\n # TF Config\n config_proto = tf.ConfigProto()\n config_proto.gpu_options.allow_growth = True\n config_proto.allow_soft_placement = True\n sess = tf.Session(config=config_proto)\n saver = tf.train.Saver()\n saver.restore(sess, tf.train.latest_checkpoint(config['checkpoint']))\n\n # Test\n test_start = time.time()\n print(colored(\"Testing...\", 'grey', 'on_green'))\n\n with open(config['list_path']) as file:\n model_list = file.read().splitlines()\n\n total_time = 0\n\n total_cd = 0\n total_fps_cd = 0\n total_fps_emd = 0\n total_fps_f1_score = 0\n\n cd_per_cat = {}\n fps_cd_per_cat = {}\n fps_emd_per_cat = {}\n fps_f1_score_per_cat = {}\n\n os.makedirs(config['results_dir'], exist_ok=True)\n csv_file = open(os.path.join(config['results_dir'], 'results.csv'), 'w')\n writer = csv.writer(csv_file, delimiter=',', quotechar='\"')\n writer.writerow(['id', 'cd', 'fps_cd', 'fps_emd', 'fps_f1_score'])\n\n for i, model_id in enumerate(model_list):\n start = time.time()\n\n # data\n if data_config['type'] == 'pcn' or data_config['type'] == 'car':\n gt_complete = read_pcd(os.path.join(data_config['dir'], 'complete', '%s.pcd' % model_id))\n gt_complete_npts = gt_complete.shape[0]\n elif data_config['type'] == 'topnet':\n with h5py.File(os.path.join(data_config['dir'], 'gt', '%s.h5' % model_id), 'r') as f:\n gt_complete = f['data'][()].astype(np.float32) # the .value accessor was removed in h5py 3.0\n gt_complete_npts = gt_complete.shape[0]\n else:\n raise NotImplementedError\n\n # inference\n completion = sess.run(model.completion, feed_dict={inputs: [gt_complete], npts: [gt_complete_npts]})\n\n fps_completion, fps_indices = sess.run(model.fps(data_config['num_gt_points'], completion))\n\n is_from_decoder_raw = \\\n np.arange(0, config['model']['decoder']['num_decoder_points'] + config['model']['upsampling_ratio'] * gt_complete_npts) \\\n >= config['model']['upsampling_ratio'] * gt_complete_npts\n is_from_decoder_fps = fps_indices >= config['model']['upsampling_ratio'] * gt_complete_npts\n\n total_time += time.time() - start\n\n # raw\n cd = sess.run(cd_op, feed_dict={output: completion, gt: [gt_complete]})\n total_cd += cd\n\n # farthest point sampling\n # cd, emd\n fps_cd = sess.run(cd_op, feed_dict={output: fps_completion, gt: [gt_complete]})\n fps_emd = sess.run(emd_op, feed_dict={sampled_output: fps_completion, gt: [gt_complete]})\n total_fps_cd += fps_cd\n total_fps_emd += fps_emd\n # f1_score\n fps_nn_dists1, fps_nn_dists2 = sess.run(nearest_dist_op,\n feed_dict={output: fps_completion, gt: [gt_complete]})\n fps_P = len(fps_nn_dists1[fps_nn_dists1 < test_config['threshold']]) / data_config['num_gt_points']\n fps_R = len(fps_nn_dists2[fps_nn_dists2 < test_config['threshold']]) / data_config['num_gt_points']\n fps_f1_score = 2 * fps_P * fps_R / (fps_P + fps_R)\n total_fps_f1_score += fps_f1_score\n\n writer.writerow([model_id, cd, fps_cd, 
fps_emd, fps_f1_score])\n csv_file.flush()\n\n synset_id, model_id = model_id.split('/')\n if not cd_per_cat.get(synset_id):\n cd_per_cat[synset_id] = []\n if not fps_cd_per_cat.get(synset_id):\n fps_cd_per_cat[synset_id] = []\n if not fps_emd_per_cat.get(synset_id):\n fps_emd_per_cat[synset_id] = []\n if not fps_f1_score_per_cat.get(synset_id):\n fps_f1_score_per_cat[synset_id] = []\n\n cd_per_cat[synset_id].append(cd)\n fps_cd_per_cat[synset_id].append(fps_cd)\n fps_emd_per_cat[synset_id].append(fps_emd)\n fps_f1_score_per_cat[synset_id].append(fps_f1_score)\n\n # visualize\n if i % test_config['plot_freq'] == 0:\n if config['visualizing']['visu_split']:\n raw_dir = os.path.join(config['results_dir'], 'plots', 'raw', synset_id)\n fps_dir = os.path.join(config['results_dir'], 'plots', 'fps', synset_id)\n\n os.makedirs(raw_dir, exist_ok=True)\n os.makedirs(fps_dir, exist_ok=True)\n\n raw_plot_path = os.path.join(raw_dir, '%s.png' % model_id)\n fps_plot_path = os.path.join(fps_dir, '%s.png' % model_id)\n\n plot_pcd_three_views(raw_plot_path, [gt_complete, completion[0], gt_complete],\n model.visualize_titles, is_from_decoder_raw,\n 'CD %.4f' % (cd)\n )\n\n plot_pcd_three_views(fps_plot_path, [gt_complete, fps_completion[0], gt_complete],\n model.visualize_titles, is_from_decoder_fps[0],\n 'FPS_CD %.4f FPS_EMD %.4f FPS_f1_score %.4f' % (fps_cd, fps_emd, fps_f1_score)\n )\n else:\n os.makedirs(os.path.join(config['results_dir'], 'plots', synset_id), exist_ok=True)\n\n plot_path = os.path.join(config['results_dir'], 'plots', synset_id, '%s.png' % model_id)\n plot_pcd_three_views(plot_path, [gt_complete, completion[0], gt_complete],\n model.visualize_titles, None,\n 'CD %.4f FPS_CD %.4f FPS_EMD %.4f FPS_f1_score %.4f' %\n (cd, fps_cd, fps_emd, fps_f1_score)\n )\n\n if test_config['save_pcd']:\n os.makedirs(os.path.join(config['results_dir'], 'pcds', synset_id), exist_ok=True)\n save_pcd(os.path.join(config['results_dir'], 'pcds', synset_id, '%s.pcd' % model_id), completion[0])\n save_pcd(os.path.join(config['results_dir'], 'pcds', synset_id, '%s_fps.pcd' % model_id), fps_completion[0])\n\n # write average info in csv file\n writer.writerow([\"average\", total_cd / len(model_list),\n total_fps_cd / len(model_list), total_fps_emd / len(model_list), total_fps_f1_score / len(model_list)\n ])\n for synset_id in cd_per_cat.keys():\n writer.writerow([synset_id, np.mean(cd_per_cat[synset_id]),\n np.mean(fps_cd_per_cat[synset_id]), np.mean(fps_emd_per_cat[synset_id]), np.mean(fps_f1_score_per_cat[synset_id])\n ])\n\n # write average distances(cd, emd) in txt file\n with open(os.path.join(config['results_dir'], 'results_summary.txt'), 'w') as log:\n log.write('Average Chamfer distance: %.8f \\n' % (total_cd / len(model_list)))\n log.write('Average FPS Chamfer distance: %.8f \\n' % (total_fps_cd / len(model_list)))\n log.write('Average FPS Earth mover distance: %.8f \\n' % (total_fps_emd / len(model_list)))\n log.write('Average FPS f1_score(threshold: %.4f): %.8f \\n' % (test_config['threshold'], total_fps_f1_score / len(model_list)))\n\n log.write('## Summary for each category ## \\n')\n log.write('ID CD FPS_CD FPS_EMD FPS_f1_score\\n')\n for synset_id in cd_per_cat.keys():\n log.write('%s %.8f %.8f %.8f %.8f\\n' % (synset_id,\n np.mean(cd_per_cat[synset_id]),\n np.mean(fps_cd_per_cat[synset_id]),\n np.mean(fps_emd_per_cat[synset_id]),\n np.mean(fps_f1_score_per_cat[synset_id])\n )\n )\n\n # print results\n print('Average time: %f' % (total_time / len(model_list)))\n print('Average Chamfer distance: %f' 
% (total_cd / len(model_list)))\n print('Average FPS Chamfer distance: %f' % (total_fps_cd / len(model_list)))\n print('Average FPS Earth mover distance: %f' % (total_fps_emd / len(model_list)))\n print('Average FPS f1_score(threshold: %.4f): %f' % (test_config['threshold'], total_fps_f1_score / len(model_list)))\n\n print('Chamfer distance per category')\n for synset_id in cd_per_cat.keys():\n print(synset_id, '%f' % np.mean(cd_per_cat[synset_id]))\n print('Average FPS Chamfer distance per category')\n for synset_id in fps_cd_per_cat.keys():\n print(synset_id, '%f' % np.mean(fps_cd_per_cat[synset_id]))\n print('Average FPS Earth mover distance per category')\n for synset_id in fps_emd_per_cat.keys():\n print(synset_id, '%f' % np.mean(fps_emd_per_cat[synset_id]))\n print('Average FPS f1_score per category')\n for synset_id in fps_f1_score_per_cat.keys():\n print(synset_id, '%f' % np.mean(fps_f1_score_per_cat[synset_id]))\n\n csv_file.close()\n sess.close()\n\n print(colored(\"Test ended!\", 'grey', 'on_green'))\n print('Total testing time', datetime.timedelta(seconds=time.time() - test_start))\n\ndef test(config):\n if config['model']['use_decoder_only']:\n test_vanilla(config)\n else:\n test_saum(config)\n\nif __name__ == '__main__':\n config = testSelfConsistencyArguments().to_config()\n test(config)\n" ]
[ [ "tensorflow.train.latest_checkpoint", "numpy.arange", "tensorflow.placeholder", "tensorflow.ConfigProto", "numpy.mean", "tensorflow.Session", "tensorflow.train.Saver" ] ]
themech/Machine-Learning-Coursera-Tensorflow
[ "bd99b7de6206ad5903313c824fb9dd05df8580c2" ]
[ "ex1-linear regression/1_plot_data.py" ]
[ "import matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n\n\n# Read the data - two comma-separated columns.\n# As there is no header row, we name the columns here\ndf = pd.read_csv('data/ex1data1.txt', names=['population', 'profit'])\n\n# Print the header and first few rows\nprint(df.head())\n\n# Plot the data ('size' was renamed to 'height' in seaborn 0.9)\nsns.lmplot(x='population', y='profit', data=df, height=6, fit_reg=False)\nplt.show()\n" ]
[ [ "pandas.read_csv", "matplotlib.pyplot.show" ] ]
Hao-Kailong/DisFeb
[ "2877edd587556e127d6648ee211ed22838c8d015" ]
[ "load_data.py" ]
[ "# encoding: utf-8\r\nimport json\r\nimport torch.utils.data as data\r\nimport torch\r\nimport os\r\nimport numpy as np\r\nimport random\r\n\r\n\r\nclass FewRelDataset(data.Dataset):\r\n    # name is the data file name, root is the root directory\r\n    def __init__(self, name, encoder, N, K, Q, root):\r\n        self.root = root\r\n        path = os.path.join(root, name + '.json')\r\n        if not os.path.exists(path):\r\n            print('[ERROR] Data file does not exist!')\r\n            assert 0\r\n        self.json_data = json.load(open(path, encoding='utf-8'))\r\n        self.classes = list(self.json_data.keys())\r\n        self.N = N\r\n        self.K = K\r\n        self.Q = Q\r\n        self.encoder = encoder\r\n\r\n    def __getraw__(self, item):\r\n        word, pos1, pos2, mask = self.encoder.tokenize(item['tokens'], # sentence tokens\r\n                                                       item['h'][2][0], # position of entity 1\r\n                                                       item['t'][2][0]) # position of entity 2\r\n        return word, pos1, pos2, mask\r\n\r\n    def __additem__(self, d, word, pos1, pos2, mask):\r\n        d['word'].append(word)\r\n        d['pos1'].append(pos1)\r\n        d['pos2'].append(pos2)\r\n        d['mask'].append(mask)\r\n\r\n    def __getitem__(self, item):\r\n        target_classes = random.sample(self.classes, self.N)\r\n        support_set = {'word': [], 'pos1': [], 'pos2': [], 'mask': []}\r\n        query_set = {'word': [], 'pos1': [], 'pos2': [], 'mask': []}\r\n        query_label = []\r\n\r\n        for i, class_name in enumerate(target_classes):\r\n            # randomly select support-set and query-set samples\r\n            indices = np.random.choice(\r\n                list(range(len(self.json_data[class_name]))),\r\n                self.K + self.Q, False)\r\n            count = 0\r\n            for j in indices:\r\n                # fetch one sample\r\n                word, pos1, pos2, mask = self.__getraw__(self.json_data[class_name][j])\r\n                word = torch.tensor(word).long()\r\n                pos1 = torch.tensor(pos1).long()\r\n                pos2 = torch.tensor(pos2).long()\r\n                mask = torch.tensor(mask).long()\r\n                if count < self.K:\r\n                    self.__additem__(support_set, word, pos1, pos2, mask)\r\n                else:\r\n                    self.__additem__(query_set, word, pos1, pos2, mask)\r\n                count += 1\r\n            query_label += [i] * self.Q\r\n        return support_set, query_set, query_label\r\n\r\n    def __len__(self):\r\n        return 1000000\r\n\r\n\r\ndef collate_fn(data):\r\n    batch_support = {'word': [], 'pos1': [], 'pos2': [], 'mask': []}\r\n    batch_query = {'word': [], 'pos1': [], 'pos2': [], 'mask': []}\r\n    batch_label = []\r\n    support_sets, query_sets, query_labels = zip(*data)\r\n    for i in range(len(support_sets)):\r\n        for k in support_sets[i]: # here k is a dict key\r\n            batch_support[k] += support_sets[i][k]\r\n        for k in query_sets[i]:\r\n            batch_query[k] += query_sets[i][k]\r\n        batch_label += query_labels[i]\r\n    for k in batch_support:\r\n        batch_support[k] = torch.stack(batch_support[k], 0)\r\n    for k in batch_query:\r\n        batch_query[k] = torch.stack(batch_query[k], 0)\r\n    batch_label = torch.tensor(batch_label)\r\n    return batch_support, batch_query, batch_label\r\n\r\n\r\ndef get_loader(name, encoder, N, K, Q, batch_size, num_workers=0, collate_fn=collate_fn, root='data'):\r\n    dataset = FewRelDataset(name, encoder, N, K, Q, root)\r\n    data_loader = data.DataLoader(dataset=dataset,\r\n                                  batch_size=batch_size,\r\n                                  shuffle=False,\r\n                                  pin_memory=True,\r\n                                  num_workers=num_workers,\r\n                                  collate_fn=collate_fn)\r\n    return iter(data_loader)\r\n\r\n\r\n#from encoders.sentence_encoder import BERTSentenceEncoder\r\n#sentence_encoder = BERTSentenceEncoder(\r\n#    'F:/Dataset/BERT/bert-base-uncased',\r\n#    512\r\n#    )\r\n#loader = get_loader('demo_nyt10_hm_opennre', sentence_encoder, 5, 5, 10, batch_size=4, collate_fn=collate_fn, root='data')\r\n#while True:\r\n#    support, query, label = next(loader)\r\n#    print(support)\r\n#    print(query)\r\n#    print(label)\r\n\r\n" ]
[ [ "torch.stack", "torch.utils.data.DataLoader", "torch.tensor" ] ]
nsarlin/courserapredictprices
[ "78c3023e604e18b50e9e08097e367fc6d740c2f3" ]
[ "src/models/predict_model.py" ]
[ "# -*- coding: utf-8 -*-\nimport click\nimport logging\nimport os\n\nfrom sklearn.metrics import mean_squared_error\n\nimport scipy.sparse as sp\nimport numpy as np\n\nimport dnn\nimport xgb\n\n\n@click.command()\n@click.argument('data_dirpath', type=click.Path(exists=True))\n@click.argument('model_dirpath', type=click.Path(exists=True))\n@click.argument('preds_dirpath', type=click.Path(exists=True))\ndef main(data_dirpath, model_dirpath, preds_dirpath):\n    \"\"\"\n    Make predictions with the model from model_dirpath on data from data_dirpath\n    and store them in preds_dirpath\n    \"\"\"\n    logger = logging.getLogger(__name__)\n\n    logger.info(\"Loading data\")\n    X_test = sp.load_npz(os.path.join(data_dirpath, \"X_test.npz\"))\n    y_test = np.load(os.path.join(data_dirpath, \"y_test.npy\"))\n\n    logger.info(\"Making predictions\")\n    dnn_model = dnn.load(model_dirpath)\n    y_preds = dnn.predict(dnn_model, X_test)\n    np.save(os.path.join(preds_dirpath, \"y_preds_dnn.npy\"), y_preds)\n    print(\"DNN RMSE: {}\".format(np.sqrt(mean_squared_error(y_test, y_preds))))\n    xgb_model = xgb.load(model_dirpath)\n    y_preds = xgb.predict(xgb_model, X_test)\n    np.save(os.path.join(preds_dirpath, \"y_preds_xgb.npy\"), y_preds)\n    print(\"XGB RMSE: {}\".format(np.sqrt(mean_squared_error(y_test, y_preds))))\n\n\nif __name__ == \"__main__\":\n    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n    logging.basicConfig(level=logging.INFO, format=log_fmt)\n\n    main()\n" ]
[ [ "sklearn.metrics.mean_squared_error" ] ]
coleygroup/molpal
[ "7b3dd1488af6e734caea527fe2738ff7f6ebfc49" ]
[ "scripts/trajectories.py" ]
[ "from argparse import ArgumentParser\nfrom collections import Counter, defaultdict\nimport csv\nfrom itertools import islice\nfrom operator import itemgetter\nfrom pathlib import Path\nimport pickle\nfrom typing import Iterable, List, Set, Tuple\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib import ticker\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom tqdm import tqdm\n\nsns.set_theme(style='white', context='paper')\n\ndef recursive_conversion(nested_dict):\n if not isinstance(nested_dict, defaultdict):\n return nested_dict\n\n for k in nested_dict:\n sub_dict = nested_dict[k]\n nested_dict[k] = recursive_conversion(sub_dict)\n return dict(nested_dict)\n\ndef read_data(p_data, k, maximize: bool = False) -> List[Tuple]:\n c = 1 if maximize else -1\n with open(p_data) as fid:\n reader = csv.reader(fid); next(reader)\n # the data files are always sorted\n data = [(row[0], c * float(row[1]))\n for row in islice(reader, k) if row[1]]\n \n return data\n\ndef get_smis_from_data(p_data) -> Set:\n with open(p_data) as fid:\n reader = csv.reader(fid); next(reader)\n smis = {row[0] for row in reader}\n \n return smis\n\ndef boltzmann(xs: Iterable[float]) -> float:\n X = np.array(xs)\n E = np.exp(-X)\n Z = E.sum()\n return (X * E / Z).sum()\n\ndef calculate_rewards(found: List[Tuple], true: List[Tuple],\n avg: bool = True, smis: bool = True, scores: bool = True\n ) -> Tuple[float, float, float]:\n N = len(found)\n found_smis, found_scores = zip(*found)\n true_smis, true_scores = zip(*true)\n\n if avg:\n found_avg = np.mean(found_scores)\n true_avg = np.mean(true_scores)\n f_avg = found_avg / true_avg\n else:\n f_avg = None\n\n # if boltzmann:\n # found_boltzmann = boltzmann(found_scores)\n # true_boltzmann = boltzmann(true_scores)\n # f_boltzmann = boltzmann(found_scores) / boltzmann(true_scores)\n # else:\n # f_boltzmann = None\n\n if smis:\n found_smis = set(found_smis)\n true_smis = set(true_smis)\n correct_smis = len(found_smis & true_smis)\n f_smis = correct_smis / len(true_smis)\n else:\n f_smis = None\n\n if scores:\n missed_scores = Counter(true_scores)\n missed_scores.subtract(found_scores)\n n_missed_scores = sum(\n count if count > 0 else 0\n for count in missed_scores.values()\n )\n f_scores = (N - n_missed_scores) / N\n else:\n f_scores = None\n\n return f_avg, f_smis, f_scores\n\ndef gather_run_results(\n run, true_data, N, maximize: bool = False\n ) -> List[Tuple[float, float, float]]:\n data = run / 'data'\n\n d_it_results = {}\n for it_data in tqdm(data.iterdir(), 'Iters', None, False):\n try:\n it = int(it_data.stem.split('_')[-1])\n except ValueError:\n continue\n\n found = read_data(it_data, N, maximize)\n d_it_results[it] = calculate_rewards(found, true_data)\n\n return [(d_it_results[it]) for it in sorted(d_it_results.keys())]\n\ndef gather_metric_results(metric, true_data, N, maximize: bool = False):\n rep_results = np.array([\n gather_run_results(rep, true_data, N, maximize)\n for rep in tqdm(metric.iterdir(), 'Reps', None, False)\n ])\n\n means = np.mean(rep_results, axis=0)\n sds = np.sqrt(np.var(rep_results, axis=0))\n\n return {\n 'avg': list(zip(means[:, 0].tolist(), sds[:, 0].tolist())),\n 'smis': list(zip(means[:, 1].tolist(), sds[:, 1].tolist())),\n 'scores': list(zip(means[:, 2].tolist(), sds[:, 2].tolist()))\n }\n\ndef gather_all_rewards(parent_dir, true_data, N: int,\n overwrite: bool = False, maximize: bool = False):\n nested_dict = lambda: defaultdict(nested_dict)\n results = nested_dict()\n\n parent_dir = Path(parent_dir)\n 
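# aggregated rewards are cached in a hidden pickle so later invocations can skip the expensive directory walk\n 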
cached_rewards = parent_dir / f'.all_rewards_{N}.pkl'\n if cached_rewards.exists() and not overwrite:\n return pickle.load(open(cached_rewards, 'rb'))\n\n for training in tqdm(parent_dir.iterdir(), 'Training', None, False):\n if not training.is_dir():\n continue\n\n for split in tqdm(training.iterdir(), 'Splits', None, False):\n for model in tqdm(split.iterdir(), 'Models', None, False):\n if model.name == 'random':\n results[\n training.name][\n float(split.name)][\n model.name\n ] = gather_metric_results(model, true_data, N, maximize)\n continue\n\n for metric in tqdm(model.iterdir(), 'Metrics', None, False):\n if metric.name == 'thompson':\n metric_ = 'ts'\n else:\n metric_ = metric.name\n \n results[training.name][\n float(split.name)][\n model.name][\n metric_\n ] = gather_metric_results(metric, true_data, N, maximize)\n results = recursive_conversion(results)\n\n pickle.dump(results, open(cached_rewards, 'wb'))\n\n return results\n\n################################################################################\n#------------------------------------------------------------------------------#\n################################################################################\n\nMETRICS = ['greedy', 'ucb', 'ts', 'ei', 'pi']\nMETRIC_NAMES = {'greedy': 'greedy', 'ucb': 'UCB', 'ts': 'TS',\n 'ei': 'EI', 'pi': 'PI'}\nMETRIC_COLORS = dict(zip(METRICS, sns.color_palette('bright')))\n\nMODELS = ['rf', 'nn', 'mpn']\nMODEL_COLORS = dict(zip(MODELS, sns.color_palette('dark')))\n\nSPLITS = [0.004, 0.002, 0.001]\n\nDASHES = ['dash', 'dot', 'dashdot']\nMARKERS = ['circle', 'square', 'diamond']\n\ndef style_axis(ax):\n ax.set_xlabel(f'Molecules explored')\n ax.set_xlim(left=0)\n ax.set_ylim(bottom=0, top=100)\n ax.xaxis.set_major_locator(ticker.MaxNLocator(7))\n ax.xaxis.set_tick_params(rotation=30)\n ax.grid(True)\n\ndef abbreviate_k_or_M(x: float, pos) -> str:\n if x >= 1e6:\n return f'{x*1e-6:0.1f}M'\n if x >= 1e3:\n return f'{x*1e-3:0.0f}k'\n\n return f'{x:0.0f}'\n\ndef plot_model_metrics(\n results, size: int, N: int,\n split: float = 0.010, reward='scores', si_fig: bool = False\n ):\n xs = [int(size*split * i) for i in range(1, 7)]\n\n fig, axs = plt.subplots(1, 3, sharex=True, sharey=True,\n figsize=(4/1.5 * 3, 4))\n\n fmt = 'o-'\n ms = 5\n capsize = 2\n \n for i, (model, ax) in enumerate(zip(MODELS, axs)):\n for metric in METRICS:\n if metric == 'greedy':\n metric_ = metric\n elif metric == 'thompson':\n metric_ = 'TS'\n else:\n metric_ = metric.upper()\n\n if not si_fig:\n ys, y_sds = zip(\n *results['retrain'][split][model][metric][reward]\n )\n ys = [y*100 for y in ys]\n y_sds = [y*100 for y in y_sds]\n ax.errorbar(\n xs, ys, yerr=y_sds, color=METRIC_COLORS[metric], \n label=metric_, fmt=fmt, ms=ms, mec='black', capsize=capsize\n )\n else:\n ys, y_sds = zip(\n *results['retrain'][split][model][metric][reward]\n )\n ys = [y*100 for y in ys]\n ax.plot(\n xs, ys, fmt, color=METRIC_COLORS[metric], \n ms=ms, mec='black', alpha=0.33,\n )\n\n ys, y_sds = zip(\n *results['online'][split][model][metric][reward]\n )\n ys = [y*100 for y in ys]\n y_sds = [y*100 for y in y_sds]\n ax.errorbar(\n xs, ys, yerr=y_sds, color=METRIC_COLORS[metric],\n fmt=fmt, ms=ms, mec='black', capsize=capsize,\n label=metric_\n )\n\n formatter = ticker.FuncFormatter(abbreviate_k_or_M)\n ax.xaxis.set_major_formatter(formatter)\n \n add_random_trace(ax, results, split, reward, xs, fmt, ms, capsize)\n\n ax.set_title(model.upper())\n if i == 0:\n ax.set_ylabel(f'Percentage of Top-{N} {reward.capitalize()} Found')\n 
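# a single shared legend, drawn on the leftmost panel only\n 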
ax.legend(loc='upper left', title='Metric')\n \n style_axis(ax)\n \n fig.tight_layout()\n\n return fig\n\ndef plot_split_models(\n results, size: int, N: int, metric: str = 'greedy', reward='scores'\n ):\n fig, axs = plt.subplots(1, 3, sharey=True, figsize=(4/1.5 * 3, 4))\n\n fmt = 'o-'\n ms = 5\n capsize = 2\n \n for i, (split, ax) in enumerate(zip(SPLITS, axs)):\n xs = [int(size*split * i) for i in range(1, 7)]\n\n for model in MODELS:\n if model == 'random':\n continue\n\n ys, y_sds = zip(*results['retrain'][split][model][metric][reward])\n ys = [y*100 for y in ys]\n y_sds = [y*100 for y in y_sds]\n\n if len(xs) != len(ys):\n continue\n\n ax.errorbar(\n xs, ys, yerr=y_sds, color=MODEL_COLORS[model],\n label=model.upper(), fmt=fmt, ms=ms, mec='black', \n capsize=capsize\n )\n \n add_random_trace(ax, results, split, reward, xs, fmt, ms, capsize)\n\n ax.set_title(f'{split*100:0.1f}%')\n if i == 0:\n ax.set_ylabel(f'Percentage of Top-{N} {reward.capitalize()} Found')\n ax.legend(loc='upper left', title='Model')\n\n style_axis(ax)\n\n formatter = ticker.FuncFormatter(abbreviate_k_or_M)\n ax.xaxis.set_major_formatter(formatter)\n\n fig.tight_layout()\n return fig\n\ndef plot_split_metrics(\n results, size: int, N: int,\n model: str = 'rf', reward='scores'\n ):\n fig, axs = plt.subplots(1, 3, sharey=True, figsize=(4/1.5 * 3, 4))\n\n fmt = 'o-'\n ms = 5\n capsize = 2\n\n for i, (split, ax) in enumerate(zip(SPLITS, axs)):\n if split not in results['retrain']:\n continue\n\n for metric in METRICS:\n if metric not in results['retrain'][split][model]:\n continue\n\n if metric == 'greedy':\n metric_ = metric\n elif metric == 'thompson':\n metric_ = 'TS'\n else:\n metric_ = metric.upper()\n \n ys, y_sds = zip(*results['retrain'][split][model][metric][reward])\n ys = [y*100 for y in ys]\n y_sds = [y*100 for y in y_sds]\n\n xs = [int(size*split * (i+1)) for i in range(len(ys))]\n\n ax.errorbar(\n xs, ys, yerr=y_sds, color=METRIC_COLORS[metric],\n fmt=fmt, ms=ms, mec='black', capsize=capsize,\n label=metric_\n )\n \n add_random_trace(ax, results, split, reward, xs, fmt, ms, capsize)\n\n ax.set_title(f'{split*100:0.1f}%')\n if i == 0:\n ax.set_ylabel(f'Percentage of Top-{N} {reward.capitalize()} Found')\n ax.legend(loc='upper left', title='Metric')\n\n style_axis(ax)\n formatter = ticker.FuncFormatter(abbreviate_k_or_M)\n ax.xaxis.set_major_formatter(formatter)\n\n fig.tight_layout()\n\n return fig\n\ndef add_random_trace(ax, results, split, reward, xs, fmt, ms, capsize):\n try:\n try:\n ys, y_sds = zip(*results['retrain'][split]['random'][reward])\n except KeyError:\n ys, y_sds = zip(*results['online'][split]['random'][reward])\n except KeyError:\n return\n\n ys = [y*100 for y in ys]\n y_sds = [y*100 for y in y_sds]\n ax.errorbar(\n xs, ys, yerr=y_sds, fmt=fmt, ms=ms, color='grey',\n mec='black', capsize=capsize, label='random'\n )\n\ndef plot_single_batch(\n full_results, single_batch_results, size: int, N: int,\n metric: str = 'greedy', reward='scores'\n ):\n fig, ax = plt.subplots(1, 1, sharey=True, figsize=(4/1.5, 4))\n\n fmt = 'o-'\n ms = 5\n capsize = 2\n\n for model in MODELS:\n split = 0.004\n xs = [int(size*split * i) for i in range(1, 7)]\n\n ys, y_sds = zip(*full_results['retrain'][split][model][metric][reward])\n ys = [y*100 for y in ys]\n y_sds = [y*100 for y in y_sds]\n\n if len(xs) != len(ys):\n continue\n\n ax.plot(\n xs, ys, fmt, color=MODEL_COLORS[model],\n ms=ms, mec='black', alpha=0.33\n )\n\n split = 0.004\n xs = [int(size * split), int(size * 0.024)]\n ys, y_sds = zip(\n 
*single_batch_results['retrain'][split][model][metric][reward]\n )\n ys = [y*100 for y in ys]\n y_sds = [y*100 for y in y_sds]\n\n ax.errorbar(\n xs, ys, yerr=y_sds, fmt='o-', color=MODEL_COLORS[model], \n ms=ms, mec='black', capsize=capsize, label=model.upper()\n )\n\n split = 0.02\n xs = [int(size * split), int(size * 0.024)]\n\n ys, y_sds = zip(\n *single_batch_results['retrain'][split][model][metric][reward]\n )\n ys = [y*100 for y in ys]\n y_sds = [y*100 for y in y_sds]\n\n ax.errorbar(\n xs, ys, yerr=y_sds, fmt='o--', color=MODEL_COLORS[model],\n ms=ms, mec='black', capsize=capsize,\n )\n\n ax.set_ylabel(f'Percentage of Top-{N} {reward.capitalize()} Found')\n ax.legend(loc='upper left', title='Model')\n\n style_axis(ax)\n\n formatter = ticker.FuncFormatter(abbreviate_k_or_M)\n ax.xaxis.set_major_formatter(formatter)\n\n fig.tight_layout()\n return fig\n\ndef plot_convergence(\n results, size: int, N: int, metric: str = 'greedy', reward='scores'\n ):\n fig, ax = plt.subplots(1, 1, sharey=True, figsize=(4/1.5, 4))\n\n fmt = 'o-'\n ms = 5\n \n split = 0.001 \n\n for model in MODELS:\n ys, y_sds = zip(*results['retrain'][split][model][metric][reward])\n ys = [y*100 for y in ys]\n y_sds = [y*100 for y in y_sds]\n\n xs = [int(size*split * (i+1)) for i in range(len(ys))]\n\n ax.plot(\n xs, ys, fmt, color=MODEL_COLORS[model],\n label=model.upper(), ms=ms, mec='black'\n )\n \n ax.set_ylabel(f'Percentage of Top-{N} {reward.capitalize()} Found')\n ax.legend(loc='upper left', title='Model')\n \n style_axis(ax)\n \n formatter = ticker.FuncFormatter(abbreviate_k_or_M)\n ax.xaxis.set_major_formatter(formatter)\n\n fig.tight_layout()\n return fig\n\ndef write_csv(rewards, split):\n results_df = []\n\n for training in ('online', 'retrain'):\n for model in MODELS:\n if model not in rewards[training][split]:\n continue\n\n for metric in METRICS:\n if metric not in rewards[training][split][model]:\n continue\n\n if metric == 'greedy':\n metric_ = metric\n elif metric == 'thompson':\n metric_ = 'TS'\n else:\n metric_ = metric.upper()\n\n scores = rewards[training][split][model][metric]['scores'][-1]\n smis = rewards[training][split][model][metric]['smis'][-1]\n avg = rewards[training][split][model][metric]['avg'][-1]\n\n results_df.append({\n 'Training': training,\n 'Model': model.upper(),\n 'Metric': metric_,\n 'Scores ($\\pm$ s.d.)': f'{100*scores[0]:0.1f} ({100*scores[1]:0.1f})',\n 'SMILES ($\\pm$ s.d.)': f'{100*smis[0]:0.1f} ({100*smis[1]:0.1f})',\n 'Average ($\\pm$ s.d.)': f'{100*avg[0]:0.2f} ({100*avg[1]:0.2f})'\n })\n\n if 'random' in rewards['online'][split]:\n scores = rewards['online'][split]['random']['scores'][-1]\n smis = rewards['online'][split]['random']['smis'][-1]\n avg = rewards['online'][split]['random']['avg'][-1]\n\n random_results = {\n 'Training': 'random',\n 'Model': 'random',\n 'Metric': 'random',\n 'Scores ($\\pm$ s.d.)': f'{100*scores[0]:0.1f} ({100*scores[1]:0.1f})',\n 'SMILES ($\\pm$ s.d.)': f'{100*smis[0]:0.1f} ({100*smis[1]:0.1f})',\n 'Average ($\\pm$ s.d.)': f'{100*avg[0]:0.2f} ({100*avg[1]:0.2f})'\n }\n results_df.append(random_results)\n elif 'random' in rewards['retrain'][split]:\n scores = rewards['retrain'][split]['random']['scores'][-1]\n smis = rewards['retrain'][split]['random']['smis'][-1]\n avg = rewards['retrain'][split]['random']['avg'][-1]\n\n random_results = {\n 'Training': 'random',\n 'Model': 'random',\n 'Metric': 'random',\n 'Scores ($\\pm$ s.d.)': f'{100*scores[0]:0.1f} ({100*scores[1]:0.1f})',\n 'SMILES ($\\pm$ s.d.)': f'{100*smis[0]:0.1f} 
({100*smis[1]:0.1f})',\n 'Average ($\\pm$ s.d.)': f'{100*avg[0]:0.2f} ({100*avg[1]:0.2f})'\n }\n results_df.append(random_results)\n\n df = pd.DataFrame(results_df).set_index(['Training', 'Model', 'Metric'])\n\n return df\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument('--true-pkl',\n help='a pickle file containing a dictionary of the true scoring data')\n parser.add_argument('--size', type=int,\n help='the size of the full library which was explored. You only need to specify this if you are using a truncated pickle file. I.e., your pickle file contains only the top 1000 scores because you only intend to calculate results of the top-k, where k <= 1000')\n parser.add_argument('--parent-dir',\n help='the parent directory containing all of the results. NOTE: the directory must be organized in the following manner: <root>/<online,retrain>/<split_size>/<model>/<metric>/<repeat>/<run>. See the README for a visual description.')\n parser.add_argument('--parent-dir-sb',\n help='the parent directory of the single batch data')\n parser.add_argument('--smiles-col', type=int, default=0)\n parser.add_argument('--score-col', type=int, default=1)\n parser.add_argument('-N', type=int,\n help='the number of top scores from which to calculate performance')\n parser.add_argument('--split', type=float, default=0.004,\n help='the split size to plot when using model-metrics mode')\n parser.add_argument('--model', default='mpn',\n help='the model class to plot when using split-metrics mode')\n parser.add_argument('--metric', default='greedy',\n help='the metric to plot when using split-models mode')\n parser.add_argument('--mode', required=True,\n choices=('model-metrics', 'split-models', \n 'split-metrics', 'si', 'single-batch', 'convergence', 'csv', 'errors', \n 'diversity', 'intersection'),\n help='what figure to generate. For \"x-y\" modes, this corresponds to the figure structure, where there will be a separate panel for each \"x\" and in each panel there will be traces corresponding to each independent \"y\". E.g., \"model-metrics\" makes a figure with three separate panels, one for each model and inside each panel a trace for each metric. \"si\" will make the trajectory plots present in the SI.')\n # parser.add_argument('--name', default='.')\n # parser.add_argument('--format', '--fmt', default='png',\n # choices=('png', 'pdf'))\n parser.add_argument('--overwrite', action='store_true', default=False,\n help='whether to overwrite the hidden cache file. 
This is useful if there is new data in PARENT_DIR.')\n parser.add_argument('--maximize', action='store_true', default=False,\n help='whether the objective for which you are calculating performance should be maximized.')\n\n args = parser.parse_args()\n\n if args.true_pkl:\n true_data = pickle.load(open(args.true_pkl, 'rb'))\n size = args.size or len(true_data)\n try:\n true_data = sorted(true_data.items(), key=itemgetter(1))\n except AttributeError:\n true_data = sorted(true_data, key=itemgetter(1))\n\n if args.maximize:\n true_data = true_data[::-1]\n true_data = true_data[:args.N]\n\n if args.mode in ('model-metrics', 'split-models',\n 'split-metrics', 'si',\n 'single-batch', 'convergence', 'csv'):\n results = gather_all_rewards(\n args.parent_dir, true_data, args.N, args.overwrite, args.maximize\n )\n\n if args.mode == 'model-metrics':\n fig = plot_model_metrics(\n results, size, args.N, args.split, 'scores'\n )\n\n name = input('Figure name: ')\n fig.savefig(f'paper/figures/{name}.pdf')\n\n elif args.mode == 'split-models':\n fig = plot_split_models(\n results, size, args.N, args.metric, 'scores'\n )\n\n name = input('Figure name: ')\n fig.savefig(f'paper/figures/{name}.pdf')\n \n elif args.mode == 'split-metrics':\n fig = plot_split_metrics(\n results, size, args.N, args.model, 'scores'\n )\n\n name = input('Figure name: ')\n fig.savefig(f'paper/figures/{name}.pdf')\n\n elif args.mode == 'si':\n fig = plot_model_metrics(\n results, size, args.N, args.split, 'scores', True\n )\n name = input('Figure name: ')\n fig.savefig(f'paper/figures/{name}.pdf')\n\n elif args.mode == 'csv':\n df = write_csv(results, args.split)\n\n name = input('CSV name: ')\n df.to_csv(f'paper/csv/{name}.csv')\n\n elif args.mode == 'single-batch':\n single_batch_results = gather_all_rewards(\n args.parent_dir_sb, true_data, args.N,\n args.overwrite, args.maximize\n )\n\n fig = plot_single_batch(\n results, single_batch_results, size, args.N, 'greedy', 'scores'\n )\n\n name = input('Figure name: ')\n fig.savefig(f'paper/figures/{name}.pdf')\n \n elif args.mode == 'convergence':\n fig = plot_convergence(results, size, args.N, 'greedy', 'scores')\n\n name = input('Figure name: ')\n fig.savefig(f'paper/figures/{name}.pdf')\n\n else:\n exit()" ]
[ [ "numpy.array", "matplotlib.pyplot.subplots", "pandas.DataFrame", "numpy.mean", "matplotlib.ticker.MaxNLocator", "numpy.var", "matplotlib.ticker.FuncFormatter", "numpy.exp" ] ]
ruofeidu/mdif
[ "41e7edc8ff4765543d98d2dfa28e98ca51e0220d" ]
[ "model/network_pipeline_test.py" ]
[ "#!/usr/bin/python\n#\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for google3.vr.perception.volume_compression.mdif.model.network_pipeline.\"\"\"\n\nfrom absl.testing import parameterized\nimport tensorflow as tf\n\nfrom google3.vr.perception.volume_compression.mdif.model import network_pipeline\n\n\ndef _get_general_params():\n general_params = {\n 'debug_mode': True,\n 'num_train_data': 1,\n 'num_test_data': 1,\n 'latent_optim_target': 'code_grid_enc',\n 'code_grid_enc_shape': [[1, 1, 1, 4], [2, 2, 2, 2]],\n 'codes_init_std': [0., 0.],\n 'mode': 'fully_multi_level',\n 'encoder_mode': 'input_enc+f2c',\n 'code_for_point_mode': 'interpolate',\n 'pipeline_mode': 'general',\n 'sdf_scale': 100,\n 'max_point_per_chunk': 32,\n 'num_point_dim': 3,\n 'num_level': 2,\n 'grid_shape': [[1, 1, 1], [1, 1, 1]],\n 'grid_range_min': [-1, -1, -1],\n 'grid_range_max': [1, 1, 1],\n 'grid_mode': 'regular',\n 'input_config_unified': {\n 'clip': [True, [-5, 5]],\n },\n 'label_config_unified': {\n 'clip': [True, [-5, 5]]\n },\n 'decoder_input_config': [\n {\n 'data': 'lat_code+coord',\n 'empty_vars': [],\n },\n {\n 'data': 'lat_code',\n 'empty_vars': []\n },\n ],\n 'label_config': [\n {\n 'data': 'gt_residual',\n 'stop_grad': False\n },\n {\n 'data': 'gt_residual',\n 'stop_grad': False\n },\n ],\n 'summary_config': {\n 'sdf_range': 5,\n 'sdf_err_factor': 2,\n 'contours_err_max': 5,\n 'slice_idx_z': [0.5],\n 'slice_idx_y': [0.5],\n 'slice_idx_x': [0.5],\n },\n 'eval_data_mode': 'slices',\n }\n\n return general_params\n\n\ndef _get_loss_params():\n loss_params = {\n 'sdf_l1': {\n 'term_weight': [1.0, 1.0]\n },\n 'sdf_reg_l1': {\n 'term_weight': [0., 0.]\n },\n 'sdf_consistency_l1': {\n 'mode': ['every', 1],\n 'stop_grad_ref': True,\n 'point_weight_config/dist_to_visible': ['gaussian', 0.1],\n 'term_weight': [0e0, 0e0]\n },\n 'code_reg_l2': {\n 'term_weight': [0e0, 0e0]\n },\n 'root_feat_reg_l2': {\n 'term_weight': [0e0]\n },\n 'point_weight_config': [\n {\n 'gt_gaussian': {\n 'apply': False,\n 'sigma': 32.0\n },\n 'pred_gaussian': {\n 'apply': False,\n 'sigma': 2.0\n }\n },\n {\n 'gt_gaussian': {\n 'apply': False,\n 'sigma': 8.0\n },\n 'pred_gaussian': {\n 'apply': False,\n 'sigma': 2.0\n }\n },\n ],\n 'summary_config': {\n 'slice_idx_z': [0.5],\n 'slice_idx_y': [0.5],\n 'slice_idx_x': [0.5],\n },\n }\n return loss_params\n\n\ndef _get_input_encoder_params():\n input_encoder_params = [\n {\n 'data_type': '3d',\n 'net_type': 'fully_conv',\n 'num_filters': [2],\n 'num_out_channel': 2,\n 'strides': [2],\n 'num_conv_per_level': 2,\n 'num_levels': 1,\n 'final_pooling': None,\n 'activation_params': {\n 'type': 'leaky_relu',\n 'alpha': 0.2\n },\n },\n ]\n\n return input_encoder_params\n\n\ndef _get_feature_to_code_net_params():\n feature_to_code_net_params = {\n 'data_type':\n '3d',\n 'mode':\n 'single_dec_branch',\n 'out_pre_upsample_id': [0, 1],\n 'dec_only_apply_mask':\n False,\n 'unified_mask_config':\n None,\n 'fusion_params': {\n 'mode': 'concat'\n },\n 
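# per-branch blocks: branch 0 is an encoder followed by a conv decoder, branch 1 is an encoder followed by a masking layer\n 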
'block_params': [\n [\n [\n 'EncoderTemplate', {\n 'net_type': 'fully_conv',\n 'num_levels': 1,\n 'num_filters': [4],\n 'num_out_channel': 4,\n 'strides': [2],\n 'num_conv_per_level': 2,\n 'kernel_size': [3, 1],\n 'final_pooling': None,\n 'normalization_params': None,\n 'activation_params': {\n 'type': 'leaky_relu',\n 'alpha': 0.2\n },\n }\n ],\n [\n 'DecoderConv', {\n 'num_levels': 2,\n 'num_filters': [4, 1],\n 'num_out_channel': None,\n 'initial_upsample': [False, [8, 8], 'bilinear'],\n 'kernel_size': [1, 3, 1],\n 'kernel_size_deconv': 4,\n 'num_conv_per_level': 2,\n 'upsample_type': 'deconv',\n 'normalization_params': None,\n 'activation_params': {\n 'type': 'leaky_relu',\n 'alpha': 0.2\n },\n }\n ],\n ],\n [\n [\n 'EncoderTemplate', {\n 'net_type': 'fully_conv',\n 'num_levels': 0,\n 'num_filters': [],\n 'num_out_channel': 2,\n 'strides': [],\n 'kernel_size': [3],\n 'num_conv_per_level': 2,\n 'final_pooling': None,\n 'normalization_params': None,\n 'activation_params': {\n 'type': 'leaky_relu',\n 'alpha': 0.2\n },\n }\n ],\n [\n 'MaskingLayer', {\n 'mode': 'random',\n 'offset': (0, 0),\n 'masked_value': 0,\n 'dropout_rate': 0.5,\n 'dropout_rescale': False,\n 'resize_mode': 'downsample',\n 'resize_factor': 2,\n 'noise_config': None,\n }\n ],\n ],\n ]\n }\n\n return feature_to_code_net_params\n\n\ndef _get_decoder_params():\n decoder_params = {\n 'num_filter': 16,\n 'num_out_channel': 1,\n 'implicit_net_type': 'imnet',\n 'share_net_level_groups': None,\n 'activation_params': {\n 'type': 'leaky_relu',\n 'alpha': 0.2\n },\n }\n\n return decoder_params\n\n\ndef _get_sampling_params():\n train_sampling_params = {\n 'normalize_coordinates': True,\n 'all_pixels': False,\n 'untruncated': False,\n 'untruncated/num_point': 0,\n 'untruncated/mode': 'uniform',\n 'untruncated/truncate': 5,\n 'regular': False,\n 'regular/num_point': 0,\n 'global': True,\n 'global/num_point': 32,\n 'near_surface': True,\n 'near_surface/num_point': 32,\n }\n\n eval_sampling_params = {\n 'normalize_coordinates': True,\n 'all_pixels': True,\n 'untruncated': False,\n 'untruncated/num_point': 0,\n 'untruncated/mode': 'uniform',\n 'untruncated/truncate': 5,\n 'regular': False,\n 'regular/num_point': 0,\n 'global': False,\n 'global/num_point': 0,\n 'near_surface': False,\n 'near_surface/num_point': 0,\n }\n\n latent_optim_sampling_params = {\n 'normalize_coordinates': True,\n 'all_pixels': False,\n 'untruncated': False,\n 'untruncated/num_point': 0,\n 'untruncated/mode': 'uniform',\n 'untruncated/truncate': 5,\n 'regular': False,\n 'regular/num_point': 0,\n 'global': True,\n 'global/num_point': 32,\n 'near_surface': True,\n 'near_surface/num_point': 32,\n }\n\n return (train_sampling_params, eval_sampling_params,\n latent_optim_sampling_params)\n\n\nclass NetworkPipelineTest(parameterized.TestCase, tf.test.TestCase):\n\n @parameterized.parameters(('full'), ('latent_optim'))\n def test_multires_deep_implicit_function(self, optim_mode):\n general_params = _get_general_params()\n loss_params = _get_loss_params()\n input_encoder_params = _get_input_encoder_params()\n feature_to_code_net_params = _get_feature_to_code_net_params()\n decoder_params = _get_decoder_params()\n (train_sampling_params, eval_sampling_params,\n latent_optim_sampling_params) = _get_sampling_params()\n\n net = network_pipeline.MultiresDeepImplicitFunction(\n general_params, loss_params, input_encoder_params,\n feature_to_code_net_params, decoder_params, train_sampling_params,\n eval_sampling_params, latent_optim_sampling_params)\n\n batch_size = 1\n 
spatial_dims = (4, 4, 4)\n num_view = 1\n data_batch = {}\n data_batch['world2grid'] = tf.eye(\n 4, batch_shape=[batch_size], dtype=tf.float32)\n data_batch['grid_samples'] = tf.zeros((batch_size, *spatial_dims, 1),\n dtype=tf.float32)\n data_batch['uniform_samples'] = tf.zeros((batch_size, 50, 4),\n dtype=tf.float32)\n data_batch['near_surface_samples'] = tf.zeros((batch_size, 50, 4),\n dtype=tf.float32)\n data_batch['uniform_samples_per_camera'] = tf.zeros(\n (batch_size, num_view, 50, 4), dtype=tf.float32)\n data_batch['near_surface_samples_per_camera'] = tf.zeros(\n (batch_size, num_view, 50, 4), dtype=tf.float32)\n data_batch['depth_xyzn_per_camera'] = tf.zeros(\n (batch_size, num_view, 50, 6), dtype=tf.float32)\n\n data_batch_new = dict(data_batch)\n input_data, gt_data = net._preprocess_data_3d(data_batch_new)\n\n out = net(data_batch, training=True, do_eval=True, optim_mode=optim_mode)\n\n with self.subTest(name='init_latent_codes'):\n self.assertLen(net.codes_train_data, net.num_level)\n self.assertLen(net.codes_test_data, net.num_level)\n\n with self.subTest(name='gather_latent_codes'):\n for latent_code_type in ['train', 'test']:\n codes_each_level = net._gather_latent_codes(latent_code_type)\n for codes_level_i, code_grid_enc_shape_i in zip(\n codes_each_level, net.code_grid_enc_shape):\n self.assertSequenceEqual(codes_level_i.shape,\n (1, *code_grid_enc_shape_i))\n\n with self.subTest(name='interpolate_codes_at_points'):\n code_grid = tf.zeros((2, 4, 4, 4, 16), dtype=tf.float32)\n points = tf.zeros((2, 10, 3), dtype=tf.float32)\n (codes_for_points, points_normalize,\n debug_data) = net._interpolate_codes_at_points(\n code_grid, points, level=0)\n self.assertSequenceEqual(codes_for_points.shape, (2, 10, 16))\n self.assertSequenceEqual(points_normalize.shape, (2, 10, 3))\n self.assertSequenceEqual(debug_data['points_renormalize'].shape,\n (2, 10, 3))\n self.assertSequenceEqual(debug_data['latent_codes'].shape, (2, 10, 6))\n\n with self.subTest(name='preprocess_data_3d'):\n self.assertSequenceEqual(input_data.shape, (batch_size, *spatial_dims, 1))\n self.assertSequenceEqual(gt_data.shape, (batch_size, *spatial_dims, 1))\n\n with self.subTest(name='sample_points'):\n gt_data_for_label = {\n 'grid_samples':\n data_batch_new['grid_samples'],\n 'uniform_samples':\n data_batch_new['uniform_samples'],\n 'near_surface_samples':\n data_batch_new['near_surface_samples'],\n 'uniform_samples_per_camera':\n data_batch_new['uniform_samples_per_camera'],\n 'near_surface_samples_per_camera':\n data_batch_new['near_surface_samples_per_camera'],\n 'depth_xyzn_per_camera':\n data_batch_new['depth_xyzn_per_camera'],\n }\n points_data = net._sample_points(spatial_dims, gt_data_for_label,\n net._train_point_sampler)\n self.assertSequenceEqual(points_data['mask_for_point'].shape,\n (batch_size, 64))\n self.assertSequenceEqual(points_data['points/global/uniform'].shape,\n (batch_size, 32, 3))\n self.assertSequenceEqual(\n points_data['points_sdf_gt/global/uniform'].shape,\n (batch_size, 32, 1))\n self.assertSequenceEqual(points_data['points/near_surface/uniform'].shape,\n (batch_size, 32, 3))\n self.assertSequenceEqual(\n points_data['points_sdf_gt/near_surface/uniform'].shape,\n (batch_size, 32, 1))\n\n with self.subTest(name='final_outputs'):\n self.assertSequenceEqual(\n out['model_outputs_and_targets']['mask_for_point'].shape,\n (batch_size, 48))\n self.assertSequenceEqual(\n out['model_outputs_and_targets']['code_grid/level0'][0].shape,\n (batch_size, 1, 1, 1, 4))\n self.assertSequenceEqual(\n 
out['model_outputs_and_targets']['code_grid/level1'][0].shape,\n (batch_size, 2, 2, 2, 3))\n\n for level in range(2):\n for sdf_type in ['points_sdf', 'points_residual_sdf']:\n for sample_type in ['global/uniform', 'near_surface/uniform']:\n key = sdf_type + '/' + sample_type + '/level' + str(level)\n self.assertSequenceEqual(\n out['model_outputs_and_targets'][key][0].shape,\n (batch_size, 32, 1))\n for sdf_type in ['eval_points_sdf', 'eval_points_residual_sdf']:\n key = sdf_type + '/all_pixels/level' + str(level)\n self.assertSequenceEqual(\n out['model_outputs_and_targets'][key][0].shape,\n (batch_size, 48, 1))\n self.assertSequenceEqual(\n out['model_outputs_and_targets']['iou/level' + str(level)][0].shape,\n (batch_size,))\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.zeros", "tensorflow.test.main", "tensorflow.eye" ] ]
zhangchao162/deep-learning-cnn
[ "3fa513e975263ca95bdd7d5e720455ffb3c82917" ]
[ "04 image_classification/vgg_tf.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nvgg_tf.py:训练tensorflow版的vgg16网络,对cifar-10shuju进行分类\n\"\"\"\nfrom datetime import datetime\nimport math\nimport time\nimport tensorflow as tf\nimport cifar10\n\nbatch_size = 16\nnum_batches = 100\n\n# 定义函数对卷积层进行初始化\n# input_op : 输入数据 \n# name : 该卷积层的名字,用tf.name_scope()来命名\n# kh,kw : 分别是卷积核的高和宽\n# n_out : 输出通道数\n# dh,dw : 步长的高和宽\n# p : 是参数列表,存储VGG所用到的参数\n# 采用xavier方法对卷积核权值进行初始化\ndef conv_op(input_op, name, kh, kw, n_out, dh, dw, p):\n n_in = input_op.get_shape()[-1].value # 获得输入图像的通道数\n with tf.name_scope(name) as scope:\n kernel = tf.get_variable(scope+'w',\n shape = [kh, kw, n_in, n_out], dtype = tf.float32,\n initializer = tf.contrib.layers.xavier_initializer_conv2d())\n # 卷积层计算\n conv = tf.nn.conv2d(input_op, kernel, (1, dh, dw, 1), padding = 'SAME')\n bias_init_val = tf.constant(0.0, shape = [n_out], dtype = tf.float32)\n biases = tf.Variable(bias_init_val, trainable = True, name = 'b')\n z = tf.nn.bias_add(conv, biases)\n activation = tf.nn.relu(z, name = scope)\n p += [kernel, biases]\n return activation\n\n# 定义函数对全连接层进行初始化\n# input_op : 输入数据\n# name : 该全连接层的名字\n# n_out : 输出的通道数\n# p : 参数列表 \n# 初始化方法用 xavier方法\ndef fc_op(input_op, name, n_out, p):\n n_in = input_op.get_shape()[-1].value\n\n with tf.name_scope(name) as scope:\n kernel = tf.get_variable(scope+'w',\n shape = [n_in, n_out], dtype = tf.float32,\n initializer = tf.contrib.layers.xavier_initializer())\n biases = tf.Variable(tf.constant(0.1, shape = [n_out],\n dtype = tf.float32), name = 'b')\n activation = tf.nn.relu_layer(input_op, kernel, # ???????????????\n biases, name = scope)\n p += [kernel, biases]\n return activation \n\n# 定义函数 创建 maxpool层\n# input_op : 输入数据 \n# name : 该卷积层的名字,用tf.name_scope()来命名\n# kh,kw : 分别是卷积核的高和宽\n# dh,dw : 步长的高和宽\ndef mpool_op(input_op, name, kh, kw, dh, dw):\n return tf.nn.max_pool(input_op, ksize = [1,kh,kw,1],\n strides = [1, dh, dw, 1], padding = 'SAME', name = name)\n\n#---------------创建 VGG-16------------------\n\ndef inference_op(input_op, keep_prob):\n p = []\n # 第一块 conv1_1-conv1_2-pool1\n conv1_1 = conv_op(input_op, name='conv1_1', kh=3, kw=3,\n n_out = 64, dh = 1, dw = 1, p = p)\n conv1_2 = conv_op(conv1_1, name='conv1_2', kh=3, kw=3,\n n_out = 64, dh = 1, dw = 1, p = p)\n pool1 = mpool_op(conv1_2, name = 'pool1', kh = 2, kw = 2,\n dw = 2, dh = 2)\n # 第二块 conv2_1-conv2_2-pool2\n conv2_1 = conv_op(pool1, name='conv2_1', kh=3, kw=3,\n n_out = 128, dh = 1, dw = 1, p = p)\n conv2_2 = conv_op(conv2_1, name='conv2_2', kh=3, kw=3,\n n_out = 128, dh = 1, dw = 1, p = p)\n pool2 = mpool_op(conv2_2, name = 'pool2', kh = 2, kw = 2,\n dw = 2, dh = 2)\n # 第三块 conv3_1-conv3_2-conv3_3-pool3\n conv3_1 = conv_op(pool2, name='conv3_1', kh=3, kw=3,\n n_out = 256, dh = 1, dw = 1, p = p)\n conv3_2 = conv_op(conv3_1, name='conv3_2', kh=3, kw=3,\n n_out = 256, dh = 1, dw = 1, p = p)\n conv3_3 = conv_op(conv3_2, name='conv3_3', kh=3, kw=3,\n n_out = 256, dh = 1, dw = 1, p = p)\n pool3 = mpool_op(conv3_3, name = 'pool3', kh = 2, kw = 2,\n dw = 2, dh = 2)\n # 第四块 conv4_1-conv4_2-conv4_3-pool4\n conv4_1 = conv_op(pool3, name='conv4_1', kh=3, kw=3,\n n_out = 512, dh = 1, dw = 1, p = p)\n conv4_2 = conv_op(conv4_1, name='conv4_2', kh=3, kw=3,\n n_out = 512, dh = 1, dw = 1, p = p)\n conv4_3 = conv_op(conv4_2, name='conv4_3', kh=3, kw=3,\n n_out = 512, dh = 1, dw = 1, p = p)\n pool4 = mpool_op(conv4_3, name = 'pool4', kh = 2, kw = 2,\n dw = 2, dh = 2)\n # 第五块 conv5_1-conv5_2-conv5_3-pool5\n conv5_1 = conv_op(pool4, name='conv5_1', kh=3, kw=3,\n n_out = 512, dh = 1, dw = 1, p = 
p)\n conv5_2 = conv_op(conv5_1, name='conv5_2', kh=3, kw=3,\n n_out = 512, dh = 1, dw = 1, p = p)\n conv5_3 = conv_op(conv5_2, name='conv5_3', kh=3, kw=3,\n n_out = 512, dh = 1, dw = 1, p = p)\n pool5 = mpool_op(conv5_3, name = 'pool5', kh = 2, kw = 2,\n dw = 2, dh = 2)\n # 把pool5 ( [7, 7, 512] ) 拉成向量\n shp = pool5.get_shape()\n flattened_shape = shp[1].value * shp[2].value * shp[3].value\n resh1 = tf.reshape(pool5, [-1, flattened_shape], name = 'resh1')\n\n # 全连接层1 添加了 Droput来防止过拟合 \n fc1 = fc_op(resh1, name = 'fc1', n_out = 2048, p = p)\n fc1_drop = tf.nn.dropout(fc1, keep_prob, name = 'fc1_drop')\n\n # 全连接层2 添加了 Droput来防止过拟合 \n fc2 = fc_op(fc1_drop, name = 'fc2', n_out = 2048, p = p)\n fc2_drop = tf.nn.dropout(fc2, keep_prob, name = 'fc2_drop')\n\n # 全连接层3 加一个softmax求给类别的概率\n fc3 = fc_op(fc2_drop, name = 'fc3', n_out = 1000, p = p)\n softmax = tf.nn.softmax(fc3)\n predictions = tf.argmax(softmax, 1)\n return predictions, softmax, fc3, p\n\n# 定义评测函数\n\ndef time_tensorflow_run(session, target, feed, info_string):\n num_steps_burn_in = 10\n total_duration = 0.0\n total_duration_squared = 0.0\n\n for i in range(num_batches + num_steps_burn_in):\n start_time = time.time()\n _ = session.run(target, feed_dict = feed)\n duration = time.time() - start_time\n if i >= num_steps_burn_in:\n if not i % 10: \n print('%s: step %d, duration = %.3f' % \n (datetime.now(), i-num_steps_burn_in, duration))\n total_duration += duration\n total_duration_squared += duration * duration\n mean_dur = total_duration / num_batches \n var_dur = total_duration_squared / num_batches - mean_dur * mean_dur\n std_dur = math.sqrt(var_dur)\n print('%s: %s across %d steps, %.3f +/- %.3f sec / batch' %(datetime.now(), info_string, num_batches, mean_dur, std_dur))\n\n\ndef train_vgg16():\n with tf.Graph().as_default():\n image_size = 224 # 输入图像尺寸\n # 生成随机数测试是否能跑通\n #images = tf.Variable(tf.random_normal([batch_size, image_size, image_size, 3], dtype=tf.float32, stddev=1e-1))\n with tf.device('/cpu:0'):\n images, labels = cifar10.distorted_inputs()\n keep_prob = tf.placeholder(tf.float32)\n prediction,softmax,fc8,p = inference_op(images,keep_prob)\n init = tf.global_variables_initializer()\n sess = tf.Session()\n sess.run(init)\n time_tensorflow_run(sess, prediction,{keep_prob:1.0}, \"Forward\")\n # 用以模拟训练的过程\n objective = tf.nn.l2_loss(fc8) # 给一个loss\n grad = tf.gradients(objective, p) # 相对于loss的 所有模型参数的梯度\n time_tensorflow_run(sess, grad, {keep_prob:0.5},\"Forward-backward\")\n\n\n\n\nif __name__ == '__main__':\n train_vgg16()\n" ]
[ [ "tensorflow.device", "tensorflow.nn.max_pool", "tensorflow.nn.l2_loss", "tensorflow.nn.conv2d", "tensorflow.Graph", "tensorflow.Variable", "tensorflow.gradients", "tensorflow.name_scope", "tensorflow.Session", "tensorflow.contrib.layers.xavier_initializer", "tensorflow.argmax", "tensorflow.nn.dropout", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.contrib.layers.xavier_initializer_conv2d", "tensorflow.nn.bias_add", "tensorflow.nn.relu", "tensorflow.nn.softmax", "tensorflow.constant", "tensorflow.nn.relu_layer", "tensorflow.reshape" ] ]
Edelweiss35/deep-machine-learning
[ "b1e4b133609f303be77de824601925f448a94764" ]
[ "dml/KNN/kd.py" ]
[ "from __future__ import division\nimport numpy as np\nimport scipy as sp\nfrom operator import itemgetter\nfrom scipy.spatial.distance import euclidean\nfrom dml.tool import Heap\nclass KDNode:\n\tdef __init__(self,x,y,l):\n\t\tself.x=x\n\t\tself.y=y\n\t\tself.l=l\n\t\tself.F=None\n\t\tself.Lc=None\n\t\tself.Rc=None\n\t\tself.distsToNode=None\n\nclass KDTree:\n\tdef __init__(self,X,y=None,dist=euclidean):\n\t\tself.X=X\n\t\tself.k=X.shape[0] #N\n\t\tself.y=y\n\t\tself.dist=dist\n\t\tself.P=self.maketree(X,y,0)\n\t\tself.P.F=None\n\tdef maketree(self,data,y,deep):\n\t\tif data.size==0:\n\t\t\treturn None\n\t\tlenght = data.shape[0]\n\t\tcase = data.shape[1]\n\t\tp=int((case)/2)\n\t\tl = (deep%self.k)\n\t\t#print data\n\t\tdata=np.vstack((data,y))\n\t\tdata=np.array(sorted(data.transpose(),key=itemgetter(l))).transpose()\n\t\t#print data\n\t\ty=data[lenght,:]\n\t\tdata=data[:lenght,:]\n\t\t\n\t\tv=data[l,p]\n\t\trP=KDNode(data[:,p],y[p],l)\n\t\t#print data[:,p],y[p],l\n\t\tif case>1:\n\t\t\tldata=data[:,data[l,:]<v]\n\t\t\tly=y[data[l,:]<v]\n\t\t\tdata[l,p]=v-1\n\t\t\trdata=data[:,data[l,:]>=v]\n\t\t\try=y[data[l,:]>=v]\n\t\t\tdata[l,p]=v\n\t\t\trP.Lc=self.maketree(ldata,ly,deep+1)\n\t\t\tif rP.Lc!=None:\n\t\t\t\trP.Lc.F=rP\n\t\t\trP.Rc=self.maketree(rdata,ry,deep+1)\n\t\t\tif rP.Rc!=None:\n\t\t\t\trP.Rc.F=rP\n\t\treturn rP\n\n\tdef search_knn(self,P,x,k,maxiter=200):\n\t\tdef pf_compare(a,b):\n\t\t\treturn self.dist(x,a.x)<self.dist(x,b.x)\n\t\tdef ans_compare(a,b):\n\t\t\treturn self.dist(x,a.x)>self.dist(x,b.x)\n\t\tpf_seq=Heap(compare=pf_compare)\n\t\tpf_seq.insert(P) #prior sequence\n\t\tans=Heap(k,compare=ans_compare) #ans sequence\n\t\twhile pf_seq.counter>0:\n\t\t\tt=pf_seq.heap[1]\n\t\t\tpf_seq.delete(1)\n\t\t\tflag=True\n\t\t\tif ans.counter==k:\n\t\t\t\tnow=t.F\n\t\t\t\t#print ans.heap[1].x,'========'\n\t\t\t\tif now != None:\n\t\t\t\t\tq=x.copy()\n\t\t\t\t\tq[now.l]=now.x[now.l]\n\t\t\t\t\tlength=self.dist(q,x)\n\t\t\t\t\tif length>self.dist(ans.heap[1].x,x):\n\t\t\t\t\t\tflag=False\n\t\t\t\t\telse:\n\t\t\t\t\t\tflag=True\n\t\t\t\telse:\n\t\t\t\t\tflag=True\n\t\t\tif flag:\n\t\t\t\ttp,pf_seq,ans=self.to_leaf(t,x,pf_seq,ans)\n\t\t\t#print \"=============\"\n\t\t\t#ans.insert(tp)\n\t\treturn ans\n\n\n\tdef to_leaf(self,P,x,pf_seq,ans):\n\t\ttp=P\n\t\tif tp!=None:\n\t\t\tans.insert(tp)\n\t\t\tif tp.x[tp.l]>x[tp.l]:\n\t\t\t\tif tp.Rc!=None:\n\t\t\t\t\tpf_seq.insert(tp.Rc)\n\t\t\t\tif tp.Lc==None:\n\t\t\t\t\treturn tp,pf_seq,ans\n\t\t\t\telse:\n\t\t\t\t\treturn self.to_leaf(tp.Lc,x,pf_seq,ans)\n\t\t\tif tp.Lc!=None:\n\t\t\t\tpf_seq.insert(tp.Lc)\n\t\t\tif tp.Rc==None:\n\t\t\t\t\treturn tp,pf_seq,ans\n\t\t\telse:\n\t\t\t\t\treturn self.to_leaf(tp.Rc,x,pf_seq,ans)\n\n\n\n\n\n" ]
[ [ "numpy.vstack" ] ]
marneusz/LicensePlatesDetectionAndRecognition
[ "5b3eb2845d64b22abb1a321b5662c24d7b3832b3" ]
[ "read_data.py" ]
[ "# Necessary imports\nimport glob\nimport os\nimport xml.etree.ElementTree as ET\n\nimport pandas as pd\n\n\nclass MacOSFile(object):\n\n def __init__(self, f):\n self.f = f\n\n def __getattr__(self, item):\n return getattr(self.f, item)\n\n def read(self, n):\n # print(\"reading total_bytes=%s\" % n, flush=True)\n if n >= (1 << 31):\n buffer = bytearray(n)\n idx = 0\n while idx < n:\n batch_size = min(n - idx, 1 << 31 - 1)\n # print(\"reading bytes [%s,%s)...\" % (idx, idx + batch_size), end=\"\", flush=True)\n buffer[idx:idx + batch_size] = self.f.read(batch_size)\n # print(\"done.\", flush=True)\n idx += batch_size\n return buffer\n return self.f.read(n)\n\n def write(self, buffer):\n n = len(buffer)\n print(\"writing total_bytes=%s...\" % n, flush=True)\n idx = 0\n while idx < n:\n batch_size = min(n - idx, 1 << 31 - 1)\n print(\"writing bytes [%s, %s)... \" % (idx, idx + batch_size), end=\"\",\n flush=True)\n self.f.write(buffer[idx:idx + batch_size])\n print(\"done.\", flush=True)\n idx += batch_size\n\ndef read_xml():\n PATH = \"plates_dataset/\"\n annotations = os.listdir(PATH + 'annotations2')\n images = os.listdir(PATH + 'images2')\n\n # parsing xml files similar to https://www.kaggle.com/stpeteishii/car-plate-get-annotation-info-from-xml\n\n dataset = {\n \"file\": [],\n \"width\": [],\n \"height\": [],\n \"xmin\": [],\n \"xmax\": [],\n \"ymin\": [],\n \"ymax\": [],\n }\n\n for annotation in glob.glob(PATH + \"annotations2/*.xml\"):\n # representing xml files as a tree\n tree = ET.parse(annotation)\n\n for element in tree.iter():\n if 'size' in element.tag:\n for attribute in list(element):\n if 'width' in attribute.tag:\n width = int(round(float(attribute.text)))\n if 'height' in attribute.tag:\n height = int(round(float(attribute.text)))\n\n if 'object' in element.tag:\n for attribute in list(element):\n\n if 'name' in attribute.tag:\n dataset['width'].append(width)\n dataset['height'].append(height)\n dataset['file'].append(annotation.split('/')[-1].split('.')[0])\n\n if 'bndbox' in attribute.tag:\n for dim in list(attribute):\n if 'xmin' in dim.tag:\n xmin = int(round(float(dim.text)))\n dataset['xmin'].append(xmin)\n if 'ymin' in dim.tag:\n ymin = int(round(float(dim.text)))\n dataset['ymin'].append(ymin)\n if 'xmax' in dim.tag:\n xmax = int(round(float(dim.text)))\n dataset['xmax'].append(xmax)\n if 'ymax' in dim.tag:\n ymax = int(round(float(dim.text)))\n dataset['ymax'].append(ymax)\n data = pd.DataFrame(dataset)\n data = data.sort_values('file').reset_index(drop=True)\n return data\n" ]
[ [ "pandas.DataFrame" ] ]
48cfu/CarND-Traffic-Sign-Classifier-Project
[ "825a8a43e6bc95648a601a5761278e486054c3b9" ]
[ "source/LeNet-Lab.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# # LeNet Lab\n# ![LeNet Architecture](lenet.png)\n# Source: Yan LeCun\n\n# ## Load Data\n# \n# Load the MNIST data, which comes pre-loaded with TensorFlow.\n# \n# You do not need to modify this section.\n\n# In[1]:\n\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nmnist = input_data.read_data_sets(\"MNIST_data/\", reshape=False)\nX_train, y_train = mnist.train.images, mnist.train.labels\nX_validation, y_validation = mnist.validation.images, mnist.validation.labels\nX_test, y_test = mnist.test.images, mnist.test.labels\n\nassert(len(X_train) == len(y_train))\nassert(len(X_validation) == len(y_validation))\nassert(len(X_test) == len(y_test))\n\nprint()\nprint(\"Image Shape: {}\".format(X_train[0].shape))\nprint()\nprint(\"Training Set: {} samples\".format(len(X_train)))\nprint(\"Validation Set: {} samples\".format(len(X_validation)))\nprint(\"Test Set: {} samples\".format(len(X_test)))\n\n\n# The MNIST data that TensorFlow pre-loads comes as 28x28x1 images.\n# \n# However, the LeNet architecture only accepts 32x32xC images, where C is the number of color channels.\n# \n# In order to reformat the MNIST data into a shape that LeNet will accept, we pad the data with two rows of zeros on the top and bottom, and two columns of zeros on the left and right (28+2+2 = 32).\n# \n# You do not need to modify this section.\n\n# In[2]:\n\n\nimport numpy as np\n\n# Pad images with 0s\nX_train = np.pad(X_train, ((0,0),(2,2),(2,2),(0,0)), 'constant')\nX_validation = np.pad(X_validation, ((0,0),(2,2),(2,2),(0,0)), 'constant')\nX_test = np.pad(X_test, ((0,0),(2,2),(2,2),(0,0)), 'constant')\n \nprint(\"Updated Image Shape: {}\".format(X_train[0].shape))\n\n\n# ## Visualize Data\n# \n# View a sample from the dataset.\n# \n# You do not need to modify this section.\n\n# In[3]:\n\n\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nget_ipython().run_line_magic('matplotlib', 'inline')\n\nindex = random.randint(0, len(X_train))\nimage = X_train[index].squeeze()\n\nplt.figure(figsize=(1,1))\nplt.imshow(image, cmap=\"gray\")\nprint(y_train[index])\n\n\n# ## Preprocess Data\n# \n# Shuffle the training data.\n# \n# You do not need to modify this section.\n\n# In[4]:\n\n\nfrom sklearn.utils import shuffle\n\nX_train, y_train = shuffle(X_train, y_train)\n\n\n# ## Setup TensorFlow\n# The `EPOCH` and `BATCH_SIZE` values affect the training speed and model accuracy.\n# \n# You do not need to modify this section.\n\n# In[5]:\n\n\nimport tensorflow as tf\n\nEPOCHS = 10\nBATCH_SIZE = 128\n\n\n# ## TODO: Implement LeNet-5\n# Implement the [LeNet-5](http://yann.lecun.com/exdb/lenet/) neural network architecture.\n# \n# This is the only cell you need to edit.\n# ### Input\n# The LeNet architecture accepts a 32x32xC image as input, where C is the number of color channels. Since MNIST images are grayscale, C is 1 in this case.\n# \n# ### Architecture\n# **Layer 1: Convolutional.** The output shape should be 28x28x6.\n# \n# **Activation.** Your choice of activation function.\n# \n# **Pooling.** The output shape should be 14x14x6.\n# \n# **Layer 2: Convolutional.** The output shape should be 10x10x16.\n# \n# **Activation.** Your choice of activation function.\n# \n# **Pooling.** The output shape should be 5x5x16.\n# \n# **Flatten.** Flatten the output shape of the final pooling layer such that it's 1D instead of 3D. 
The easiest way to do this is by using `tf.contrib.layers.flatten`, which is already imported for you.\n# \n# **Layer 3: Fully Connected.** This should have 120 outputs.\n# \n# **Activation.** Your choice of activation function.\n# \n# **Layer 4: Fully Connected.** This should have 84 outputs.\n# \n# **Activation.** Your choice of activation function.\n# \n# **Layer 5: Fully Connected (Logits).** This should have 10 outputs.\n# \n# ### Output\n# Return the result of the 2nd fully connected layer.\n\n# In[6]:\n\n\nfrom tensorflow.contrib.layers import flatten\n\ndef LeNet(x): \n    # Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer\n mu = 0\n sigma = 0.1\n\n # Weight and bias\n weights ={\n 'layer_1': tf.Variable(tf.truncated_normal([5, 5, 1, 6], mu, sigma)),\n 'layer_2': tf.Variable(tf.truncated_normal([5, 5, 6, 16], mu, sigma)),\n 'layer_3': tf.Variable(tf.truncated_normal([400, 120], mu, sigma)),\n 'layer_4': tf.Variable(tf.truncated_normal([120, 84], mu, sigma)),\n 'layer_5': tf.Variable(tf.truncated_normal([84, 10], mu, sigma)),\n }\n biases = {\n 'layer_1': tf.Variable(tf.zeros(6)),\n 'layer_2': tf.Variable(tf.zeros(16)),\n 'layer_3': tf.Variable(tf.zeros(120)),\n 'layer_4': tf.Variable(tf.zeros(84)),\n 'layer_5': tf.Variable(tf.zeros(10)),\n }\n '''\n SOLUTION: Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.\n SOLUTION: Activation.\n SOLUTION: Pooling. Input = 28x28x6. Output = 14x14x6.\n '''\n layer_1 = tf.nn.conv2d(x, weights['layer_1'], strides = [1, 1, 1, 1], padding = 'VALID')\n layer_1 = tf.nn.bias_add(layer_1, biases['layer_1'])\n layer_1 = tf.nn.relu(layer_1)\n layer_1 = tf.nn.max_pool(layer_1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n \n '''\n SOLUTION: Layer 2: Convolutional. Output = 10x10x16.\n SOLUTION: Activation.\n SOLUTION: Pooling. Input = 10x10x16. Output = 5x5x16.\n '''\n layer_2 = tf.nn.conv2d(layer_1, weights['layer_2'], strides = [1, 1, 1, 1], padding = 'VALID')\n layer_2 = tf.nn.bias_add(layer_2, biases['layer_2'])\n layer_2 = tf.nn.relu(layer_2)\n layer_2 = tf.nn.max_pool(layer_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n \n '''\n SOLUTION: Flatten. Input = 5x5x16. Output = 400.\n '''\n flat_layer = flatten(layer_2)\n \n '''\n SOLUTION: Layer 3: Fully Connected. Input = 400. Output = 120.\n SOLUTION: Activation.\n '''\n layer_3 = tf.matmul(flat_layer, weights['layer_3']) + biases['layer_3']\n layer_3 = tf.nn.relu(layer_3)\n \n '''\n SOLUTION: Layer 4: Fully Connected. Input = 120. Output = 84.\n SOLUTION: Activation.\n '''\n layer_4 = tf.matmul(layer_3, weights['layer_4']) + biases['layer_4']\n layer_4 = tf.nn.relu(layer_4)\n \n '''\n SOLUTION: Layer 5: Fully Connected. Input = 84. 
Output = 10.\n '''\n layer_5 = tf.matmul(layer_4, weights['layer_5']) + biases['layer_5']\n logits = layer_5\n \n return logits\n\n\n# ## Features and Labels\n# Train LeNet to classify [MNIST](http://yann.lecun.com/exdb/mnist/) data.\n# \n# `x` is a placeholder for a batch of input images.\n# `y` is a placeholder for a batch of output labels.\n# \n# You do not need to modify this section.\n\n# In[7]:\n\n\nx = tf.placeholder(tf.float32, (None, 32, 32, 1))\ny = tf.placeholder(tf.int32, (None))\none_hot_y = tf.one_hot(y, 10)\n\n\n# ## Training Pipeline\n# Create a training pipeline that uses the model to classify MNIST data.\n# \n# You do not need to modify this section.\n\n# In[8]:\n\n\nrate = 0.001\n\nlogits = LeNet(x)\ncross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits)\nloss_operation = tf.reduce_mean(cross_entropy)\noptimizer = tf.train.AdamOptimizer(learning_rate = rate)\ntraining_operation = optimizer.minimize(loss_operation)\n\n\n# ## Model Evaluation\n# Evaluate the loss and accuracy of the model for a given dataset.\n# \n# You do not need to modify this section.\n\n# In[9]:\n\n\ncorrect_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))\naccuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\nsaver = tf.train.Saver()\n\ndef evaluate(X_data, y_data):\n num_examples = len(X_data)\n total_accuracy = 0\n sess = tf.get_default_session()\n for offset in range(0, num_examples, BATCH_SIZE):\n batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]\n accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y})\n total_accuracy += (accuracy * len(batch_x))\n return total_accuracy / num_examples\n\n\n# ## Train the Model\n# Run the training data through the training pipeline to train the model.\n# \n# Before each epoch, shuffle the training set.\n# \n# After each epoch, measure the loss and accuracy of the validation set.\n# \n# Save the model after training.\n# \n# You do not need to modify this section.\n\n# In[10]:\n\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n num_examples = len(X_train)\n \n print(\"Training...\")\n print()\n for i in range(EPOCHS):\n X_train, y_train = shuffle(X_train, y_train)\n for offset in range(0, num_examples, BATCH_SIZE):\n end = offset + BATCH_SIZE\n batch_x, batch_y = X_train[offset:end], y_train[offset:end]\n sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})\n \n validation_accuracy = evaluate(X_validation, y_validation)\n print(\"EPOCH {} ...\".format(i+1))\n print(\"Validation Accuracy = {:.3f}\".format(validation_accuracy))\n print()\n \n saver.save(sess, './lenet')\n print(\"Model saved\")\n\n\n# ## Evaluate the Model\n# Once you are completely satisfied with your model, evaluate the performance of the model on the test set.\n# \n# Be sure to only do this once!\n# \n# If you were to measure the performance of your trained model on the test set, then improve your model, and then measure the performance of your model on the test set again, that would invalidate your test results. You wouldn't get a true measure of how well your model would perform against real data.\n# \n# You do not need to modify this section.\n\n# In[11]:\n\n\nwith tf.Session() as sess:\n saver.restore(sess, tf.train.latest_checkpoint('.'))\n\n test_accuracy = evaluate(X_test, y_test)\n print(\"Test Accuracy = {:.3f}\".format(test_accuracy))\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n" ]
[ [ "matplotlib.pyplot.imshow", "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.zeros", "tensorflow.nn.max_pool", "tensorflow.cast", "tensorflow.contrib.layers.flatten", "tensorflow.train.AdamOptimizer", "tensorflow.nn.conv2d", "numpy.pad", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.argmax", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "matplotlib.pyplot.figure", "tensorflow.matmul", "tensorflow.truncated_normal", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.one_hot", "tensorflow.nn.bias_add", "tensorflow.nn.relu", "tensorflow.get_default_session", "tensorflow.train.latest_checkpoint", "tensorflow.reduce_mean", "sklearn.utils.shuffle" ] ]
OmidSa75/mmdetection
[ "baccacb2d2675fdc430b4f9546a7fd46f9097a20" ]
[ "mmdet/core/bbox/coder/delta_xywh_bbox_coder.py" ]
[ "import mmcv\nimport numpy as np\nimport torch\n\nfrom ..builder import BBOX_CODERS\nfrom .base_bbox_coder import BaseBBoxCoder\n\n\n@BBOX_CODERS.register_module()\nclass DeltaXYWHBBoxCoder(BaseBBoxCoder):\n \"\"\"Delta XYWH BBox coder.\n\n Following the practice in `R-CNN <https://arxiv.org/abs/1311.2524>`_,\n this coder encodes bbox (x1, y1, x2, y2) into delta (dx, dy, dw, dh) and\n decodes delta (dx, dy, dw, dh) back to original bbox (x1, y1, x2, y2).\n\n Args:\n target_means (Sequence[float]): Denormalizing means of target for\n delta coordinates\n target_stds (Sequence[float]): Denormalizing standard deviation of\n target for delta coordinates\n clip_border (bool, optional): Whether clip the objects outside the\n border of the image. Defaults to True.\n \"\"\"\n\n def __init__(self,\n target_means=(0., 0., 0., 0.),\n target_stds=(1., 1., 1., 1.),\n clip_border=True):\n super(BaseBBoxCoder, self).__init__()\n self.means = target_means\n self.stds = target_stds\n self.clip_border = clip_border\n\n def encode(self, bboxes, gt_bboxes):\n \"\"\"Get box regression transformation deltas that can be used to\n transform the ``bboxes`` into the ``gt_bboxes``.\n\n Args:\n bboxes (torch.Tensor): Source boxes, e.g., object proposals.\n gt_bboxes (torch.Tensor): Target of the transformation, e.g.,\n ground-truth boxes.\n\n Returns:\n torch.Tensor: Box transformation deltas\n \"\"\"\n\n assert bboxes.size(0) == gt_bboxes.size(0)\n assert bboxes.size(-1) == gt_bboxes.size(-1) == 4\n encoded_bboxes = bbox2delta(bboxes, gt_bboxes, self.means, self.stds)\n return encoded_bboxes\n\n def decode(self,\n bboxes,\n pred_bboxes,\n max_shape=None,\n wh_ratio_clip=16 / 1000):\n \"\"\"Apply transformation `pred_bboxes` to `boxes`.\n\n Args:\n boxes (torch.Tensor): Basic boxes.\n pred_bboxes (torch.Tensor): Encoded boxes with shape\n max_shape (tuple[int], optional): Maximum shape of boxes.\n Defaults to None.\n wh_ratio_clip (float, optional): The allowed ratio between\n width and height.\n\n Returns:\n torch.Tensor: Decoded boxes.\n \"\"\"\n\n assert pred_bboxes.size(0) == bboxes.size(0)\n decoded_bboxes = delta2bbox(bboxes, pred_bboxes, self.means, self.stds,\n max_shape, wh_ratio_clip, self.clip_border)\n\n return decoded_bboxes\n\n\[email protected](coderize=True)\ndef bbox2delta(proposals, gt, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.)):\n \"\"\"Compute deltas of proposals w.r.t. 
gt.\n\n We usually compute the deltas of x, y, w, h of proposals w.r.t. ground\n truth bboxes to get regression target.\n This is the inverse function of :func:`delta2bbox`.\n\n Args:\n proposals (Tensor): Boxes to be transformed, shape (N, ..., 4)\n gt (Tensor): Gt bboxes to be used as base, shape (N, ..., 4)\n means (Sequence[float]): Denormalizing means for delta coordinates\n stds (Sequence[float]): Denormalizing standard deviation for delta\n coordinates\n\n Returns:\n Tensor: deltas with shape (N, 4), where columns represent dx, dy,\n dw, dh.\n \"\"\"\n assert proposals.size() == gt.size()\n\n proposals = proposals.float()\n gt = gt.float()\n px = (proposals[..., 0] + proposals[..., 2]) * 0.5\n py = (proposals[..., 1] + proposals[..., 3]) * 0.5\n pw = proposals[..., 2] - proposals[..., 0]\n ph = proposals[..., 3] - proposals[..., 1]\n\n gx = (gt[..., 0] + gt[..., 2]) * 0.5\n gy = (gt[..., 1] + gt[..., 3]) * 0.5\n gw = gt[..., 2] - gt[..., 0]\n gh = gt[..., 3] - gt[..., 1]\n\n dx = (gx - px) / pw\n dy = (gy - py) / ph\n dw = torch.log(gw / pw)\n dh = torch.log(gh / ph)\n deltas = torch.stack([dx, dy, dw, dh], dim=-1)\n\n means = deltas.new_tensor(means).unsqueeze(0)\n stds = deltas.new_tensor(stds).unsqueeze(0)\n deltas = deltas.sub_(means).div_(stds)\n\n return deltas\n\n\n@mmcv.jit(coderize=True)\ndef delta2bbox(rois,\n deltas,\n means=(0., 0., 0., 0.),\n stds=(1., 1., 1., 1.),\n max_shape=None,\n wh_ratio_clip=16 / 1000,\n clip_border=True):\n \"\"\"Apply deltas to shift/scale base boxes.\n\n Typically the rois are anchor or proposed bounding boxes and the deltas are\n network outputs used to shift/scale those boxes.\n This is the inverse function of :func:`bbox2delta`.\n\n Args:\n rois (Tensor): Boxes to be transformed. Has shape (N, 4)\n deltas (Tensor): Encoded offsets with respect to each roi.\n Has shape (N, 4 * num_classes). Note N = num_anchors * W * H when\n rois is a grid of anchors. Offset encoding follows [1]_.\n means (Sequence[float]): Denormalizing means for delta coordinates\n stds (Sequence[float]): Denormalizing standard deviation for delta\n coordinates\n max_shape (tuple[int, int]): Maximum bounds for boxes, specifies (H, W)\n wh_ratio_clip (float): Maximum aspect ratio for boxes.\n clip_border (bool, optional): Whether clip the objects outside the\n border of the image. Defaults to True.\n\n Returns:\n Tensor: Boxes with shape (N, 4), where columns represent\n tl_x, tl_y, br_x, br_y.\n\n References:\n .. 
[1] https://arxiv.org/abs/1311.2524\n\n Example:\n >>> rois = torch.Tensor([[ 0., 0., 1., 1.],\n >>> [ 0., 0., 1., 1.],\n >>> [ 0., 0., 1., 1.],\n >>> [ 5., 5., 5., 5.]])\n >>> deltas = torch.Tensor([[ 0., 0., 0., 0.],\n >>> [ 1., 1., 1., 1.],\n >>> [ 0., 0., 2., -1.],\n >>> [ 0.7, -1.9, -0.5, 0.3]])\n >>> delta2bbox(rois, deltas, max_shape=(32, 32))\n tensor([[0.0000, 0.0000, 1.0000, 1.0000],\n [0.1409, 0.1409, 2.8591, 2.8591],\n [0.0000, 0.3161, 4.1945, 0.6839],\n [5.0000, 5.0000, 5.0000, 5.0000]])\n \"\"\"\n means = deltas.new_tensor(means).view(1, -1).repeat(1, deltas.size(1) // 4)\n stds = deltas.new_tensor(stds).view(1, -1).repeat(1, deltas.size(1) // 4)\n denorm_deltas = deltas * stds + means\n dx = denorm_deltas[:, 0::4]\n dy = denorm_deltas[:, 1::4]\n dw = denorm_deltas[:, 2::4]\n dh = denorm_deltas[:, 3::4]\n max_ratio = np.abs(np.log(wh_ratio_clip))\n dw = dw.clamp(min=-max_ratio, max=max_ratio)\n dh = dh.clamp(min=-max_ratio, max=max_ratio)\n # Compute center of each roi\n px = ((rois[:, 0] + rois[:, 2]) * 0.5).unsqueeze(1).expand_as(dx)\n py = ((rois[:, 1] + rois[:, 3]) * 0.5).unsqueeze(1).expand_as(dy)\n # Compute width/height of each roi\n pw = (rois[:, 2] - rois[:, 0]).unsqueeze(1).expand_as(dw)\n ph = (rois[:, 3] - rois[:, 1]).unsqueeze(1).expand_as(dh)\n # Use exp(network energy) to enlarge/shrink each roi\n gw = pw * dw.exp()\n gh = ph * dh.exp()\n # Use network energy to shift the center of each roi\n gx = px + pw * dx\n gy = py + ph * dy\n # Convert center-xy/width/height to top-left, bottom-right\n x1 = gx - gw * 0.5\n y1 = gy - gh * 0.5\n x2 = gx + gw * 0.5\n y2 = gy + gh * 0.5\n if clip_border and max_shape is not None:\n # use where() to replace clip(),\n # because clip()'s attr min/max do not support dynamic in onnx\n if torch.onnx.is_in_onnx_export():\n zero = torch.tensor(0, dtype=torch.float32)\n zero = zero.expand(x1.size())\n width = torch.tensor(max_shape[1], dtype=torch.float32)\n width = width.expand(x1.size())\n height = torch.tensor(max_shape[0], dtype=torch.float32)\n height = height.expand(x1.size())\n x1 = torch.where(x1 < zero, zero, x1)\n x1 = torch.where(x1 > width, width, x1)\n y1 = torch.where(y1 < zero, zero, y1)\n y1 = torch.where(y1 > height, height, y1)\n x2 = torch.where(x2 < zero, zero, x2)\n x2 = torch.where(x2 > width, width, x2)\n y2 = torch.where(y2 < zero, zero, y2)\n y2 = torch.where(y2 > height, height, y2)\n else:\n x1 = x1.clamp(min=0, max=max_shape[1])\n y1 = y1.clamp(min=0, max=max_shape[0])\n x2 = x2.clamp(min=0, max=max_shape[1])\n y2 = y2.clamp(min=0, max=max_shape[0])\n bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size())\n return bboxes\n" ]
[ [ "numpy.log", "torch.tensor", "torch.log", "torch.where", "torch.stack", "torch.onnx.is_in_onnx_export" ] ]
HilitOreny/SheCodes
[ "be98ca8654c6c0539fe2cd22ad2ae0fdbcf60117" ]
[ "Sustain/sustain_code_example.py" ]
[ "\"\"\"This module analyzes the content of an excel file for the Welcome Team of She Codes\"\"\"\n\nimport pandas\nimport datetime\nimport os\n\n\ndef pathfinder():\n path = '/python_scripts/welcome.xlsx'\n check_file_location = os.path.isfile(path)\n return check_file_location\n\n\ndef read_file(track_name):\n \"\"\"This function opens a xlsx file and returns the content of the file. \n :param track_name: The name of the track (the name of the sheet).\n :return: a dataframe which contains the content of the sheet in the excel file.\n \"\"\"\n try:\n file_content = pandas.read_excel('/python_scripts/welcome.xlsx', sheet_name=track_name)\n except ImportError:\n print(\"Python tried to open the file, but encountered a problem. Contact code maintainers\")\n except FileNotFoundError:\n print(\"The file is not present. Contact code maintainers\")\n else:\n return file_content\n\n\ndef generate_data(raw_excel_data):\n \"\"\"this function replaces empty cells with 0 and removes irrelevant data.\n :param raw_excel_data: a dataframe which contains the raw data from the excel file.\n :unfiltered_data type: dataframe\n :return: a dataframe which only contains the relevant data.\n :rtype: dataframe\"\"\"\n filled_excel_data = raw_excel_data.fillna(0)\n no_team = filled_excel_data[filled_excel_data[\"Staff\"] == \"No\"]\n drop_unused = no_team.drop(columns=[\n \"Index\", \"Staff\", \"Email\", \"Joined\", \"Track\",\n \"Attendance in the last 10 weeks\", \"Max Lesson Entered\"])\n recent = drop_unused.drop(drop_unused.columns[[1, 2, 3, 4, 5, 6]], axis=1)\n return recent\n\n\ndef get_last_column(data):\n last_column_name = data.columns.values[-1]\n return last_column_name\n\n\ndef finished_track(all_students_df, last_column):\n all_lessons_completed = all_students_df[all_students_df[last_column] < 12]\n return all_lessons_completed\n\n\ndef remove_completed(data, last_column):\n \"\"\"this function removes students who completed the 12 lesson from the dataframe.\n :return: a dataframe which contains only students who are in lesson 1 - 11\"\"\"\n still_learning = data[data[last_column] < 12]\n return still_learning\n\n\ndef missing_students(students_df, last_column):\n \"\"\"this function filters the students who did not show up to the last lesson \n return: a dataframe which contains only students who did not show up to the last lesson\"\"\"\n missed_last = students_df[students_df[last_column] == 0]\n return missed_last\n\n\ndef generate_excel(df_to_export, df_name, sheet_name):\n \"\"\"this function generates an excel file with today's date and the dataframe's name.\n :param df_to_export: a dataframe to export to excel file. 
\n :param df_name: dataframe's name to be copied to the excel file.\n :param sheet_name: the name of the excel sheet\n \"\"\"\n today_string = datetime.datetime.today().strftime(\"%d %B, %Y\")\n excel_name = f\"/python_scripts/{df_name} - {sheet_name} - {today_string}.xlsx\"\n df_to_export.to_excel(excel_name)\n output_message = \"excel files crated on python_scrips folder\"\n return\n\n\ndef file_analysis():\n track_list = [\"Basic Python\", \"Python for Programmers\", \"React\", \"Web\"]\n for item in track_list:\n excel_content = read_file(item)\n all_courses = generate_data(excel_content)\n last = get_last_column(all_courses)\n active_students = remove_completed(all_courses, last)\n missing = missing_students(active_students, last)\n generate_excel(missing, \"missing students\", item)\n\n\ndef getting_started():\n file_located = pathfinder()\n if file_located:\n file_analysis()\n else:\n message = \"\"\"Please go to C folder and create a sub-folder called \"python_scrips\".\n Put welcome.xlsx in this folder\"\"\"\n print(message)\n\n\ngetting_started()\n" ]
[ [ "pandas.read_excel" ] ]
mightypirate1/Smartorn
[ "27cbe5abac45eb144a44ec369fcf92f298db3743" ]
[ "nn/smartorn.py" ]
[ "import tensorflow as tf\nimport numpy as np\n\nimport utils.utils as utils\n\nEPSILON = 10**-7\n\nclass smartorn:\n def __init__(\n self,\n input_shape,\n output_shape,\n n_neurons=100,\n n_layers=3,\n n_dimentions=3,\n dtype=tf.float16,\n name=\"smartorn\",\n radial_decay=True,\n depth_decay=True,\n normalized_activations=False,\n trainable_bias=True,\n ):\n self.DEBUG = False\n self.dbg_tensors = []\n #Store some numbers\n self.input_shape = input_shape if type(input_shape) is list else input_shape.as_list()\n self.output_shape = output_shape if type(output_shape) is list else output_shape.as_list()\n self.n_input = np.prod(input_shape[1:])\n self.n_output = np.prod(output_shape[1:])\n self.n = n_neurons + self.n_input + self.n_output\n self.dim = n_dimentions\n self.dtype = dtype\n self.radial_decay = radial_decay\n self.depth_decay = depth_decay\n self.normalized_activations = normalized_activations\n self.trainable_bias = trainable_bias\n #Initialize\n self.input = tf.placeholder(dtype, input_shape)\n self.init_activation_tf = tf.placeholder(self.dtype, shape=[None, self.n], name=\"initial_activation\")\n self.scope = name\n with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):\n self.create_neurons()\n #Unroll brain\n self.activations = []\n self.outputs = []\n x = self.init_activation_tf\n for _ in range(n_layers):\n print(\"X:\", x)\n x = self.apply_neurons(x)\n self.activations.append(x)\n self.outputs.append(x[:,-self.n_output:])\n self.create_regularizers(spatial=True, direction=True, spatial_mode='hard')\n self.create_init_op()\n\n def apply_neurons(self, current_activation):\n def strength(x, w=2.0):\n # x in [-1, 1]\n ret = ( tf.math.exp(w*x) - (np.e**-1) ) / ( np.e**w-np.e**-1 ) + 0.01\n # ret = tf.math.exp(0*x)\n #Original thought was a relu so signals are not \"felt\" if receptor and sender are not pointing the same way. 
This is a \"fuzzied\" version which has a gradient always...\n # (e^(2x) - e^-1) / (e^2-e^(-1))\n # f = lambda x : 1.0 + tf.nn.elu( x - 1.0 )\n # ret = f(x)\n self.dbg_tensors += [ret]\n return ret\n\n def activation(x):\n return tf.nn.elu(x)\n x = self.renormalize_activations(current_activation)\n x = tf.expand_dims(x,2)\n x = tf.expand_dims(x,2)\n outdir_bar = tf.expand_dims( self.outdir_bar_tf, 2)\n indir_bar = tf.expand_dims( self.indir_bar_tf, 1)\n power = tf.expand_dims( self.power_tf, 2)\n\n #####\n #######\n #########\n delta_p_norm = tf.reduce_sum( tf.math.square(self.delta_p_tf), axis=-1, keepdims=True)\n delta_p_bar = tf.math.divide( self.delta_p_tf, tf.math.maximum( 0.0001, delta_p_norm) )\n delta_p_bar = utils.remove_nan(delta_p_bar, value=0.0)\n #########\n #######\n #####\n alpha = strength( utils.dot( delta_p_bar, outdir_bar) ) #Send strength\n if self.radial_decay:\n print(\"Radial decay code has not been debugged: there MAY be errors...\")\n exponent = 1.0 + self.radial_decay_tf[:,:,tf.newaxis,tf.newaxis]\n alpha = tf.math.pow( alpha, exponent )\n beta = strength( utils.dot( delta_p_bar, indir_bar) ) #Receive strength\n if self.depth_decay:\n print(\"Depth decay code has not been debugged: there MAY be errors...\")\n decay_factor = self.depth_decay_tf[:,:,tf.newaxis,tf.newaxis]\n beta = beta * tf.math.exp( -decay_factor * delta_p_norm )\n Z = x * power * alpha * beta\n new_activation = activation( tf.reduce_mean(Z, axis=1, keepdims=False) - tf.expand_dims(self.bias_tf,2))\n output = self.reapply_inputs(tf.squeeze(new_activation, axis=-1))\n # self.dbg_tensors = [outdir_bar, indir_bar, delta_p_bar, alpha, beta, x, output, delta_p_norm]\n self.dbg_tensors = []\n return output\n\n def renormalize_activations(self, x):\n if self.normalized_activations:\n sum = tf.reduce_sum(x+EPSILON, axis=-1, keepdims=True)\n return tf.math.divide(x,sum)\n else:\n return x\n\n def reapply_inputs(self, x):\n mode = 'fixed'\n # mode = 'none'\n if mode == 'none':\n return x\n if mode == 'fixed':\n #Each \"layer\" has the same activation for the input neurons\n mask = np.zeros((1,self.n))\n mask[0,:self.n_input] = 1\n mask_tf = self._variable('input_mask', mask, trainable=False)\n x = x * (1-mask_tf) + self.init_activation_tf * mask_tf\n return x\n\n def create_neurons(self, debug=True):\n #dist\n init_dist = np.ones((1,self.n,self.n))\n #positions\n init_pos = 2 * np.random.random(size=[1,self.n, self.dim]) - 1\n init_pos[:,:self.n_input,:] = utils.init_pos_from_shape(self.input_shape, is_input=True)\n init_pos[:,-self.n_output:,:] = utils.init_pos_from_shape(self.output_shape, is_input=False)\n #in_dir\n init_indir = 2 * np.random.random(size=[1,self.n, self.dim]) - 1\n init_indir[:,:self.n_input,:] = utils.init_dir_from_shape(self.input_shape)\n init_indir[:,-self.n_output:,:] = utils.init_dir_from_shape(self.output_shape)\n #out_dir\n init_outdir = 2 * np.random.random(size=[1,self.n, self.dim]) - 1\n init_outdir[:,:self.n_input,:] = utils.init_dir_from_shape(self.input_shape)\n init_outdir[:,-self.n_output:,:] = utils.init_dir_from_shape(self.output_shape)\n #bias\n init_bias = np.ones((1,self.n))\n #decays\n init_depth_decay = np.zeros((1,self.n))\n init_radial_decay = np.zeros((1,self.n))\n #power\n init_power = np.zeros((1,self.n))\n ##\n if self.DEBUG:\n init_pos[:,self.n_input,:] = [-1,0,0]\n init_indir[:,self.n_input,:] = [0,0,1]\n #Create tensors\n self.power_tf = self._variable(\"power\" , init_power , collections=['signal',])\n self.position_tf = p = self._variable(\"position\" , 
init_pos , collections=['signal', 'position',])\n self.indir_tf = self._variable(\"indir\" , init_indir, normalize=True, collections=['direction',])\n self.outdir_tf = self._variable(\"outdir\" , init_outdir, normalize=True, collections=['direction',])\n self.bias_tf = self._variable(\"bias\" , init_bias , collections=['signal',], trainable=self.trainable_bias)\n self.radial_decay_tf = self._variable(\"radial_decay\", init_radial_decay , collections=['decay',]) if self.radial_decay else None\n self.depth_decay_tf = self._variable(\"depth_decay\" , init_depth_decay , collections=['decay',]) if self.depth_decay else None\n self.delta_p_tf = tf.expand_dims(p, 1) - tf.expand_dims(p, 2)\n self.outdir_bar_tf = tf.nn.l2_normalize(self.outdir_tf, axis=-1)\n self.indir_bar_tf = tf.nn.l2_normalize(self.indir_tf, axis=-1)\n\n def _variable(self, name, initval, trainable=True, normalize=False, collections=[]):\n if normalize:\n initval = initval / np.linalg.norm(initval, axis=2, keepdims=True)\n return tf.get_variable(\n name,\n initval.shape,\n dtype=self.dtype,\n initializer=tf.constant_initializer(initval),\n trainable=trainable,\n collections=[tf.GraphKeys.GLOBAL_VARIABLES, \"smartorn\", *collections],\n )\n\n def create_init_op(self):\n self.init_op = tf.variables_initializer(tf.get_collection(self.scope))\n\n def create_regularizers(self, direction=False, spatial=True, directional_mode='both',spatial_mode='soft', norm_ord=2, reg_type=tf.math.square):\n self.regularizers = []\n if spatial:\n #spatial regularizer\n assert spatial_mode in ['hard', 'soft'], \"spatial regularizer modes are: soft, hard\"\n dist = tf.norm(self.position_tf, axis=-1, ord=norm_ord)\n if spatial_mode == 'soft':\n x = tf.reduce_mean( (dist - 1), axis=-1)\n if spatial_mode == 'hard':\n x = tf.reduce_mean( tf.nn.relu(dist - 1), axis=-1)\n spatial_reg_tf = reg_type(x)\n self.regularizers.append(spatial_reg_tf)\n if direction:\n #directional regularizer\n assert directional_mode in ['indir', 'outdir', 'both'], \"directional regularizer modes are: indir, outdir, both\"\n centroid = tf.reduce_mean( self.position_tf, axis=1, keepdims=True )\n reg_dir = centroid - self.position_tf\n if directional_mode == 'indir':\n x = tf.nn.relu(utils.dot(reg_dir, self.indir_bar_tf))\n if directional_mode == 'outdir':\n x = tf.nn.relu(utils.dot(reg_dir, self.outdir_bar_tf))\n if directional_mode == 'both':\n x = tf.nn.relu(utils.dot(reg_dir, self.indir_bar_tf)) + tf.nn.relu(utils.dot(reg_dir, self.outdir_bar_tf))\n directional_reg_tf = reg_type( tf.reshape(tf.reduce_mean(x, axis=1), [1,]) )\n self.regularizers.append(directional_reg_tf)\n\n def input_pad(self, x):\n batch_size = x.shape[0]\n n = x.shape[1]\n assert n in [self.n, self.n_input], \"Invalid initial activations passed to brain\"\n #If only n_input activations are specified, those are assumed to be for the input activations, and the rest is zero.\n if n < self.n:\n _x = np.zeros((batch_size, self.n))\n _x[:,:n] = x\n x = _x\n return x\n\n @property\n def output(self):\n return self.outputs[-1]\n\n @property\n def activation(self):\n return self.activations[-1]\n" ]
[ [ "tensorflow.reduce_sum", "tensorflow.get_collection", "tensorflow.squeeze", "tensorflow.math.divide", "tensorflow.math.pow", "numpy.zeros", "tensorflow.nn.l2_normalize", "tensorflow.norm", "tensorflow.nn.elu", "tensorflow.placeholder", "tensorflow.math.exp", "tensorflow.math.square", "tensorflow.nn.relu", "numpy.random.random", "tensorflow.reduce_mean", "tensorflow.expand_dims", "numpy.linalg.norm", "numpy.ones", "tensorflow.constant_initializer", "numpy.prod", "tensorflow.variable_scope", "tensorflow.math.maximum" ] ]
willtop/imitation-learning
[ "2c00f77e4e575e38ef233cc5eac6862e598ec4ac" ]
[ "Rejection-System/parse_json_tagging_targets.py" ]
[ "import numpy as np\nimport json\n\n# TYPE = \"TRAIN\"\nTYPE = \"VALID\"\n\nif(__name__==\"__main__\"):\n if (TYPE == \"TRAIN\"):\n filename = \"Data/Train/train_tags.json\"\n filename_processed = \"Data/Train/train_tags_processed.json\"\n number_of_images = 5000\n target_filename = \"Data/Train/train_targets.npy\"\n else:\n filename = \"Data/Valid/valid_tags.json\"\n filename_processed = \"Data/Valid/valid_tags_processed.json\"\n number_of_images = 1000\n target_filename = \"Data/Valid/valid_targets.npy\"\n # first of all, make the json records contained within an array\n with open(filename, \"r\") as f_in:\n with open(filename_processed, \"w\") as f_out:\n for i, line in enumerate(f_in, 1):\n line = line.rstrip(\"\\n\")\n if(i==1): # first line, add start of array bracket\n line = \"[\"+line\n if(i==number_of_images): # last line, close the bracket for the array\n line += \"]\"\n else: # add comma separating elements within the array\n line += \",\"\n f_out.write(line+\"\\n\")\n\n # use json to load the processed file as a list of records\n all_labels = np.zeros([number_of_images, 3])\n with open(filename_processed, \"r\") as f:\n all_records = json.load(f)\n assert np.size(all_records)==number_of_images\n for record in all_records:\n # obtain image index (starting from 0)\n token = record['content']\n image_index = int(token.split(\"_\")[-2])\n assert image_index in range(number_of_images)\n assert np.sum(all_labels[image_index])==0 # ensure no repetitive labelling\n # obtain tagging targets\n tags = record['annotation']['labels']\n if('left' in tags or 'Left' in tags):\n all_labels[image_index][0] = 1\n if('straight' in tags or 'Straight' in tags):\n all_labels[image_index][1] = 1\n if('right' in tags or 'Right' in tags):\n all_labels[image_index][2] = 1\n print(\"# tagged: left: {}; straight: {}; right: {}\".format(\n np.sum(all_labels[:,0]), np.sum(all_labels[:,1]), np.sum(all_labels[:,2])))\n np.save(target_filename, all_labels)\n print(\"Script finished successfully!\")" ]
[ [ "numpy.size", "numpy.zeros", "numpy.sum", "numpy.save" ] ]
wpirkl/matplotlib_qtquick_playground
[ "17f8b600776d4a051d945c74a50ec3423169b8fa" ]
[ "backend/backend_qtquick5/backend_qquick5agg.py" ]
[ "import ctypes \nimport os\nimport sys\nimport traceback \n\nimport matplotlib\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg\nfrom matplotlib.backend_bases import cursors\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_qt5 import TimerQT\n\nimport six\n\nfrom PyQt5 import QtCore, QtGui, QtQuick, QtWidgets\n\nDEBUG = False\n\nclass MatplotlibIconProvider(QtQuick.QQuickImageProvider):\n \"\"\" This class provide the matplotlib icons for the navigation toolbar.\n \"\"\"\n\n def __init__(self, img_type = QtQuick.QQuickImageProvider.Pixmap):\n self.basedir = os.path.join(matplotlib.rcParams['datapath'], 'images')\n QtQuick.QQuickImageProvider.__init__(self, img_type)\n\n def requestImage(self, id, size):\n img = QtGui.QImage(os.path.join(self.basedir, id + '.png'))\n size = img.size()\n return img, size\n \n def requestPixmap(self, id, size): \n img, size = self.requestImage(id, size)\n pixmap = QtGui.QPixmap.fromImage(img)\n \n return pixmap, size\n\nclass FigureCanvasQtQuickAgg(QtQuick.QQuickPaintedItem, FigureCanvasAgg):\n \"\"\" This class creates a QtQuick Item encapsulating a Matplotlib\n Figure and all the functions to interact with the 'standard'\n Matplotlib navigation toolbar.\n \"\"\"\n\n # map Qt button codes to MouseEvent's ones:\n buttond = {\n QtCore.Qt.LeftButton: 1,\n QtCore.Qt.MidButton: 2,\n QtCore.Qt.RightButton: 3,\n # QtCore.Qt.XButton1: None,\n # QtCore.Qt.XButton2: None,\n }\n \n cursord = {\n cursors.MOVE: QtCore.Qt.SizeAllCursor,\n cursors.HAND: QtCore.Qt.PointingHandCursor,\n cursors.POINTER: QtCore.Qt.ArrowCursor,\n cursors.SELECT_REGION: QtCore.Qt.CrossCursor,\n }\n \n messageChanged = QtCore.pyqtSignal(str)\n \n leftChanged = QtCore.pyqtSignal()\n rightChanged = QtCore.pyqtSignal()\n topChanged = QtCore.pyqtSignal()\n bottomChanged = QtCore.pyqtSignal()\n wspaceChanged = QtCore.pyqtSignal()\n hspaceChanged = QtCore.pyqtSignal()\n\n def __init__(self, figure, parent=None, coordinates=True):\n if DEBUG:\n print('FigureCanvasQtQuickAgg qtquick5: ', figure)\n # _create_qApp()\n if figure is None:\n figure = Figure((6.0, 4.0))\n\n QtQuick.QQuickPaintedItem.__init__(self, parent=parent)\n FigureCanvasAgg.__init__(self, figure=figure)\n\n self._drawRect = None\n self.blitbox = None\n \n # Activate hover events and mouse press events\n self.setAcceptHoverEvents(True)\n self.setAcceptedMouseButtons(QtCore.Qt.AllButtons)\n \n self._agg_draw_pending = False\n \n def getFigure(self):\n return self.figure\n \n def drawRectangle(self, rect):\n self._drawRect = rect\n self.update()\n\n def paint(self, p):\n \"\"\"\n Copy the image from the Agg canvas to the qt.drawable.\n In Qt, all drawing should be done inside of here when a widget is\n shown onscreen.\n \"\"\"\n # if the canvas does not have a renderer, then give up and wait for\n # FigureCanvasAgg.draw(self) to be called\n if not hasattr(self, 'renderer'):\n return\n\n if DEBUG:\n print('FigureCanvasQtQuickAgg.paint: ', self,\n self.get_width_height())\n\n if self.blitbox is None:\n # matplotlib is in rgba byte order. QImage wants to put the bytes\n # into argb format and is in a 4 byte unsigned int. 
Little endian\n # system is LSB first and expects the bytes in reverse order\n # (bgra).\n if QtCore.QSysInfo.ByteOrder == QtCore.QSysInfo.LittleEndian:\n stringBuffer = self.renderer._renderer.buffer_rgba()\n else:\n stringBuffer = self.renderer._renderer.buffer_rgba()\n\n refcnt = sys.getrefcount(stringBuffer)\n\n # convert the Agg rendered image -> qImage\n qImage = QtGui.QImage(stringBuffer, self.renderer.width,\n self.renderer.height,\n QtGui.QImage.Format_ARGB32)\n # get the rectangle for the image\n rect = qImage.rect()\n # p = QtGui.QPainter(self)\n # reset the image area of the canvas to be the back-ground color\n p.eraseRect(rect)\n # draw the rendered image on to the canvas\n p.drawPixmap(QtCore.QPoint(0, 0), QtGui.QPixmap.fromImage(qImage))\n\n # draw the zoom rectangle to the QPainter\n if self._drawRect is not None:\n p.setPen(QtGui.QPen(QtCore.Qt.black, 1, QtCore.Qt.DotLine))\n x, y, w, h = self._drawRect\n p.drawRect(x, y, w, h)\n\n else:\n bbox = self.blitbox\n l, b, r, t = bbox.extents\n w = int(r) - int(l)\n h = int(t) - int(b)\n t = int(b) + h\n reg = self.copy_from_bbox(bbox)\n stringBuffer = reg.to_string_argb()\n qImage = QtGui.QImage(stringBuffer, w, h,\n QtGui.QImage.Format_ARGB32)\n\n pixmap = QtGui.QPixmap.fromImage(qImage)\n p.drawPixmap(QtCore.QPoint(l, self.renderer.height-t), pixmap)\n\n # draw the zoom rectangle to the QPainter\n if self._drawRect is not None:\n p.setPen(QtGui.QPen(QtCore.Qt.black, 1, QtCore.Qt.DotLine))\n x, y, w, h = self._drawRect\n p.drawRect(x, y, w, h)\n \n self.blitbox = None\n\n def draw(self):\n \"\"\"\n Draw the figure with Agg, and queue a request for a Qt draw.\n \"\"\"\n # The Agg draw is done here; delaying causes problems with code that\n # uses the result of the draw() to update plot elements.\n FigureCanvasAgg.draw(self)\n self.update()\n\n def draw_idle(self):\n \"\"\"\n Queue redraw of the Agg buffer and request Qt paintEvent.\n \"\"\"\n # The Agg draw needs to be handled by the same thread matplotlib\n # modifies the scene graph from. Post Agg draw request to the\n # current event loop in order to ensure thread affinity and to\n # accumulate multiple draw requests from event handling.\n # TODO: queued signal connection might be safer than singleShot\n if not self._agg_draw_pending:\n self._agg_draw_pending = True\n QtCore.QTimer.singleShot(0, self.__draw_idle_agg)\n\n def __draw_idle_agg(self, *args):\n if self.height() < 0 or self.width() < 0:\n self._agg_draw_pending = False\n return\n try:\n FigureCanvasAgg.draw(self)\n self.update()\n except Exception:\n # Uncaught exceptions are fatal for PyQt5, so catch them instead.\n traceback.print_exc()\n finally:\n self._agg_draw_pending = False\n\n def blit(self, bbox=None):\n \"\"\"\n Blit the region in bbox\n \"\"\"\n # If bbox is None, blit the entire canvas. 
Otherwise\n # blit only the area defined by the bbox.\n if bbox is None and self.figure:\n bbox = self.figure.bbox\n\n self.blitbox = bbox\n l, b, w, h = bbox.bounds\n t = b + h\n self.repaint(l, self.renderer.height-t, w, h) \n\n def geometryChanged(self, new_geometry, old_geometry):\n w = new_geometry.width()\n h = new_geometry.height()\n \n if (w <= 0.0) and (h <= 0.0):\n return\n \n if DEBUG:\n print('resize (%d x %d)' % (w, h))\n print(\"FigureCanvasQtQuickAgg.geometryChanged(%d, %d)\" % (w, h))\n dpival = self.figure.dpi\n winch = w / dpival\n hinch = h / dpival\n self.figure.set_size_inches(winch, hinch)\n FigureCanvasAgg.resize_event(self)\n self.draw_idle()\n QtQuick.QQuickPaintedItem.geometryChanged(self, new_geometry, old_geometry)\n \n def hoverEnterEvent(self, event):\n FigureCanvasAgg.enter_notify_event(self, guiEvent=event)\n\n def hoverLeaveEvent(self, event):\n QtWidgets.QApplication.restoreOverrideCursor()\n FigureCanvasAgg.leave_notify_event(self, guiEvent=event)\n\n def hoverMoveEvent(self, event):\n x = event.pos().x()\n # flipy so y=0 is bottom of canvas\n y = self.figure.bbox.height - event.pos().y()\n FigureCanvasAgg.motion_notify_event(self, x, y, guiEvent=event)\n \n # if DEBUG: \n # print('hover move')\n\n # hoverMoveEvent kicks in when no mouse buttons are pressed\n # otherwise mouseMoveEvent are emitted\n def mouseMoveEvent(self, event):\n x = event.x()\n # flipy so y=0 is bottom of canvas\n y = self.figure.bbox.height - event.y()\n FigureCanvasAgg.motion_notify_event(self, x, y, guiEvent=event)\n # if DEBUG: \n # print('mouse move')\n\n def mousePressEvent(self, event):\n x = event.pos().x()\n # flipy so y=0 is bottom of canvas\n y = self.figure.bbox.height - event.pos().y()\n button = self.buttond.get(event.button())\n if button is not None:\n FigureCanvasAgg.button_press_event(self, x, y, button,\n guiEvent=event)\n if DEBUG:\n print('button pressed:', event.button())\n\n def mouseReleaseEvent(self, event):\n x = event.x()\n # flipy so y=0 is bottom of canvas\n y = self.figure.bbox.height - event.y()\n button = self.buttond.get(event.button())\n if button is not None:\n FigureCanvasAgg.button_release_event(self, x, y, button,\n guiEvent=event)\n if DEBUG:\n print('button released')\n\n def mouseDoubleClickEvent(self, event):\n x = event.pos().x()\n # flipy so y=0 is bottom of canvas\n y = self.figure.bbox.height - event.pos().y()\n button = self.buttond.get(event.button())\n if button is not None:\n FigureCanvasAgg.button_press_event(self, x, y,\n button, dblclick=True,\n guiEvent=event)\n if DEBUG:\n print('button doubleclicked:', event.button())\n\n def wheelEvent(self, event):\n x = event.x()\n # flipy so y=0 is bottom of canvas\n y = self.figure.bbox.height - event.y()\n # from QWheelEvent::delta doc\n if event.pixelDelta().x() == 0 and event.pixelDelta().y() == 0:\n steps = event.angleDelta().y() / 120\n else:\n steps = event.pixelDelta().y()\n\n if steps != 0:\n FigureCanvasAgg.scroll_event(self, x, y, steps, guiEvent=event)\n if DEBUG:\n print('scroll event: '\n 'steps = %i ' % (steps))\n\n def keyPressEvent(self, event):\n key = self._get_key(event)\n if key is None:\n return\n FigureCanvasAgg.key_press_event(self, key, guiEvent=event)\n if DEBUG:\n print('key press', key)\n\n def keyReleaseEvent(self, event):\n key = self._get_key(event)\n if key is None:\n return\n FigureCanvasAgg.key_release_event(self, key, guiEvent=event)\n if DEBUG:\n print('key release', key)\n\n def _get_key(self, event):\n if event.isAutoRepeat():\n return None\n\n event_key 
= event.key()\n event_mods = int(event.modifiers()) # actually a bitmask\n\n # get names of the pressed modifier keys\n # bit twiddling to pick out modifier keys from event_mods bitmask,\n # if event_key is a MODIFIER, it should not be duplicated in mods\n mods = [name for name, mod_key, qt_key in MODIFIER_KEYS\n if event_key != qt_key and (event_mods & mod_key) == mod_key]\n try:\n # for certain keys (enter, left, backspace, etc) use a word for the\n # key, rather than unicode\n key = SPECIAL_KEYS[event_key]\n except KeyError:\n # unicode defines code points up to 0x0010ffff\n # QT will use Key_Codes larger than that for keyboard keys that are\n # are not unicode characters (like multimedia keys)\n # skip these\n # if you really want them, you should add them to SPECIAL_KEYS\n MAX_UNICODE = 0x10ffff\n if event_key > MAX_UNICODE:\n return None\n\n key = six.unichr(event_key)\n # qt delivers capitalized letters. fix capitalization\n # note that capslock is ignored\n if 'shift' in mods:\n mods.remove('shift')\n else:\n key = key.lower()\n\n mods.reverse()\n return '+'.join(mods + [key])\n\n def new_timer(self, *args, **kwargs):\n \"\"\"\n Creates a new backend-specific subclass of\n :class:`backend_bases.Timer`. This is useful for getting\n periodic events through the backend's native event\n loop. Implemented only for backends with GUIs.\n\n optional arguments:\n\n *interval*\n Timer interval in milliseconds\n\n *callbacks*\n Sequence of (func, args, kwargs) where func(*args, **kwargs)\n will be executed by the timer every *interval*.\n \"\"\"\n return TimerQT(*args, **kwargs)\n\n def flush_events(self):\n global qApp\n qApp.processEvents()\n\n def start_event_loop(self, timeout):\n FigureCanvasAgg.start_event_loop_default(self, timeout)\n\n #start_event_loop.__doc__ = \\\n # FigureCanvasAgg.start_event_loop_default.__doc__\n\n def stop_event_loop(self):\n FigureCanvasAgg.stop_event_loop_default(self)\n\n #stop_event_loop.__doc__ = FigureCanvasAgg.stop_event_loop_default.__doc__\n\n \nclass FigureQtQuickAggToolbar(FigureCanvasQtQuickAgg):\n \"\"\" This class creates a QtQuick Item encapsulating a Matplotlib\n Figure and all the functions to interact with the 'standard'\n Matplotlib navigation toolbar.\n \"\"\"\n \n cursord = {\n cursors.MOVE: QtCore.Qt.SizeAllCursor,\n cursors.HAND: QtCore.Qt.PointingHandCursor,\n cursors.POINTER: QtCore.Qt.ArrowCursor,\n cursors.SELECT_REGION: QtCore.Qt.CrossCursor,\n }\n \n messageChanged = QtCore.pyqtSignal(str)\n \n leftChanged = QtCore.pyqtSignal()\n rightChanged = QtCore.pyqtSignal()\n topChanged = QtCore.pyqtSignal()\n bottomChanged = QtCore.pyqtSignal()\n wspaceChanged = QtCore.pyqtSignal()\n hspaceChanged = QtCore.pyqtSignal()\n\n def __init__(self, figure, parent=None, coordinates=True):\n if DEBUG:\n print('FigureQtQuickAggToolbar qtquick5: ', figure)\n\n FigureCanvasQtQuickAgg.__init__(self, figure=figure, parent=parent)\n \n self._message = \"\"\n #\n # Attributes from NavigationToolbar2QT\n #\n self.coordinates = coordinates\n self._actions = {}\n \n # reference holder for subplots_adjust window\n self.adj_window = None\n #\n # Attributes from NavigationToolbar2\n #\n self.canvas = self.figure.canvas\n self.toolbar = self\n # a dict from axes index to a list of view limits\n self._views = matplotlib.cbook.Stack()\n self._positions = matplotlib.cbook.Stack() # stack of subplot positions\n self._xypress = None # the location and axis info at the time\n # of the press\n self._idPress = None\n self._idRelease = None\n self._active = None\n 
self._lastCursor = None\n \n self._idDrag = self.canvas.mpl_connect(\n 'motion_notify_event', self.mouse_move)\n\n self._ids_zoom = []\n self._zoom_mode = None\n\n self._button_pressed = None # determined by the button pressed\n # at start\n\n self.mode = '' # a mode string for the status bar\n self.set_history_buttons()\n \n #\n # Store margin\n #\n self._defaults = {}\n for attr in ('left', 'bottom', 'right', 'top', 'wspace', 'hspace', ):\n val = getattr(self.figure.subplotpars, attr)\n self._defaults[attr] = val\n setattr(self, attr, val)\n \n @QtCore.pyqtProperty('QString', notify=messageChanged)\n def message(self):\n return self._message\n \n @message.setter\n def message(self, msg):\n if msg != self._message:\n self._message = msg\n self.messageChanged.emit(msg)\n \n @QtCore.pyqtProperty('QString', constant=True)\n def defaultDirectory(self):\n startpath = matplotlib.rcParams.get('savefig.directory', '')\n return os.path.expanduser(startpath)\n \n @QtCore.pyqtProperty('QStringList', constant=True)\n def fileFilters(self):\n filetypes = self.canvas.get_supported_filetypes_grouped()\n sorted_filetypes = list(six.iteritems(filetypes))\n sorted_filetypes.sort()\n \n filters = []\n for name, exts in sorted_filetypes:\n exts_list = \" \".join(['*.%s' % ext for ext in exts])\n filter = '%s (%s)' % (name, exts_list)\n filters.append(filter)\n \n return filters\n\n @QtCore.pyqtProperty('QString', constant=True)\n def defaultFileFilter(self): \n default_filetype = self.canvas.get_default_filetype()\n \n selectedFilter = None\n for filter in self.fileFilters:\n exts = filter.split('(', maxsplit=1)[1]\n exts = exts[:-1].split()\n if default_filetype in exts:\n selectedFilter = filter\n break\n \n if selectedFilter is None:\n selectedFilter = self.fileFilters[0]\n \n return selectedFilter\n \n @QtCore.pyqtProperty(float, notify=leftChanged)\n def left(self):\n return self.figure.subplotpars.left\n \n @left.setter\n def left(self, value):\n if value != self.figure.subplotpars.left:\n self.figure.subplots_adjust(left=value)\n self.leftChanged.emit()\n \n self.figure.canvas.draw_idle()\n \n @QtCore.pyqtProperty(float, notify=rightChanged)\n def right(self):\n return self.figure.subplotpars.right\n \n @right.setter\n def right(self, value):\n if value != self.figure.subplotpars.right:\n self.figure.subplots_adjust(right=value)\n self.rightChanged.emit()\n \n self.figure.canvas.draw_idle()\n \n @QtCore.pyqtProperty(float, notify=topChanged)\n def top(self):\n return self.figure.subplotpars.top\n \n @top.setter\n def top(self, value):\n if value != self.figure.subplotpars.top:\n self.figure.subplots_adjust(top=value)\n self.topChanged.emit()\n \n self.figure.canvas.draw_idle()\n \n @QtCore.pyqtProperty(float, notify=bottomChanged)\n def bottom(self):\n return self.figure.subplotpars.bottom\n \n @bottom.setter\n def bottom(self, value):\n if value != self.figure.subplotpars.bottom:\n self.figure.subplots_adjust(bottom=value)\n self.bottomChanged.emit()\n \n self.figure.canvas.draw_idle()\n \n @QtCore.pyqtProperty(float, notify=hspaceChanged)\n def hspace(self):\n return self.figure.subplotpars.hspace\n \n @hspace.setter\n def hspace(self, value):\n if value != self.figure.subplotpars.hspace:\n self.figure.subplots_adjust(hspace=value)\n self.hspaceChanged.emit()\n \n self.figure.canvas.draw_idle()\n \n @QtCore.pyqtProperty(float, notify=wspaceChanged)\n def wspace(self):\n return self.figure.subplotpars.wspace\n \n @wspace.setter\n def wspace(self, value):\n if value != 
self.figure.subplotpars.wspace:\n self.figure.subplots_adjust(wspace=value)\n self.wspaceChanged.emit()\n \n self.figure.canvas.draw_idle()\n\n def mouse_move(self, event):\n self._set_cursor(event)\n\n if event.inaxes and event.inaxes.get_navigate():\n\n try:\n s = event.inaxes.format_coord(event.xdata, event.ydata)\n except (ValueError, OverflowError):\n pass\n else:\n artists = [a for a in event.inaxes.mouseover_set\n if a.contains(event)]\n\n if artists:\n\n a = max(enumerate(artists), key=lambda x: x[1].zorder)[1]\n if a is not event.inaxes.patch:\n data = a.get_cursor_data(event)\n if data is not None:\n s += ' [{:s}]'.format(a.format_cursor_data(data))\n\n if len(self.mode):\n self.message = '{:s}, {:s}'.format(self.mode, s)\n else:\n self.message = s\n else:\n self.message = self.mode\n\n def dynamic_update(self):\n self.canvas.draw_idle()\n\n def push_current(self):\n \"\"\"push the current view limits and position onto the stack\"\"\"\n views = []\n pos = []\n for a in self.canvas.figure.get_axes():\n views.append(a._get_view())\n # Store both the original and modified positions\n pos.append((\n a.get_position(True).frozen(),\n a.get_position().frozen()))\n self._views.push(views)\n self._positions.push(pos)\n self.set_history_buttons()\n\n def set_history_buttons(self):\n \"\"\"Enable or disable back/forward button\"\"\"\n pass\n\n def _update_view(self):\n \"\"\"Update the viewlim and position from the view and\n position stack for each axes\n \"\"\"\n\n views = self._views()\n if views is None:\n return\n pos = self._positions()\n if pos is None:\n return\n for i, a in enumerate(self.canvas.figure.get_axes()):\n a._set_view(views[i])\n # Restore both the original and modified positions\n a.set_position(pos[i][0], 'original')\n a.set_position(pos[i][1], 'active')\n\n self.canvas.draw_idle()\n\n @QtCore.pyqtSlot()\n def home(self, *args):\n \"\"\"Restore the original view\"\"\"\n self._views.home()\n self._positions.home()\n self.set_history_buttons()\n self._update_view()\n\n @QtCore.pyqtSlot()\n def forward(self, *args):\n \"\"\"Move forward in the view lim stack\"\"\"\n self._views.forward()\n self._positions.forward()\n self.set_history_buttons()\n self._update_view()\n\n @QtCore.pyqtSlot()\n def back(self, *args):\n \"\"\"move back up the view lim stack\"\"\"\n self._views.back()\n self._positions.back()\n self.set_history_buttons()\n self._update_view()\n\n def _set_cursor(self, event):\n if not event.inaxes or not self._active:\n if self._lastCursor != cursors.POINTER:\n self.set_cursor(cursors.POINTER)\n self._lastCursor = cursors.POINTER\n else:\n if self._active == 'ZOOM':\n if self._lastCursor != cursors.SELECT_REGION:\n self.set_cursor(cursors.SELECT_REGION)\n self._lastCursor = cursors.SELECT_REGION\n elif (self._active == 'PAN' and\n self._lastCursor != cursors.MOVE):\n self.set_cursor(cursors.MOVE)\n\n self._lastCursor = cursors.MOVE\n\n def set_cursor(self, cursor):\n \"\"\"\n Set the current cursor to one of the :class:`Cursors`\n enums values\n \"\"\"\n if DEBUG:\n print('Set cursor', cursor)\n self.canvas.setCursor(self.cursord[cursor])\n\n def draw_with_locators_update(self):\n \"\"\"Redraw the canvases, update the locators\"\"\"\n for a in self.canvas.figure.get_axes():\n xaxis = getattr(a, 'xaxis', None)\n yaxis = getattr(a, 'yaxis', None)\n locators = []\n if xaxis is not None:\n locators.append(xaxis.get_major_locator())\n locators.append(xaxis.get_minor_locator())\n if yaxis is not None:\n locators.append(yaxis.get_major_locator())\n 
locators.append(yaxis.get_minor_locator())\n\n for loc in locators:\n loc.refresh()\n self.canvas.draw_idle()\n\n def press(self, event):\n \"\"\"Called whenever a mouse button is pressed.\"\"\"\n pass\n\n def press_pan(self, event):\n \"\"\"the press mouse button in pan/zoom mode callback\"\"\"\n\n if event.button == 1:\n self._button_pressed = 1\n elif event.button == 3:\n self._button_pressed = 3\n else:\n self._button_pressed = None\n return\n\n x, y = event.x, event.y\n\n # push the current view to define home if stack is empty\n if self._views.empty():\n self.push_current()\n\n self._xypress = []\n for i, a in enumerate(self.canvas.figure.get_axes()):\n if (x is not None and y is not None and a.in_axes(event) and\n a.get_navigate() and a.can_pan()):\n a.start_pan(x, y, event.button)\n self._xypress.append((a, i))\n self.canvas.mpl_disconnect(self._idDrag)\n self._idDrag = self.canvas.mpl_connect('motion_notify_event',\n self.drag_pan)\n\n self.press(event)\n\n def release(self, event):\n \"\"\"this will be called whenever mouse button is released\"\"\"\n pass\n\n def release_pan(self, event):\n \"\"\"the release mouse button callback in pan/zoom mode\"\"\"\n\n if self._button_pressed is None:\n return\n self.canvas.mpl_disconnect(self._idDrag)\n self._idDrag = self.canvas.mpl_connect(\n 'motion_notify_event', self.mouse_move)\n for a, ind in self._xypress:\n a.end_pan()\n if not self._xypress:\n return\n self._xypress = []\n self._button_pressed = None\n self.push_current()\n self.release(event)\n self.draw_with_locators_update()\n\n def drag_pan(self, event):\n \"\"\"the drag callback in pan/zoom mode\"\"\"\n\n for a, ind in self._xypress:\n #safer to use the recorded button at the press than current button:\n #multiple button can get pressed during motion...\n a.drag_pan(self._button_pressed, event.key, event.x, event.y)\n self.dynamic_update()\n\n @QtCore.pyqtSlot()\n def pan(self, *args):\n \"\"\"Activate the pan/zoom tool. 
pan with left button, zoom with right\"\"\"\n # set the pointer icon and button press funcs to the\n # appropriate callbacks\n\n if self._active == 'PAN':\n self._active = None\n else:\n self._active = 'PAN'\n if self._idPress is not None:\n self._idPress = self.canvas.mpl_disconnect(self._idPress)\n self.mode = ''\n\n if self._idRelease is not None:\n self._idRelease = self.canvas.mpl_disconnect(self._idRelease)\n self.mode = ''\n\n if self._active:\n self._idPress = self.canvas.mpl_connect(\n 'button_press_event', self.press_pan)\n self._idRelease = self.canvas.mpl_connect(\n 'button_release_event', self.release_pan)\n self.mode = 'pan/zoom'\n self.canvas.widgetlock(self)\n else:\n self.canvas.widgetlock.release(self)\n\n for a in self.canvas.figure.get_axes():\n a.set_navigate_mode(self._active)\n\n self.message = self.mode\n\n def draw_rubberband(self, event, x0, y0, x1, y1):\n \"\"\"Draw a rectangle rubberband to indicate zoom limits\"\"\"\n height = self.canvas.figure.bbox.height\n y1 = height - y1\n y0 = height - y0\n\n w = abs(x1 - x0)\n h = abs(y1 - y0)\n\n rect = [int(val)for val in (min(x0, x1), min(y0, y1), w, h)]\n self.canvas.drawRectangle(rect)\n\n def remove_rubberband(self):\n \"\"\"Remove the rubberband\"\"\"\n self.canvas.drawRectangle(None)\n\n def _switch_on_zoom_mode(self, event):\n self._zoom_mode = event.key\n self.mouse_move(event)\n\n def _switch_off_zoom_mode(self, event):\n self._zoom_mode = None\n self.mouse_move(event)\n\n def drag_zoom(self, event):\n \"\"\"the drag callback in zoom mode\"\"\"\n\n if self._xypress:\n x, y = event.x, event.y\n lastx, lasty, a, ind, view = self._xypress[0]\n\n # adjust x, last, y, last\n x1, y1, x2, y2 = a.bbox.extents\n x, lastx = max(min(x, lastx), x1), min(max(x, lastx), x2)\n y, lasty = max(min(y, lasty), y1), min(max(y, lasty), y2)\n\n if self._zoom_mode == \"x\":\n x1, y1, x2, y2 = a.bbox.extents\n y, lasty = y1, y2\n elif self._zoom_mode == \"y\":\n x1, y1, x2, y2 = a.bbox.extents\n x, lastx = x1, x2\n\n self.draw_rubberband(event, x, y, lastx, lasty)\n\n def press_zoom(self, event):\n \"\"\"the press mouse button in zoom to rect mode callback\"\"\"\n # If we're already in the middle of a zoom, pressing another\n # button works to \"cancel\"\n if self._ids_zoom != []:\n for zoom_id in self._ids_zoom:\n self.canvas.mpl_disconnect(zoom_id)\n self.release(event)\n self.draw_with_locators_update()\n self._xypress = None\n self._button_pressed = None\n self._ids_zoom = []\n return\n\n if event.button == 1:\n self._button_pressed = 1\n elif event.button == 3:\n self._button_pressed = 3\n else:\n self._button_pressed = None\n return\n\n x, y = event.x, event.y\n\n # push the current view to define home if stack is empty\n if self._views.empty():\n self.push_current()\n\n self._xypress = []\n for i, a in enumerate(self.canvas.figure.get_axes()):\n if (x is not None and y is not None and a.in_axes(event) and\n a.get_navigate() and a.can_zoom()):\n self._xypress.append((x, y, a, i, a._get_view()))\n\n id1 = self.canvas.mpl_connect('motion_notify_event', self.drag_zoom)\n id2 = self.canvas.mpl_connect('key_press_event',\n self._switch_on_zoom_mode)\n id3 = self.canvas.mpl_connect('key_release_event',\n self._switch_off_zoom_mode)\n\n self._ids_zoom = id1, id2, id3\n self._zoom_mode = event.key\n\n self.press(event)\n\n def release_zoom(self, event):\n \"\"\"the release mouse button callback in zoom to rect mode\"\"\"\n for zoom_id in self._ids_zoom:\n self.canvas.mpl_disconnect(zoom_id)\n self._ids_zoom = []\n\n 
self.remove_rubberband()\n\n if not self._xypress:\n return\n\n last_a = []\n\n for cur_xypress in self._xypress:\n x, y = event.x, event.y\n lastx, lasty, a, ind, view = cur_xypress\n # ignore singular clicks - 5 pixels is a threshold\n # allows the user to \"cancel\" a zoom action\n # by zooming by less than 5 pixels\n if ((abs(x - lastx) < 5 and self._zoom_mode!=\"y\") or\n (abs(y - lasty) < 5 and self._zoom_mode!=\"x\")):\n self._xypress = None\n self.release(event)\n self.draw_with_locators_update()\n return\n\n # detect twinx,y axes and avoid double zooming\n twinx, twiny = False, False\n if last_a:\n for la in last_a:\n if a.get_shared_x_axes().joined(a, la):\n twinx = True\n if a.get_shared_y_axes().joined(a, la):\n twiny = True\n last_a.append(a)\n\n if self._button_pressed == 1:\n direction = 'in'\n elif self._button_pressed == 3:\n direction = 'out'\n else:\n continue\n\n a._set_view_from_bbox((lastx, lasty, x, y), direction,\n self._zoom_mode, twinx, twiny)\n\n self.draw_with_locators_update()\n self._xypress = None\n self._button_pressed = None\n\n self._zoom_mode = None\n\n self.push_current()\n self.release(event)\n\n @QtCore.pyqtSlot()\n def zoom(self, *args):\n \"\"\"Activate zoom to rect mode\"\"\"\n if self._active == 'ZOOM':\n self._active = None\n else:\n self._active = 'ZOOM'\n\n if self._idPress is not None:\n self._idPress = self.canvas.mpl_disconnect(self._idPress)\n self.mode = ''\n\n if self._idRelease is not None:\n self._idRelease = self.canvas.mpl_disconnect(self._idRelease)\n self.mode = ''\n\n if self._active:\n self._idPress = self.canvas.mpl_connect('button_press_event',\n self.press_zoom)\n self._idRelease = self.canvas.mpl_connect('button_release_event',\n self.release_zoom)\n self.mode = 'zoom rect'\n self.canvas.widgetlock(self)\n else:\n self.canvas.widgetlock.release(self)\n\n for a in self.canvas.figure.get_axes():\n a.set_navigate_mode(self._active)\n\n self.message = self.mode\n\n @QtCore.pyqtSlot()\n def tight_layout(self):\n self.figure.tight_layout()\n # self._setSliderPositions()\n self.draw_idle()\n\n @QtCore.pyqtSlot()\n def reset_margin(self):\n self.figure.subplots_adjust(**self._defaults)\n # self._setSliderPositions()\n self.draw_idle()\n \n @QtCore.pyqtSlot(str)\n def print_figure(self, fname, *args, **kwargs):\n if fname:\n fname = QtCore.QUrl(fname).toLocalFile()\n # save dir for next time\n savefig_dir = os.path.dirname(six.text_type(fname))\n matplotlib.rcParams['savefig.directory'] = savefig_dir\n fname = six.text_type(fname)\n FigureCanvasAgg.print_figure(self, fname, *args, **kwargs)\n self.draw()\n \nFigureCanvasQTAgg = FigureCanvasQtQuickAgg\nFigureCanvasQTAggToolbar = FigureQtQuickAggToolbar" ]
[ [ "matplotlib.backends.backend_agg.FigureCanvasAgg.resize_event", "matplotlib.backends.backend_agg.FigureCanvasAgg.key_release_event", "matplotlib.rcParams.get", "matplotlib.backends.backend_agg.FigureCanvasAgg.button_press_event", "matplotlib.backends.backend_agg.FigureCanvasAgg.key_press_event", "matplotlib.backends.backend_agg.FigureCanvasAgg.print_figure", "matplotlib.figure.Figure", "matplotlib.backends.backend_agg.FigureCanvasAgg.button_release_event", "matplotlib.backends.backend_agg.FigureCanvasAgg.scroll_event", "matplotlib.backends.backend_agg.FigureCanvasAgg.enter_notify_event", "matplotlib.backends.backend_agg.FigureCanvasAgg.draw", "matplotlib.backends.backend_agg.FigureCanvasAgg.motion_notify_event", "matplotlib.backends.backend_qt5.TimerQT", "matplotlib.backends.backend_agg.FigureCanvasAgg.__init__", "matplotlib.cbook.Stack", "matplotlib.backends.backend_agg.FigureCanvasAgg.start_event_loop_default", "matplotlib.backends.backend_agg.FigureCanvasAgg.stop_event_loop_default", "matplotlib.backends.backend_agg.FigureCanvasAgg.leave_notify_event" ] ]
mattwigway/tssankey
[ "01b3456bc7c308450dcb82154eddfdb984cc7014" ]
[ "tssankey/sankey.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.patches\nimport textwrap\n\n\n# just using fill_between and offsetting curves vertically makes steep curves appear to get\n# thinner in the middle, because you perceive the width of the line perpendicular to its direction\ndef _cwcurve(x_left, x_right, base_left, base_right, height, ax, steps=50, **kwargs):\n # create the center of the curve\n # just define one, which we multiply as needed to get the right height\n # This is just the ys\n # This uses the smootherstep algorithm: https://en.wikipedia.org/wiki/Smoothstep#Variations\n center_x = np.linspace(-0.01, 1.01, steps)\n smoothstepxs = np.clip(center_x, 0, 1)\n # note that this is 0 when x = 0 and 1 when x = 1\n smoothstep = 6 * smoothstepxs ** 5 - 15 * smoothstepxs ** 4 + 10 * smoothstepxs ** 3\n center_x -= np.min(center_x)\n center_x /= np.max(center_x)\n center_x *= x_right - x_left\n center_x += x_left\n center_y = smoothstep * (base_right - base_left) + base_left + height / 2\n\n # compute width: https://stackoverflow.com/questions/19394505\n fig = ax.get_figure()\n # width is float in [0, 1] to say how much of bbox is used by axis. 72 is inches to points\n axis_length_pt = fig.bbox_inches.height * ax.get_position().height * 72\n axis_length_data = np.abs(ax.get_ylim()[1] - ax.get_ylim()[0])\n pts_per_data_unit = axis_length_pt / axis_length_data\n width = pts_per_data_unit * height\n\n xy = np.array([center_x, center_y]).T\n return matplotlib.patches.Polygon(\n xy, closed=False, facecolor=\"none\", lw=width, **kwargs\n )\n\n\ndef tssankey(\n df,\n bar_width=0.4,\n figsize=(12, 8),\n total_gap=100,\n ax=None,\n weights=None,\n colors=None,\n curve_color=None,\n curve_alpha=0.25,\n percent_labels=True,\n min_percent_label=0.05,\n wrap=0,\n):\n \"\"\"\n Create a Sankey plot. The only required parameter is the data frame which has categorical columns. These are grouped\n by and summed to create the bars.\n\n Parameters\n ----------\n\n df: pd.DataFrame \n A dataframe with one categorical column per time period and one row per observation to show how individuals transition between the categories. Categories do not need to be the same in all columns.\n bar_width: float\n The width of the bars, with 1.0 indicating they touch and have no space for the Sankey lines between them. Default 0.4.\n figsize: tuple\n Size of the figure to plot, as a tuple (x, y). Default (12, 8).\n total_gap: float\n The total vertical gap between all categories in a period. Scale is number of observations. For instance, a value of 100 with five categories will mean that there will be a space equivalent to 25 observations between each category and the next. Default 100.\n ax: axes\n axes to plot on. Figsize ignored if specified. Default is to create new axes.\n weights: pd.Series\n weights for each observation, parallel to df. Default no weights.\n colors: dict\n Map from category names to colors to use for that category. Default is to use colors from the matplotlib style.\n curve_color: function\n Function that receives first category, left category, and right category for a curve, and returns a color. 
Default to use the colors of the first category, as specified by colors or in the style.\n percent_labels: bool\n If True, label each category with the percent of the total represented by that category.\n min_percent_label: float\n Float between 0 and 1 for minimum proportion for which to show percent labels.\n wrap: int\n Wrap labels after this number of characters (default 0 for no wrapping)\n \"\"\"\n\n if ax is None:\n f, ax = plt.subplots(figsize=figsize)\n\n bases = {}\n\n cols = df.columns\n for i, col in enumerate(cols):\n base = 0\n gap = total_gap / (len(df[col].cat.categories) - 1)\n bases[col] = dict()\n for val in df[col].cat.categories:\n if weights is not None:\n hgt = weights[df[col] == val].sum()\n else:\n hgt = (df[col] == val).sum()\n\n if colors is None:\n color = \"C4\" if i > 0 else None\n else:\n if val in colors:\n color = colors[val]\n else:\n color = \"C4\"\n\n rect = ax.bar(\n [i], [hgt], bottom=base, width=bar_width, color=color, zorder=10\n )[0]\n\n # label it\n wrapval = textwrap.wrap(val, wrap) if wrap > 0 else val\n\n if weights is not None:\n total = np.sum(weights)\n else:\n total = len(df)\n if percent_labels:\n label = f\"{wrapval}\\n({int(round(hgt / total * 100))}%)\"\n else:\n label = wrapval\n if hgt / total < min_percent_label:\n label = wrapval\n ax.annotate(\n label,\n xy=(\n rect.get_x() + rect.get_width() / 2,\n rect.get_y() + rect.get_height() / 2,\n ),\n xytext=(0, 0), # 3 points vertical offset\n textcoords=\"offset points\",\n ha=\"center\",\n va=\"center\",\n color=\"white\",\n weight=\"bold\",\n zorder=20,\n )\n\n bases[col][val] = base\n base += hgt + gap\n\n # make the snakes\n i = 0\n for lcol, rcol in zip(cols[:-1], cols[1:]):\n # protective copy\n lbases = {k: v for k, v in bases[lcol].items()}\n rbases = {k: v for k, v in bases[rcol].items()}\n for orig_idx, orig_val in enumerate(df[cols[0]].cat.categories):\n for lval in df[lcol].cat.categories:\n for rval in df[rcol].cat.categories:\n if curve_color is not None:\n color = curve_color(orig_val, lval, rval)\n elif colors is None or orig_val not in colors:\n color = f\"C{orig_idx}\"\n else:\n color = colors[orig_val]\n\n if weights is not None:\n count = np.sum(\n weights[\n (df[cols[0]] == orig_val)\n & (df[lcol] == lval)\n & (df[rcol] == rval)\n ]\n )\n else:\n count = np.sum(\n (df[cols[0]] == orig_val)\n & (df[lcol] == lval)\n & (df[rcol] == rval)\n )\n if count == 0:\n continue\n\n ax.add_patch(\n _cwcurve(\n i + bar_width / 2,\n i + 1 - bar_width / 2,\n lbases[lval],\n rbases[rval],\n count,\n ax=ax,\n edgecolor=color,\n alpha=curve_alpha,\n )\n )\n\n lbases[lval] += count\n rbases[rval] += count\n i += 1\n\n ax.set_xticks(np.arange(len(cols)))\n ax.set_xticklabels(cols)\n ax.set_yticks([])\n" ]
[ [ "numpy.linspace", "numpy.min", "numpy.clip", "matplotlib.pyplot.subplots", "numpy.max", "numpy.array", "numpy.sum" ] ]
lekooooook/POINT2-pytorch
[ "c9f5fad59e2f7da2c169255de5a730d861a1a96e" ]
[ "lib/net/triangulation_layer.py" ]
[ "import torch\nimport torch.nn as nn\nfrom kornia import SpatialSoftArgmax2d\n\n\nclass triangulation_layer(nn.Module):\n def __init__(self, device):\n super(triangulation_layer, self).__init__()\n self.device = device\n self.softArgmax = SpatialSoftArgmax2d(temperature=10000, normalized_coordinates=False) \n\n # 成像参数\n self.distance = 1800\n self.center = 900 # 注意:c表示成像系统在Rt变换前的中心(初始化为焦距的一半)\n # self.K = troch.tensor([[self.distance, 0, 0],\n # [0, self.distance, 0],\n # [0, 0, 1]]).to(device=self.device, dtype=torch.float32)\n # self.h = troch.tensor([[0, 0, self.center]]).t().to(device=self.device, dtype=torch.float32)\n Tr_ap = torch.tensor([[1., 0., 0., 0.],\n [0., 0., 1., -700.],\n [0., -1., 0., 0.],\n [0., 0., 0., 1.]]).to(device=self.device, dtype=torch.float32)\n Tr_ap_inv = torch.inverse(Tr_ap)\n self.R_view_ap = Tr_ap_inv[0 : 3, 0 : 3]\n self.t_view_ap = Tr_ap_inv[:3, 3].t()\n Tr_lat = torch.tensor([[0., 0., 1., -700.],\n [-1., 0., 0., 0.],\n [0., -1., 0., 0.],\n [0., 0., 0., 1.]]).to(device=self.device, dtype=torch.float32)\n Tr_lat_inv = torch.inverse(Tr_lat)\n self.R_view_lat = Tr_lat_inv[0 : 3, 0 : 3]\n self.t_view_lat = Tr_lat_inv[:3, 3].t()\n self.center_volume = torch.tensor([127.5, 127.5, 127.5]).to(device=self.device, dtype=torch.float32)\n self.K_part = torch.tensor([[-self.distance, 0], [0, -self.distance]]).to(device=self.device, dtype=torch.float32)\n\n\n def forward(self, score_map_ap, score_map_lat):\n self.score_map_ap = score_map_ap\n self.score_map_lat = score_map_lat\n self.batch_size = score_map_ap.shape[0]\n self.point_num = score_map_ap.shape[1]\n self.s_size_H = score_map_ap.shape[2]\n self.s_size_W = score_map_ap.shape[3]\n\n fiducial_3D_pred_list = []\n for batch_index in range(self.batch_size):\n fiducial_3D_pred_per_batch_list = []\n for point_index in range(self.point_num):\n score_map_ap_devided = self.score_map_ap[batch_index][point_index]\n score_map_lat_devided = self.score_map_lat[batch_index][point_index]\n score_map_ap_devided = score_map_ap_devided.unsqueeze(0).unsqueeze(0)\n score_map_lat_devided = score_map_lat_devided.unsqueeze(0).unsqueeze(0)\n\n max_index_ap = self.softArgmax(score_map_ap_devided)\n max_index_ap = max_index_ap.view(-1)\n # max_index_ap = torch.flip(max_index_ap, dims=[0])\n max_index_lat = self.softArgmax(score_map_lat_devided)\n max_index_lat = max_index_lat.view(-1)\n # max_index_lat = torch.flip(max_index_lat, dims=[0])\n\n max_index_ap[0] = max_index_ap[0] - self.s_size_W / 2\n max_index_ap[1] = max_index_ap[1] - self.s_size_H / 2\n max_index_lat[0] = max_index_lat[0] - self.s_size_W / 2\n max_index_lat[1] = max_index_lat[1] - self.s_size_H / 2\n\n # max_index_ap = torch.tensor([86.40445959, -47.38309074]).to(device=self.device, dtype=torch.float32)\n # max_index_lat = torch.tensor([-15.55886736, -45.60357675]).to(device=self.device, dtype=torch.float32)\n\n D_x1 = torch.cat([self.K_part, max_index_ap.unsqueeze(0).t()], dim=1)\n D_x2 = torch.cat([self.K_part, max_index_lat.unsqueeze(0).t()], dim=1)\n\n A = torch.squeeze(torch.cat([torch.matmul(D_x1, self.R_view_ap), torch.matmul(D_x2, self.R_view_lat)], dim=0))\n b = torch.cat([-self.center * max_index_ap - torch.matmul(D_x1, self.t_view_ap), -self.center * max_index_lat - torch.matmul(D_x2, self.t_view_lat)], dim=0)\n\n X_3d_pred = torch.matmul(torch.pinverse(A), b)\n X_3d_pred = X_3d_pred + self.center_volume\n X_3d_pred = X_3d_pred.unsqueeze(0)\n\n # [77.5, 14.5, 42.5]\n\n fiducial_3D_pred_per_batch_list.append(X_3d_pred)\n\n fiducial_3D_pred_per_batch = 
torch.cat(fiducial_3D_pred_per_batch_list, dim=0)\n fiducial_3D_pred_per_batch = fiducial_3D_pred_per_batch.unsqueeze(0)\n fiducial_3D_pred_list.append(fiducial_3D_pred_per_batch)\n \n fiducial_3D_pred = torch.cat(fiducial_3D_pred_list, dim=0)\n\n return fiducial_3D_pred" ]
[ [ "torch.cat", "torch.pinverse", "torch.tensor", "torch.inverse", "torch.matmul" ] ]
dtanoglidis/DeepGhostBusters
[ "eb33bd4f7db6ce65e5fd70231d4f2a9d18c2080c" ]
[ "run.py" ]
[ "#!/usr/bin/env python\n\"\"\"\nGeneric python script.\n\"\"\"\n__author__ = \"Alex Drlica-Wagner\"\n\nimport os\nimport numpy as np\nimport pylab as plt\nimport pandas as pd\nimport scipy.ndimage as nd\n#import fitsio\n\nimport collections\nimport subprocess\n\nimport matplotlib\nfrom matplotlib.colors import LogNorm\nimport matplotlib.image\nfrom matplotlib.patches import Rectangle\n\ndef fov_geometry(release='sva1',size=[530,454]):\n \"\"\"\n Return positions of each CCD in PNG image for\n a given data release.\n\n Parameters:\n release : Data release name (currently ['sva1','y1a1']\n size : Image dimensions in pixels [width,height]\n Returns:\n list : A list of [id, xmin, ymin, xmax, ymax] for each CCD\n \"\"\"\n\n SIZE=size\n WIDTH=SIZE[0]\n HEIGHT=SIZE[1]\n # CCDs belonging to each row\n ROWS = [ [3,2,1], #range(3,0,-1),\n [7,6,5,4], #range(7,3,-1),\n [12,11,10,9,8], #range(12,7,-1),\n [18,17,16,15,14,13], #range(18,12,-1),\n [24,23,22,21,20,19], #range(24,18,-1),\n [31,30,29,28,27,26,25], #range(31,24,-1),\n [38,37,36,35,34,33,32], #range(38,31,-1),\n [44,43,42,41,40,39], #range(44,38,-1),\n [50,49,48,47,46,45], #range(50,44,-1),\n [55,54,53,52,51], #range(55,50,-1),\n [59,58,57,56], #range(59,55,-1),\n [62,61,60], #range(62,59,-1)\n ]\n\n if release.lower() == 'sva1':\n # These are the old SV pngs, not the ones made for Y2A1\n # Boder padding in x,y; assumed symmetric\n PAD = [0,0] \n ROWS = [r[::-1] for r in ROWS[::-1]]\n else:\n PAD = [0.02*WIDTH,0.02*HEIGHT]\n ROWS = ROWS\n\n NROWS = len(ROWS) # Number of rows\n NCCDS = [len(row) for row in ROWS]\n CCD_SIZE = [float(WIDTH-2*PAD[0])/max(NCCDS),\n float(HEIGHT-2*PAD[1])/NROWS] # CCD dimension (assumed to span image)\n\n ret = []\n for i,ccds in enumerate(ROWS):\n for j,ccd in enumerate(ccds):\n xpad = (SIZE[0] - len(ccds)*CCD_SIZE[0])/2.\n ypad = PAD[1]\n xmin = xpad + j*CCD_SIZE[0]\n xmax = xmin + CCD_SIZE[0]\n ymin = ypad + i*CCD_SIZE[1]\n ymax = ymin + CCD_SIZE[1]\n # These are output as ints now\n ret += [[int(ccd), int(xmin), int(ymin), int(xmax), int(ymax)]]\n return sorted(ret)\n\ndef draw_png(url):\n png = os.path.basename(url)\n if os.path.exists(png): os.remove(png)\n subprocess.check_call('wget %s'%url,shell=True)\n image = matplotlib.image.imread(png)\n ax = plt.gca()\n ax.axis('off')\n ax.imshow(image,cmap='gray',interpolation='none')\n if os.path.exists(png): os.remove(png)\n ax.annotate('png',(0.05,0.9),xycoords='axes fraction',ha='left',fontsize=10,\n bbox={'boxstyle':'round','fc':'white'})\n return image\n\ndef draw_fov(png,ccds=[],release='y1a1'):\n ax = plt.gca()\n ax.axis('off')\n ax.imshow(fov,cmap='gray',interpolation='none')\n ret = fov_geometry(release='y1a1',size=png.shape[::-1])\n patches = []\n\n for i,x1,y1,x2,y2 in ret:\n WIDTH = np.abs(x2-x1)\n HEIGHT = np.abs(y2-y1)\n rect = Rectangle((x1,y1),WIDTH,HEIGHT,fc='none',ec='w',lw=2)\n ax.add_artist(rect)\n patches.append(rect)\n center = (x2+x1)/2.,(y2+y1)/2.\n ax.annotate(i,xy=center,color='w',ha='center',va='center',fontsize=10)\n\n for ccd in badccds:\n i,x1,y1,x2,y2 = ret[ccd-1]\n fov[y1:y2,x1:x2] = 1\n center = ((x1+x2)/2.,(y1+y2)/2.)\n ax.annotate(i,xy=center,color='k',ha='center',va='center',fontsize=10)\n ax.imshow(fov,cmap='gray',interpolation='none')\n ax.annotate('blacklist',(0.05,0.9),xycoords='axes fraction',ha='left',fontsize=10,\n bbox={'boxstyle':'round','fc':'white'})\n return fov,patches\n\ndef draw_mini(url):\n ax = plt.gca()\n mini = os.path.basename(url)\n if os.path.exists(mini): os.remove(mini)\n subprocess.check_call('wget 
%s'%url,shell=True)\n fp = fitsio.read(mini)\n fp_img = -1*np.ones((202,224))\n fp_img[5:-5,:] = fp[::-1].T\n vmin,median,vmax = np.percentile(fp[fp!=-1],q=[5,50,95])\n ax.axis('off')\n im = ax.imshow(fp_img,cmap='gray',interpolation='none',vmin=vmin,vmax=vmax)\n ax.annotate('mini',(0.05,0.9),xycoords='axes fraction',ha='left',fontsize=10,\n bbox={'boxstyle':'round','fc':'white'})\n if os.path.exists(mini): os.remove(mini)\n return fp_img, im, [vmin,vmax]\n\nif __name__ == \"__main__\":\n import argparse\n description = __doc__\n parser = argparse.ArgumentParser(description=description)\n parser.add_argument('filename',default='ghost-scatter-y6.txt')\n parser.add_argument('-f','--force',action='store_true')\n args = parser.parse_args()\n\n bl = np.genfromtxt(args.filename,names=True,dtype=int)\n bl.dtype.names = map(str.upper,bl.dtype.names)\n bl = bl[np.argsort(bl['EXPNUM'])]\n\n urls = pd.read_csv('urls-y6a1.csv').to_records(index=False)\n urls = urls[np.argsort(urls['EXPNUM'])]\n urls = urls[np.in1d(urls['EXPNUM'],np.unique(bl['EXPNUM']))]\n\n outdir = 'pngs'\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n\n for i,(expnum,url) in enumerate(urls):\n print (\"(%i/%i)\"%(i+1,len(urls)))\n outfile = os.path.join(outdir,os.path.basename(url).replace('_TN',''))\n\n if os.path.exists(outfile) and not args.force:\n print(\"Found %s; skipping...\"%outfile)\n continue\n\n fig,ax = plt.subplots(1,2,figsize=(10,6))\n plt.tight_layout()\n plt.subplots_adjust(wspace=0.01)\n\n plt.sca(ax[0])\n image = draw_png(url)\n \n fov = np.zeros_like(image)\n ret = fov_geometry(size=fov.shape[::-1])\n badccds = bl['CCDNUM'][bl['EXPNUM'] == expnum].astype(int)\n print (\"Blacklist CCDs:\",badccds)\n plt.sca(ax[-1])\n draw_fov(fov,ccds=badccds)\n\n title = os.path.splitext(outfile)[0]\n plt.savefig(outfile,bbox_inches='tight')\n plt.close()\n\n#plt.ion()\n" ]
[ [ "pandas.read_csv", "numpy.abs", "numpy.unique", "matplotlib.patches.Rectangle", "numpy.percentile", "numpy.genfromtxt", "numpy.ones", "matplotlib.image.imread", "numpy.zeros_like", "numpy.argsort" ] ]
temcomp/helita
[ "33c71837f79cb2acb89144fcfb59a7ae84fe6db0" ]
[ "helita/obs/iris_util.py" ]
[ "\"\"\"\nSet of utility programs for IRIS.\n\"\"\"\nimport os\nimport re\nimport io\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime, timedelta\nfrom glob import glob\n\n# pylint: disable=F0401,E0611,E1103\nfrom urllib.request import urlopen\nfrom urllib.parse import urljoin, urlparse\nfrom urllib.error import HTTPError, URLError\n\n\ndef iris_timeline_parse(timeline_file):\n \"\"\"\n Parses an IRIS timeline file (SCI format) into a structured array. This\n version outputs a strucured array instead of a pandas DataSet.\n\n Parameters\n ----------\n timeline_file - string\n Filename with timeline file, or URL to the file.\n\n Returns\n -------\n result - pandas.DataFrame\n DataFrame with timeline.\n \"\"\"\n from sunpy.time import parse_time\n data = []\n slews = []\n curr_slew = np.array([np.nan, np.nan])\n line_pat = re.compile('.+OBSID=.+rpt.+endtime', re.IGNORECASE)\n slew_pat = re.compile('.+I_EVENT_MESSAGE.+MSG=\"SLEW*', re.IGNORECASE)\n if urlparse(timeline_file).netloc == '': # local file\n file_obj = open(timeline_file, 'r')\n else: # network location\n try:\n tmp = urlopen(timeline_file).read()\n file_obj = io.StringIO(tmp)\n except (HTTPError, URLError):\n raise EOFError(('iris_timeline_parse: could not open the '\n 'following file:\\n' + timeline_file))\n for line in file_obj:\n if slew_pat.match(line):\n tmp = line.split('=')[1].replace('\"', '').strip('SLEW_').split('_')\n curr_slew = np.array(tmp).astype('f')\n if line_pat.match(line):\n data.append(line.replace('//', '').replace(' x ', ', ').strip())\n slews.append(curr_slew) # include most up to date slew\n file_obj.close()\n if len(data) == 0:\n raise EOFError(('iris_timeline_parse: could not find any'\n ' observations in:\\n' + str(timeline_file)))\n arr_type = [('date_obs', 'datetime64[us]'), ('date_end', 'datetime64[us]'),\n ('obsid', 'i8'), ('repeats', 'i4'), ('duration', 'f'),\n ('size', 'f'), ('description', '|S200'), ('xpos', 'f'),\n ('ypos', 'f'), ('timeline_name', '|S200')]\n result = np.zeros(len(data), dtype=arr_type)\n result['timeline_name'] = timeline_file\n for i, line in enumerate(data):\n date_tmp = line.split()[0]\n if date_tmp[-2:] == '60': # deal with non-compliant second formats\n date_tmp = date_tmp[:-2] + '59.999999'\n result[i]['date_obs'] = parse_time(date_tmp)\n tmp = line.replace(' Mbits, end', ', end') # Remove new Mbits size str\n tmp = tmp.split('desc=')\n result[i]['description'] = tmp[1]\n tmp = tmp[0]\n tmp = [k.split('=')[-1] for k in ' '.join(tmp.split()[1:]).split(',')]\n result[i]['obsid'] = int(tmp[0])\n result[i]['repeats'] = int(tmp[1])\n result[i]['duration'] = float(tmp[2][:-1])\n result[i]['size'] = float(tmp[3])\n tmp = tmp[4].split()\n result[i]['date_end'] = parse_time(date_tmp[:9] + tmp[-1]) + \\\n timedelta(days=int(tmp[0].strip('+')))\n result[i]['xpos'] = slews[i][0]\n result[i]['ypos'] = slews[i][1]\n return pd.DataFrame(result) # order by date_obs\n\n\ndef get_iris_timeline(date_start, date_end, path=None, fmt='%Y/%m/%d',\n pattern='.*IRIS_science_timeline.+txt'):\n \"\"\"\n Gets IRIS timelines for a given time period.\n \"\"\"\n if path is None:\n path = ('http://iris.lmsal.com/health-safety/timeline/'\n 'iris_tim_archive/')\n print('Locating files...')\n file_obj = FileCrawler(date_start, date_end, path, pattern, fmt)\n result = pd.DataFrame()\n for tfile in file_obj.files:\n try:\n print('Parsing:\\n' + tfile)\n timeline = iris_timeline_parse(tfile)\n result = result.append(timeline)\n except EOFError:\n print('get_iris_timeline: could not read 
timeline data from:\\n' +\n tfile)\n return result\n\n\ndef get_iris_files(date_start, date_end, pattern='iris.*.fits', base='level1',\n path='/Users/tiago/data/IRIS/data/'):\n \"\"\"\n Gets list of IRIS observations for a given time period.\n\n Parameters\n ----------\n date_start : str or datetime object\n Starting date to search\n date_end : str or datetime object\n Ending date to search\n path : str\n Base path to look into\n pattern : str\n Regular expression used to match file names.\n\n Returns\n -------\n files : list\n List of strings with matching file names.\n \"\"\"\n file_path = os.path.join(path, base)\n file_obj = FileCrawler(date_start, date_end, file_path, pattern,\n fmt='%Y/%m/%d/H%H%M')\n return file_obj.files\n\n\nclass FileCrawler(object):\n \"\"\"\n Crawls through file names in a local or remote (http) path.\n\n Parameters\n ----------\n date_start : str or datetime object\n Starting date to search\n date_end : str or datetime object\n Ending date to search\n path : str\n Base path to look into\n pattern : str\n Regular expression used to match file names.\n recursive: bool\n If True, will recursively search subdirectories of dates.\n\n Attributes\n ----------\n date_start : str or datetime object\n Starting date given as input\n date_end : str or datetime object\n Ending date given as input\n paths : list\n List of file paths given the supplied dates\n files : list\n List of file names given the supplied path, dates, and pattern\n\n Methods\n -------\n get_remote_paths(date_start, date_end, path, fmt='%Y%m%d')\n Finds existing remote paths within specified dates in path, given fmt.\n get_remote_files(path, pattern)\n Finds existing remote files within specified path matching pattern.\n \"\"\"\n\n def __init__(self, date_start, date_end, path, pattern, fmt='%Y%m%d',\n verbose=False):\n self.date_start = date_start\n self.date_end = date_end\n self.paths = self.get_paths(date_start, date_end, path, fmt)\n if verbose:\n print('Found the following paths:')\n for item in self.paths:\n print(item)\n self.files = []\n for item in self.paths:\n self.files += self.get_files(item, pattern)\n if verbose:\n print('Found the following files:')\n for item in self.files:\n print(item)\n\n @classmethod\n def get_paths(cls, date_start, date_end, path, fmt='%Y%m%d'):\n \"\"\"\n Gets paths within specified date range.\n\n Parameters\n ----------\n date_start : str or datetime object\n Starting date to search\n date_end : str or datetime object\n Ending date to search\n path : str\n Base path where to look for locations (if starts with http,\n remote search will be done)\n format : str\n datetime format string for date in directories.\n\n Returns\n -------\n dates - list\n List with path locations (local directories or remote paths)\n \"\"\"\n from sunpy.time import parse_time\n dates = []\n date_start = parse_time(date_start)\n date_end = parse_time(date_end)\n curr = date_start\n if '%H' in fmt:\n incr = [0, 1] # increment only hours\n else:\n incr = [1, 0] # increment only days\n if urlparse(path).netloc == '': # local file\n while curr <= date_end:\n curr_path = os.path.join(path, datetime.strftime(curr, fmt))\n curr += timedelta(days=incr[0], hours=incr[1])\n if os.path.isdir(curr_path):\n dates.append(curr_path)\n else: # network location\n while curr <= date_end:\n curr_path = urljoin(path, datetime.strftime(curr, fmt) + '/')\n curr += timedelta(days=incr[0], hours=incr[1])\n try:\n urlopen(curr_path)\n dates.append(curr_path)\n except (HTTPError, URLError):\n continue\n return 
dates\n\n @classmethod\n def get_files(cls, path, pattern):\n \"\"\"\n Obtains local or remote files patching a pattern.\n\n Parameters\n ----------\n path : str\n Local directory or remote URL (e.g. 'http://www.google.com/test/')\n pattern : str\n Regular expression to be matched in href link names.\n\n Returns\n -------\n files : list\n List of strings. Each string has the path for the files matching\n the pattern (and are made sure exist).\n\n .. todo:: add recursive option, add option for FTP\n \"\"\"\n from bs4 import BeautifulSoup\n files = []\n pat_re = re.compile(pattern, re.IGNORECASE)\n if urlparse(path).scheme == '': # local file\n all_files = glob(path + '/*')\n for item in all_files:\n if pat_re.match(item) and os.path.isfile(item):\n files.append(item)\n elif urlparse(path).scheme == 'http':\n soup = BeautifulSoup(urlopen(path).read())\n for link in soup.find_all('a'):\n if pat_re.match(link.get('href')):\n file_url = urljoin(path, link.get('href'))\n try: # Add only links that exist\n urlopen(file_url)\n files.append(file_url)\n except (HTTPError, URLError):\n pass\n elif urlparse(path).scheme == 'ftp':\n raise NotImplementedError('ftp not yet supported...')\n return files\n" ]
[ [ "numpy.array", "pandas.DataFrame" ] ]
mmore500/pipe-profile
[ "861babd819909d1bda5e933269e7bc64018272d6" ]
[ "conduitpylib/test/test_utils/test_consolidate_merge.py" ]
[ "#!/bin/python3\n\nimport pandas as pd\nimport pandas.testing as pd_testing\nimport unittest\n\nfrom conduitpylib.utils import consolidate_merge\n\nclass TestCountHandsWithKOrMoreSets(unittest.TestCase):\n\n # adapted from https://stackoverflow.com/a/54344148\n def assertDataframeEqual(self, a, b, msg):\n try:\n pd_testing.assert_frame_equal(a, b)\n except AssertionError as e:\n raise self.failureException(msg) from e\n\n def setUp(self):\n self.addTypeEqualityFunc(pd.DataFrame, self.assertDataframeEqual)\n\n # adapted from https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html#brief-primer-on-merge-methods-relational-algebra\n def test_no_identical_column(self):\n\n left = pd.DataFrame({\n 'key': ['K0', 'K1', 'K2', 'K3'],\n 'A': ['A0', 'A1', 'A2', 'A3'],\n 'B': ['B0', 'B1', 'B2', 'B3'],\n })\n\n\n right = pd.DataFrame({\n 'key': ['K0', 'K1', 'K2', 'K3'],\n 'C': ['C0', 'C1', 'C2', 'C3'],\n 'D': ['D0', 'D1', 'D2', 'D3'],\n })\n\n result = consolidate_merge(left, right, on='key', how='outer')\n expected_result = pd.DataFrame({\n 'key': ['K0', 'K1', 'K2', 'K3'],\n 'A': ['A0', 'A1', 'A2', 'A3'],\n 'B': ['B0', 'B1', 'B2', 'B3'],\n 'C': ['C0', 'C1', 'C2', 'C3'],\n 'D': ['D0', 'D1', 'D2', 'D3'],\n })\n\n self.assertEqual(\n result.sort_index(axis=1),\n expected_result.sort_index(axis=1)\n )\n\n # adapted from https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html#brief-primer-on-merge-methods-relational-algebra\n def test_one_identical_varied_column(self):\n\n left = pd.DataFrame({\n 'key': ['K0', 'K1', 'K2', 'K3'],\n 'A': ['A0', 'A1', 'A2', 'A3'],\n 'B': ['B0', 'B1', 'B2', 'B3'],\n })\n\n\n right = pd.DataFrame({\n 'key': ['K0', 'K1', 'K2', 'K3'],\n 'C': ['C0', 'C1', 'C2', 'C3'],\n 'D': ['D0', 'D1', 'D2', 'D3'],\n 'A': ['A0', 'A1', 'A2', 'A3'],\n })\n\n result = consolidate_merge(left, right, on='key', how='outer')\n expected_result = pd.DataFrame({\n 'key': ['K0', 'K1', 'K2', 'K3'],\n 'A': ['A0', 'A1', 'A2', 'A3'],\n 'B': ['B0', 'B1', 'B2', 'B3'],\n 'C': ['C0', 'C1', 'C2', 'C3'],\n 'D': ['D0', 'D1', 'D2', 'D3'],\n })\n\n self.assertEqual(\n result.sort_index(axis=1),\n expected_result.sort_index(axis=1)\n )\n\n # adapted from https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html#brief-primer-on-merge-methods-relational-algebra\n def test_one_identical_uniform_column(self):\n\n left = pd.DataFrame({\n 'key': ['K0', 'K1', 'K2', 'K3'],\n 'A': ['A', 'A', 'A', 'A'],\n 'B': ['B0', 'B1', 'B2', 'B3'],\n })\n\n\n right = pd.DataFrame({\n 'key': ['K0', 'K1', 'K2', 'K3'],\n 'C': ['C0', 'C1', 'C2', 'C3'],\n 'D': ['D0', 'D1', 'D2', 'D3'],\n 'A': ['A', 'A', 'A', 'A'],\n })\n\n result = consolidate_merge(left, right, on='key', how='outer')\n expected_result = pd.DataFrame({\n 'key': ['K0', 'K1', 'K2', 'K3'],\n 'A': ['A', 'A', 'A', 'A'],\n 'B': ['B0', 'B1', 'B2', 'B3'],\n 'C': ['C0', 'C1', 'C2', 'C3'],\n 'D': ['D0', 'D1', 'D2', 'D3'],\n })\n\n self.assertEqual(\n result.sort_index(axis=1),\n expected_result.sort_index(axis=1)\n )\n\n # adapted from https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html#brief-primer-on-merge-methods-relational-algebra\n def test_custom_labels(self):\n\n left = pd.DataFrame({\n 'key': ['K0', 'K1', 'K2', 'K3'],\n 'A': ['X', 'X', 'X', 'X'],\n 'B': ['B0', 'B1', 'B2', 'B3'],\n })\n\n\n right = pd.DataFrame({\n 'key': ['K0', 'K1', 'K2', 'K3'],\n 'C': ['C0', 'C1', 'C2', 'C3'],\n 'B': ['B0', 'B1', 'B2', 'B3'],\n 'D': ['D0', 'D1', 'D2', 'D3'],\n 'A': ['Y', 'Y', 'Y', 'Y'],\n })\n\n result = consolidate_merge(\n left,\n right,\n 
on='key',\n how='outer',\n suffixes=(\" left\", \" right\"),\n\n )\n expected_result = pd.DataFrame({\n 'key': ['K0', 'K1', 'K2', 'K3'],\n 'A left': ['X', 'X', 'X', 'X'],\n 'A right': ['Y', 'Y', 'Y', 'Y'],\n 'B': ['B0', 'B1', 'B2', 'B3'],\n 'C': ['C0', 'C1', 'C2', 'C3'],\n 'D': ['D0', 'D1', 'D2', 'D3'],\n })\n\n self.assertEqual(\n result.sort_index(axis=1),\n expected_result.sort_index(axis=1)\n )\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "pandas.testing.assert_frame_equal", "pandas.DataFrame" ] ]
Wastoon/TSAL
[ "0f880c600f1a2e128de9c9fdfb94ae0776948cbe" ]
[ "lib/datasets/GD.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n#\nfrom __future__ import print_function\nfrom PIL import Image\nfrom os import path as osp\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport os\n\nfrom pts_utils import generate_label_map\nfrom .file_utils import load_file_lists\nfrom .dataset_utils import pil_loader\nfrom .dataset_utils import anno_parser\nfrom .point_meta import Point_Meta\nimport torch\nimport torch.utils.data as data\nfrom .image import get_affine_transform, affine_transform\nimport cv2\n\n\ndef gaussian_radius(det_size, min_overlap=0.7):\n height, width = det_size\n\n a1 = 1\n b1 = (height + width)\n c1 = width * height * (1 - min_overlap) / (1 + min_overlap)\n sq1 = np.sqrt(b1 ** 2 - 4 * a1 * c1)\n r1 = (b1 + sq1) / 2\n\n a2 = 4\n b2 = 2 * (height + width)\n c2 = (1 - min_overlap) * width * height\n sq2 = np.sqrt(b2 ** 2 - 4 * a2 * c2)\n r2 = (b2 + sq2) / 2\n\n a3 = 4 * min_overlap\n b3 = -2 * min_overlap * (height + width)\n c3 = (min_overlap - 1) * width * height\n sq3 = np.sqrt(b3 ** 2 - 4 * a3 * c3)\n r3 = (b3 + sq3) / 2\n return min(r1, r2, r3)\n\n\ndef gaussian2D(shape, sigma=1):\n m, n = [(ss - 1.) / 2. for ss in shape]\n y, x = np.ogrid[-m:m + 1, -n:n + 1]\n\n h = np.exp(-(x * x + y * y) / (2 * sigma * sigma))\n h[h < np.finfo(h.dtype).eps * h.max()] = 0\n return h\n\n\ndef draw_umich_gaussian(heatmap, center, radius, k=1):\n diameter = 2 * radius + 1\n gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)\n\n x, y = int(center[0]), int(center[1])\n\n height, width = heatmap.shape[0:2]\n\n left, right = min(x, radius), min(width - x, radius + 1)\n top, bottom = min(y, radius), min(height - y, radius + 1)\n\n masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]\n masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:radius + right]\n if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug\n np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)\n return heatmap\n\n\nclass GeneralDataset(data.Dataset):\n\n def __init__(self, transform, sigma, downsample, heatmap_type, data_indicator, phase='train', pca_trans=None):\n\n self.transform = transform\n self.pca_transform = pca_trans\n self.sigma = sigma\n self.downsample = downsample\n self.heatmap_type = heatmap_type\n self.dataset_name = data_indicator\n self.phase = phase\n self.max_objs = 32\n self.seq_length = 10\n\n self.reset()\n print('The general dataset initialization done : {:}'.format(self))\n\n def __repr__(self):\n return (\n '{name}(point-num={NUM_PTS}, sigma={sigma}, heatmap_type={heatmap_type}, length={length}, dataset={dataset_name})'.format(\n name=self.__class__.__name__, **self.__dict__))\n\n def reset(self, num_pts=-1):\n self.length = 0\n self.NUM_PTS = num_pts\n self.datas = []\n self.labels = []\n self.face_sizes = []\n assert self.dataset_name is not None, 'The dataset name is None'\n\n def __len__(self):\n assert len(self.datas) == self.length, 'The length is not correct : {}'.format(self.length)\n return self.length\n\n def append(self, data, labels, box, face_size):\n assert osp.isfile(data[0]), 'The image path is not a file : {}'.format(data)\n self.datas.append(data)\n meat_list = []\n for idx, label in enumerate(labels):\n if (label is not None) and (label.lower() != 'none'):\n if isinstance(label, str):\n assert osp.isfile(label), 

class GeneralDataset(data.Dataset):

    def __init__(self, transform, sigma, downsample, heatmap_type, data_indicator, phase='train', pca_trans=None):

        self.transform = transform
        self.pca_transform = pca_trans
        self.sigma = sigma
        self.downsample = downsample
        self.heatmap_type = heatmap_type
        self.dataset_name = data_indicator
        self.phase = phase
        self.max_objs = 32
        self.seq_length = 10

        self.reset()
        print('The general dataset initialization done : {:}'.format(self))

    def __repr__(self):
        return ('{name}(point-num={NUM_PTS}, sigma={sigma}, heatmap_type={heatmap_type}, length={length}, dataset={dataset_name})'.format(
            name=self.__class__.__name__, **self.__dict__))

    def reset(self, num_pts=-1):
        self.length = 0
        self.NUM_PTS = num_pts
        self.datas = []
        self.labels = []
        self.face_sizes = []
        assert self.dataset_name is not None, 'The dataset name is None'

    def __len__(self):
        assert len(self.datas) == self.length, 'The length is not correct : {}'.format(self.length)
        return self.length

    def append(self, data, labels, box, face_size):
        # `data` is a sequence of image paths; `labels` and `box` hold one
        # annotation (pts path, Point_Meta, or None) and one box per image.
        assert osp.isfile(data[0]), 'The image path is not a file : {}'.format(data)
        self.datas.append(data)
        meta_list = []
        for idx, label in enumerate(labels):
            # guard the .lower() call so that Point_Meta labels do not crash here
            if (label is not None) and not (isinstance(label, str) and label.lower() == 'none'):
                if isinstance(label, str):
                    assert osp.isfile(label), 'The annotation path is not a file : {}'.format(label)
                    np_points, _ = anno_parser(label, self.NUM_PTS)
                    meta = Point_Meta(self.NUM_PTS, np_points, box[idx], data[idx], self.dataset_name)
                elif isinstance(label, Point_Meta):
                    meta = label.copy()
                else:
                    raise NameError('Do not know this label : {}'.format(label))
            else:
                meta = Point_Meta(self.NUM_PTS, None, box[idx], data[idx], self.dataset_name)
            meta_list.append(meta)
        self.labels.append(meta_list)
        self.face_sizes.append(face_size)
        self.length = self.length + 1

    def prepare_input(self, image, box):
        meta = Point_Meta(self.NUM_PTS, None, np.array(box), image, self.dataset_name)
        image = pil_loader(image)
        return self._process_single_img(image, meta, -1), meta

    def load_data(self, datas, labels, boxes, face_sizes, num_pts, reset):
        # each entry of datas is a list of png file names (one sequence)
        # each entry of labels is a list of Point_Meta objects or pts-format file paths (anno_parser)
        assert isinstance(datas, list), 'The type of the datas is not correct : {}'.format(type(datas))
        assert isinstance(labels, list) and len(datas) == len(labels), 'The type of the labels is not correct : {}'.format(type(labels))
        assert isinstance(boxes, list) and len(datas) == len(boxes), 'The type of the boxes is not correct : {}'.format(type(boxes))
        assert isinstance(face_sizes, list) and len(datas) == len(face_sizes), 'The type of the face_sizes is not correct : {}'.format(type(face_sizes))
        if reset:
            self.reset(num_pts)
        else:
            assert self.NUM_PTS == num_pts, 'The number of points is inconsistent : {} vs {}'.format(self.NUM_PTS, num_pts)

        print('[GeneralDataset] load-data {:} sequences begin'.format(len(datas)))

        for idx, batch_data in enumerate(datas):
            for batch_idx, item in enumerate(batch_data):
                assert isinstance(item, str), 'The type of data is not correct : {}'.format(item)
                assert osp.isfile(item), '{} is not a file'.format(item)
            # append once per sequence, so self.length counts sequences
            self.append(datas[idx], labels[idx], boxes[idx], face_sizes[idx])

        assert len(self.datas) == self.length, 'The length and the data is not right {} vs {}'.format(self.length, len(self.datas))
        assert len(self.labels) == self.length, 'The length and the labels is not right {} vs {}'.format(self.length, len(self.labels))
        assert len(self.face_sizes) == self.length, 'The length and the face_sizes is not right {} vs {}'.format(self.length, len(self.face_sizes))
        print('Load data done for the general dataset, which has {} sequences.'.format(self.length))

    def load_list(self, file_lists, num_pts, reset):
        lists = load_file_lists(file_lists)
        print('GeneralDataset : load-list : load {:} lines'.format(len(lists)))

        datas, labels, boxes, face_sizes = [], [], [], []
        batch_data, batch_labels, batch_boxes, batch_face_sizes = [], [], [], []
        for idx, line in enumerate(lists):
            # each line: image_path annotation_path(or None) x1 y1 x2 y2 [face_size]
            alls = [x for x in line.split(' ') if x != '']

            assert len(alls) == 6 or len(alls) == 7, 'The {:04d}-th line is wrong : {:}'.format(idx, line)
            batch_data.append(alls[0])
            if alls[1] == 'None':
                batch_labels.append(None)
            else:
                batch_labels.append(alls[1])

            box = np.array([float(alls[2]), float(alls[3]), float(alls[4]), float(alls[5])])
            batch_boxes.append(box)
            if len(alls) == 6:
                batch_face_sizes.append(None)
            else:
                batch_face_sizes.append(float(alls[6]))

            # group every `seq_length` consecutive lines into one sequence sample
            if (idx + 1) % self.seq_length == 0:
                datas.append(batch_data)
                labels.append(batch_labels)
                boxes.append(batch_boxes)
                face_sizes.append(batch_face_sizes)
                batch_data, batch_labels, batch_boxes, batch_face_sizes = [], [], [], []

        self.load_data(datas, labels, boxes, face_sizes, num_pts, reset)
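
    # Hedged usage sketch (hypothetical paths and values, not from the original
    # repository): build a dataset from a list file whose lines follow the
    # format parsed above.
    #
    #   dataset = GeneralDataset(transform, sigma=4, downsample=4,
    #                            heatmap_type='gaussian', data_indicator='GD-300VW')
    #   dataset.load_list('./cache_data/lists/train.lst', num_pts=68, reset=True)
    #
    # where a line of train.lst would look like (the trailing face_size is optional):
    #   ./frames/video_001/0001.png ./annots/video_001/0001.pts 34.2 51.8 211.0 228.5 96.4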

    def _get_border(self, border, size):
        # shrink the border until it fits twice inside `size`
        i = 1
        while size - border // i <= border // i:
            i *= 2
        return border // i

    #def __getitem__(self, index):
    #    assert index >= 0 and index < self.length, 'Invalid index : {:}'.format(index)
    #    image = pil_loader(self.datas[index])
    #    target = self.labels[index].copy()
    #    return self._process_(image, target, index)

    def __getitem__(self, index, seq_length=10, stride=1):
        # one sample is a sequence of `seq_length` consecutive frames;
        # per-frame tensors are concatenated along the first dimension
        # (`stride` is currently unused)
        assert index >= 0 and index < self.length, 'Invalid index : {:}'.format(index)
        image_list = []
        heatmaps_list = []
        mask_list = []
        points_list = []
        torch_index_list = []
        torch_nopoints_list = []
        ori_size_list = []
        nose_center_hm_list = []
        hp_offset_Lco_list = []
        kps_mask_list = []
        nose_ind_list = []
        for idx in range(seq_length):
            image = pil_loader(self.datas[index][idx])
            target = self.labels[index][idx].copy()
            out = self._process_single_img(image, target, index)
            image, heatmaps, mask, points, torch_index, torch_nopoints, ori_size, nose_center_hm, hp_offset_Lco, kps_mask, nose_ind = out
            image_list.append(image)
            heatmaps_list.append(heatmaps)
            mask_list.append(mask)
            points_list.append(points)
            torch_index_list.append(torch_index)
            torch_nopoints_list.append(torch_nopoints)
            ori_size_list.append(ori_size)
            nose_center_hm_list.append(nose_center_hm)
            hp_offset_Lco_list.append(hp_offset_Lco)
            kps_mask_list.append(kps_mask)
            nose_ind_list.append(nose_ind)
        image = torch.cat(image_list, dim=0)
        heatmaps = torch.cat(heatmaps_list, dim=0)
        mask = torch.cat(mask_list, dim=0)
        points = torch.cat(points_list, dim=0)
        torch_index = torch.cat(torch_index_list, dim=0)
        torch_nopoints = torch.cat(torch_nopoints_list, dim=0)
        ori_size = torch.cat(ori_size_list, dim=0)
        nose_center_hm = np.concatenate(nose_center_hm_list, axis=0)
        hp_offset_Lco = np.concatenate(hp_offset_Lco_list, axis=0)
        kps_mask = np.concatenate(kps_mask_list, axis=0)
        nose_ind = np.concatenate(nose_ind_list, axis=0)

        return image, heatmaps, mask, points, torch_index, torch_nopoints, ori_size, nose_center_hm, hp_offset_Lco, kps_mask, nose_ind
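
    # Shape reference for one __getitem__ sample (illustrative, inferred from
    # the code above; the heatmap channel count C_hm depends on
    # generate_label_map, which may add a background channel to NUM_PTS):
    #   image:          (seq_length * C) x H x W                     torch.FloatTensor
    #   heatmaps:       (seq_length * C_hm) x H/downsample x W/downsample
    #   points:         (seq_length * NUM_PTS) x 3
    #   nose_center_hm: (seq_length * H/downsample) x W/downsample   numpy float32
    #   hp_offset_Lco:  (seq_length * max_objs) x (NUM_PTS * 2)      numpy float32
    #   kps_mask:       (seq_length * max_objs) x (NUM_PTS * 2)      numpy uint8
    #   nose_ind:       (seq_length * max_objs,)                     numpy int64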

    def _process_single_img(self, image, target, index):

        # transform the image and points
        if self.transform is not None:
            image, target = self.transform(image, target)

        # obtain the visible indicator vector
        if target.is_none():
            nopoints = True
        else:
            nopoints = False

        # if no label is loaded (e.g. for evaluation), keep the original data
        temp_save_wh = target.temp_save_wh
        ori_size = torch.IntTensor([temp_save_wh[1], temp_save_wh[0], temp_save_wh[2], temp_save_wh[3]])  # H, W, Cropped_[x1,y1]

        if isinstance(image, Image.Image):
            height, width = image.size[1], image.size[0]
        elif isinstance(image, torch.FloatTensor):
            height, width = image.size(1), image.size(2)
        else:
            raise Exception('Unknown type of image : {}'.format(type(image)))

        if target.is_none() == False:
            target.apply_bound(width, height)
            points = target.points.copy()
            points = torch.from_numpy(points.transpose((1, 0))).type(torch.FloatTensor)
            Hpoint = target.points.copy()
        else:
            points = torch.from_numpy(np.zeros((self.NUM_PTS, 3))).type(torch.FloatTensor)
            Hpoint = np.zeros((3, self.NUM_PTS))
        hp_offset_Lco = np.zeros((self.max_objs, self.NUM_PTS * 2), dtype=np.float32)
        kps_mask = np.zeros((self.max_objs, self.NUM_PTS * 2), dtype=np.uint8)
        hp_ind = np.zeros((self.max_objs), dtype=np.int64)

        heatmaps, mask = generate_label_map(Hpoint, height // self.downsample, width // self.downsample, self.sigma,
                                            self.downsample, nopoints, self.heatmap_type)  # H*W*C

        output_res = 32  # spatial size of the downsampled heatmaps
        nose_hm = heatmaps[:, :, 30][np.newaxis, ...]  # channel 30 is the nose tip in the 68-point layout
        peak_ind = np.argmax(nose_hm)
        row = peak_ind // output_res
        col = peak_ind % output_res
        ct_int = np.array([row, col])  # choose the nose as the face center
        hp_ind[0] = ct_int[1] * output_res + ct_int[0]
        face_bbox_w = (Hpoint[0, :].max() - Hpoint[0, :].min()) / 4
        face_bbox_h = (Hpoint[1, :].max() - Hpoint[1, :].min()) / 4
        nose_point_radius = gaussian_radius((math.ceil(face_bbox_h), math.ceil(face_bbox_w)))
        nose_center_hm = draw_umich_gaussian(nose_hm[0], ct_int, min(4, int(nose_point_radius)))
        # nose center in heatmap (downsampled) coordinates, to match the division below
        nose_center = (Hpoint[:2, 30] // 4).astype(np.int32)

        for j in range(self.NUM_PTS):
            if Hpoint[2, j] > 0:  # this joint is visible
                Hpoint[:2, j] = Hpoint[:2, j] // 4  # map to heatmap coordinates
                if Hpoint[0, j] >= 0 and Hpoint[0, j] < output_res and Hpoint[1, j] >= 0 and Hpoint[1, j] < output_res:
                    hp_offset_Lco[0, j * 2: j * 2 + 2] = Hpoint[:2, j] - nose_center
                    kps_mask[0, j * 2: j * 2 + 2] = 1

        heatmaps = torch.from_numpy(heatmaps.transpose((2, 0, 1))).type(torch.FloatTensor)
        mask = torch.from_numpy(mask.transpose((2, 0, 1)))  # .type(torch.bool)

        torch_index = torch.IntTensor([index])
        torch_nopoints = torch.ByteTensor([nopoints])

        return image, heatmaps, mask, points, torch_index, torch_nopoints, ori_size, nose_center_hm, hp_offset_Lco, kps_mask, hp_ind

    def plot_porttraits(self, images, titles, h, w, n_row, n_col):
        plt.figure(figsize=(2.2 * n_col, 2.2 * n_row))
        plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.20)
        for i in range(n_row * n_col):
            plt.subplot(n_row, n_col, i + 1)
            plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)
            plt.title(titles[i])
            plt.xticks(())
            plt.yticks(())
        plt.show()

    def reconstruction(self, weights, C, M, h, w, num_components):
        # PCA reconstruction: weight the first `num_components` components C,
        # add the mean image M, and reshape back to h x w
        centered_vector = np.dot(weights[:num_components], C[:num_components, :])
        recovered_image = (M + centered_vector).reshape(h, w)
        return recovered_image


def bgr2gray(rgb):
    return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])


def rgb2gray(rgb):
    # same luminance weights as bgr2gray with the channel order reversed
    return np.dot(rgb[..., :3], [0.114, 0.587, 0.299])
" ]
[ [ "numpy.dot", "numpy.sqrt", "torch.cat", "numpy.concatenate", "numpy.exp", "numpy.finfo", "numpy.argmax", "matplotlib.pyplot.subplot", "matplotlib.pyplot.subplots_adjust", "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.xticks", "torch.ByteTensor", "numpy.maximum", "torch.utils.data.split", "torch.IntTensor", "matplotlib.pyplot.yticks" ] ]