Dataset columns:
    repo_name  : string (length 6-130)
    hexsha     : list
    file_path  : list
    code       : list
    apis       : list
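The rows below follow this schema. As a rough illustration only, here is a minimal sketch of iterating such rows; the serialization format and the file name "code_dataset.jsonl" are assumptions, since the dump does not say how the rows are stored.

import json

# Assumed JSON-lines export of the rows shown below; file name is hypothetical.
with open("code_dataset.jsonl", encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        repo = row["repo_name"]    # e.g. "lavoiems/lightning-bolts"
        hexshas = row["hexsha"]    # list of commit SHAs
        paths = row["file_path"]   # list of file paths within the repo
        sources = row["code"]      # list of raw source strings
        apis = row["apis"]         # list of API-name lists, one per file
        print(repo, paths[0], len(sources[0]), apis[0])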
Computational-Imaging-LAB/Identification-of-S100-using-T2-w-images-deep-learning-
[ "5b847d2c4b0201e0734ff6d04efa4b4a8f65558a" ]
[ "prepare_data.py" ]
[ "import glob\nfrom tqdm import tqdm\nimport nibabel as nib\nimport numpy as np\nimport preprocess\npath='/cta/users/abas/Desktop/Embeddings/'\npath+='/gliom_data/Meningiom/nii_Meningioma_directory_April2021'\npath+='/*/*/Segmentations/*FLAIR_HYP*.ni*'\nsegmentations=glob.glob(path)\nout_path='/cta/users/abas/Desktop/Embeddings/masked_images_MEN_FLAIR/'\nshape_problem=[]\nprg=preprocess.preGlioma()\nfor idx,segs in tqdm(enumerate(segmentations),total=len(segmentations)):\n\n anat_list=segs.split('/')[:-2]\n anat='/'.join(anat_list)\n #anat+='/Anatomic/*T2*DARK*FLUID*/*.nii'\n anat+='/Anatomic/*DARK*FLUID*/*.nii'\n anat_img_path=glob.glob(anat)\n if len(anat_img_path)==0:\n print(anat_img_path)\n continue\n anat_img_nii=nib.load(anat_img_path[-1])\n anat_img=anat_img_nii.get_data()\n anat_img=prg.normalize(anat_img)\n seg_img=nib.load(segs).get_data()\n\n x,y,z=np.where(seg_img>np.mean(seg_img))\n seg_img[np.min(x):np.max(x),np.min(y):np.max(y),np.min(z):np.max(z)]=1\n if seg_img.shape!=anat_img.shape:\n print(seg_img.shape,anat_img.shape)\n print(segs)\n shape_problem.append(anat_list[-1])\n continue\n masked_im=anat_img*seg_img\n x,y,z=np.where(masked_im>np.mean(masked_im))\n masked_im=masked_im[np.min(x):np.max(x),np.min(y):np.max(y),np.min(z):np.max(z)]\n nib.save( nib.Nifti1Image(masked_im,affine=anat_img_nii.affine),out_path+anat_list[-2]+'_T1_OUT.nii.gz')\n\n \n" ]
[ [ "numpy.max", "numpy.min", "numpy.mean" ] ]
lavoiems/lightning-bolts
[ "208e92ba3dcdbc029afd37e09ec9461fbcf3f293" ]
[ "pl_bolts/models/rl/common/distributions.py" ]
[ "\"\"\"Distributions used in some continuous RL algorithms.\"\"\"\nimport torch\n\n\nclass TanhMultivariateNormal(torch.distributions.MultivariateNormal):\n \"\"\"The distribution of X is an affine of tanh applied on a normal distribution.\n\n X = action_scale * tanh(Z) + action_bias\n Z ~ Normal(mean, variance)\n \"\"\"\n\n def __init__(self, action_bias, action_scale, **kwargs):\n super().__init__(**kwargs)\n\n self.action_bias = action_bias\n self.action_scale = action_scale\n\n def rsample_with_z(self, sample_shape=torch.Size()):\n \"\"\"Samples X using reparametrization trick with the intermediate variable Z.\n\n Returns:\n Sampled X and Z\n \"\"\"\n z = super().rsample()\n return self.action_scale * torch.tanh(z) + self.action_bias, z\n\n def log_prob_with_z(self, value, z):\n \"\"\"Computes the log probability of a sampled X.\n\n Refer to the original paper of SAC for more details in equation (20), (21)\n\n Args:\n value: the value of X\n z: the value of Z\n Returns:\n Log probability of the sample\n \"\"\"\n value = (value - self.action_bias) / self.action_scale\n z_logprob = super().log_prob(z)\n correction = torch.log(self.action_scale * (1 - value ** 2) + 1e-7).sum(1)\n return z_logprob - correction\n\n def rsample_and_log_prob(self, sample_shape=torch.Size()):\n \"\"\"Samples X and computes the log probability of the sample.\n\n Returns:\n Sampled X and log probability\n \"\"\"\n z = super().rsample()\n z_logprob = super().log_prob(z)\n value = torch.tanh(z)\n correction = torch.log(self.action_scale * (1 - value ** 2) + 1e-7).sum(1)\n return self.action_scale * value + self.action_bias, z_logprob - correction\n\n def rsample(self, sample_shape=torch.Size()):\n fz, z = self.rsample_with_z(sample_shape)\n return fz\n\n def log_prob(self, value):\n value = (value - self.action_bias) / self.action_scale\n z = torch.log(1 + value) / 2 - torch.log(1 - value) / 2\n return self.log_prob_with_z(value, z)\n" ]
[ [ "torch.Size", "torch.tanh", "torch.log" ] ]
elifesciences/peerscout
[ "2e899f268b4712ffb7a09b171de3f3841337d65d" ]
[ "tests/docvec_model/Doc2VecTransformer_test.py" ]
[ "import logging\n\nimport pytest\n\nimport numpy as np\nfrom sklearn.metrics.pairwise import cosine_similarity\n\nfrom peerscout.docvec_model.Doc2VecTransformer import (\n Doc2VecTransformer\n)\n\nRELATED_TEXTS = [\n 'life sciences may include research subjects such as mice',\n 'life sciences may also look at cats',\n 'mice are to life sciences what bread is to butter'\n]\nRELATED_TEXTS_INDICES = range(len(RELATED_TEXTS))\n\nUNRELATED_TEXTS = [\n 'astronomy is an entirly different field',\n 'stars in astronomy are shining',\n 'astronomy is looking into the sky'\n]\nUNRELATED_TEXTS_INDICES = range(\n len(RELATED_TEXTS),\n len(RELATED_TEXTS) + len(UNRELATED_TEXTS)\n)\n\nTEXT_LIST = RELATED_TEXTS + UNRELATED_TEXTS\n\n\ndef get_logger():\n return logging.getLogger(__name__)\n\n\[email protected]\nclass TestDoc2VecTransformer:\n def test_should_find_similarities(self):\n docvecs = Doc2VecTransformer(\n min_count=1, # do not skip infrequent works (in our small dataset)\n vec_size=2, # need to reduce the vector size due to our very small dataset\n n_epochs=100, # need more iterations\n workers=1,\n seed=1\n ).fit_transform(TEXT_LIST)\n get_logger().debug('docvecs: %s', docvecs)\n\n similarities_first_with = cosine_similarity(\n docvecs, [docvecs[RELATED_TEXTS_INDICES[0]]])[:, 0]\n get_logger().debug('similarities_first_with: %s', similarities_first_with)\n\n self_similarity = similarities_first_with[RELATED_TEXTS_INDICES[0]]\n related_similarities = [similarities_first_with[i] for i in RELATED_TEXTS_INDICES[1:]]\n unrelated_similarities = [similarities_first_with[i] for i in UNRELATED_TEXTS_INDICES]\n\n related_mean = np.mean(related_similarities)\n unrelated_mean = np.mean(unrelated_similarities)\n\n get_logger().debug(\n 'related_similarities: %s (mean: %s)', related_similarities, related_mean\n )\n get_logger().debug(\n 'unrelated_similarities: %s (mean: %s)', unrelated_similarities, unrelated_mean\n )\n\n assert self_similarity > 0.99\n\n # Note: this test doesn't currently reliably work because of the very small dataset\n # assert related_mean > unrelated_mean\n" ]
[ [ "numpy.mean", "sklearn.metrics.pairwise.cosine_similarity" ] ]
Tupi14/VAEP
[ "b0a27dc0c0c4f98e7d0c9c68c65ab63830ffac5a" ]
[ "socceraction/classification/features.py" ]
[ "import socceraction.spadl.config as spadl\nimport pandas as pd\nimport numpy as np\n\n\n_spadlcolumns =[\"game_id\",\"period_id\",\n \"time_seconds\",\"timestamp\",\n \"team_id\",\"player_id\",\"start_x\",\"start_y\",\n \"end_x\",\"end_y\",\"result_id\",\"result_name\",\n \"bodypart_id\",\"bodypart_name\",\"type_id\",\"type_name\"]\n_dummy_actions = pd.DataFrame(np.zeros((10,len(_spadlcolumns))),columns = _spadlcolumns)\nfor c in _spadlcolumns:\n if \"name\" in c:\n _dummy_actions[c] = _dummy_actions[c].astype(str)\n\ndef feature_column_names(fs,nb_prev_actions=3):\n gs = gamestates(_dummy_actions,nb_prev_actions)\n return list(pd.concat([f(gs) for f in fs],axis=1).columns)\n\ndef gamestates(actions, nb_prev_actions=3):\n \"\"\"This function take a dataframe <actions> and outputs gamestates.\n Each gamestate is represented as the <nb_prev_actions> previous actions.\n\n The list of gamestates is internally represented as a list of actions dataframes [a_0,a_1,..] \n where each row in the a_i dataframe contains the previous action of \n the action in the same row in the a_i-1 dataframe.\n \"\"\"\n states = [actions]\n for i in range(1, nb_prev_actions):\n prev_actions = actions.copy().shift(i, fill_value=0)\n prev_actions.loc[: i - 1, :] = pd.concat([actions[:1]] * i, ignore_index=True)\n states.append(prev_actions)\n return states\n\n\ndef play_left_to_right(gamestates, home_team_id):\n a0 = gamestates[0]\n away_idx = a0.team_id != home_team_id\n for actions in gamestates:\n for col in [\"start_x\", \"end_x\"]:\n actions.loc[away_idx, col] = (\n spadl.spadl_length - actions[away_idx][col].values\n )\n for col in [\"start_y\", \"end_y\"]:\n actions.loc[away_idx, col] = (\n spadl.spadl_width - actions[away_idx][col].values\n )\n return gamestates\n\n\ndef simple(actionfn):\n \"Function decorator to apply actionfeatures to gamestates\"\n\n def wrapper(gamestates):\n if not isinstance(gamestates, (list,)):\n gamestates = [gamestates]\n X = []\n for i, a in enumerate(gamestates):\n Xi = actionfn(a)\n Xi.columns = [c + \"_a\" + str(i) for c in Xi.columns]\n X.append(Xi)\n return pd.concat(X, axis=1)\n\n return wrapper\n\n\n# SIMPLE FEATURES\n\n\n@simple\ndef actiontype(actions):\n return actions[[\"type_id\"]]\n\n\n@simple\ndef actiontype_onehot(actions):\n X = pd.DataFrame()\n for type_name in spadl.actiontypes:\n col = \"type_\" + type_name\n X[col] = actions[\"type_name\"] == type_name\n return X\n\n\n@simple\ndef result(actions):\n return actions[[\"result_id\"]]\n\n\n@simple\ndef result_onehot(actions):\n X = pd.DataFrame()\n for result_name in spadl.results:\n col = \"result_\" + result_name\n X[col] = actions[\"result_name\"] == result_name\n return X\n\n@simple\ndef actiontype_result_onehot(actions):\n res = result_onehot(actions)\n tys = actiontype_onehot(actions)\n df = pd.DataFrame()\n for tyscol in list(tys.columns):\n for rescol in list(res.columns):\n df[tyscol + \"_\" + rescol] = tys[tyscol] & res[rescol]\n return df \n\n\n\n@simple\ndef bodypart(actions):\n return actions[[\"bodypart_id\"]]\n\n\n@simple\ndef bodypart_onehot(actions):\n X = pd.DataFrame()\n for bodypart_name in spadl.bodyparts:\n col = \"bodypart_\" + bodypart_name\n X[col] = actions[\"bodypart_name\"] == bodypart_name\n return X\n\n\n@simple\ndef time(actions):\n timedf = actions[[\"period_id\", \"time_seconds\"]].copy()\n timedf[\"time_seconds_overall\"] = (\n (timedf.period_id - 1) * 45 * 60\n ) + timedf.time_seconds\n return timedf\n\n\n@simple\ndef startlocation(actions):\n return actions[[\"start_x\", 
\"start_y\"]]\n\n@simple\ndef endlocation(actions):\n return actions[[\"end_x\", \"end_y\"]]\n\n\n_goal_x = spadl.spadl_length\n_goal_y = spadl.spadl_width / 2\n\n\n@simple\ndef startpolar(actions):\n polardf = pd.DataFrame()\n dx = _goal_x - actions[\"start_x\"]\n dy = abs(_goal_y - actions[\"start_y\"])\n polardf[\"start_dist_to_goal\"] = np.sqrt(dx ** 2 + dy ** 2)\n polardf[\"start_tan_angle_to_goal\"] = np.divide(dx, dy, where=dy != 0)\n return polardf\n\n@simple\ndef endpolar(actions):\n polardf = pd.DataFrame()\n dx = _goal_x - actions[\"end_x\"]\n dy = abs(_goal_y - actions[\"end_y\"])\n polardf[\"end_dist_to_goal\"] = np.sqrt(dx ** 2 + dy ** 2)\n polardf[\"end_tan_angle_to_goal\"] = np.divide(dx, dy, where=dy != 0)\n return polardf\n\n\n@simple\ndef movement(actions):\n mov = pd.DataFrame()\n mov[\"dx\"] = actions.end_x - actions.start_x\n mov[\"dy\"] = actions.end_y - actions.start_y\n mov[\"movement\"] = np.sqrt(mov.dx ** 2 + mov.dy ** 2)\n return mov\n\n\n# STATE FEATURES\n\n\ndef team(gamestates):\n a0 = gamestates[0]\n teamdf = pd.DataFrame()\n for i, a in enumerate(gamestates[1:]):\n teamdf[\"team_\" + (str(i + 1))] = a.team_id == a0.team_id\n return teamdf\n\n\ndef time_delta(gamestates):\n a0 = gamestates[0]\n dt = pd.DataFrame()\n for i, a in enumerate(gamestates[1:]):\n dt[\"time_delta_\" + (str(i + 1))] = a0.time_seconds - a.time_seconds\n return dt\n\n\ndef space_delta(gamestates):\n a0 = gamestates[0]\n spaced = pd.DataFrame()\n for i, a in enumerate(gamestates[1:]):\n dx = a.end_x - a0.start_x\n spaced[\"dx_a0\" + (str(i + 1))] = dx\n dy = a.end_y - a0.start_y\n spaced[\"dy_a0\" + (str(i + 1))] = dy\n spaced[\"mov_a0\" + (str(i + 1))] = np.sqrt(dx ** 2 + dy ** 2)\n return spaced\n\n\n# CONTEXT FEATURES\n\n\ndef goalscore(gamestates):\n \"\"\"\n This function determines the nr of goals scored by each team after the \n action\n \"\"\"\n actions = gamestates[0]\n teamA = actions[\"team_id\"].values[0]\n goals = actions[\"type_name\"].str.contains(\"shot\") & (\n actions[\"result_id\"] == spadl.results.index(\"success\")\n )\n owngoals = actions[\"type_name\"].str.contains(\"shot\") & (\n actions[\"result_id\"] == spadl.results.index(\"owngoal\")\n )\n teamisA = actions[\"team_id\"] == teamA\n teamisB = ~teamisA\n goalsteamA = (goals & teamisA) | (owngoals & teamisB)\n goalsteamB = (goals & teamisB) | (owngoals & teamisA)\n goalscoreteamA = goalsteamA.cumsum() - goalsteamA\n goalscoreteamB = goalsteamB.cumsum() - goalsteamB\n\n scoredf = pd.DataFrame()\n scoredf[\"goalscore_team\"] = (goalscoreteamA * teamisA) + (goalscoreteamB * teamisB)\n scoredf[\"goalscore_opponent\"] = (goalscoreteamB * teamisA) + (\n goalscoreteamA * teamisB\n )\n scoredf[\"goalscore_diff\"] = (\n scoredf[\"goalscore_team\"] - scoredf[\"goalscore_opponent\"]\n )\n return scoredf" ]
[ [ "numpy.divide", "pandas.DataFrame", "numpy.sqrt", "pandas.concat" ] ]
Claybarn/permute
[ "292d1925f27f91f1971baac4b27d10731716be2d" ]
[ "permute/data/__init__.py" ]
[ "\"\"\"Standard test data.\n\nFor more information, see\n\n - http://www.wiley.com/legacy/wileychi/pesarin/material.html\n\n\"\"\"\n\n\nimport os as _os\n\nimport numpy as np\n\nfrom .. import data_dir\n\n\n__all__ = ['load',\n 'kenya', ]\n\n\ndef load(f):\n r\"\"\"Load a data file located in the data directory.\n\n Parameters\n ----------\n f : string\n File name.\n\n Returns\n -------\n x : array like\n Data loaded from permute.data_dir.\n \"\"\"\n return np.recfromcsv(_os.path.join(data_dir, f), delimiter=\",\", encoding=None)\n\n\ndef nsgk():\n r\"\"\"NSGK test data for irr.\n\n Notes\n -----\n\n Here is first 5 lines of `nsgk.csv`::\n\n time_stamp,domain,video,rater\n 1,8,1,1\n 1,12,1,1\n 1,15,1,1\n 1,20,1,1\n\n \"\"\"\n nz = np.loadtxt(_os.path.join(data_dir, \"nsgk.csv\"),\n delimiter=',', skiprows=1, dtype=np.int32)\n shape = tuple(nz.max(axis=0))\n x = np.zeros(shape, dtype=np.int32)\n nz -= 1\n for r in nz:\n x[tuple(r)] = 1\n\n # given order: time_stamp,domain,video,rater\n # desired order: domain,video,rater,time_stamp\n x = x.transpose(1, 2, 3, 0)\n # hardcoding the number of timestamps per video\n time_stamps = [36, 32, 35, 37, 31, 35, 40, 32]\n p1 = [[m[:, :time_stamps[i]] for i, m in enumerate(n)]for n in x]\n\n ## Alternatively, I could return a 2D object array with\n ## rater x time_stamp(video) matrices as entries\n ## Not sure which is better, so I will wait to see how I use it.\n # p1 = np.zeros(x.shape[:2], dtype=object)\n # for i, n in enumerate(x):\n # for j, m in enumerate(n):\n # p1[i, j] = m\n\n return p1\n\n\ndef macnell2014():\n r\"\"\"Data from MacNell et al. 2014\n\n .. Lillian MacNell, Adam Driscoll, and Andrea N Hunt, \"What's\n in a Name: Exposing Gender Bias in Student Ratings of Teaching,\"\n Innovative Higher Education, pp. 1-13, 2014.\n \"\"\"\n return load(\"MacNell2014.csv\")\n\n\ndef clinical_trial():\n r\"\"\"Data from Ottoboni et al. 2018\n\n .. 
Kellie Ottoboni, Fraser Lewis, and Luigi Salmaso, \"An Empirical \n Comparison of Parametric and Permutation Tests for Regression \n Analysis of Randomized Experiments,\" Statistics in \n Biopharmaceutical Research, 2018.\n \"\"\"\n return load(\"rb_clinical_trial.csv\")\n\n\n# def another_poss():\n# nz = np.loadtxt(_os.path.join(data_dir, \"nsgk.csv\"),\n# delimiter=',', skiprows=1, dtype=np.int)\n# _, nd, nv, nr = tuple(nz.max(axis=0))\n# dv = np.zeros((nd, nv), dtype=object)\n# time_stamps = [36, 32, 35, 37, 31, 35, 40, 32]\n# for n in range(nd):\n# for v in range(nv):\n# dv[n, v] = np.zeros((nr, time_stamps[v]), dtype=np.int)\n# nz -= 1\n# for _ts, _d, _v, _r in nz:\n# dv[_d, _v][_r, _ts] = 1 \n#\n\ndef botulinum():\n r\"\"\"The\n\n \"\"\"\n return load(_os.path.join(\"npc\", \"botulinum.csv\"))\n\n\ndef chrom17m():\n r\"\"\"The\n\n \"\"\"\n return load(_os.path.join(\"npc\", \"chrom17m.csv\"))\n\n\ndef confocal():\n \"\"\"The\n\n \"\"\"\n return load(_os.path.join(\"npc\", \"confocal.csv\"))\n\n\ndef germina():\n \"\"\"The\n\n \"\"\"\n return load(_os.path.join(\"npc\", \"germina.csv\"))\n\n\ndef kenya():\n \"\"\"The Kenya dataset contains 16 observations and two variables in total.\n It concerns an anthropological study on the \"Ol Molo\" and \"Kamba\"\n populations.\n\n \"\"\"\n return load(_os.path.join(\"npc\", \"kenya.csv\"))\n\n\ndef massaro_blair():\n \"\"\"The\n\n \"\"\"\n return load(_os.path.join(\"npc\", \"massaro_blair.csv\"))\n\n\ndef monachus():\n \"\"\"The\n\n \"\"\"\n return load(_os.path.join(\"npc\", \"monachus.csv\"))\n\n\ndef mult():\n \"\"\"The\n\n \"\"\"\n return load(_os.path.join(\"npc\", \"mult.csv\"))\n\n\ndef perch():\n \"\"\"The\n\n \"\"\"\n return load(_os.path.join(\"npc\", \"perch.csv\"))\n\n\ndef rats():\n \"\"\"The\n\n \"\"\"\n return load(_os.path.join(\"npc\", \"rats.csv\"))\n\n\ndef setig():\n \"\"\"The\n\n \"\"\"\n return load(_os.path.join(\"npc\", \"setig.csv\"))\n\n\ndef urology():\n \"\"\"The\n\n \"\"\"\n return load(_os.path.join(\"npc\", \"urology.csv\"))\n\n\ndef washing_test():\n \"\"\"The\n\n \"\"\"\n return load(_os.path.join(\"npc\", \"washing_test.csv\"))\n\n\ndef waterfalls():\n \"\"\"The\n\n \"\"\"\n return load(_os.path.join(\"npc\", \"waterfalls.csv\"))\n\n\ndef ipat():\n \"\"\"The IPAT dataset from Pesarin and Salmaso Chapter 1\n \"\"\"\n return load(_os.path.join(\"npc\", \"examples_chapters_1-4\", \"ipat.csv\"))\n\n\ndef job():\n \"\"\"The job satisfaction dataset from Pesarin and Salmaso Chapter 1\n \"\"\"\n return load(_os.path.join(\"npc\", \"examples_chapters_1-4\", \"job.csv\"))\n\n\ndef fly():\n \"\"\"The fly dataset from Pesarin and Salmaso Chapter 4\n \"\"\"\n return load(_os.path.join(\"npc\", \"examples_chapters_1-4\", \"fly.csv\"))\n\n\ndef testosterone():\n \"\"\"The testosterone dataset from Pesarin and Salmaso Chapter 2\n \"\"\"\n return load(_os.path.join(\"npc\", \"examples_chapters_1-4\", \"testosterone.csv\"))\n\n\ndef worms():\n \"\"\"The worms dataset from Pesarin and Salmaso Chapter 1\n \"\"\"\n return load(_os.path.join(\"npc\", \"examples_chapters_1-4\", \"worms.csv\"))\n" ]
[ [ "numpy.zeros" ] ]
eee4017/pytorch
[ "1de5c26814e75dcf5479a6732ed5df6ec768a547" ]
[ "test/run_test.py" ]
[ "#!/usr/bin/env python3\n\nimport argparse\nimport copy\nfrom datetime import datetime\nimport json\nimport modulefinder\nimport os\nimport shutil\nimport signal\nimport subprocess\nimport sys\nimport tempfile\n\nimport torch\nfrom torch.utils import cpp_extension\nfrom torch.testing._internal.common_utils import TEST_WITH_ROCM, shell, set_cwd, FILE_SCHEMA\nfrom torch.testing._internal.framework_utils import calculate_shards\nimport torch.distributed as dist\nfrom typing import Dict, Optional, Tuple, List, Any\nfrom typing_extensions import TypedDict\n\ntry:\n sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\"))\n from tools.stats_utils.s3_stat_parser import (get_previous_reports_for_branch, Report, HAVE_BOTO3)\nexcept ImportError:\n print(\"Unable to import s3_stat_parser from tools. Running without S3 stats...\")\n HAVE_BOTO3 = False\n\n\nTESTS = [\n 'test_import_time',\n 'test_public_bindings',\n 'test_type_hints',\n 'test_autograd',\n 'benchmark_utils/test_benchmark_utils',\n 'test_binary_ufuncs',\n 'test_bundled_inputs',\n 'test_complex',\n 'test_cpp_api_parity',\n 'test_cpp_extensions_aot_no_ninja',\n 'test_cpp_extensions_aot_ninja',\n 'test_cpp_extensions_jit',\n 'distributed/test_c10d_common',\n 'distributed/test_c10d_gloo',\n 'distributed/test_c10d_nccl',\n 'distributed/test_jit_c10d',\n 'distributed/test_c10d_spawn_gloo',\n 'distributed/test_c10d_spawn_nccl',\n 'test_cuda',\n 'test_jit_cuda_fuser',\n 'test_cuda_primary_ctx',\n 'test_dataloader',\n 'test_datapipe',\n 'distributed/test_data_parallel',\n 'distributed/test_distributed_fork',\n 'distributed/test_distributed_spawn',\n 'distributions/test_constraints',\n 'distributions/test_distributions',\n 'test_dispatch',\n 'test_expecttest',\n 'test_foreach',\n 'test_indexing',\n 'test_jit',\n 'test_linalg',\n 'test_logging',\n 'test_mkldnn',\n 'test_model_dump',\n 'test_module_init',\n 'test_multiprocessing',\n 'test_multiprocessing_spawn',\n 'distributed/test_nccl',\n 'test_native_functions',\n 'test_numba_integration',\n 'test_nn',\n 'test_ops',\n 'test_optim',\n 'test_pytree',\n 'test_mobile_optimizer',\n 'test_set_default_mobile_cpu_allocator',\n 'test_xnnpack_integration',\n 'test_vulkan',\n 'test_sparse',\n 'test_quantization',\n 'test_pruning_op',\n 'test_spectral_ops',\n 'test_serialization',\n 'test_shape_ops',\n 'test_show_pickle',\n 'test_sort_and_select',\n 'test_tensor_creation_ops',\n 'test_testing',\n 'test_torch',\n 'test_type_info',\n 'test_unary_ufuncs',\n 'test_utils',\n 'test_view_ops',\n 'test_vmap',\n 'test_namedtuple_return_api',\n 'test_numpy_interop',\n 'test_jit_profiling',\n 'test_jit_legacy',\n 'test_jit_fuser_legacy',\n 'test_tensorboard',\n 'test_namedtensor',\n 'test_reductions',\n 'test_type_promotion',\n 'test_jit_disabled',\n 'test_function_schema',\n 'test_op_aliases',\n 'test_overrides',\n 'test_jit_fuser_te',\n 'test_tensorexpr',\n 'test_tensorexpr_pybind',\n 'test_openmp',\n 'test_profiler',\n \"distributed/test_launcher\",\n 'distributed/nn/jit/test_instantiator',\n 'distributed/rpc/test_faulty_agent',\n 'distributed/rpc/test_process_group_agent',\n 'distributed/rpc/cuda/test_process_group_agent',\n 'distributed/rpc/test_tensorpipe_agent',\n 'distributed/rpc/cuda/test_tensorpipe_agent',\n 'test_determination',\n 'test_futures',\n 'test_fx',\n 'test_fx_experimental',\n 'test_functional_autograd_benchmark',\n 'test_package',\n 'test_license',\n 'distributed/pipeline/sync/skip/test_api',\n 'distributed/pipeline/sync/skip/test_gpipe',\n 
'distributed/pipeline/sync/skip/test_inspect_skip_layout',\n 'distributed/pipeline/sync/skip/test_leak',\n 'distributed/pipeline/sync/skip/test_portal',\n 'distributed/pipeline/sync/skip/test_stash_pop',\n 'distributed/pipeline/sync/skip/test_tracker',\n 'distributed/pipeline/sync/skip/test_verify_skippables',\n 'distributed/pipeline/sync/test_balance',\n 'distributed/pipeline/sync/test_bugs',\n 'distributed/pipeline/sync/test_checkpoint',\n 'distributed/pipeline/sync/test_copy',\n 'distributed/pipeline/sync/test_deferred_batch_norm',\n 'distributed/pipeline/sync/test_dependency',\n 'distributed/pipeline/sync/test_inplace',\n 'distributed/pipeline/sync/test_microbatch',\n 'distributed/pipeline/sync/test_phony',\n 'distributed/pipeline/sync/test_pipe',\n 'distributed/pipeline/sync/test_pipeline',\n 'distributed/pipeline/sync/test_stream',\n 'distributed/pipeline/sync/test_transparency',\n 'distributed/pipeline/sync/test_worker',\n 'distributed/optim/test_zero_redundancy_optimizer',\n 'distributed/elastic/timer/api_test',\n 'distributed/elastic/timer/local_timer_example',\n 'distributed/elastic/timer/local_timer_test',\n 'distributed/elastic/events/lib_test',\n 'distributed/elastic/metrics/api_test',\n 'distributed/elastic/utils/logging_test',\n 'distributed/elastic/utils/util_test',\n 'distributed/elastic/utils/distributed_test',\n 'distributed/elastic/multiprocessing/api_test',\n]\n\n# Tests need to be run with pytest.\nUSE_PYTEST_LIST = [\n 'distributed/pipeline/sync/skip/test_api',\n 'distributed/pipeline/sync/skip/test_gpipe',\n 'distributed/pipeline/sync/skip/test_inspect_skip_layout',\n 'distributed/pipeline/sync/skip/test_leak',\n 'distributed/pipeline/sync/skip/test_portal',\n 'distributed/pipeline/sync/skip/test_stash_pop',\n 'distributed/pipeline/sync/skip/test_tracker',\n 'distributed/pipeline/sync/skip/test_verify_skippables',\n 'distributed/pipeline/sync/test_balance',\n 'distributed/pipeline/sync/test_bugs',\n 'distributed/pipeline/sync/test_checkpoint',\n 'distributed/pipeline/sync/test_copy',\n 'distributed/pipeline/sync/test_deferred_batch_norm',\n 'distributed/pipeline/sync/test_dependency',\n 'distributed/pipeline/sync/test_inplace',\n 'distributed/pipeline/sync/test_microbatch',\n 'distributed/pipeline/sync/test_phony',\n 'distributed/pipeline/sync/test_pipe',\n 'distributed/pipeline/sync/test_pipeline',\n 'distributed/pipeline/sync/test_stream',\n 'distributed/pipeline/sync/test_transparency',\n 'distributed/pipeline/sync/test_worker',\n 'distributions/test_constraints',\n 'distributions/test_transforms',\n 'distributions/test_utils',\n 'test_typing',\n \"distributed/elastic/events/lib_test\",\n \"distributed/elastic/agent/server/test/api_test\",\n]\n\nWINDOWS_BLOCKLIST = [\n 'distributed/nn/jit/test_instantiator',\n 'distributed/rpc/test_faulty_agent',\n 'distributed/rpc/test_process_group_agent',\n 'distributed/rpc/cuda/test_process_group_agent',\n 'distributed/rpc/test_tensorpipe_agent',\n 'distributed/rpc/cuda/test_tensorpipe_agent',\n 'distributed/test_distributed_fork',\n 'distributed/pipeline/sync/skip/test_api',\n 'distributed/pipeline/sync/skip/test_gpipe',\n 'distributed/pipeline/sync/skip/test_inspect_skip_layout',\n 'distributed/pipeline/sync/skip/test_leak',\n 'distributed/pipeline/sync/skip/test_portal',\n 'distributed/pipeline/sync/skip/test_stash_pop',\n 'distributed/pipeline/sync/skip/test_tracker',\n 'distributed/pipeline/sync/skip/test_verify_skippables',\n 'distributed/pipeline/sync/test_balance',\n 'distributed/pipeline/sync/test_bugs',\n 
'distributed/pipeline/sync/test_checkpoint',\n 'distributed/pipeline/sync/test_copy',\n 'distributed/pipeline/sync/test_deferred_batch_norm',\n 'distributed/pipeline/sync/test_dependency',\n 'distributed/pipeline/sync/test_inplace',\n 'distributed/pipeline/sync/test_microbatch',\n 'distributed/pipeline/sync/test_phony',\n 'distributed/pipeline/sync/test_pipe',\n 'distributed/pipeline/sync/test_pipeline',\n 'distributed/pipeline/sync/test_stream',\n 'distributed/pipeline/sync/test_transparency',\n 'distributed/pipeline/sync/test_worker',\n 'distributed/optim/test_zero_redundancy_optimizer',\n \"distributed/elastic/agent/server/test/api_test\",\n 'distributed/elastic/multiprocessing/api_test',\n]\n\nROCM_BLOCKLIST = [\n 'distributed/nn/jit/test_instantiator',\n 'distributed/rpc/test_faulty_agent',\n 'distributed/rpc/test_process_group_agent',\n 'distributed/rpc/cuda/test_process_group_agent',\n 'distributed/rpc/test_tensorpipe_agent',\n 'distributed/rpc/cuda/test_tensorpipe_agent',\n 'test_determination',\n 'test_multiprocessing',\n 'test_jit_legacy',\n 'test_type_hints',\n 'test_openmp',\n]\n\nRUN_PARALLEL_BLOCKLIST = [\n 'test_cpp_extensions_jit',\n 'test_expecttest',\n 'test_jit_disabled',\n 'test_mobile_optimizer',\n 'test_multiprocessing',\n 'test_multiprocessing_spawn',\n 'test_namedtuple_return_api',\n 'test_overrides',\n 'test_show_pickle',\n 'test_tensorexpr',\n 'test_cuda_primary_ctx',\n] + [test for test in TESTS if test.startswith('distributed/')]\n\nWINDOWS_COVERAGE_BLOCKLIST = [\n]\n\n\n# These tests are slow enough that it's worth calculating whether the patch\n# touched any related files first. This list was manually generated, but for every\n# run with --determine-from, we use another generated list based on this one and the\n# previous test stats.\nTARGET_DET_LIST = [\n 'distributions/test_distributions',\n 'test_nn',\n 'test_autograd',\n 'test_cpp_extensions_jit',\n 'test_jit_legacy',\n 'test_dataloader',\n 'test_overrides',\n 'test_linalg',\n 'test_jit',\n 'test_jit_profiling',\n 'test_torch',\n 'test_binary_ufuncs',\n 'test_numpy_interop',\n 'test_reductions',\n 'test_shape_ops',\n 'test_sort_and_select',\n 'test_testing',\n 'test_view_ops',\n 'distributed/nn/jit/test_instantiator',\n 'distributed/test_distributed_fork',\n 'distributed/rpc/test_process_group_agent',\n 'distributed/rpc/cuda/test_process_group_agent',\n 'distributed/rpc/test_tensorpipe_agent',\n 'distributed/rpc/cuda/test_tensorpipe_agent',\n 'distributed/algorithms/ddp_comm_hooks/test_ddp_hooks',\n 'distributed/test_distributed_spawn',\n 'test_cuda',\n 'test_cuda_primary_ctx',\n 'test_cpp_extensions_aot_ninja',\n 'test_cpp_extensions_aot_no_ninja',\n 'test_serialization',\n 'test_optim',\n 'test_utils',\n 'test_multiprocessing',\n 'test_tensorboard',\n 'distributed/test_c10d_common',\n 'distributed/test_c10d_gloo',\n 'distributed/test_c10d_nccl',\n 'distributed/test_jit_c10d',\n 'distributed/test_c10d_spawn_gloo',\n 'distributed/test_c10d_spawn_nccl',\n 'test_quantization',\n 'test_pruning_op',\n 'test_determination',\n 'test_futures',\n 'distributed/pipeline/sync/skip/test_api',\n 'distributed/pipeline/sync/skip/test_gpipe',\n 'distributed/pipeline/sync/skip/test_inspect_skip_layout',\n 'distributed/pipeline/sync/skip/test_leak',\n 'distributed/pipeline/sync/skip/test_portal',\n 'distributed/pipeline/sync/skip/test_stash_pop',\n 'distributed/pipeline/sync/skip/test_tracker',\n 'distributed/pipeline/sync/skip/test_verify_skippables',\n 'distributed/pipeline/sync/test_balance',\n 
'distributed/pipeline/sync/test_bugs',\n 'distributed/pipeline/sync/test_checkpoint',\n 'distributed/pipeline/sync/test_copy',\n 'distributed/pipeline/sync/test_deferred_batch_norm',\n 'distributed/pipeline/sync/test_dependency',\n 'distributed/pipeline/sync/test_inplace',\n 'distributed/pipeline/sync/test_microbatch',\n 'distributed/pipeline/sync/test_phony',\n 'distributed/pipeline/sync/test_pipe',\n 'distributed/pipeline/sync/test_pipeline',\n 'distributed/pipeline/sync/test_stream',\n 'distributed/pipeline/sync/test_transparency',\n 'distributed/pipeline/sync/test_worker',\n]\n\n# the JSON file to store the S3 test stats\nTEST_TIMES_FILE = '.pytorch-test-times'\n\n# if a test file takes longer than 5 min, we add it to TARGET_DET_LIST\nSLOW_TEST_THRESHOLD = 300\n\n_DEP_MODULES_CACHE: Dict[str, set] = {}\n\nDISTRIBUTED_TESTS_CONFIG = {}\n\n\nif dist.is_available():\n DISTRIBUTED_TESTS_CONFIG['test'] = {\n 'WORLD_SIZE': '1'\n }\n if not TEST_WITH_ROCM and dist.is_mpi_available():\n DISTRIBUTED_TESTS_CONFIG['mpi'] = {\n 'WORLD_SIZE': '3',\n 'TEST_REPORT_SOURCE_OVERRIDE': 'dist-mpi'\n }\n if dist.is_nccl_available():\n DISTRIBUTED_TESTS_CONFIG['nccl'] = {\n 'WORLD_SIZE': '2' if torch.cuda.device_count() == 2 else '3',\n 'TEST_REPORT_SOURCE_OVERRIDE': 'dist-nccl'\n }\n if dist.is_gloo_available():\n DISTRIBUTED_TESTS_CONFIG['gloo'] = {\n 'WORLD_SIZE': '2' if torch.cuda.device_count() == 2 else '3',\n 'TEST_REPORT_SOURCE_OVERRIDE': 'dist-gloo'\n }\n\n# https://stackoverflow.com/questions/2549939/get-signal-names-from-numbers-in-python\nSIGNALS_TO_NAMES_DICT = {getattr(signal, n): n for n in dir(signal)\n if n.startswith('SIG') and '_' not in n}\n\nCPP_EXTENSIONS_ERROR = \"\"\"\nNinja (https://ninja-build.org) is required for some of the C++ extensions\ntests, but it could not be found. Install ninja with `pip install ninja`\nor `conda install ninja`. 
Alternatively, disable said tests with\n`run_test.py --exclude test_cpp_extensions_aot_ninja test_cpp_extensions_jit`.\n\"\"\"\n\nPYTORCH_COLLECT_COVERAGE = bool(os.environ.get(\"PYTORCH_COLLECT_COVERAGE\"))\n\nJIT_EXECUTOR_TESTS = [\n 'test_jit_cuda_fuser',\n 'test_jit_profiling',\n 'test_jit_legacy',\n 'test_jit_fuser_legacy',\n]\n\ndef print_to_stderr(message):\n print(message, file=sys.stderr)\n\n\n# Convert something like pytorch_windows_vs2019_py36_cuda10.1_build to pytorch_windows_vs2019_py36_cuda10.1\ndef get_stripped_CI_job() -> str:\n job = os.environ.get(\"CIRCLE_JOB\", \"\").rstrip('0123456789')\n if job.endswith('_slow_test'):\n job = job[:len(job) - len('_slow_test')]\n elif job.endswith('_test'):\n job = job[:len(job) - len('_test')]\n elif job.endswith('_build'):\n job = job[:len(job) - len('_build')]\n return job\n\n\ndef calculate_job_times(reports: List[\"Report\"]) -> Dict[str, float]:\n # an entry will be like (\"test_file_name\" -> (current_avg, # values))\n jobs_to_times: Dict[str, Tuple[float, int]] = dict()\n for report in reports:\n assert report.get('format_version') == 2, \"S3 format currently handled is version 2 only\"\n files: Dict[str, Any] = report['files']\n for name, test_file in files.items():\n if name not in jobs_to_times:\n jobs_to_times[name] = (test_file['total_seconds'], 1)\n else:\n curr_avg, curr_count = jobs_to_times[name]\n new_count = curr_count + 1\n new_avg = (curr_avg * curr_count + test_file['total_seconds']) / new_count\n jobs_to_times[name] = (new_avg, new_count)\n\n # if there's 'test_cpp_extensions_aot' entry in jobs_to_times, add 'test_cpp_extensions_aot_ninja'\n # and 'test_cpp_extensions_aot_no_ninja' duplicate entries to ease future computation since\n # test_cpp_extensions_aot_no_ninja and test_cpp_extensions_aot_ninja are Python test jobs that\n # both use the test_cpp_extensions_aot.py file.\n if 'test_cpp_extensions_aot' in jobs_to_times:\n jobs_to_times['test_cpp_extensions_aot_ninja'] = jobs_to_times['test_cpp_extensions_aot']\n jobs_to_times['test_cpp_extensions_aot_no_ninja'] = jobs_to_times['test_cpp_extensions_aot']\n return {job: time for job, (time, _) in jobs_to_times.items()}\n\n\ndef pull_job_times_from_S3() -> Dict[str, float]:\n if HAVE_BOTO3:\n ci_job_prefix = get_stripped_CI_job()\n s3_reports: List[\"Report\"] = get_previous_reports_for_branch('origin/nightly', ci_job_prefix)\n else:\n print('Uh oh, boto3 is not found. Either it is not installed or we failed to import s3_stat_parser.')\n print('If not installed, please install boto3 for automatic sharding and test categorization.')\n s3_reports = []\n\n if len(s3_reports) == 0:\n print('Gathered no reports from S3. Please proceed without them.')\n return dict()\n\n return calculate_job_times(s3_reports)\n\n\ndef get_past_job_times() -> Dict[str, float]:\n if os.path.exists(TEST_TIMES_FILE):\n with open(TEST_TIMES_FILE) as file:\n test_times_json: JobTimeJSON = json.load(file)\n\n curr_commit = subprocess.check_output(['git', 'rev-parse', 'HEAD'], encoding=\"ascii\").strip()\n file_commit = test_times_json.get('commit', '')\n curr_ci_job = get_stripped_CI_job()\n file_ci_job = test_times_json.get('CIRCLE_JOB', 'N/A')\n if curr_commit != file_commit:\n print(f'Current test times file is from different commit {file_commit}.')\n elif curr_ci_job != file_ci_job:\n print(f'Current test times file is for different CI job {file_ci_job}.')\n else:\n print(f'Found stats for current commit: {curr_commit} and job: {curr_ci_job}. 
Proceeding with those values.')\n return test_times_json.get('job_times', {})\n\n # Found file, but commit or CI job in JSON doesn't match\n print(f'Overwriting current file with stats based on current commit: {curr_commit} and CI job: {curr_ci_job}')\n\n job_times = pull_job_times_from_S3()\n print(f'Exporting S3 test stats to {TEST_TIMES_FILE}.')\n export_S3_test_times(TEST_TIMES_FILE, job_times)\n\n return job_times\n\n\nclass JobTimeJSON(TypedDict):\n commit: str\n job_times: Dict[str, float]\n\n\ndef get_job_times_json(job_times: Dict[str, float]) -> JobTimeJSON:\n return {\n 'commit': subprocess.check_output(['git', 'rev-parse', 'HEAD'], encoding=\"ascii\").strip(),\n 'CIRCLE_JOB': get_stripped_CI_job(),\n 'job_times': job_times,\n }\n\n\ndef get_shard(which_shard: int, num_shards: int, tests: List[str]) -> List[str]:\n jobs_to_times = get_past_job_times()\n\n # Got no stats from S3, returning early to save runtime\n if len(jobs_to_times) == 0:\n print('Gathered no stats from S3. Proceeding with default sharding plan.')\n return tests[which_shard - 1 :: num_shards]\n\n shards = calculate_shards(num_shards, tests, jobs_to_times)\n _, tests_from_shard = shards[which_shard - 1]\n return tests_from_shard\n\n\ndef get_slow_tests_based_on_S3() -> List[str]:\n jobs_to_times: Dict[str, float] = get_past_job_times()\n\n # Got no stats from S3, returning early to save runtime\n if len(jobs_to_times) == 0:\n print('Gathered no stats from S3. No new slow tests calculated.')\n return []\n\n slow_tests: List[str] = []\n for test in TESTS:\n if test in jobs_to_times and test not in TARGET_DET_LIST:\n if jobs_to_times[test] > SLOW_TEST_THRESHOLD:\n slow_tests.append(test)\n return slow_tests\n\n\ndef get_executable_command(options, allow_pytest, disable_coverage=False):\n if options.coverage and not disable_coverage:\n executable = ['coverage', 'run', '--parallel-mode', '--source=torch']\n else:\n executable = [sys.executable]\n if options.pytest:\n if allow_pytest:\n executable += ['-m', 'pytest']\n else:\n print_to_stderr('Pytest cannot be used for this test. Falling back to unittest.')\n return executable\n\n\ndef run_test(test_module, test_directory, options, launcher_cmd=None, extra_unittest_args=None):\n unittest_args = options.additional_unittest_args.copy()\n if options.verbose:\n unittest_args.append(f'-{\"v\"*options.verbose}') # in case of pytest\n if test_module in RUN_PARALLEL_BLOCKLIST:\n unittest_args = [arg for arg in unittest_args if not arg.startswith('--run-parallel')]\n if extra_unittest_args:\n assert isinstance(extra_unittest_args, list)\n unittest_args.extend(extra_unittest_args)\n\n # If using pytest, replace -f with equivalent -x\n if options.pytest:\n unittest_args = [arg if arg != '-f' else '-x' for arg in unittest_args]\n\n # Can't call `python -m unittest test_*` here because it doesn't run code\n # in `if __name__ == '__main__': `. So call `python test_*.py` instead.\n argv = [test_module + '.py'] + unittest_args\n\n # Multiprocessing related tests cannot run with coverage.\n # Tracking issue: https://github.com/pytorch/pytorch/issues/50661\n disable_coverage = sys.platform == 'win32' and test_module in WINDOWS_COVERAGE_BLOCKLIST\n\n # Extra arguments are not supported with pytest\n executable = get_executable_command(options, allow_pytest=not extra_unittest_args,\n disable_coverage=disable_coverage)\n\n command = (launcher_cmd or []) + executable + argv\n print_to_stderr('Executing {} ... 
[{}]'.format(command, datetime.now()))\n return shell(command, test_directory)\n\n\ndef test_cuda_primary_ctx(test_module, test_directory, options):\n return run_test(test_module, test_directory, options, extra_unittest_args=['--subprocess'])\n\n\ndef _test_cpp_extensions_aot(test_module, test_directory, options, use_ninja):\n if use_ninja:\n try:\n cpp_extension.verify_ninja_availability()\n except RuntimeError:\n print(CPP_EXTENSIONS_ERROR)\n return 1\n\n # Wipe the build folder, if it exists already\n cpp_extensions_test_dir = os.path.join(test_directory, 'cpp_extensions')\n cpp_extensions_test_build_dir = os.path.join(cpp_extensions_test_dir, 'build')\n if os.path.exists(cpp_extensions_test_build_dir):\n shutil.rmtree(cpp_extensions_test_build_dir)\n\n # Build the test cpp extensions modules\n shell_env = os.environ.copy()\n shell_env['USE_NINJA'] = str(1 if use_ninja else 0)\n cmd = [sys.executable, 'setup.py', 'install', '--root', './install']\n return_code = shell(cmd, cwd=cpp_extensions_test_dir, env=shell_env)\n if return_code != 0:\n return return_code\n if sys.platform != 'win32':\n return_code = shell(cmd,\n cwd=os.path.join(cpp_extensions_test_dir, 'no_python_abi_suffix_test'),\n env=shell_env)\n if return_code != 0:\n return return_code\n\n # \"install\" the test modules and run tests\n python_path = os.environ.get('PYTHONPATH', '')\n try:\n cpp_extensions = os.path.join(test_directory, 'cpp_extensions')\n install_directory = ''\n # install directory is the one that is named site-packages\n for root, directories, _ in os.walk(os.path.join(cpp_extensions, 'install')):\n for directory in directories:\n if '-packages' in directory:\n install_directory = os.path.join(root, directory)\n\n assert install_directory, 'install_directory must not be empty'\n os.environ['PYTHONPATH'] = os.pathsep.join([install_directory, python_path])\n return run_test(test_module, test_directory, options)\n finally:\n os.environ['PYTHONPATH'] = python_path\n\n\ndef test_cpp_extensions_aot_ninja(test_module, test_directory, options):\n return _test_cpp_extensions_aot('test_cpp_extensions_aot', test_directory,\n options, use_ninja=True)\n\n\ndef test_cpp_extensions_aot_no_ninja(test_module, test_directory, options):\n return _test_cpp_extensions_aot('test_cpp_extensions_aot',\n test_directory, options, use_ninja=False)\n\n\ndef test_distributed(test_module, test_directory, options):\n # MPI tests are broken with Python-3.9\n mpi_available = subprocess.call('command -v mpiexec', shell=True) == 0 and sys.version_info < (3, 9)\n if options.verbose and not mpi_available:\n print_to_stderr(\n 'MPI not available -- MPI backend tests will be skipped')\n config = DISTRIBUTED_TESTS_CONFIG\n for backend, env_vars in config.items():\n if sys.platform == 'win32' and backend != 'gloo':\n continue\n if backend == 'mpi' and not mpi_available:\n continue\n for with_init_file in {True, False}:\n if sys.platform == 'win32' and not with_init_file:\n continue\n tmp_dir = tempfile.mkdtemp()\n if options.verbose:\n init_str = \"with {} init_method\"\n with_init = init_str.format(\"file\" if with_init_file else \"env\")\n print_to_stderr(\n 'Running distributed tests for the {} backend {}'.format(\n backend, with_init))\n os.environ['TEMP_DIR'] = tmp_dir\n os.environ['BACKEND'] = backend\n os.environ['INIT_METHOD'] = 'env://'\n os.environ.update(env_vars)\n if with_init_file:\n if test_module in [\"test_distributed_fork\", \"test_distributed_spawn\"]:\n init_method = f'{FILE_SCHEMA}{tmp_dir}/'\n else:\n init_method = 
f'{FILE_SCHEMA}{tmp_dir}/shared_init_file'\n os.environ['INIT_METHOD'] = init_method\n try:\n os.mkdir(os.path.join(tmp_dir, 'barrier'))\n os.mkdir(os.path.join(tmp_dir, 'test_dir'))\n if backend == 'mpi':\n # test mpiexec for --noprefix option\n with open(os.devnull, 'w') as devnull:\n allowrunasroot_opt = '--allow-run-as-root' if subprocess.call(\n 'mpiexec --allow-run-as-root -n 1 bash -c \"\"', shell=True,\n stdout=devnull, stderr=subprocess.STDOUT) == 0 else ''\n noprefix_opt = '--noprefix' if subprocess.call(\n f'mpiexec {allowrunasroot_opt} -n 1 --noprefix bash -c \"\"', shell=True,\n stdout=devnull, stderr=subprocess.STDOUT) == 0 else ''\n\n mpiexec = ['mpiexec', '-n', '3', noprefix_opt, allowrunasroot_opt]\n\n return_code = run_test(test_module, test_directory, options,\n launcher_cmd=mpiexec)\n else:\n return_code = run_test(test_module, test_directory, options)\n if return_code != 0:\n return return_code\n finally:\n shutil.rmtree(tmp_dir)\n return 0\n\n\nCUSTOM_HANDLERS = {\n 'test_cuda_primary_ctx': test_cuda_primary_ctx,\n 'test_cpp_extensions_aot_no_ninja': test_cpp_extensions_aot_no_ninja,\n 'test_cpp_extensions_aot_ninja': test_cpp_extensions_aot_ninja,\n 'distributed/test_distributed_fork': test_distributed,\n 'distributed/test_distributed_spawn': test_distributed,\n}\n\n\ndef parse_test_module(test):\n return test.split('.')[0]\n\n\nclass TestChoices(list):\n def __init__(self, *args, **kwargs):\n super(TestChoices, self).__init__(args[0])\n\n def __contains__(self, item):\n return list.__contains__(self, parse_test_module(item))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='Run the PyTorch unit test suite',\n epilog='where TESTS is any of: {}'.format(', '.join(TESTS)))\n parser.add_argument(\n '-v',\n '--verbose',\n action='count',\n default=0,\n help='print verbose information and test-by-test results')\n parser.add_argument(\n '--jit',\n '--jit',\n action='store_true',\n help='run all jit tests')\n parser.add_argument(\n '-pt', '--pytest', action='store_true',\n help='If true, use `pytest` to execute the tests. E.g., this runs '\n 'TestTorch with pytest in verbose and coverage mode: '\n 'python run_test.py -vci torch -pt')\n parser.add_argument(\n '-c', '--coverage', action='store_true', help='enable coverage',\n default=PYTORCH_COLLECT_COVERAGE)\n parser.add_argument(\n '-i',\n '--include',\n nargs='+',\n choices=TestChoices(TESTS),\n default=TESTS,\n metavar='TESTS',\n help='select a set of tests to include (defaults to ALL tests).'\n ' tests can be specified with module name, module.TestClass'\n ' or module.TestClass.test_method')\n parser.add_argument(\n '-x',\n '--exclude',\n nargs='+',\n choices=TESTS,\n metavar='TESTS',\n default=[],\n help='select a set of tests to exclude')\n parser.add_argument(\n '-f',\n '--first',\n choices=TESTS,\n metavar='TESTS',\n help='select the test to start from (excludes previous tests)')\n parser.add_argument(\n '-l',\n '--last',\n choices=TESTS,\n metavar='TESTS',\n help='select the last test to run (excludes following tests)')\n parser.add_argument(\n '--bring-to-front',\n nargs='+',\n choices=TestChoices(TESTS),\n default=[],\n metavar='TESTS',\n help='select a set of tests to run first. This can be used in situations'\n ' where you want to run all tests, but care more about some set, '\n 'e.g. 
after making a change to a specific component')\n parser.add_argument(\n '--ignore-win-blocklist',\n action='store_true',\n help='always run blocklisted windows tests')\n parser.add_argument(\n '--determine-from',\n help='File of affected source filenames to determine which tests to run.')\n parser.add_argument(\n '--continue-through-error',\n action='store_true',\n help='Runs the full test suite despite one of the tests failing')\n parser.add_argument(\n 'additional_unittest_args',\n nargs='*',\n help='additional arguments passed through to unittest, e.g., '\n 'python run_test.py -i sparse -- TestSparse.test_factory_size_check')\n parser.add_argument(\n '--export-past-test-times',\n nargs='?',\n type=str,\n const=TEST_TIMES_FILE,\n help='dumps test times from previous S3 stats into a file, format JSON',\n )\n parser.add_argument(\n '--shard',\n nargs=2,\n type=int,\n help='runs a shard of the tests (taking into account other selections), e.g., '\n '--shard 2 3 will break up the selected tests into 3 shards and run the tests '\n 'in the 2nd shard (the first number should not exceed the second)',\n )\n parser.add_argument(\n '--exclude-jit-executor',\n action='store_true',\n help='exclude tests that are run for a specific jit config'\n )\n return parser.parse_args()\n\n\ndef find_test_index(test, selected_tests, find_last_index=False):\n \"\"\"Find the index of the first or last occurrence of a given test/test module in the list of selected tests.\n\n This function is used to determine the indices when slicing the list of selected tests when\n ``options.first``(:attr:`find_last_index`=False) and/or ``options.last``(:attr:`find_last_index`=True) are used.\n\n :attr:`selected_tests` can be a list that contains multiple consequent occurrences of tests\n as part of the same test module, e.g.:\n\n ```\n selected_tests = ['autograd', 'cuda', **'torch.TestTorch.test_acos',\n 'torch.TestTorch.test_tan', 'torch.TestTorch.test_add'**, 'utils']\n ```\n\n If :attr:`test`='torch' and :attr:`find_last_index`=False, result should be **2**.\n If :attr:`test`='torch' and :attr:`find_last_index`=True, result should be **4**.\n\n Args:\n test (str): Name of test to lookup\n selected_tests (list): List of tests\n find_last_index (bool, optional): should we lookup the index of first or last\n occurrence (first is default)\n\n Returns:\n index of the first or last occurrence of the given test\n \"\"\"\n idx = 0\n found_idx = -1\n for t in selected_tests:\n if t.startswith(test):\n found_idx = idx\n if not find_last_index:\n break\n idx += 1\n return found_idx\n\n\ndef exclude_tests(exclude_list, selected_tests, exclude_message=None):\n for exclude_test in exclude_list:\n tests_copy = selected_tests[:]\n for test in tests_copy:\n if test.startswith(exclude_test):\n if exclude_message is not None:\n print_to_stderr('Excluding {} {}'.format(test, exclude_message))\n selected_tests.remove(test)\n return selected_tests\n\n\ndef get_selected_tests(options):\n selected_tests = options.include\n\n if options.bring_to_front:\n to_front = set(options.bring_to_front)\n selected_tests = options.bring_to_front + list(filter(lambda name: name not in to_front,\n selected_tests))\n\n if options.first:\n first_index = find_test_index(options.first, selected_tests)\n selected_tests = selected_tests[first_index:]\n\n if options.last:\n last_index = find_test_index(options.last, selected_tests, find_last_index=True)\n selected_tests = selected_tests[:last_index + 1]\n\n if options.shard:\n assert len(options.shard) == 2, 
\"Unexpected shard format\"\n assert min(options.shard) > 0, \"Shards must be positive numbers\"\n which_shard, num_shards = options.shard\n assert which_shard <= num_shards, \"Selected shard must be less or equal that total number of shards\"\n assert num_shards <= len(selected_tests), f\"Number of shards must be less than {len(selected_tests)}\"\n selected_tests = get_shard(which_shard, num_shards, selected_tests)\n\n if options.exclude_jit_executor:\n options.exclude.extend(JIT_EXECUTOR_TESTS)\n\n selected_tests = exclude_tests(options.exclude, selected_tests)\n\n if sys.platform == 'win32' and not options.ignore_win_blocklist:\n target_arch = os.environ.get('VSCMD_ARG_TGT_ARCH')\n if target_arch != 'x64':\n WINDOWS_BLOCKLIST.append('cpp_extensions_aot_no_ninja')\n WINDOWS_BLOCKLIST.append('cpp_extensions_aot_ninja')\n WINDOWS_BLOCKLIST.append('cpp_extensions_jit')\n WINDOWS_BLOCKLIST.append('jit')\n WINDOWS_BLOCKLIST.append('jit_fuser')\n\n selected_tests = exclude_tests(WINDOWS_BLOCKLIST, selected_tests, 'on Windows')\n\n elif TEST_WITH_ROCM:\n selected_tests = exclude_tests(ROCM_BLOCKLIST, selected_tests, 'on ROCm')\n\n return selected_tests\n\n\ndef test_impact_of_file(filename):\n \"\"\"Determine what class of impact this file has on test runs.\n\n Possible values:\n TORCH - torch python code\n CAFFE2 - caffe2 python code\n TEST - torch test code\n UNKNOWN - may affect all tests\n NONE - known to have no effect on test outcome\n CI - CI configuration files\n \"\"\"\n parts = filename.split(os.sep)\n if parts[0] in ['.jenkins', '.circleci']:\n return 'CI'\n if parts[0] in ['docs', 'scripts', 'CODEOWNERS', 'README.md']:\n return 'NONE'\n elif parts[0] == 'torch':\n if parts[-1].endswith('.py') or parts[-1].endswith('.pyi'):\n return 'TORCH'\n elif parts[0] == 'caffe2':\n if parts[-1].endswith('.py') or parts[-1].endswith('.pyi'):\n return 'CAFFE2'\n elif parts[0] == 'test':\n if parts[-1].endswith('.py') or parts[-1].endswith('.pyi'):\n return 'TEST'\n\n return 'UNKNOWN'\n\n\ndef log_test_reason(file_type, filename, test, options):\n if options.verbose:\n print_to_stderr(\n 'Determination found {} file {} -- running {}'.format(\n file_type,\n filename,\n test,\n )\n )\n\n\ndef get_dep_modules(test):\n # Cache results in case of repetition\n if test in _DEP_MODULES_CACHE:\n return _DEP_MODULES_CACHE[test]\n\n repo_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n test_location = os.path.join(repo_root, 'test', test + '.py')\n finder = modulefinder.ModuleFinder(\n # Ideally exclude all third party modules, to speed up calculation.\n excludes=[\n 'scipy',\n 'numpy',\n 'numba',\n 'multiprocessing',\n 'sklearn',\n 'setuptools',\n 'hypothesis',\n 'llvmlite',\n 'joblib',\n 'email',\n 'importlib',\n 'unittest',\n 'urllib',\n 'json',\n 'collections',\n # Modules below are excluded because they are hitting https://bugs.python.org/issue40350\n # Trigger AttributeError: 'NoneType' object has no attribute 'is_package'\n 'mpl_toolkits',\n 'google',\n 'onnx',\n # Triggers RecursionError\n 'mypy'\n ],\n )\n # HACK: some platforms default to ascii, so we can't just run_script :(\n with open(test_location, 'r', encoding='utf-8') as fp:\n finder.load_module('__main__', fp, test_location, ('', 'r', 1))\n\n dep_modules = set(finder.modules.keys())\n _DEP_MODULES_CACHE[test] = dep_modules\n return dep_modules\n\n\ndef determine_target(target_det_list, test, touched_files, options):\n test = parse_test_module(test)\n # Some tests are faster to execute than to determine.\n if test not in 
target_det_list:\n if options.verbose:\n print_to_stderr(f'Running {test} without determination')\n return True\n # HACK: \"no_ninja\" is not a real module\n if test.endswith('_no_ninja'):\n test = test[:(-1 * len('_no_ninja'))]\n if test.endswith('_ninja'):\n test = test[:(-1 * len('_ninja'))]\n\n dep_modules = get_dep_modules(test)\n\n for touched_file in touched_files:\n file_type = test_impact_of_file(touched_file)\n if file_type == 'NONE':\n continue\n elif file_type == 'CI':\n # Force all tests to run if any change is made to the CI\n # configurations.\n log_test_reason(file_type, touched_file, test, options)\n return True\n elif file_type == 'UNKNOWN':\n # Assume uncategorized source files can affect every test.\n log_test_reason(file_type, touched_file, test, options)\n return True\n elif file_type in ['TORCH', 'CAFFE2', 'TEST']:\n parts = os.path.splitext(touched_file)[0].split(os.sep)\n touched_module = \".\".join(parts)\n # test/ path does not have a \"test.\" namespace\n if touched_module.startswith('test.'):\n touched_module = touched_module.split('test.')[1]\n if (\n touched_module in dep_modules\n or touched_module == test.replace('/', '.')\n ):\n log_test_reason(file_type, touched_file, test, options)\n return True\n\n # If nothing has determined the test has run, don't run the test.\n if options.verbose:\n print_to_stderr(f'Determination is skipping {test}')\n\n return False\n\n\ndef run_test_module(test: str, test_directory: str, options) -> Optional[str]:\n test_module = parse_test_module(test)\n\n # Printing the date here can help diagnose which tests are slow\n print_to_stderr('Running {} ... [{}]'.format(test, datetime.now()))\n handler = CUSTOM_HANDLERS.get(test_module, run_test)\n return_code = handler(test_module, test_directory, options)\n assert isinstance(return_code, int) and not isinstance(\n return_code, bool), 'Return code should be an integer'\n if return_code == 0:\n return None\n\n message = f'{test} failed!'\n if return_code < 0:\n # subprocess.Popen returns the child process' exit signal as\n # return code -N, where N is the signal number.\n signal_name = SIGNALS_TO_NAMES_DICT[-return_code]\n message += f' Received signal: {signal_name}'\n return message\n\ndef export_S3_test_times(test_times_filename: str, test_times: Dict[str, float]) -> None:\n if os.path.exists(test_times_filename):\n print(f'Overwriting existent file: {test_times_filename}')\n with open(test_times_filename, 'w+') as file:\n job_times_json = get_job_times_json(test_times)\n json.dump(job_times_json, file, indent=' ', separators=(',', ': '))\n file.write('\\n')\n\n\ndef query_changed_test_files() -> List[str]:\n cmd = [\"git\", \"diff\", \"--name-only\", \"origin/master\", \"HEAD\"]\n proc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n if proc.returncode != 0:\n raise RuntimeError(\"Unable to get changed files\")\n\n lines = proc.stdout.decode().strip().split(\"\\n\")\n lines = [line.strip() for line in lines]\n return lines\n\n\ndef reorder_tests(tests: List[str]) -> List[str]:\n try:\n changed_files = query_changed_test_files()\n except Exception:\n # If unable to get changed files from git, quit without doing any sorting\n return tests\n\n prefix = f\"test{os.path.sep}\"\n changed_tests = [f for f in changed_files if f.startswith(prefix) and f.endswith(\".py\")]\n changed_tests = [f[len(prefix):] for f in changed_tests]\n changed_tests = [f[:-len(\".py\")] for f in changed_tests]\n\n bring_to_front = []\n the_rest = []\n\n for test in tests:\n if test in 
changed_tests:\n bring_to_front.append(test)\n else:\n the_rest.append(test)\n\n sorted_tests = bring_to_front + the_rest\n\n if len(sorted_tests) != len(tests):\n # Something went wrong, bail out without doing any sorting\n return tests\n\n return sorted_tests\n\n\ndef main():\n options = parse_args()\n\n test_times_filename = options.export_past_test_times\n if test_times_filename:\n print(f'Exporting past test times from S3 to {test_times_filename}, no tests will be run.')\n export_S3_test_times(test_times_filename, pull_job_times_from_S3())\n return\n\n test_directory = os.path.dirname(os.path.abspath(__file__))\n selected_tests = get_selected_tests(options)\n\n if options.verbose:\n print_to_stderr('Selected tests: {}'.format(', '.join(selected_tests)))\n\n if options.coverage and not PYTORCH_COLLECT_COVERAGE:\n shell(['coverage', 'erase'])\n\n if options.jit:\n selected_tests = filter(lambda test_name: \"jit\" in test_name, TESTS)\n\n if options.determine_from is not None and os.path.exists(options.determine_from):\n slow_tests = get_slow_tests_based_on_S3()\n print('Added the following tests to target_det tests as calculated based on S3:')\n print(slow_tests)\n with open(options.determine_from, 'r') as fh:\n touched_files = [\n os.path.normpath(name.strip()) for name in fh.read().split('\\n')\n if len(name.strip()) > 0\n ]\n # HACK: Ensure the 'test' paths can be traversed by Modulefinder\n sys.path.append('test')\n selected_tests = [\n test for test in selected_tests\n if determine_target(TARGET_DET_LIST + slow_tests, test, touched_files, options)\n ]\n sys.path.remove('test')\n\n\n selected_tests = reorder_tests(selected_tests)\n\n has_failed = False\n failure_messages = []\n try:\n for test in selected_tests:\n options_clone = copy.deepcopy(options)\n if test in USE_PYTEST_LIST:\n options_clone.pytest = True\n err_message = run_test_module(test, test_directory, options_clone)\n if err_message is None:\n continue\n has_failed = True\n failure_messages.append(err_message)\n if not options_clone.continue_through_error:\n raise RuntimeError(err_message)\n print_to_stderr(err_message)\n finally:\n if options.coverage:\n from coverage import Coverage\n test_dir = os.path.dirname(os.path.abspath(__file__))\n with set_cwd(test_dir):\n cov = Coverage()\n if PYTORCH_COLLECT_COVERAGE:\n cov.load()\n cov.combine(strict=False)\n cov.save()\n if not PYTORCH_COLLECT_COVERAGE:\n cov.html_report()\n\n if options.continue_through_error and has_failed:\n for err in failure_messages:\n print_to_stderr(err)\n sys.exit(1)\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.distributed.is_available", "torch.distributed.is_nccl_available", "torch.distributed.is_mpi_available", "torch.distributed.is_gloo_available", "torch.utils.cpp_extension.verify_ninja_availability", "torch.cuda.device_count", "torch.testing._internal.common_utils.set_cwd", "torch.testing._internal.framework_utils.calculate_shards", "torch.testing._internal.common_utils.shell" ] ]
wzy810103882/MegaDepth
[ "ef57fd4a568084fc7e93f6bf1caf67dcf19ac6c1" ]
[ "models/HG_model.py" ]
[ "import numpy as np\nimport torch\nimport os\nfrom torch.autograd import Variable\nfrom .base_model import BaseModel\nimport sys\nimport pytorch_DIW_scratch\n\nclass HGModel(BaseModel):\n def name(self):\n return 'HGModel'\n\n def __init__(self, opt):\n BaseModel.initialize(self, opt)\n\n print(\"===========================================LOADING Hourglass NETWORK====================================================\")\n model = pytorch_DIW_scratch.pytorch_DIW_scratch\n model= torch.nn.parallel.DataParallel(model, device_ids = [0,1])\n #model_parameters = self.load_network(model, 'G', 'best_vanila')\n model_parameters = self.load_network(model, 'G', 'best_generalization') # load with best generalization\n model.load_state_dict(model_parameters)\n self.netG = model.cuda()\n\n\n def batch_classify(self, z_A_arr, z_B_arr, ground_truth ):\n threashold = 1.1\n depth_ratio = torch.div(z_A_arr, z_B_arr)\n\n depth_ratio = depth_ratio.cpu()\n\n estimated_labels = torch.zeros(depth_ratio.size(0))\n\n estimated_labels[depth_ratio > (threashold)] = 1\n estimated_labels[depth_ratio < (1/threashold)] = -1\n\n diff = estimated_labels - ground_truth\n diff[diff != 0] = 1\n\n # error \n inequal_error_count = diff[ground_truth != 0]\n inequal_error_count = torch.sum(inequal_error_count)\n\n error_count = torch.sum(diff) #diff[diff !=0]\n # error_count = error_count.size(0)\n\n equal_error_count = error_count - inequal_error_count\n\n\n # total \n total_count = depth_ratio.size(0)\n ground_truth[ground_truth !=0 ] = 1\n\n inequal_count_total = torch.sum(ground_truth)\n equal_total_count = total_count - inequal_count_total\n\n\n error_list = [equal_error_count, inequal_error_count, error_count]\n count_list = [equal_total_count, inequal_count_total, total_count]\n\n return error_list, count_list \n\n\n def computeSDR(self, prediction_d, targets):\n # for each image \n total_error = [0,0,0]\n total_samples = [0,0,0]\n\n for i in range(0, prediction_d.size(0)):\n\n if targets['has_SfM_feature'][i] == False:\n continue\n \n x_A_arr = targets[\"sdr_xA\"][i].squeeze(0)\n x_B_arr = targets[\"sdr_xB\"][i].squeeze(0)\n y_A_arr = targets[\"sdr_yA\"][i].squeeze(0)\n y_B_arr = targets[\"sdr_yB\"][i].squeeze(0)\n\n predict_depth = torch.exp(prediction_d[i,:,:])\n predict_depth = predict_depth.squeeze(0)\n ground_truth = targets[\"sdr_gt\"][i]\n\n # print(x_A_arr.size())\n # print(y_A_arr.size())\n\n z_A_arr = torch.gather( torch.index_select(predict_depth, 1 ,x_A_arr.cuda()) , 0, y_A_arr.view(1, -1).cuda())# predict_depth:index(2, x_A_arr):gather(1, y_A_arr:view(1, -1))\n z_B_arr = torch.gather( torch.index_select(predict_depth, 1 ,x_B_arr.cuda()) , 0, y_B_arr.view(1, -1).cuda())\n\n z_A_arr = z_A_arr.squeeze(0)\n z_B_arr = z_B_arr.squeeze(0)\n\n error_list, count_list = self.batch_classify(z_A_arr, z_B_arr,ground_truth)\n\n for j in range(0,3):\n total_error[j] += error_list[j]\n total_samples[j] += count_list[j]\n\n return total_error, total_samples\n\n\n def evaluate_SDR(self, input_, targets):\n input_images = Variable(input_.cuda() )\n prediction_d = self.netG.forward(input_images) \n\n total_error, total_samples = self.computeSDR(prediction_d.data, targets)\n\n return total_error, total_samples\n\n def rmse_Loss(self, log_prediction_d, mask, log_gt):\n N = torch.sum(mask)\n log_d_diff = log_prediction_d - log_gt\n log_d_diff = torch.mul(log_d_diff, mask)\n s1 = torch.sum( torch.pow(log_d_diff,2) )/N \n\n s2 = torch.pow(torch.sum(log_d_diff),2)/(N*N) \n data_loss = s1 - s2\n\n data_loss = torch.sqrt(data_loss)\n\n 
return data_loss\n\n def evaluate_RMSE(self, input_images, prediction_d, targets):\n count = 0 \n total_loss = Variable(torch.cuda.FloatTensor(1))\n total_loss[0] = 0\n mask_0 = Variable(targets['mask_0'].cuda(), requires_grad = False)\n d_gt_0 = torch.log(Variable(targets['gt_0'].cuda(), requires_grad = False))\n\n for i in range(0, mask_0.size(0)):\n \n total_loss += self.rmse_Loss(prediction_d[i,:,:], mask_0[i,:,:], d_gt_0[i,:,:])\n count += 1\n\n return total_loss.data[0], count\n\n\n def evaluate_sc_inv(self, input_, targets):\n input_images = Variable(input_.cuda() )\n prediction_d = self.netG.forward(input_images) \n rmse_loss , count= self.evaluate_RMSE(input_images, prediction_d, targets)\n\n return rmse_loss, count\n\n\n def switch_to_train(self):\n self.netG.train()\n\n def switch_to_eval(self):\n self.netG.eval()\n\n" ]
[ [ "torch.mul", "torch.sqrt", "torch.pow", "torch.cuda.FloatTensor", "torch.div", "torch.nn.parallel.DataParallel", "torch.exp", "torch.sum" ] ]
sharp-rmf/fleet_driver_mir
[ "ca4ee124510a33f8da44b112cf825b7c8b3e8315" ]
[ "fleet_driver_mir/jsontocsv.py" ]
[ "import json\nimport sys\nimport pandas as pd\n\nif len(sys.argv)!=2:\n print(f'Please supply one json file')\n exit()\ntry:\n f=pd.read_json(sys.argv[1])\n new_file=f'{sys.argv[1]}'[:-4]+'csv'\n print(f'saving to {new_file}')\n f.to_csv(f'{sys.argv[1]}'[:-4]+'csv')\nexcept:\n print(f'Could not open file')\n\n \n\n\n " ]
[ [ "pandas.read_json" ] ]
deebuls/pytorch-classification-uncertainty
[ "06fb2f6a2ceeabb06b17a0767078699ae61dbdaa" ]
[ "losses.py" ]
[ "import torch\nimport torch.nn.functional as F\nfrom helpers import get_device\n\n\ndef relu_evidence(y):\n return F.relu(y)\n\n\ndef exp_evidence(y):\n return torch.exp(torch.clamp(y, -10, 10))\n\n\ndef softplus_evidence(y):\n return F.softplus(y)\n\n\ndef kl_divergence(alpha, num_classes, device=None):\n if not device:\n device = get_device()\n beta = torch.ones([1, num_classes], dtype=torch.float32, device=device)\n S_alpha = torch.sum(alpha, dim=1, keepdim=True)\n S_beta = torch.sum(beta, dim=1, keepdim=True)\n lnB = torch.lgamma(S_alpha) - \\\n torch.sum(torch.lgamma(alpha), dim=1, keepdim=True)\n lnB_uni = torch.sum(torch.lgamma(beta), dim=1,\n keepdim=True) - torch.lgamma(S_beta)\n\n dg0 = torch.digamma(S_alpha)\n dg1 = torch.digamma(alpha)\n\n kl = torch.sum((alpha - beta) * (dg1 - dg0), dim=1,\n keepdim=True) + lnB + lnB_uni\n return kl\n\n\ndef loglikelihood_loss(y, alpha, device=None):\n if not device:\n device = get_device()\n y = y.to(device)\n alpha = alpha.to(device)\n S = torch.sum(alpha, dim=1, keepdim=True)\n loglikelihood_err = torch.sum(\n (y - (alpha / S)) ** 2, dim=1, keepdim=True)\n loglikelihood_var = torch.sum(\n alpha * (S - alpha) / (S * S * (S + 1)), dim=1, keepdim=True)\n loglikelihood = loglikelihood_err + loglikelihood_var\n return loglikelihood\n\n\ndef mse_loss(y, alpha, epoch_num, num_classes, annealing_step, device=None):\n if not device:\n device = get_device()\n y = y.to(device)\n alpha = alpha.to(device)\n loglikelihood = loglikelihood_loss(y, alpha, device=device)\n\n annealing_coef = torch.min(torch.tensor(\n 1.0, dtype=torch.float32), torch.tensor(epoch_num / annealing_step, dtype=torch.float32))\n\n kl_alpha = (alpha - 1) * (1 - y) + 1\n kl_div = annealing_coef * \\\n kl_divergence(kl_alpha, num_classes, device=device)\n return loglikelihood + kl_div\n\n\ndef edl_loss(func, y, alpha, epoch_num, num_classes, annealing_step, device=None):\n y = y.to(device)\n alpha = alpha.to(device)\n S = torch.sum(alpha, dim=1, keepdim=True)\n\n A = torch.sum(y * (func(S) - func(alpha)), dim=1, keepdim=True)\n\n annealing_coef = torch.min(torch.tensor(\n 1.0, dtype=torch.float32), torch.tensor(epoch_num / annealing_step, dtype=torch.float32))\n\n kl_alpha = (alpha - 1) * (1 - y) + 1\n kl_div = annealing_coef * \\\n kl_divergence(kl_alpha, num_classes, device=device)\n return A + kl_div\n\n\ndef edl_mse_loss(output, target, epoch_num, num_classes, annealing_step, device=None):\n if not device:\n device = get_device()\n evidence = relu_evidence(output)\n alpha = evidence + 1\n loss = torch.mean(mse_loss(target, alpha, epoch_num,\n num_classes, annealing_step, device=device))\n return loss\n\n\ndef edl_log_loss(output, target, epoch_num, num_classes, annealing_step, device=None):\n if not device:\n device = get_device()\n evidence = relu_evidence(output)\n alpha = evidence + 1\n loss = torch.mean(edl_loss(torch.log, target, alpha,\n epoch_num, num_classes, annealing_step, device))\n return loss\n\n\ndef edl_digamma_loss(output, target, epoch_num, num_classes, annealing_step, device=None):\n if not device:\n device = get_device()\n evidence = relu_evidence(output)\n alpha = evidence + 1\n loss = torch.mean(edl_loss(torch.digamma, target, alpha,\n epoch_num, num_classes, annealing_step, device))\n return loss\n" ]
[ [ "torch.nn.functional.softplus", "torch.clamp", "torch.ones", "torch.digamma", "torch.tensor", "torch.nn.functional.relu", "torch.lgamma", "torch.sum" ] ]
celine-alameda/HOI_toolbox
[ "6cc06c6d0f81746d3fcb92005da6d819faa9b16b" ]
[ "toolbox/hoi_toolbox.py" ]
[ "\"\"\"Entry point for the HOI_Toolbox. Fir\"\"\"\nimport numpy as np\nimport pandas as pd\nimport scipy.io\nimport time\nfrom toolbox.higher_order_information.Oinfo import OInfoCalculator\nfrom toolbox.higher_order_information.dOinfo import DOInfoCalculator\nfrom toolbox.higher_order_information.local_o_info import LocalOHOI\nfrom toolbox.states_probabilities import StatesProbabilities\nfrom toolbox.utils import save_obj, load_obj\n\n\nclass HOIToolbox:\n\n def __init__(self, config):\n \"\"\"Create a new HOI Toolbox object, using a configuration file\"\"\"\n self.config = config\n if \"metric\" not in config:\n print(\"ERROR : no metric specified\")\n exit(1)\n if \"input\" not in config:\n print(\"Please provide input data location in config\")\n exit(1)\n if \"input_type\" not in config:\n self.config[\"input_type\"] = \"tsv\"\n\n def run(self):\n input_file = \"data/\" + self.config[\"input\"]\n if self.config[\"input_type\"] == \"tsv\":\n # df = pd.read_csv(\"data/timeseries.tsv.gz\", compression='gzip', delimiter='\\t')\n # df = df.loc[:, (df != 0.0).any(axis=0)]\n # df.to_csv('data/cleaned_timeseries.tsv', sep='\\t',index=False)\n ts = np.genfromtxt(input_file, delimiter='\\t', )\n self.ts = ts[1:, :].T\n # print(ts.shape)\n elif self.config[\"input_type\"] == \"mat\":\n ts = scipy.io.loadmat(input_file)\n ts = np.array(ts['ts'])\n self.ts = ts.T\n # print(ts.shape)\n else:\n print(\"Unknown input type\")\n exit(1)\n output_file = self.config[\"metric\"] + \"_\" + self.config[\"input\"].split(\".\")[0]\n\n if self.config[\"metric\"] == \"Oinfo\":\n t = time.time()\n o_info_calculator = OInfoCalculator(self.config)\n Odict = o_info_calculator.run(self.ts, self.config)\n elapsed = time.time() - t\n print(\"Elapsed time is \", elapsed, \" seconds.\")\n save_name = self.config[\"input\"].split(\".\")[0] + \"_O\"\n print(\"Saving and trying to load again\")\n save_obj(Odict, save_name)\n Odict_Oinfo = load_obj('Odict_Oinfo')\n print(\"Done.\")\n\n elif self.config[\"metric\"] == \"dOinfo\":\n t = time.time()\n d_o_info_calculator = DOInfoCalculator(self.config)\n dOdict = d_o_info_calculator.run(self.ts, self.config)\n elapsed = time.time() - t\n print(\"Elapsed time is \", elapsed, \" seconds.\")\n save_name = self.config[\"input\"].split(\".\")[0] + \"_dO\"\n save_obj(dOdict, save_name)\n print(\"done.\")\n\n elif self.config[\"metric\"] == \"local_o\":\n t = time.time()\n ts = pd.read_csv(\"data/\"+self.config[\"input\"], sep='\\t')\n #ts = pd.DataFrame(self.ts.transpose())\n if \"workers\" in self.config:\n n_workers = self.config[\"workers\"]\n else:\n n_workers = 8\n local_o_hoi = LocalOHOI(probability_estimator=StatesProbabilities(ts), n_workers=n_workers)\n local_o = local_o_hoi.exhaustive_local_o(ts)\n elapsed = time.time() - t\n print(\"Elapsed time is \", elapsed, \" seconds.\")\n print(\"Saving \" + output_file + \" and trying to load again\")\n save_obj(local_o, output_file)\n local_o = load_obj(output_file)\n print(\"Done.\")\n else:\n print(\"ERROR : Unknown metric\")\n exit(1)\n" ]
[ [ "numpy.array", "numpy.genfromtxt", "pandas.read_csv" ] ]
BigBugX/TensorFlow_MobileNetV2_PortraitMatting
[ "2f299900fd50bb32806cd05a725f42e6cc0cd91d" ]
[ "portrait_plus.py" ]
[ "__author__ = 'Will@PCVG'\n# An implementation based on \"TF_PetroWU\"\n\nimport numpy as np\nimport scipy.io as sio\nimport os\nfrom PIL import Image\nimport math\nfrom scipy import misc\n\nclass BatchDatset:\n imgs = []\n max_batch = 0\n batch_size = 1\n cur_imgs = []\n cur_labels = []\n cur_batch = 0 # index of batch generated\n cur_ind = 0 # index of current image in imgs\n img_width = 600\n img_height = 800\n\n def __init__(self, imgs_path, batch_size=1):\n self.imgs = sio.loadmat(imgs_path)['trainlist'][0]\n #self.labels = sio.loadmat(labels_path)['test_list'][0]\n self.batch_size = batch_size\n #self.max_batch = len(self.imgs) * 9 / batch_size\n self.cur_imgs, self.cur_labels = self.get_variations(self.imgs[0])\n\n def next_batch(self):\n while len(self.cur_imgs) < self.batch_size: # if not enough, get the next image\n self.cur_ind += 1\n #print('appending', self.cur_ind)\n if self.cur_ind >= len(self.imgs):\n #print('leaving', self.cur_ind)\n break\n cur_name = self.imgs[self.cur_ind]\n tmp_imgs, tmp_labels = self.get_variations(cur_name)\n self.cur_imgs += tmp_imgs\n self.cur_labels += tmp_labels\n if len(self.cur_imgs) >= self.batch_size:\n #print('getting', self.cur_ind)\n rimat = np.zeros((self.batch_size, self.img_height, self.img_width, 6), dtype=np.float)\n ramat = np.zeros((self.batch_size, self.img_height, self.img_width, 1), dtype=np.int)\n self.cur_batch += 1 # output a new batch\n for i in range(self.batch_size):\n rimat[i] = self.cur_imgs.pop(0)\n ramat[i, :, :, 0] = self.cur_labels.pop(0)\n #print('batch:', self.cur_batch, 'at img:', self.imgs[self.cur_ind], 'generate image shape', rimat.shape, 'and label shape', ramat.shape)\n return rimat, ramat\n return [], []\n\n def rotateNormalizedCord(self, matx, maty, angle):\n h, w = matx.shape\n x_avg = np.mean(matx)\n x_min = np.min(matx)\n y_avg = np.mean(maty)\n y_min = np.min(maty)\n xmat = np.zeros((h, w), dtype=np.float)\n ymat = np.zeros((h, w), dtype=np.float)\n for k in range(h):\n for j in range(w):\n cor_y = k - h / 2\n cor_x = j - w / 2\n if cor_x == 0 and cor_y == 0:\n xmat[k][j] = x_avg\n ymat[k][j] = y_avg\n else:\n x_dis = math.cos(math.pi / 2 - angle) * (-math.tan(math.pi / 2 - angle) * cor_x + cor_y)\n xmat[k][j] = x_avg - (x_avg - x_min) * x_dis * 2 / w\n y_dis = math.cos(angle) * (math.tan(angle) * cor_x + cor_y)\n ymat[k][j] = y_avg + (y_avg - y_min) * y_dis * 2 / h\n return xmat, ymat\n\n def scaleNormalizedCord(self, matx, maty, scale):\n h, w = matx.shape\n x_avg = np.mean(matx)\n x_max = np.max(matx)\n y_avg = np.mean(maty)\n y_max = np.max(maty)\n xmat = np.zeros((h, w), dtype=np.float)\n ymat = np.zeros((h, w), dtype=np.float)\n for k in range(h):\n for j in range(w):\n cor_y = k - h / 2\n cor_x = j - w / 2\n xmat[k][j] = x_avg + (x_max - x_avg) * cor_x / scale\n ymat[k][j] = y_avg + (y_max - y_avg) * cor_y / scale\n return xmat, ymat\n\n def get_variations(self, img_name):\n imgs = []\n labels = []\n stp = str(img_name)\n if img_name < 10:\n stp = '0000' + stp\n elif img_name < 100:\n stp = '000' + stp\n elif img_name < 1000:\n stp = '00' + stp\n else:\n stp = '0' + stp\n img_path = 'data/portraitFCN+_data/' + stp + '.mat'\n alpha_path = 'data/images_mask/' + stp + '_mask.mat'\n if os.path.exists(img_path) and os.path.exists(alpha_path):\n imat = sio.loadmat(img_path)['img']\n amat = sio.loadmat(alpha_path)['mask']\n nimat = np.array(imat, dtype=np.float)\n namat = np.array(amat, dtype=np.int)\n imgs.append(nimat)\n labels.append(namat)\n\n angs = [-45, -22, 22, 45]\n gammas = [0.5, 0.8, 1.2, 
1.5]\n scales = [0.6, 0.8, 1.2, 1.5]\n h, w, _ = nimat.shape\n org_mat = np.zeros((h, w, 3), dtype=np.int)\n app_mat = np.zeros((h, w, 3), dtype=np.int)\n min3 = np.min(nimat[:, :, 3])\n min4 = np.min(nimat[:, :, 4])\n min5 = np.min(nimat[:, :, 5])\n ran3 = np.max(nimat[:, :, 3]) - min3\n ran4 = np.max(nimat[:, :, 4]) - min4\n ran5 = np.max(nimat[:, :, 5]) - min5\n org_mat[:, :, 0] = np.round(nimat[:, :, 2] * 255 + 122.675)\n org_mat[:, :, 1] = np.round(nimat[:, :, 1] * 255 + 116.669)\n org_mat[:, :, 2] = np.round(nimat[:, :, 0] * 255 + 104.008)\n if ran3 != 0:\n app_mat[:, :, 0] = np.round((nimat[:, :, 3] - min3) * 255 / ran3)\n else:\n app_mat[:, :, 0] = min3\n if ran4 != 0:\n app_mat[:, :, 1] = np.round((nimat[:, :, 4] - min4) * 255 / ran4)\n else:\n app_mat[:, :, 0] = min4\n if ran5 != 0:\n app_mat[:, :, 2] = np.round((nimat[:, :, 5] - min5) * 255 / ran5)\n else:\n app_mat[:, :, 0] = min5\n i_img = Image.fromarray(np.uint8(org_mat))\n p_img = Image.fromarray(np.uint8(app_mat))\n a_img = Image.fromarray(np.uint8(amat))\n for i in range(4):\n # rotation\n tmpi_img = i_img.rotate(angs[i])\n tmpp_img = p_img.rotate(angs[i])\n tmpa_img = a_img.rotate(angs[i])\n tmpri_img = np.array(tmpi_img, dtype=np.int)\n tmprp_img = np.array(tmpp_img, dtype=np.int)\n rot_p1, rot_p2 = self.rotateNormalizedCord(nimat[:, :, 3], nimat[:, :, 4], angs[i] * math.pi / 180)\n rimat = np.zeros((h, w, 6), dtype=np.float)\n rimat[:, :, 0] = (tmpri_img[:, :, 2] - 104.008) / 255\n rimat[:, :, 1] = (tmpri_img[:, :, 1] - 116.669) / 255\n rimat[:, :, 2] = (tmpri_img[:, :, 0] - 122.675) / 255\n rimat[:, :, 3] = rot_p1\n rimat[:, :, 4] = rot_p2\n rimat[:, :, 5] = tmprp_img[:, :, 2] * ran5 / 255 + min5\n imgs.append(rimat)\n labels.append(np.array(tmpa_img, dtype=np.int))\n # gamma transformation\n tmp_nimat = np.array(imat, dtype=np.float)\n tmp_nimat[:, :, 0] = tmp_nimat[:, :, 0] + 104.008 / 255\n tmp_nimat[:, :, 0] = (pow(tmp_nimat[:, :, 0], gammas[i]) - pow(104.008 / 255, gammas[i]))\n tmp_nimat[:, :, 1] = tmp_nimat[:, :, 1] + 116.669 / 255\n tmp_nimat[:, :, 1] = (pow(tmp_nimat[:, :, 1], gammas[i]) - pow(116.669 / 255, gammas[i]))\n tmp_nimat[:, :, 2] = tmp_nimat[:, :, 2] + 122.675 / 255\n tmp_nimat[:, :, 2] = (pow(tmp_nimat[:, :, 2], gammas[i]) - pow(122.675 / 255, gammas[i]))\n imgs.append(tmp_nimat)\n labels.append(namat)\n # scale transformation\n if scales[i] > 1.0:\n resize_box = (round(scales[i] * w), round(scales[i] * h))\n si_img = i_img.resize(resize_box, Image.ANTIALIAS)\n sp_img = p_img.resize(resize_box, Image.ANTIALIAS)\n sa_img = a_img.resize(resize_box, Image.ANTIALIAS)\n crop_up, crop_down = (scales[i] - 1) / 2, (scales[i] + 1) / 2\n crop_box = (round(crop_up * w), round(crop_up * h), round(crop_down * w), round(crop_down * h))\n ci_img = si_img.crop(crop_box)\n cp_img = sp_img.crop(crop_box)\n ca_img = sa_img.crop(crop_box)\n tmpsi_img = np.array(ci_img, dtype=np.int)\n tmpsp_img = np.array(cp_img, dtype=np.int)\n tmpsa_img = np.array(ca_img, dtype=np.int)\n simat = np.zeros(imat.shape, dtype=np.float)\n simat[:, :, 0] = (tmpsi_img[:, :, 2] - 104.008) / 255\n simat[:, :, 1] = (tmpsi_img[:, :, 1] - 116.669) / 255\n simat[:, :, 2] = (tmpsi_img[:, :, 0] - 122.675) / 255\n xmat, ymat = self.scaleNormalizedCord(nimat[:, :, 3], nimat[:, :, 4], scales[i] * 300)\n simat[:, :, 3] = xmat\n simat[:, :, 4] = ymat\n simat[:, :, 5] = tmpsp_img[:, :, 2] * ran5 / 255 + min5\n imgs.append(simat)\n labels.append(tmpsa_img)\n else:\n resize_box = (round(scales[i] * w), round(scales[i] * h))\n si_img = 
i_img.resize(resize_box, Image.ANTIALIAS)\n sp_img = p_img.resize(resize_box, Image.ANTIALIAS)\n sa_img = a_img.resize(resize_box, Image.ANTIALIAS)\n tmpsi_img = np.array(si_img, dtype=np.int)\n tmpsp_img = np.array(sp_img, dtype=np.int)\n tmpsa_img = np.array(sa_img, dtype=np.int)\n simat = np.zeros(imat.shape, dtype=np.float)\n samat = np.zeros(amat.shape, dtype=np.int)\n crop_up, crop_down = (1 - scales[i]) / 2, (1 + scales[i]) / 2\n simat[round(crop_up * h):round(crop_down * h), round(crop_up * w):round(crop_down * w), 0] = (tmpsi_img[:, :, 2] - 104.008) / 255\n simat[round(crop_up * h):round(crop_down * h), round(crop_up * w):round(crop_down * w), 1] = (tmpsi_img[:, :, 1] - 116.669) / 255\n simat[round(crop_up * h):round(crop_down * h), round(crop_up * w):round(crop_down * w), 2] = (tmpsi_img[:, :, 0] - 122.675) / 255\n simat[round(crop_up * h):round(crop_down * h), round(crop_up * w):round(crop_down * w), 5] = tmpsp_img[:, :, 2] * ran5 / 255 + min5\n samat[round(crop_up * h):round(crop_down * h), round(crop_up * w):round(crop_down * w)] = tmpsa_img\n xmat, ymat = self.scaleNormalizedCord(nimat[:, :, 3], nimat[:, :, 4], scales[i] * 300)\n simat[:, :, 3] = xmat\n simat[:, :, 4] = ymat\n imgs.append(simat)\n labels.append(samat)\n return imgs, labels\n\n\nclass TestDataset:\n imgs = []\n max_batch = 0\n batch_size = 0\n cur_batch = 0 # index of batch generated\n cur_ind = -1 # index of current image in imgs\n img_width = 600\n img_height = 800\n\n def __init__(self, imgs_path, batch_size=1):\n self.imgs = sio.loadmat(imgs_path)['testlist'][0]\n #self.labels = sio.loadmat(labels_path)['test_list'][0]\n self.batch_size = batch_size\n #self.max_batch = len(self.imgs) * 9 / batch_size\n #self.cur_imgs, self.cur_labels = self.get_images(self.imgs[0])\n\n def next_batch(self):\n cur_imgs = []\n cur_labels = []\n cur_orgs = []\n while len(cur_imgs) < self.batch_size: # if not enough, get the next image\n self.cur_ind += 1\n #print('appending', self.cur_ind)\n if self.cur_ind >= len(self.imgs):\n #print('leaving', self.cur_ind)\n break\n cur_name = self.imgs[self.cur_ind]\n tmp_img, tmp_label, tmp_org = self.get_images(cur_name)\n if tmp_img is not None:\n cur_imgs.append(tmp_img)\n cur_labels.append(tmp_label)\n cur_orgs.append(tmp_org)\n if len(cur_imgs) == self.batch_size:\n #print('getting', self.cur_ind)\n rimat = np.zeros((self.batch_size, self.img_height, self.img_width, 6), dtype=np.float)\n org_mat = np.zeros((self.batch_size, self.img_height, self.img_width, 3), dtype=np.int)\n ramat = np.zeros((self.batch_size, self.img_height, self.img_width, 1), dtype=np.int)\n self.cur_batch += 1 # output a new batch\n for i in range(self.batch_size):\n rimat[i] = cur_imgs.pop(0)\n org_mat[i] = cur_orgs.pop(0)\n ramat[i, :, :, 0] = cur_labels.pop(0)\n #print('getting', ramat[0, 200:210, 200:220])\n #print('batch:', self.cur_batch, 'at img:', self.imgs[self.cur_ind], 'generate image shape', rimat.shape, 'and label shape', ramat.shape)\n return rimat, ramat, org_mat\n return [], [], []\n\n def get_images(self, img_name):\n stp = str(img_name)\n if img_name < 10:\n stp = '0000' + stp\n elif img_name < 100:\n stp = '000' + stp\n elif img_name < 1000:\n stp = '00' + stp\n else:\n stp = '0' + stp\n img_path = 'data/portraitFCN+_data/' + stp + '.mat'\n alpha_path = 'data/images_mask/' + stp + '_mask.mat'\n if os.path.exists(img_path) and os.path.exists(alpha_path):\n imat = sio.loadmat(img_path)['img']\n amat = sio.loadmat(alpha_path)['mask']\n nimat = np.array(imat, dtype=np.float)\n namat = 
np.array(amat, dtype=np.int)\n h, w, _ = nimat.shape\n org_mat = np.zeros((h, w, 3), dtype=np.int)\n for i in range(h):\n for j in range(w):\n org_mat[i][j][0] = round(nimat[i][j][2] * 255 + 122.675)\n org_mat[i][j][1] = round(nimat[i][j][1] * 255 + 116.669)\n org_mat[i][j][2] = round(nimat[i][j][0] * 255 + 104.008)\n return nimat, namat, org_mat\n return None, None, None\n\n\nif __name__ == '__main__':\n dat = BatchDatset('data/trainlist.mat', batch_size=13)\n rimat, ramat = dat.next_batch()\n for i in range(13):\n imat = rimat[i]\n amat = ramat[i]\n rgb = np.zeros((imat.shape[0], imat.shape[1], 3), dtype=np.int)\n rgb[:, :, 0] = np.round(imat[:, :, 2] * 255 + 122.675)\n rgb[:, :, 1] = np.round(imat[:, :, 1] * 255 + 116.669)\n rgb[:, :, 2] = np.round(imat[:, :, 0] * 255 + 104.008)\n misc.imsave('res/org' + str(i) + '.jpg', rgb)\n xxc = imat[:, :, 3]\n xxc = np.round((xxc - np.min(xxc) / (np.max(xxc) - np.min(xxc))) * 255)\n misc.imsave('res/xxc' + str(i) + '.jpg', xxc)\n yyc = imat[:, :, 4]\n yyc = np.round((yyc - np.min(yyc) / (np.max(yyc) - np.min(yyc))) * 255)\n misc.imsave('res/yyc' + str(i) + '.jpg', yyc)\n mean = imat[:, :, 5] * 255\n misc.imsave('res/mean' + str(i) + '.jpg', mean)\n alpha = amat * 255\n alpha = alpha.reshape((alpha.shape[0], alpha.shape[1]))\n misc.imsave('res/alpha' + str(i) + '.jpg', alpha)\n" ]
[ [ "numpy.max", "numpy.array", "numpy.uint8", "numpy.zeros", "numpy.round", "scipy.io.loadmat", "numpy.min", "numpy.mean" ] ]
philip-fu/UniTrack
[ "9bc78d162251195650a4af76ec35cba97ba3fc44" ]
[ "tools/gen_mot16_gt.py" ]
[ "import os.path as osp\nimport os\nimport numpy as np\n\n# Modify here\nmot16_root = 'C:/Users/Philip Fu/datasets/MOT16/images'\nseq_root = osp.join(mot16_root, 'train')\n\nlabel_root = osp.join(mot16_root, 'obs', 'gt', 'train')\nos.makedirs(label_root, exist_ok=True)\nseqs = [s for s in os.listdir(seq_root)]\n\ntid_curr = 0\ntid_last = -1\nfor seq in seqs:\n seq_info = open(osp.join(seq_root, seq, 'seqinfo.ini')).read()\n seq_width = int(seq_info[seq_info.find('imWidth=') + 8:seq_info.find('\\nimHeight')])\n seq_height = int(seq_info[seq_info.find('imHeight=') + 9:seq_info.find('\\nimExt')])\n\n gt_txt = osp.join(seq_root, seq, 'gt', 'gt.txt')\n gt = np.loadtxt(gt_txt, dtype=np.float64, delimiter=',')\n\n seq_label_root = osp.join(label_root, seq, 'img1')\n os.makedirs(seq_label_root, exist_ok=True)\n\n for fid, tid, x, y, w, h, mark, label, _ in gt:\n if mark == 0 or not label == 1:\n continue\n fid = int(fid)\n tid = int(tid)\n if not tid == tid_last:\n tid_curr += 1\n tid_last = tid\n x += w / 2\n y += h / 2\n label_fpath = osp.join(seq_label_root, '{:06d}.txt'.format(fid))\n label_str = '{:.6f} {:.6f} {:.6f} {:.6f} 1.0\\n'.format(\n x / seq_width, y / seq_height, w / seq_width, h / seq_height)\n with open(label_fpath, 'a') as f:\n f.write(label_str)\n" ]
[ [ "numpy.loadtxt" ] ]
TiagoTostas/NeuroKit
[ "664350463fc1c03eb81f0bba37296762be7c81ae", "664350463fc1c03eb81f0bba37296762be7c81ae" ]
[ "neurokit2/ecg/ecg_fixpeaks.py", "neurokit2/complexity/embedding.py" ]
[ "# - * - coding: utf-8 - * -\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.patches\n\n\ndef ecg_fixpeaks(rpeaks, sampling_rate=1000, iterative=True, show=False):\n \"\"\"Correct R-peaks location based on their interval (RRi).\n\n Identify erroneous inter-beat-intervals. Lipponen & Tarvainen (2019).\n\n Parameters\n ----------\n rpeaks : dict\n The samples at which the R-peak occur. Dict returned by\n `ecg_findpeaks()`.\n sampling_rate : int\n The sampling frequency of the signal that contains the peaks (in Hz,\n i.e., samples/second).\n iterative : bool\n Whether or not to apply the artifact correction repeatedly (results\n in superior artifact correction).\n show : bool\n Whether or not to visualize artifacts and artifact thresholds.\n\n Returns\n -------\n artifacts : dict\n A dictionary containing the indices of artifacts, accessible with the\n keys \"ectopic\", \"missed\", \"extra\", and \"longshort\".\n\n See Also\n --------\n ecg_clean, ecg_findpeaks, ecg_peaks, ecg_rate, ecg_process, ecg_plot\n\n Examples\n --------\n >>> import neurokit2 as nk\n >>> import matplotlib.pyplot as plt\n\n >>> ecg = nk.ecg_simulate(duration=240, noise=0.1, heart_rate=70,\n >>> random_state=41)\n >>> rpeaks_uncorrected = nk.ecg_findpeaks(ecg)\n >>> artifacts, rpeaks_corrected = nk.ecg_fixpeaks(rpeaks_uncorrected,\n >>> iterative=True,\n >>> show=True)\n >>> rate_corrected = nk.ecg_rate(rpeaks_uncorrected,\n >>> desired_length=len(ecg))\n >>> rate_uncorrected = nk.ecg_rate(rpeaks, desired_length=len(ecg_signal))\n >>>\n >>> fig, ax = plt.subplots()\n >>> ax.plot(rate_uncorrected, label=\"heart rate without artifact correction\")\n >>> ax.plot(rate_corrected, label=\"heart rate with artifact correction\")\n >>> ax.legend(loc=\"upper right\")\n\n References\n ----------\n - Lipponen, J. A., & Tarvainen, M. P. (2019). A robust algorithm for heart\n rate variability time series artefact correction using novel beat\n classification. Journal of medical engineering & technology, 43(3),\n 173-181. 
10.1080/03091902.2019.1640306\n\n \"\"\"\n # Format input.\n rpeaks = rpeaks[\"ECG_R_Peaks\"]\n # Get corrected peaks and normal-to-normal intervals.\n artifacts, subspaces = _find_artifacts(rpeaks, sampling_rate=sampling_rate)\n peaks_clean = _correct_artifacts(artifacts, rpeaks)\n\n if iterative:\n\n # Iteratively apply the artifact correction until the number of artifact\n # reaches an equilibrium (i.e., the number of artifacts does not change\n # anymore from one iteration to the next).\n n_artifacts_previous = np.inf\n n_artifacts_current = sum([len(i) for i in artifacts.values()])\n\n previous_diff = 0\n\n while n_artifacts_current - n_artifacts_previous != previous_diff:\n\n previous_diff = n_artifacts_previous - n_artifacts_current\n\n artifacts, subspaces = _find_artifacts(peaks_clean,\n sampling_rate=sampling_rate)\n peaks_clean = _correct_artifacts(artifacts, peaks_clean)\n\n n_artifacts_previous = n_artifacts_current\n n_artifacts_current = sum([len(i) for i in artifacts.values()])\n\n if show:\n _plot_artifacts_lipponen2019(artifacts, subspaces)\n\n return artifacts, {\"ECG_R_Peaks\": peaks_clean}\n\n\n# =============================================================================\n# Lipponen & Tarvainen (2019).\n# =============================================================================\ndef _find_artifacts(rpeaks, c1=0.13, c2=0.17, alpha=5.2, window_width=91,\n medfilt_order=11, sampling_rate=1000):\n\n peaks = np.ravel(rpeaks)\n\n # Compute period series (make sure it has same numer of elements as peaks);\n # peaks are in samples, convert to seconds.\n rr = np.ediff1d(peaks, to_begin=0) / sampling_rate\n # For subsequent analysis it is important that the first element has\n # a value in a realistic range (e.g., for median filtering).\n rr[0] = np.mean(rr[1:])\n\n # Artifact identification #################################################\n ###########################################################################\n\n # Compute dRRs: time series of differences of consecutive periods (dRRs).\n drrs = np.ediff1d(rr, to_begin=0)\n drrs[0] = np.mean(drrs[1:])\n # Normalize by threshold.\n th1 = _compute_threshold(drrs, alpha, window_width)\n drrs /= th1\n\n # Cast dRRs to subspace s12.\n # Pad drrs with one element.\n padding = 2\n drrs_pad = np.pad(drrs, padding, \"reflect\")\n\n s12 = np.zeros(drrs.size)\n for d in np.arange(padding, padding + drrs.size):\n\n if drrs_pad[d] > 0:\n s12[d - padding] = np.max([drrs_pad[d - 1], drrs_pad[d + 1]])\n elif drrs_pad[d] < 0:\n s12[d - padding] = np.min([drrs_pad[d - 1], drrs_pad[d + 1]])\n\n # Cast dRRs to subspace s22.\n s22 = np.zeros(drrs.size)\n for d in np.arange(padding, padding + drrs.size):\n\n if drrs_pad[d] >= 0:\n s22[d - padding] = np.min([drrs_pad[d + 1], drrs_pad[d + 2]])\n elif drrs_pad[d] < 0:\n s22[d - padding] = np.max([drrs_pad[d + 1], drrs_pad[d + 2]])\n\n # Compute mRRs: time series of deviation of RRs from median.\n df = pd.DataFrame({'signal': rr})\n medrr = df.rolling(medfilt_order, center=True,\n min_periods=1).median().signal.to_numpy()\n mrrs = rr - medrr\n mrrs[mrrs < 0] = mrrs[mrrs < 0] * 2\n # Normalize by threshold.\n th2 = _compute_threshold(mrrs, alpha, window_width)\n mrrs /= th2\n\n # Artifact classification #################################################\n ###########################################################################\n\n # Artifact classes.\n extra_idcs = []\n missed_idcs = []\n ectopic_idcs = []\n longshort_idcs = []\n\n i = 0\n while i < rr.size - 2: # The flow control is 
implemented based on Figure 1\n\n if np.abs(drrs[i]) <= 1: # Figure 1\n i += 1\n continue\n eq1 = np.logical_and(drrs[i] > 1, s12[i] < (-c1 * drrs[i] - c2)) # Figure 2a\n eq2 = np.logical_and(drrs[i] < -1, s12[i] > (-c1 * drrs[i] + c2)) # Figure 2a\n\n if np.any([eq1, eq2]):\n # If any of the two equations is true.\n ectopic_idcs.append(i)\n i += 1\n continue\n # If none of the two equations is true.\n if ~np.any([np.abs(drrs[i]) > 1, np.abs(mrrs[i]) > 3]): # Figure 1\n i += 1\n continue\n longshort_candidates = [i]\n # Check if the following beat also needs to be evaluated.\n if np.abs(drrs[i + 1]) < np.abs(drrs[i + 2]):\n longshort_candidates.append(i + 1)\n\n for j in longshort_candidates:\n # Long beat.\n eq3 = np.logical_and(drrs[j] > 1, s22[j] < -1) # Figure 2b\n # Long or short.\n eq4 = np.abs(mrrs[j]) > 3 # Figure 1\n # Short beat.\n eq5 = np.logical_and(drrs[j] < -1, s22[j] > 1) # Figure 2b\n\n if ~np.any([eq3, eq4, eq5]):\n # If none of the three equations is true: normal beat.\n i += 1\n continue\n # If any of the three equations is true: check for missing or extra\n # peaks.\n\n # Missing.\n eq6 = np.abs(rr[j] / 2 - medrr[j]) < th2[j] # Figure 1\n # Extra.\n eq7 = np.abs(rr[j] + rr[j + 1] - medrr[j]) < th2[j] # Figure 1\n\n # Check if extra.\n if np.all([eq5, eq7]):\n extra_idcs.append(j)\n i += 1\n continue\n # Check if missing.\n if np.all([eq3, eq6]):\n missed_idcs.append(j)\n i += 1\n continue\n # If neither classified as extra or missing, classify as \"long or\n # short\".\n longshort_idcs.append(j)\n i += 1\n\n # Prepare output\n artifacts = {\"ectopic\": ectopic_idcs, \"missed\": missed_idcs,\n \"extra\": extra_idcs, \"longshort\": longshort_idcs}\n\n subspaces = {\"rr\": rr, \"drrs\": drrs, \"mrrs\": mrrs, \"s12\": s12, \"s22\": s22,\n \"c1\": c1, \"c2\": c2}\n\n return artifacts, subspaces\n\n\ndef _compute_threshold(signal, alpha, window_width):\n\n df = pd.DataFrame({'signal': np.abs(signal)})\n q1 = df.rolling(window_width, center=True,\n min_periods=1).quantile(.25).signal.to_numpy()\n q3 = df.rolling(window_width, center=True,\n min_periods=1).quantile(.75).signal.to_numpy()\n th = alpha * ((q3 - q1) / 2)\n\n return th\n\n\ndef _correct_artifacts(artifacts, peaks):\n\n # Artifact correction\n #####################\n # The integrity of indices must be maintained if peaks are inserted or\n # deleted: for each deleted beat, decrease indices following that beat in\n # all other index lists by 1. 
Likewise, for each added beat, increment the\n # indices following that beat in all other lists by 1.\n extra_idcs = artifacts[\"extra\"]\n missed_idcs = artifacts[\"missed\"]\n ectopic_idcs = artifacts[\"ectopic\"]\n longshort_idcs = artifacts[\"longshort\"]\n\n # Delete extra peaks.\n if extra_idcs:\n peaks = _correct_extra(extra_idcs, peaks)\n # Update remaining indices.\n missed_idcs = _update_indices(extra_idcs, missed_idcs, -1)\n ectopic_idcs = _update_indices(extra_idcs, ectopic_idcs, -1)\n longshort_idcs = _update_indices(extra_idcs, longshort_idcs, -1)\n\n # Add missing peaks.\n if missed_idcs:\n peaks = _correct_missed(missed_idcs, peaks)\n # Update remaining indices.\n ectopic_idcs = _update_indices(missed_idcs, ectopic_idcs, 1)\n longshort_idcs = _update_indices(missed_idcs, longshort_idcs, 1)\n\n if ectopic_idcs:\n peaks = _correct_misaligned(ectopic_idcs, peaks)\n\n if longshort_idcs:\n peaks = _correct_misaligned(longshort_idcs, peaks)\n\n return peaks\n\n\ndef _correct_extra(extra_idcs, peaks):\n\n corrected_peaks = peaks.copy()\n corrected_peaks = np.delete(corrected_peaks, extra_idcs)\n\n return corrected_peaks\n\n\ndef _correct_missed(missed_idcs, peaks):\n\n corrected_peaks = peaks.copy()\n missed_idcs = np.array(missed_idcs)\n # Calculate the position(s) of new beat(s). Make sure to not generate\n # negative indices. prev_peaks and next_peaks must have the same\n # number of elements.\n valid_idcs = np.logical_and(missed_idcs > 1,\n missed_idcs < len(corrected_peaks))\n missed_idcs = missed_idcs[valid_idcs]\n prev_peaks = corrected_peaks[[i - 1 for i in missed_idcs]]\n next_peaks = corrected_peaks[missed_idcs]\n added_peaks = prev_peaks + (next_peaks - prev_peaks) / 2\n # Add the new peaks before the missed indices (see numpy docs).\n corrected_peaks = np.insert(corrected_peaks, missed_idcs, added_peaks)\n\n return corrected_peaks\n\n\ndef _correct_misaligned(misaligned_idcs, peaks):\n\n corrected_peaks = peaks.copy()\n misaligned_idcs = np.array(misaligned_idcs)\n # Make sure to not generate negative indices, or indices that exceed\n # the total number of peaks. 
prev_peaks and next_peaks must have the\n # same number of elements.\n valid_idcs = np.logical_and(misaligned_idcs > 1,\n misaligned_idcs < len(corrected_peaks))\n misaligned_idcs = misaligned_idcs[valid_idcs]\n prev_peaks = corrected_peaks[[i - 1 for i in misaligned_idcs]]\n next_peaks = corrected_peaks[[i + 1 for i in misaligned_idcs]]\n half_ibi = (next_peaks - prev_peaks) / 2\n peaks_interp = prev_peaks + half_ibi\n # Shift the R-peaks from the old to the new position.\n corrected_peaks = np.delete(corrected_peaks, misaligned_idcs)\n corrected_peaks = np.concatenate((corrected_peaks,\n peaks_interp)).astype(int)\n corrected_peaks.sort(kind=\"mergesort\")\n\n return corrected_peaks\n\n\ndef _update_indices(source_idcs, update_idcs, update):\n \"\"\"\n For every element s in source_idcs, change every element u in update_idcs\n according to update, if u is larger than s.\n \"\"\"\n if not update_idcs:\n return update_idcs\n\n for s in source_idcs:\n update_idcs = [u + update if u > s else u for u in update_idcs]\n\n return update_idcs\n\n\ndef _plot_artifacts_lipponen2019(artifacts, info):\n \"\"\"\n \"\"\"\n # Extract parameters\n longshort_idcs = artifacts[\"longshort\"]\n ectopic_idcs = artifacts[\"ectopic\"]\n extra_idcs = artifacts[\"extra\"]\n missed_idcs = artifacts[\"missed\"]\n\n rr = info[\"rr\"]\n drrs = info[\"drrs\"]\n mrrs = info[\"mrrs\"]\n s12 = info[\"s12\"]\n s22 = info[\"s22\"]\n c1 = info[\"c1\"]\n c2 = info[\"c2\"]\n\n # Visualize artifact type indices.\n\n # Set grids\n gs = matplotlib.gridspec.GridSpec(ncols=4, nrows=3,\n width_ratios=[1, 2, 2, 2])\n fig = plt.figure(constrained_layout=False)\n ax0 = fig.add_subplot(gs[0, :-2])\n ax1 = fig.add_subplot(gs[1, :-2])\n ax2 = fig.add_subplot(gs[2, :-2])\n ax3 = fig.add_subplot(gs[:, -1])\n ax4 = fig.add_subplot(gs[:, -2])\n\n ax0.set_title(\"Artifact types\", fontweight=\"bold\")\n ax0.plot(rr, label=\"heart period\")\n ax0.scatter(longshort_idcs, rr[longshort_idcs], marker='x', c='m',\n s=100, zorder=3, label=\"long/short\")\n ax0.scatter(ectopic_idcs, rr[ectopic_idcs], marker='x', c='g', s=100,\n zorder=3, label=\"ectopic\")\n ax0.scatter(extra_idcs, rr[extra_idcs], marker='x', c='y', s=100,\n zorder=3, label=\"false positive\")\n ax0.scatter(missed_idcs, rr[missed_idcs], marker='x', c='r', s=100,\n zorder=3, label=\"false negative\")\n ax0.legend(loc=\"upper right\")\n\n # Visualize first threshold.\n ax1.set_title(\"Consecutive-difference criterion\", fontweight=\"bold\")\n ax1.plot(np.abs(drrs), label=\"difference consecutive heart periods\")\n ax1.axhline(1, c='r', label=\"artifact threshold\")\n ax1.legend(loc=\"upper right\")\n\n # Visualize second threshold.\n ax2.set_title(\"Difference-from-median criterion\", fontweight=\"bold\")\n ax2.plot(np.abs(mrrs), label=\"difference from median over 11 periods\")\n ax2.axhline(3, c=\"r\", label=\"artifact threshold\")\n ax2.legend(loc=\"upper right\")\n\n # Visualize subspaces.\n ax4.set_title(\"Subspace 1\", fontweight=\"bold\")\n ax4.set_xlabel(\"S11\")\n ax4.set_ylabel(\"S12\")\n ax4.scatter(drrs, s12, marker=\"x\", label=\"heart periods\")\n verts0 = [(min(drrs), max(s12)),\n (min(drrs), -c1 * min(drrs) + c2),\n (-1, -c1 * -1 + c2),\n (-1, max(s12))]\n poly0 = matplotlib.patches.Polygon(verts0, alpha=0.3, facecolor=\"r\",\n edgecolor=None, label=\"ectopic periods\")\n ax4.add_patch(poly0)\n verts1 = [(1, -c1 * 1 - c2),\n (1, min(s12)),\n (max(drrs), min(s12)),\n (max(drrs), -c1 * max(drrs) - c2)]\n poly1 = matplotlib.patches.Polygon(verts1, alpha=0.3, 
facecolor=\"r\",\n edgecolor=None)\n ax4.add_patch(poly1)\n ax4.legend(loc=\"upper right\")\n\n ax3.set_title(\"Subspace 2\", fontweight=\"bold\")\n ax3.set_xlabel(\"S21\")\n ax3.set_ylabel(\"S22\")\n ax3.scatter(drrs, s22, marker=\"x\", label=\"heart periods\")\n verts2 = [(min(drrs), max(s22)),\n (min(drrs), 1),\n (-1, 1),\n (-1, max(s22))]\n poly2 = matplotlib.patches.Polygon(verts2, alpha=0.3, facecolor=\"r\",\n edgecolor=None, label=\"short periods\")\n ax3.add_patch(poly2)\n verts3 = [(1, -1),\n (1, min(s22)),\n (max(drrs), min(s22)),\n (max(drrs), -1)]\n poly3 = matplotlib.patches.Polygon(verts3, alpha=0.3, facecolor=\"y\",\n edgecolor=None, label=\"long periods\")\n ax3.add_patch(poly3)\n ax3.legend(loc=\"upper right\")\n", "# -*- coding: utf-8 -*-\nimport numpy as np\nimport matplotlib.animation\nimport matplotlib.pyplot as plt\nimport mpl_toolkits.mplot3d\n\n\ndef embedding(signal, delay=1, dimension=3, show=False):\n \"\"\"Time-delay embedding of a time series (a signal)\n\n A dynamical system can be described by a vector of numbers, called its 'state', that aims to provide a complete description of the system at some point in time. The set of all possible states is called the 'state space'.\n\n Takens's (1981) embedding theorem suggests that a sequence of measurements of a dynamic system includes in itself all the information required to completely reconstruct the state space. Delay coordinate embedding attempts to identify the state s of the system at some time t by searching the past history of observations for similar states, and, by studying the evolution of similar states, infer information about the future of the system.\n\n How to visualize the dynamics of a system? A sequence of state values over time is called a trajectory. Depending on the system, different trajectories can evolve to a common subset of state space called an attractor. The presence and behavior of attractors gives intuition about the underlying dynamical system. We can visualize the system and its attractors by plotting the trajectory of many different initial state values and numerically integrating them to approximate their continuous time evolution on discrete computers.\n\n This function is adapted from `EntroPy <https://github.com/raphaelvallat/entropy>`_ and is equivalent to\n the `delay_embedding()` function from 'nolds'.\n\n Parameters\n ----------\n signal : list, array or Series\n The signal (i.e., a time series) in the form of a vector of values.\n delay : int\n Time delay (often denoted 'Tau', sometimes referred to as 'lag'). In practice, it is common to have a fixed time lag (corresponding for instance to the sampling rate; Gautama, 2003), or to find a suitable value using some algorithmic heuristics (see ``delay_optimal()``).\n dimension : int\n Embedding dimension (often denoted 'm' or 'd', sometimes referred to as 'order'). Typically 2 or 3. It corresponds to the number of compared runs of lagged data. 
If 2, the embedding returns an array with two columns corresponding to the original signal and its delayed (by Tau) version.\n show : bool\n Plot the reconstructed attractor.\n\n Returns\n -------\n array\n Embedded time-series, of shape (n_times - (order - 1) * delay, order)\n\n See Also\n ------------\n embedding_delay, embedding_dimension\n\n Examples\n ---------\n >>> import neurokit2 as nk\n >>>\n >>> # Artifical example\n >>> signal = nk.signal_simulate(duration=2, frequency=5, noise=0.01)\n >>>\n >>> embedded = nk.embedding(signal, delay=50, dimension=2, show=True)\n >>> embedded = nk.embedding(signal, delay=50, dimension=3, show=True)\n >>> embedded = nk.embedding(signal, delay=50, dimension=4, show=True)\n >>>\n >>> # Realistic example\n >>> ecg = nk.ecg_simulate(duration=60*4, sampling_rate=200)\n >>> signal = nk.ecg_rate(nk.ecg_peaks(ecg, sampling_rate=200)[0], sampling_rate=200)\n >>>\n >>> embedded = nk.embedding(signal, delay=250, dimension=2, show=True)\n >>> embedded = nk.embedding(signal, delay=250, dimension=3, show=True)\n >>> embedded = nk.embedding(signal, delay=250, dimension=4, show=True)\n\n References\n -----------\n - Gautama, T., Mandic, D. P., & Van Hulle, M. M. (2003, April). A differential entropy based method for determining the optimal embedding parameters of a signal. In 2003 IEEE International Conference on Acoustics, Speech, and Signal Processing, 2003. Proceedings.(ICASSP'03). (Vol. 6, pp. VI-29). IEEE.\n \"\"\"\n N = len(signal)\n\n # Sanity checks\n if dimension * delay > N:\n raise ValueError(\"NeuroKit error: embedding(): dimension * delay should be lower than length of signal.\")\n if delay < 1:\n raise ValueError(\"NeuroKit error: embedding(): 'delay' has to be at least 1.\")\n\n Y = np.zeros((dimension, N - (dimension - 1) * delay))\n for i in range(dimension):\n Y[i] = signal[i * delay:i * delay + Y.shape[1]]\n embedded = Y.T\n\n if show is True:\n _embedding_plot(embedded)\n\n return embedded\n\n\n# =============================================================================\n# Internals\n# =============================================================================\n\n\ndef _embedding_plot(embedded):\n \"\"\"Plot reconstructed attractor.\n\n The input for this function must be obtained via `nk.embedding()`\n \"\"\"\n if embedded.shape[1] == 2:\n figure = _embedding_plot_2D(embedded)\n elif embedded.shape[1] == 3:\n figure = _embedding_plot_3D(embedded)\n else:\n figure = _embedding_plot_4D(embedded)\n\n return figure\n\n\n# =============================================================================\n# Internal plots\n# =============================================================================\n\ndef _embedding_plot_2D(embedded):\n figure = plt.plot(embedded[:, 0], embedded[:, 1], color='#3F51B5')\n return figure\n\n\ndef _embedding_plot_3D(embedded):\n figure = _plot_3D_colored(x=embedded[:, 0],\n y=embedded[:, 1],\n z=embedded[:, 2],\n color=embedded[:, 2],\n rotate=False)\n return figure\n\ndef _embedding_plot_4D(embedded):\n figure = _plot_3D_colored(x=embedded[:, 0],\n y=embedded[:, 1],\n z=embedded[:, 2],\n color=embedded[:, 3],\n rotate=False)\n return figure\n\n\n\n# =============================================================================\n# Plotting\n# =============================================================================\ndef _plot_3D_colored(x, y, z, color=None, rotate=False):\n if color is None:\n color = z\n\n # Create a set of line segments\n points = np.array([x, y, z]).T.reshape(-1, 1, 3)\n segments = 
np.concatenate([points[:-1], points[1:]], axis=1)\n\n # Color\n norm = plt.Normalize(color.min(), color.max())\n cmap = plt.get_cmap('plasma')\n colors = cmap(norm(color))\n\n # Plot\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n\n for i in range(len(x) - 1):\n seg = segments[i]\n l, = ax.plot(seg[:, 0], seg[:, 1], seg[:, 2], color=colors[i])\n l.set_solid_capstyle('round')\n\n if rotate is True:\n fig = _plot_3D_colored_rotate(fig, ax)\n\n return fig\n\n\n\ndef _plot_3D_colored_rotate(fig, ax):\n\n def rotate(angle):\n ax.view_init(azim=angle)\n\n fig = matplotlib.animation.FuncAnimation(fig,\n rotate,\n frames=np.arange(0, 361, 1),\n interval=10,\n cache_frame_data=False)\n\n return fig\n" ]
[ [ "numpy.max", "numpy.concatenate", "numpy.pad", "numpy.delete", "numpy.array", "numpy.zeros", "pandas.DataFrame", "numpy.min", "numpy.mean", "matplotlib.pyplot.figure", "numpy.logical_and", "numpy.any", "numpy.ediff1d", "numpy.ravel", "numpy.arange", "numpy.abs", "numpy.all", "numpy.insert" ], [ "numpy.concatenate", "numpy.array", "numpy.zeros", "matplotlib.pyplot.get_cmap", "matplotlib.pyplot.plot", "matplotlib.pyplot.figure", "numpy.arange" ] ]
mkirby42/financeApp
[ "34759d43899fc3b067a83476a6371f1def5bbd21" ]
[ "financeApp/pages/page_1.py" ]
[ "import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nfrom app import app\nimport datetime\nimport pandas as pd\nfrom pandas_datareader import data as wb\nimport plotly.graph_objects as go\nfrom alpha_vantage.timeseries import TimeSeries\nfrom alpha_vantage.techindicators import TechIndicators\nfrom alpha_vantage.cryptocurrencies import CryptoCurrencies\nkey = 'X6UBMW7UKN8HP76V'\nts = TimeSeries(key, output_format='pandas')\nti = TechIndicators(key, output_format='pandas')\ncc = CryptoCurrencies(key, output_format = 'pandas')\ntoday = datetime.datetime.today()\n\n\nticker = \"VTI\"\nsource = \"av-daily\"\nstart = datetime.datetime(2010, 1, 1)\ntoday = datetime.datetime.today()\n\nticker_data = wb.DataReader(ticker, source, start, end = today, api_key = key)\nticker_index = ticker_data.index\nticker_dataframe = pd.DataFrame(ticker_data, columns=ticker_data.columns)\n\nticker_dataframe.index = ticker_index\n\nfig = go.Figure()\n\nfig.add_trace(go.Candlestick(\n x = ticker_dataframe.index,\n open = ticker_dataframe['open'],\n high = ticker_dataframe['high'],\n low = ticker_dataframe['low'],\n close = ticker_dataframe['close'],\n name = ticker\n))\n\nlayout = html.Div([\n html.H1('Page 1'),\n dcc.Input(\n id=\"ticker_input\".format(\"text\"),\n type=\"text\",\n placeholder=\"input type {}\".format(\"text\"),\n ),\n html.Div(id=\"ticker_callback_output\"),\n dcc.Graph(\n id='ticker-Candlestick-graph',\n figure=fig\n )\n])\n\[email protected](\n Output(\"ticker_callback_output\", \"children\"),\n [Input(\"ticker_input\".format(\"text\"), \"value\")],\n)\ndef cb_render(val):\n if val != None:\n return val\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n" ]
[ [ "pandas.DataFrame" ] ]
veecos1/Grinstead_And_Snell_Python
[ "1ee23dc4a8db14e5e139e4992b6a08b4d1ae7a48" ]
[ "Lesson 2/Set 1/2.py" ]
[ "import random as ra\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nsimulations = 1000\n\nthird = 0\nfourth = 0\nfifth = 0\nsixth = 0\ntwentieth = 0\n\nfor i in range(simulations):\n spin = ra.random()\n if spin < 1/3:\n third += 1\n elif spin < (1/3 + 1/4):\n fourth +=1\n elif spin < (1/3 + 1/4 + 1/5):\n fifth +=1\n elif spin < (1/3 + 1/4 + 1/5 + 1/6):\n sixth +=1\n else:\n twentieth += 1\n\nx = ['third','fourth','fifth','sixth','twentieth']\nwidht = [1/3, 1/4,1/5, 1/6,1/20]\nheights = [3*third/simulations, 4*fourth/simulations,5*fifth/simulations,6*sixth/simulations,\n 20*twentieth/simulations]\nplt.bar(np.arange(len(x)),heights, align='center', alpha=0.5, width=widht)\nplt.xticks(np.arange(len(x)), x)\nplt.show()\n" ]
[ [ "matplotlib.pyplot.show" ] ]
kirill-pinigin/NeuralPhotoFilter
[ "a28f136eca25958e1836994ee2d4dab08f7c2b9d" ]
[ "NeuralBlocks.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.nn import Parameter\nimport torch.nn.functional as F\nimport numpy as np\n\nclass Identity(torch.nn.Module):\n def __init__(self):\n super(Identity, self).__init__()\n\n def forward(self, x):\n return x\n\n\nclass Flatten(nn.Module):\n def forward(self, input):\n return input.view(input.size(0), -1)\n\n\nclass ColorFullnesCriterion(torch.nn.Module):\n def __init__(self):\n super(ColorFullnesCriterion, self).__init__()\n\n def colorfulness(self, x):\n assert(x.size(1) == 3)\n result = torch.zeros(x.size(0)).to(x.device)\n for i in range(x.size(0)):\n (R, G, B) = x[i][0], x[i][1], x[i][2],\n rg = torch.abs(R - G)\n yb = torch.abs(0.5 * (R + G)- B)\n rgMean, rgStd = torch.mean(rg), torch.std(rg)\n ybMean, ybStd = torch.mean(yb), torch.std(yb)\n stdRoot = torch.sqrt((rgStd ** 2) + (ybStd ** 2))\n meanRoot = torch.sqrt((rgMean ** 2) + (ybMean ** 2))\n result[i] = stdRoot + (0.3 * meanRoot)\n\n return result\n\n def forward(self, x, y):\n return nn.functional.l1_loss(self.colorfulness(x), self.colorfulness(y))\n\n\nclass ColorHistCriterion(torch.nn.Module):\n def __init__(self):\n super(ColorHistCriterion, self).__init__()\n\n def histogram(self, x):\n assert(x.size(1) == 3)\n result = torch.zeros(x.size(0), x.size(1), 255).to(x.device)\n\n for i in range(x.size(0)):\n R, G, B = torch.round(x[i][0]*255.0), torch.round(x[i][1]*255.0), torch.round(x[i][2]*255.0)\n R, G, B = R.data.cpu().numpy(), G.data.cpu().numpy(), B.data.cpu().numpy()\n rh = np.histogram(R, bins=255)[0]\n gh = np.histogram(G, bins=255)[0]\n bh = np.histogram(B, bins=255)[0]\n result[i][0] = torch.from_numpy(gh).float().view(-1).to(x.device)\n result[i][1] = torch.from_numpy(bh).float().view(-1).to(x.device)\n result[i][2] = torch.from_numpy(rh).float().view(-1).to(x.device)\n\n return result\n\n def forward(self, x, y):\n return nn.functional.l1_loss(self.histogram(x), self.histogram(x))\n\n\nclass HueSaturationValueCriterion(torch.nn.Module):\n def __init__(self):\n super(HueSaturationValueCriterion, self).__init__()\n self.criterion = nn.L1Loss()\n self.eps= 1e-6\n\n def hsv(self, im):\n assert (im.size(1) == 3)\n img = im * 0.5 + 0.5\n hue = torch.Tensor(im.shape[0], im.shape[2], im.shape[3]).to(im.device)\n\n hue[ img[:,2]==img.max(1)[0] ] = 4.0 + ( (img[:,0]-img[:,1]) / ( img.max(1)[0] - img.min(1)[0] + self.eps) ) [ img[:,2]==img.max(1)[0] ]\n hue[ img[:,1]==img.max(1)[0] ] = 2.0 + ( (img[:,2]-img[:,0]) / ( img.max(1)[0] - img.min(1)[0] + self.eps) ) [ img[:,1]==img.max(1)[0] ]\n hue[ img[:,0]==img.max(1)[0] ] = (0.0 + ( (img[:,1]-img[:,2]) / ( img.max(1)[0] - img.min(1)[0] + self.eps) ) [ img[:,0]==img.max(1)[0] ]) % 6\n\n hue[img.min(1)[0]==img.max(1)[0]] = 0.0\n hue = hue/6.0\n\n saturation = ( img.max(1)[0] - img.min(1)[0] ) / ( img.max(1)[0] + self.eps )\n saturation[ img.max(1)[0]==0 ] = 0.0\n\n value = img.max(1)[0]\n\n return torch.cat((hue, saturation, value), dim=1)\n\n def forward(self, x, y):\n x_hsv = self.hsv(x)\n y_hsv = self.hsv(y)\n return nn.functional.l1_loss(x_hsv, y_hsv)\n\n\nclass SILU(torch.nn.Module):\n def __init__(self):\n super(SILU, self).__init__()\n\n def forward(self, x):\n out = torch.mul(x, torch.sigmoid(x))\n return out\n\n\nclass Perceptron(torch.nn.Module):\n def __init__(self, in_features, out_features):\n super(Perceptron, self).__init__()\n self.fc = nn.Linear(in_features, out_features)\n\n def forward(self, x):\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n return x\n\n\nclass Padding(torch.nn.Module):\n def __init__(self, 
padding_size = 1):\n super(Padding, self).__init__()\n self.pad = torch.nn.ReflectionPad2d(padding_size)\n\n def forward(self, x):\n return self.pad(x)\n\n\nclass ConvLayer(torch.nn.Conv2d):\n def __init__(self, in_channels, out_channels, kernel_size, stride=1, bias = False):\n super(ConvLayer, self).__init__(in_channels, out_channels, kernel_size, stride=stride, bias = bias)\n padding_size = kernel_size // 2\n self.pad = Padding(padding_size)\n\n def forward(self, x):\n x = self.pad(x)\n x = super(ConvLayer, self).forward(x)\n return x\n\n\nclass AttentionBlock(nn.Module):\n def __init__(self, channels, gate_dimension):\n super(AttentionBlock, self).__init__()\n\n self.tract = nn.Sequential(\n ConvLayer(channels, gate_dimension, kernel_size=1, stride=1, bias=True),\n nn.BatchNorm2d(gate_dimension)\n )\n\n self.skip = nn.Sequential(\n ConvLayer(channels, gate_dimension, kernel_size=1, stride=1, bias=True),\n nn.BatchNorm2d(gate_dimension)\n )\n\n self.gate = nn.Sequential(\n ConvLayer(gate_dimension, channels, kernel_size=1, stride=1, bias=True),\n nn.BatchNorm2d(channels),\n nn.Sigmoid()\n )\n\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, g, x):\n t = self.tract(g)\n s = self.skip(x)\n psi = self.relu(torch.add(t,s))\n psi = self.gate(psi)\n return torch.mul(x, psi)\n\n\nclass BaseBlock(torch.nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, stride, activation = Identity(), bias = False, drop_out : float = 0.5):\n super(BaseBlock, self).__init__()\n self.model = nn.Sequential(\n ConvLayer(in_channels, out_channels, kernel_size, stride, bias),\n nn.BatchNorm2d(out_channels, affine=True),\n nn.Dropout(drop_out),\n activation,\n )\n\n def forward(self, x):\n return self.model(x)\n\n\nclass UpsampleDeConv(torch.nn.Module):\n def __init__(self, in_channels, out_channels,):\n super(UpsampleDeConv, self).__init__()\n self.conv2d = ConvLayer(in_channels, out_channels, 3, 1, bias=False)\n\n def forward(self, x):\n x = torch.nn.functional.interpolate(x, mode='nearest', scale_factor=2)\n x = self.conv2d(x)\n return x\n\n\nclass TransposedDeConv(torch.nn.Module):\n def __init__(self, in_channels, out_channels):\n super(TransposedDeConv, self).__init__()\n self.conv2d = torch.nn.ConvTranspose2d(in_channels, out_channels, 4, 2, 1, bias=False)\n\n def forward(self, x):\n return self.conv2d(x)\n\n\nclass PixelDeConv(torch.nn.Module):\n def __init__(self, in_channels, out_channels):\n super(PixelDeConv, self).__init__()\n self.conv2d = ConvLayer(in_channels, out_channels * 4, 3, 1)\n self.upsample = nn.PixelShuffle(2)\n\n def forward(self, x):\n return self.upsample(self.conv2d(x))\n\n\nclass ResidualBlock(torch.nn.Module):\n def __init__(self, in_channels, out_channels, stride = 1, activation = nn.LeakyReLU(0.2), drop_out : float = 0.5):\n super(ResidualBlock, self).__init__()\n self.conv1 = BaseBlock(in_channels, out_channels, kernel_size=3, stride=stride, activation = activation)\n self.conv2 = BaseBlock(out_channels, out_channels, kernel_size=3, stride=1)\n self.skip = BaseBlock(in_channels, out_channels, kernel_size=1, stride=stride, bias= False)\n\n def forward(self, x):\n residual = self.skip(x)\n x = self.conv1(x)\n x = self.conv2(x)\n return torch.add(x, residual)\n\n\nclass SimpleEncoder(nn.Module):\n def __init__(self, in_size, out_size, activation=nn.LeakyReLU(0.2), bn = True, drop_out : float = 0.5):\n super(SimpleEncoder, self).__init__()\n layers = [ConvLayer(in_size, out_size, 3, 2)]\n\n if bn:\n layers +=[nn.BatchNorm2d(out_size)]\n\n layers 
+=[nn.Dropout(drop_out), activation]\n\n self.model = nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.model(x)\n return x\n\n\nclass SimpleDecoder(nn.Module):\n def __init__(self, in_size, out_size, deconv= UpsampleDeConv, drop_out : float = 0.5):\n super(SimpleDecoder, self).__init__()\n layers = [deconv(in_size, out_size),\n nn.BatchNorm2d(out_size),\n nn.Dropout(drop_out)]\n\n self.model = nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.model(x)\n return x\n\n\nclass TotalVariation(nn.Module):\n def __init__(self):\n super(TotalVariation, self).__init__()\n\n def forward(self,x):\n batch_size = x.size()[0]\n h_x = x.size()[2]\n w_x = x.size()[3]\n count_h = self._tensor_size(x[:,:,1:,:])\n count_w = self._tensor_size(x[:,:,:,1:])\n h_tv = torch.pow((x[:,:,1:,:]-x[:,:,:h_x-1,:]),2).sum()\n w_tv = torch.pow((x[:,:,:,1:]-x[:,:,:,:w_x-1]),2).sum()\n return 2*(h_tv/count_h+w_tv/count_w)/batch_size\n\n def _tensor_size(self,t):\n return t.size()[1]*t.size()[2]*t.size()[3]\n\n\ndef l2normalize(v, eps=1e-12):\n return v / (v.norm() + eps)\n\n\nclass SpectralNorm(nn.Module):\n def __init__(self, module, name='weight', power_iterations=1):\n super(SpectralNorm, self).__init__()\n self.module = module\n self.name = name\n self.power_iterations = power_iterations\n if not self._made_params():\n self._make_params()\n\n def _update_u_v(self):\n u = getattr(self.module, self.name + \"_u\")\n v = getattr(self.module, self.name + \"_v\")\n w = getattr(self.module, self.name + \"_bar\")\n\n height = w.data.shape[0]\n for _ in range(self.power_iterations):\n v.data = l2normalize(torch.mv(torch.t(w.view(height,-1).data), u.data))\n u.data = l2normalize(torch.mv(w.view(height,-1).data, v.data))\n\n # sigma = torch.dot(u.data, torch.mv(w.view(height,-1).data, v.data))\n sigma = u.dot(w.view(height, -1).mv(v))\n setattr(self.module, self.name, w / sigma.expand_as(w))\n\n def _made_params(self):\n try:\n u = getattr(self.module, self.name + \"_u\")\n v = getattr(self.module, self.name + \"_v\")\n w = getattr(self.module, self.name + \"_bar\")\n return True\n except AttributeError:\n return False\n\n\n def _make_params(self):\n w = getattr(self.module, self.name)\n\n height = w.data.shape[0]\n width = w.view(height, -1).data.shape[1]\n\n u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)\n v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)\n u.data = l2normalize(u.data)\n v.data = l2normalize(v.data)\n w_bar = Parameter(w.data)\n\n del self.module._parameters[self.name]\n\n self.module.register_parameter(self.name + \"_u\", u)\n self.module.register_parameter(self.name + \"_v\", v)\n self.module.register_parameter(self.name + \"_bar\", w_bar)\n\n\n def forward(self, *args):\n self._update_u_v()\n return self.module.forward(*args)\n" ]
[ [ "torch.nn.Linear", "torch.round", "torch.cat", "torch.nn.LeakyReLU", "torch.nn.BatchNorm2d", "torch.nn.Parameter", "torch.sigmoid", "numpy.histogram", "torch.mul", "torch.sqrt", "torch.nn.ConvTranspose2d", "torch.abs", "torch.nn.ReflectionPad2d", "torch.Tensor", "torch.nn.functional.l1_loss", "torch.nn.Sequential", "torch.nn.ReLU", "torch.nn.PixelShuffle", "torch.pow", "torch.nn.Dropout", "torch.nn.Sigmoid", "torch.nn.functional.interpolate", "torch.std", "torch.add", "torch.nn.L1Loss", "torch.from_numpy", "torch.mean" ] ]
infradust/s3access
[ "86d15d01480484e14c3d97eadc4d231da4b80ff9" ]
[ "tests/test_pandas_reader.py" ]
[ "import tempfile\nimport unittest\nfrom datetime import date, datetime\n\nimport numpy as np\nimport pandas as pd\n\nfrom s3access.reader import Options\nfrom s3access.s3pandas.reader import Pandas, empty\n\n\nclass StrictPandasReader(unittest.TestCase):\n\n def test_categorical_combine(self):\n a = b\"a\\nb\\n\"\n b = b\"b\\nc\\n\"\n columns = {'a': 'category'}\n reader = Pandas(strict=True)\n a_df = reader.read(a, columns, options=Options())\n self.assertIn('a', a_df.columns)\n self.assertIsInstance(a_df['a'].dtype, pd.CategoricalDtype)\n b_df = reader.read(b, columns, options=Options())\n self.assertIn('a', b_df.columns)\n self.assertIsInstance(b_df['a'].dtype, pd.CategoricalDtype)\n\n c_df = reader.combine([a_df, b_df], options=Options())\n self.assertIn('a', c_df.columns)\n self.assertIsInstance(c_df['a'].dtype, pd.CategoricalDtype)\n\n def test_date_parsing(self):\n a = b\"2021-06-05,2021-06-05T00:10:10\\n2021-06-01,2021-06-01T23:59:59\"\n\n columns = {'a': date, 'b': datetime}\n reader = Pandas(strict=True)\n a_df = reader.read(a, columns, options=Options())\n self.assertIn('a', a_df.columns)\n self.assertEqual(a_df['a'].dtype, np.dtype('<M8[ns]'))\n self.assertEqual(a_df['b'].dtype, np.dtype('<M8[ns]'))\n\n def test_categorical_write_support(self):\n # some version of pyarrow did not support saving categorical data\n a = pd.DataFrame({\"a\": ['a', 'b', 'a', 'a', 'c', 'd', 'a', 'b', 'c', 'd']})\n a['a'] = a['a'].astype('category')\n\n with tempfile.TemporaryFile() as fp:\n a.to_parquet(fp)\n fp.seek(0)\n restored = pd.read_parquet(fp)\n self.assertIn('a', restored.columns)\n self.assertIsInstance(restored['a'].dtype, pd.CategoricalDtype)\n self.assertTrue(np.all(a['a'] == restored['a']))\n\n def test_distinct(self):\n a = b\"a\\nb\\na\\n\"\n b = b\"b\\nc\\n\\na\"\n columns = {'a': str}\n reader = Pandas()\n opts = Options(distinct=True)\n a_df = reader.read(a, columns, options=opts)\n self.assertEqual({'a', 'b'}, {x for x in a_df['a']})\n self.assertEqual(2, len(a_df['a'].tolist()))\n b_df = reader.read(b, columns, options=opts)\n self.assertEqual({'a', 'b', 'c'}, {x for x in b_df['a']})\n self.assertEqual(3, len(b_df['a'].tolist()))\n c_df = reader.combine([a_df, b_df], options=opts)\n self.assertEqual({'a', 'b', 'c'}, {x for x in c_df['a']})\n self.assertEqual(3, len(c_df['a'].tolist()))\n\n\nclass EmptyDataFrame(unittest.TestCase):\n\n def test_empty(self):\n columns = {'a': 'string', 'b': 'int64', 'c': 'float64', 'd': 'category'}\n result = empty(columns)\n self.assertTrue(result.empty)\n self.assertEqual(len(result.columns), len(columns))\n for col, t in columns.items():\n self.assertEqual(result[col].dtype, pd.Series([], dtype=t).dtype)\n" ]
[ [ "pandas.DataFrame", "pandas.Series", "numpy.all", "pandas.read_parquet", "numpy.dtype" ] ]
niklasmaki/BGCN
[ "a9e28f138319fd660e212110952fb67385c33ddf" ]
[ "BGCN_main.py" ]
[ "'''\nCopyright (C) 2019. Huawei Technologies Co., Ltd and McGill University. All rights reserved.\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the MIT License.\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nMIT License for more details.\n'''\nimport tensorflow as tf\nimport numpy as np\nfrom src.GNN_models import GnnModel\nfrom src.data_partition import data_partition_fix, data_partition_random\nimport random\nfrom src.utils import save_log_func\nimport argparse\nimport os\nfrom src.flags import flags\n\ncode_path = os.path.abspath('')\nFLAGS = flags()\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', type=str, default='citeseer', help='which dataset to use')\n parser.add_argument('--label_n_per_class', type=int, default=10, help='trial index')\n parser.add_argument('--data_partition_seed', type=int, default=101,\n help='The seed to use split the data for trial.')\n parser.add_argument('--trial_index', type=int, default=0, help='trial index')\n parser.add_argument('--model_name', type=str, default='BGCN', help='which model we use for training (GCN or BGCN)')\n parser.add_argument('--save_log', type=lambda s: s.lower() in ['true', 't', 'yes', '1'], default=False,\n help='Save log or not')\n parser.add_argument('--random_partition', type=lambda s: s.lower() in ['true', 't', 'yes', '1'], default=True,\n help='Save log or not')\n parser.add_argument('--gpu', type=int, default=0, help='which gpu to use')\n\n args = parser.parse_args()\n data_partition_seed = args.data_partition_seed\n trial_index = args.trial_index\n dataset = args.dataset\n model_name = args.model_name\n save_log = args.save_log\n random_partition = args.random_partition\n label_n_per_class = args.label_n_per_class\n gpu = args.gpu\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(gpu)\n\n print(\"Dataset: {}\".format(dataset))\n print(\"Model: {}\".format(model_name))\n print(\"Trial index: {}\".format(trial_index))\n print(\"Data partition seed: {}\".format(data_partition_seed))\n if save_log:\n file_name = dataset + '_' + model_name + '_softmax_trail_' + str(trial_index) + '_random_seed_' + str(\n data_partition_seed) + '.txt'\n print(\"Save log mode activated, training log will be saved to /log/\" + file_name)\n\n # ==================================Set random seed for result reproduce===============================\n\n tf.set_random_seed(data_partition_seed)\n np.random.seed(data_partition_seed)\n random.seed(data_partition_seed)\n\n # =============================================Save log=================================================\n\n if save_log:\n save_log_func(code_path, dataset, model_name, trial_index, data_partition_seed)\n\n # =============================Load data=================================================\n\n dataset_dir = code_path + '' + '/data'\n if not random_partition:\n adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask, labels = data_partition_fix(\n dataset_dir=dataset_dir, dataset_name=dataset, label_n_per_class=label_n_per_class)\n elif random_partition:\n adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask, labels = data_partition_random(\n dataset_dir=dataset_dir, dataset_name=dataset, label_n_per_class=label_n_per_class)\n else:\n \"Wrong input data format\"\n\n # ==================================Train 
Model===========================================\n\n GNN_Model = GnnModel(FLAGS, features, labels, adj, y_train, y_val, y_test, train_mask, val_mask,\n test_mask, checkpt_name='model_1', model_name=model_name)\n GNN_Model.model_initialization()\n GNN_Model.train()\n\n" ]
[ [ "tensorflow.set_random_seed", "numpy.random.seed" ] ]
praeclarumjj3/OLIE
[ "c0a27e7409f7db51b190bfac114677cb7b5dd669" ]
[ "modules/solov2/solo_utils.py" ]
[ "import torch\nimport numpy as np\nimport torch.nn.functional as F\nfrom PIL import Image\nimport os\nfrom scipy.io import loadmat\nimport matplotlib.pyplot as plt\nfrom torch.autograd import Variable\nimport collections\nimport torchvision.transforms as transforms\n\ndef visualize_maps(maps, name):\n x = maps.cpu() \n dim = int(x.shape[0])\n x = x.permute(1, 2, 0).numpy()\n f, axarr = plt.subplots(int(dim**0.5)+1,int(dim**0.5)+1,figsize=(16,16))\n for j in range(dim):\n r = int(j/int((dim**0.5)+1))\n c = int(j%int((dim**0.5)+1))\n axarr[r,c].imshow(x[:,:,j])\n axarr[r,c].axis('off')\n f.savefig('visualizations/{}.jpg'.format(name))\n\ndef visualize_single_map(mapi, name):\n x = mapi.cpu()\n x = x.permute(1,2,0)\n x = np.uint8(x)\n im = transforms.ToPILImage()(x).convert(\"RGB\")\n im.save(('visulaizations/{}.png'.format(name)))\n\ndef point_nms(heat, kernel=2):\n # kernel must be 2\n hmax = F.max_pool2d(heat, (kernel, kernel), stride=1, padding=1)\n keep = (hmax[:, :, :-1, :-1] == heat).float()\n return heat * keep\n\ndef matrix_nms(cate_labels, seg_masks, sum_masks, cate_scores, sigma=2.0, kernel='gaussian'):\n n_samples = len(cate_labels)\n if n_samples == 0:\n return []\n\n seg_masks = seg_masks.reshape(n_samples, -1).float()\n # inter.\n inter_matrix = torch.mm(seg_masks, seg_masks.transpose(1, 0))\n # union.\n sum_masks_x = sum_masks.expand(n_samples, n_samples)\n # iou.\n iou_matrix = (inter_matrix / (sum_masks_x + sum_masks_x.transpose(1, 0) - inter_matrix)).triu(diagonal=1)\n # label_specific matrix.\n cate_labels_x = cate_labels.expand(n_samples, n_samples)\n label_matrix = (cate_labels_x == cate_labels_x.transpose(1, 0)).float().triu(diagonal=1)\n\n # IoU compensation\n compensate_iou, _ = (iou_matrix * label_matrix).max(0)\n compensate_iou = compensate_iou.expand(n_samples, n_samples).transpose(1, 0)\n\n # IoU decay / soft nms\n delay_iou = iou_matrix * label_matrix\n\n # matrix nms\n if kernel == 'linear':\n delay_matrix = (1 - delay_iou) / (1 - compensate_iou)\n delay_coefficient, _ = delay_matrix.min(0)\n else:\n delay_matrix = torch.exp(-1 * sigma * (delay_iou ** 2))\n compensate_matrix = torch.exp(-1 * sigma * (compensate_iou ** 2))\n delay_coefficient, _ = (delay_matrix / compensate_matrix).min(0)\n\n # update the score.\n cate_scores_update = cate_scores * delay_coefficient\n\n return cate_scores_update\n\n\ndef mask_nms(cate_labels, seg_masks, sum_masks, cate_scores, nms_thr=0.5):\n n_samples = len(cate_scores)\n if n_samples == 0:\n return []\n\n keep = seg_masks.new_ones(cate_scores.shape)\n seg_masks = seg_masks.float()\n\n for i in range(n_samples - 1):\n if not keep[i]:\n continue\n mask_i = seg_masks[i]\n label_i = cate_labels[i]\n for j in range(i + 1, n_samples, 1):\n if not keep[j]:\n continue\n mask_j = seg_masks[j]\n label_j = cate_labels[j]\n if label_i != label_j:\n continue\n # overlaps\n inter = (mask_i * mask_j).sum()\n union = sum_masks[i] + sum_masks[j] - inter\n if union > 0:\n if inter / union > nms_thr:\n keep[j] = False\n else:\n keep[j] = False\n return keep\n\ndef unique(ar, return_index=False, return_inverse=False, return_counts=False):\n ar = np.asanyarray(ar).flatten()\n\n optional_indices = return_index or return_inverse\n optional_returns = optional_indices or return_counts\n\n if ar.size == 0:\n if not optional_returns:\n ret = ar\n else:\n ret = (ar,)\n if return_index:\n ret += (np.empty(0, np.bool),)\n if return_inverse:\n ret += (np.empty(0, np.bool),)\n if return_counts:\n ret += (np.empty(0, np.intp),)\n return ret\n if 
optional_indices:\n perm = ar.argsort(kind='mergesort' if return_index else 'quicksort')\n aux = ar[perm]\n else:\n ar.sort()\n aux = ar\n flag = np.concatenate(([True], aux[1:] != aux[:-1]))\n\n if not optional_returns:\n ret = aux[flag]\n else:\n ret = (aux[flag],)\n if return_index:\n ret += (perm[flag],)\n if return_inverse:\n iflag = np.cumsum(flag) - 1\n inv_idx = np.empty(ar.shape, dtype=np.intp)\n inv_idx[perm] = iflag\n ret += (inv_idx,)\n if return_counts:\n idx = np.concatenate(np.nonzero(flag) + ([ar.size],))\n ret += (np.diff(idx),)\n return ret\n\ndef colorEncode(labelmap, colors, mode='RGB'):\n labelmap = labelmap.astype('int')\n labelmap_rgb = np.zeros((labelmap.shape[0], labelmap.shape[1], 3),\n dtype=np.uint8)\n for label in unique(labelmap):\n if label < 0:\n continue\n labelmap_rgb += (labelmap == label)[:, :, np.newaxis] * \\\n np.tile(colors[label],\n (labelmap.shape[0], labelmap.shape[1], 1))\n\n if mode == 'BGR':\n return labelmap_rgb[:, :, ::-1]\n else:\n return labelmap_rgb\n\ndef as_numpy(obj):\n if isinstance(obj, collections.Sequence):\n return [as_numpy(v) for v in obj]\n elif isinstance(obj, collections.Mapping):\n return {k: as_numpy(v) for k, v in obj.items()}\n elif isinstance(obj, Variable):\n return obj.data.cpu().numpy()\n elif torch.is_tensor(obj):\n return obj.cpu().numpy()\n else:\n return np.array(obj)\n\ndef visualize_instance_map(img, pred, name):\n colors = loadmat('datasets/color150.mat')['colors']\n \n img = img.cpu() \n img = img.permute(1, 2, 0).numpy()\n img = img.astype(np.uint8)\n \n pred = as_numpy(pred.squeeze(0).cpu())\n pred = np.int32(pred)\n \n # colorize prediction\n pred_color = colorEncode(pred, colors).astype(np.uint8)\n\n # aggregate images and save\n im_vis = np.concatenate((img, pred_color), axis=1)\n\n Image.fromarray(im_vis).save(\n os.path.join('visualizations', name+'.jpg'))\n" ]
[ [ "numpy.concatenate", "numpy.uint8", "numpy.array", "numpy.empty", "numpy.zeros", "torch.is_tensor", "scipy.io.loadmat", "numpy.tile", "numpy.nonzero", "numpy.diff", "numpy.cumsum", "numpy.int32", "torch.nn.functional.max_pool2d", "torch.exp", "numpy.asanyarray" ] ]
ashva7/data_analysis
[ "aefe1bd4f99f6f934b16c89eb90bc34321f51f94" ]
[ "data_analysis/_util.py" ]
[ "import array\nfrom threading import Thread\nfrom collections import defaultdict\nimport numpy as np\nimport scipy.sparse\n\nfrom tweepy.error import RateLimitError\nfrom PyQt5 import QtWidgets, QtCore\n\ndef get_text_cleaned(tweet):\n text = tweet['text']\n\n slices = []\n #Strip out the urls.\n if 'urls' in tweet['entities']:\n for url in tweet['entities']['urls']:\n slices += [{'start': url['indices'][0], 'stop': url['indices'][1]}]\n\n #Strip out the hashtags.\n if 'hashtags' in tweet['entities']:\n for tag in tweet['entities']['hashtags']:\n slices += [{'start': tag['indices'][0], 'stop': tag['indices'][1]}]\n\n #Strip out the user mentions.\n if 'user_mentions' in tweet['entities']:\n for men in tweet['entities']['user_mentions']:\n slices += [{'start': men['indices'][0], 'stop': men['indices'][1]}]\n\n #Strip out the media.\n if 'media' in tweet['entities']:\n for med in tweet['entities']['media']:\n slices += [{'start': med['indices'][0], 'stop': med['indices'][1]}]\n\n #Strip out the symbols.\n if 'symbols' in tweet['entities']:\n for sym in tweet['entities']['symbols']:\n slices += [{'start': sym['indices'][0], 'stop': sym['indices'][1]}]\n\n # Sort the slices from highest start to lowest.\n slices = sorted(slices, key=lambda x: -x['start'])\n\n #No offsets, since we're sorted from highest to lowest.\n for s in slices:\n text = text[:s['start']] + text[s['stop']:]\n\n return text\n\n\ndef make_document_term_matrix(token_list):\n \"\"\"Function for creating a document term matrix. Taken from SciKit-Learn. \n returns: `vocabulary` and a sparse matrix document term matrix\n \"\"\"\n vocabulary = defaultdict()\n vocabulary.default_factory = vocabulary.__len__\n j_indices = []\n \"\"\"Construct an array.array of a type suitable for scipy.sparse indices.\"\"\"\n indptr = array.array(str(\"i\"))\n values = array.array(str(\"i\"))\n indptr.append(0)\n\n for tokens in token_list:\n feature_counter = {}\n for token in tokens:\n feature_idx = vocabulary[token]\n if feature_idx not in feature_counter:\n feature_counter[feature_idx] = 1\n else:\n feature_counter[feature_idx] += 1\n j_indices.extend(feature_counter.keys())\n values.extend(feature_counter.values())\n indptr.append(len(j_indices))\n\n vocabulary = dict(vocabulary)\n j_indices = np.asarray(j_indices, dtype=np.intc)\n indptr = np.frombuffer(indptr, dtype=np.intc)\n values = np.frombuffer(values, dtype=np.intc)\n\n X = scipy.sparse.csr_matrix((values, j_indices, indptr),\n shape=(len(indptr) - 1, len(vocabulary)),\n dtype=np.int64)\n X.sort_indices()\n return X, vocabulary\n\n\nclass WorkerThread(Thread):\n \"\"\"\n A simple thread to be used in a thread pool\n \"\"\"\n def __init__(self,\n task: 'queue.Queue',\n fallback_call=None):\n super().__init__()\n self.task = task\n self.daemon = True\n self.fallback_call = fallback_call\n self.start()\n\n def run(self):\n while True:\n func, args, kwargs = self.task.get()\n try:\n func(*args, **kwargs)\n except RateLimitError:\n with self.task.mutex:\n self.task.queue.clear()\n print('Twitter Rate limit reached!')\n if self.fallback_call:\n self.fallback_call()\n\n except Exception as e:\n print(e)\n finally:\n self.task.task_done()\n\n\ndef add_progress_bar(qmainwindow, progress_signal):\n status = qmainwindow.statusBar()\n progress_bar = QtWidgets.QProgressBar()\n def inner():\n progress_bar.setMinimum(0)\n # 4268 iterations\n progress_bar.setMaximum(4268)\n progress_signal.connect(progress_bar.setValue)\n\n status.addWidget(progress_bar, 0)\n return progress_bar, inner\n\n\ndef 
remove_progress_bar(qmainwindow, progress_bar):\n def inner():\n status = qmainwindow.statusBar()\n status.removeWidget(progress_bar)\n return inner\n\n\nclass TabThread(QtCore.QThread):\n \"\"\"\n Responsible for uploading the map data and keeping track of progress\n \"\"\"\n progress_bar_signal = QtCore.pyqtSignal()\n def __init__(self, tab_widget, parent=None):\n super().__init__(parent)\n self._tab_widget = tab_widget\n\n def run(self):\n self.progress_bar_signal.emit()\n sentiment_widget = self._tab_widget._sentiment_map_widget\n sentiment_widget._detailed_map_setup()\n sentiment_widget.update_canvas()\n self._tab_widget.remove_progress_bar.emit()\n" ]
[ [ "numpy.asarray", "numpy.frombuffer" ] ]
yniad/Monte-Carlo-Tree-Search
[ "60ed3fc68aec7036b29d5360ed25296adc9958fb" ]
[ "codigos/_agent.py" ]
[ "import numpy as np\nimport chess\n\n\nclass simple_agent():\n def get_move_values(self,board,both_players=False):\n moves=list(board.legal_moves)\n values = np.zeros([len(moves),2])\n for i,move in enumerate(moves):\n whites=0\n blacks=0\n b=str(board).replace(' ','').replace('\\n','').replace('.','')\n for l in b:\n if l.islower():\n blacks+=simple_agent.weights[l]\n else:\n whites+=simple_agent.weights[l]\n suma = whites+blacks\n relacion = whites/suma\n values[i,:]=[relacion,1-relacion]\n #if relacion>0.5:\n # values[i,:]=[1,0]\n #elif relacion<0.5:\n # values[i,:]=[0,1]\n #else:\n # tmp = np.random.randint(2)\n # values[i,:]=[tmp,(tmp+1)%2]\n if not both_players:\n values = values[:,0] if board.turn else values[:,1]\n return moves,values\n\n def select_move(self,board):\n moves,values=self.get_move_values(board)\n index=np.argmax(values)\n return moves[index]\n\n weights={\n '.':0,\n 'p':1,\n 'P':1,\n 'b':3,\n 'B':3,\n 'n':3,\n 'N':3,\n 'r':5,\n 'R':5,\n 'q':9,\n 'Q':9,\n 'k':15,\n 'K':15\n }\n\n \nclass random_agent():\n def get_move_values(self,board,both_players=False):\n moves=list(board.legal_moves)\n values = np.random.rand(len(moves),1)\n if both_players:\n values = np.concatenate((values,1-values),axis=1)\n return moves,values\n def select_move(self,board):\n moves,values=self.get_move_values(board)\n index=np.argmax(values)\n return moves[index]" ]
[ [ "numpy.concatenate", "numpy.argmax" ] ]
lingtengqiu/Open-PIFuhd
[ "3a66b647bcf5591e818af62735e64a93c4aaef85" ]
[ "engineer/models/PIFu/BasePIFu.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom engineer.utils.geometry import index, orthogonal, perspective\nfrom torch.nn import init\nclass _BasePIFuNet(nn.Module):\n def __init__(self,\n projection_mode:str='orthogonal',\n error_term:str='mse',\n ):\n '''\n Parameters\n backbone: Which backbone you use in your PIFu model {Res32|Hourglass ....}\n head: Which function you want to learn, default: iso-surface--->surface_classifier\n depth: The network aims at predict depth of 3-D points\n projection_model : Either orthogonal or perspective.\n param error_term: nn Loss between the predicted [B, Res, N] and the label [B, Res, N]\n '''\n\n super(_BasePIFuNet, self).__init__()\n self.__name = 'basePIFu'\n\n self.error_term = error_term\n \n if error_term == 'mse':\n self.error_term = nn.MSELoss()\n elif error_term == 'bce':\n self.error_term = nn.BCELoss()\n else:\n raise NotImplementedError\n\n self.index = index\n self.projection = orthogonal if projection_mode == 'orthogonal' else perspective\n\n self.preds = None\n self.labels = None\n\n def forward(self, points, images, calibs, transforms=None)->torch.Tensor:\n '''\n Parameters:\n points: [B, 3, N] world space coordinates of points\n images: [B, C, H, W] input images\n calibs: [B, 3, 4] calibration matrices for each image\n transforms: Optional [B, 2, 3] image space coordinate transforms\n Return: \n [B, Res, N] predictions for each point\n '''\n\n self.filter(images)\n self.query(points, calibs, transforms)\n return self.get_preds()\n\n\n def extract_features(self, images):\n '''\n Filter the input images\n store all intermediate features.\n\n Parameters:\n images: [B, C, H, W] input images\n '''\n raise NotImplementedError\n\n def query(self, points, calibs, transforms=None, labels=None):\n '''\n Given 3D points, query the network predictions for each point.\n Image features should be pre-computed before this call.\n store all intermediate features.\n query() function may behave differently during training/testing.\n\n Parameters:\n points: [B, 3, N] world space coordinates of points\n calibs: [B, 3, 4] calibration matrices for each image\n transforms: Optional [B, 2, 3] image space coordinate transforms\n labels: Optional [B, Res, N] gt labeling\n Return: \n [B, Res, N] predictions for each point\n '''\n None\n\n def get_preds(self):\n '''\n Get the predictions from the last query\n :return: [B, Res, N] network prediction for the last query\n '''\n return self.preds\n\n def get_error(self):\n '''\n Get the network loss from the last query\n\n return: \n loss term\n '''\n return self.error_term(self.preds, self.labels)\n\n\n\n @property\n def name(self):\n __repr = \"{}(Parameters: \".format(self.__name)\n for key in self.input_para.keys():\n __repr+=\"{}:{}, \".format(key,self.input_para[key])\n __repr=__repr[:-2]\n return __repr+')'\n \n @name.setter\n def name(self,v):\n self.__name = v\n\n\n @staticmethod\n def init_weights(net, init_type='normal', init_gain=0.02):\n \"\"\"Initialize network weights.\n Parameters:\n net (network) -- network to be initialized\n init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal\n init_gain (float) -- scaling factor for normal, xavier and orthogonal.\n\n We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might\n work better for some applications. 
Feel free to try yourself.\n \"\"\"\n\n def init_func(m): # define the initialization function\n classname = m.__class__.__name__\n if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):\n if init_type == 'normal':\n init.normal_(m.weight.data, 0.0, init_gain)\n elif init_type == 'xavier':\n init.xavier_normal_(m.weight.data, gain=init_gain)\n elif init_type == 'kaiming':\n init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')\n elif init_type == 'orthogonal':\n init.orthogonal_(m.weight.data, gain=init_gain)\n else:\n raise NotImplementedError('initialization method [%s] is not implemented' % init_type)\n if hasattr(m, 'bias') and m.bias is not None:\n init.constant_(m.bias.data, 0.0)\n elif classname.find(\n 'BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.\n init.normal_(m.weight.data, 1.0, init_gain)\n init.constant_(m.bias.data, 0.0)\n\n print('initialize network with %s' % init_type)\n net.apply(init_func) # apply the initialization function <init_func>\n\n @staticmethod\n def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):\n '''\n Initialize a network:\n Parameters:\n net (network) -- the network to be initialized\n init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal\n gain (float) -- scaling factor for normal, xavier and orthogonal.\n Return:\n None\n '''\n __class__.init_weights(net, init_type, init_gain=init_gain)" ]
[ [ "torch.nn.MSELoss", "torch.nn.init.constant_", "torch.nn.init.kaiming_normal_", "torch.nn.init.normal_", "torch.nn.BCELoss", "torch.nn.init.orthogonal_", "torch.nn.init.xavier_normal_" ] ]
greembow/skynet_mod_bot_discord
[ "fa5c0acef497228b72a6456c1bd4df7f3d94adc6" ]
[ "train.py" ]
[ "import nltk\r\nnltk.download('punkt')\r\nnltk.download('wordnet')\r\nfrom nltk.stem import WordNetLemmatizer\r\nlemmatizer = WordNetLemmatizer()\r\nimport json\r\nimport pickle\r\n\r\nimport numpy as np\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Activation, Dropout\r\nfrom keras.optimizers import SGD\r\nimport random\r\n#imports \r\n\r\n\r\nwords=[]\r\nclasses = []\r\ndocuments = []\r\nignore_words = ['?', '!']\r\ndata_file = open('intents.json').read()\r\nintents = json.loads(data_file)\r\n#creating word variables model for the bot\r\n\r\nfor intent in intents['intents']:\r\n for pattern in intent['patterns']:\r\n\r\n # take each word and tokenize it\r\n w = nltk.word_tokenize(pattern)\r\n words.extend(w)\r\n # adding documents\r\n documents.append((w, intent['tag']))\r\n\r\n # adding classes to our class list\r\n if intent['tag'] not in classes:\r\n classes.append(intent['tag'])\r\n #sorting through the intents.json file for \r\n #each word and tag to make a model around\r\n \r\nwords = [lemmatizer.lemmatize(w.lower()) for w in words if w not in ignore_words]\r\nwords = sorted(list(set(words)))\r\n\r\nclasses = sorted(list(set(classes)))\r\n\r\nprint (len(documents), \"documents\")\r\n\r\nprint (len(classes), \"classes\", classes)\r\n\r\nprint (len(words), \"unique lemmatized words\", words)\r\n\r\n\r\npickle.dump(words,open('words.pkl','wb'))\r\npickle.dump(classes,open('classes.pkl','wb'))\r\n\r\n# initializing training data\r\ntraining = []\r\noutput_empty = [0] * len(classes)\r\nfor doc in documents:\r\n # initializing bag of words\r\n bag = []\r\n # list of tokenized words for the pattern\r\n pattern_words = doc[0]\r\n # lemmatize each word - create base word, in attempt to represent related words\r\n pattern_words = [lemmatizer.lemmatize(word.lower()) for word in pattern_words]\r\n # create our bag of words array with 1, if word match found in current pattern\r\n for w in words:\r\n bag.append(1) if w in pattern_words else bag.append(0)\r\n\r\n # output is a '0' for each tag and '1' for current tag (for each pattern)\r\n output_row = list(output_empty)\r\n output_row[classes.index(doc[1])] = 1\r\n\r\n training.append([bag, output_row])\r\n# dissasembling words into raw data to analize patterns for each intent list in intents.json\r\n\r\n\r\nrandom.shuffle(training)\r\ntraining = np.array(training)\r\n# create train and test lists. X - patterns, Y - intents\r\ntrain_x = list(training[:,0])\r\ntrain_y = list(training[:,1])\r\nprint(\"Training data created\")\r\n\r\n\r\n# Create model - 3 layers. First layer 128 neurons, second layer 64 neurons and 3rd output layer contains number of neurons\r\n# equal to number of intents to predict output intent with softmax\r\nmodel = Sequential()\r\nmodel.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))\r\nmodel.add(Dropout(0.5))\r\nmodel.add(Dense(64, activation='relu'))\r\nmodel.add(Dropout(0.5))\r\nmodel.add(Dense(len(train_y[0]), activation='softmax'))\r\n#\r\n# creating aspects of the machine learning model\r\n\r\n\r\nsgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\r\nmodel.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\r\n#compiling the model, message is printed when model is trained.\r\n\r\n\r\n#fitting and saving the model\r\nhist = model.fit(np.array(train_x), np.array(train_y), epochs=500, batch_size=5, verbose=1)\r\nmodel.save('chatbot_model.h5', hist)\r\n#sorting model and working around it. 
to be used later\r\nprint(\"model created\")\r\n#end of code\r\n" ]
[ [ "numpy.array" ] ]
thisis-nkul/ML-DL-implementation
[ "235c8c57666efe3666f08ffd047e2df776ea485e" ]
[ "MLlib/loss_func.py" ]
[ "import numpy as np\nfrom MLlib.activations import Sigmoid\nfrom MLlib import Tensor\nfrom MLlib import autograd\nfrom MLlib.utils.misc_utils import unbroadcast\n\n\nclass MeanSquaredError(autograd.Function):\n \"\"\"\n Calculate Mean Squared Error.\n \"\"\"\n\n __slots__ = ()\n\n @staticmethod\n def forward(ctx, prediction, target):\n if not (type(prediction).__name__ == 'Tensor' and\n type(target).__name__ == 'Tensor'):\n\n raise RuntimeError(\"Expected Tensors, got {} and {}. Please use \"\n \".loss() method for non-Tensor data\"\n .format(type(prediction).__name__,\n type(target).__name__))\n\n requires_grad = prediction.requires_grad\n\n batch_size = target.data.shape[0]\n\n out = prediction.data - target.data\n\n if requires_grad:\n ctx.derivative_core = out\n\n out = np.sum(np.power(out, 2)) / (2*batch_size)\n\n output = Tensor(out, requires_grad=requires_grad,\n is_leaf=not requires_grad)\n\n return output\n\n @staticmethod\n def backward(ctx, grad_output):\n derivative = ctx.derivative_core\n\n grad_prediction = (derivative / derivative.shape[0]) * grad_output.data\n\n return Tensor(unbroadcast(grad_prediction, derivative.shape))\n\n @staticmethod\n def loss(X, Y, W):\n \"\"\"\n Calculate loss by mean square method.\n\n PARAMETERS\n ==========\n\n X:ndarray(dtype=float,ndim=1)\n input vector\n Y:ndarray(dtype=float)\n output vector\n W:ndarray(dtype=float)\n Weights\n\n RETURNS\n =======\n\n array of mean squared losses\n \"\"\"\n M = X.shape[0]\n return np.sum((np.dot(X, W).T - Y) ** 2) / (2 * M)\n\n @staticmethod\n def derivative(X, Y, W):\n \"\"\"\n Calculate derivative for mean square method.\n\n PARAMETERS\n ==========\n\n X:ndarray(dtype=float,ndim=1)\n input vector\n Y:ndarray(dtype=float)\n output vector\n W:ndarray(dtype=float)\n Weights\n\n RETURNS\n =======\n\n array of derivates\n \"\"\"\n M = X.shape[0]\n return np.dot((np.dot(X, W).T - Y), X).T / M\n\n\nclass MSELoss(MeanSquaredError):\n pass\n\n\nclass LogarithmicError():\n \"\"\"\n Calculate Logarithmic Error.\n \"\"\"\n\n @staticmethod\n def loss(X, Y, W):\n \"\"\"\n Calculate loss by logarithmic error method.\n\n PARAMETERS\n ==========\n\n X:ndarray(dtype=float,ndim=1)\n input vector\n Y:ndarray(dtype=float)\n output vector\n W:ndarray(dtype=float)\n Weights\n\n RETURNS\n =======\n\n array of logarithmic losses\n \"\"\"\n M = X.shape[0]\n sigmoid = Sigmoid()\n H = sigmoid.activation(np.dot(X, W).T)\n return (1/M)*(np.sum((-Y)*np.log(H)-(1-Y)*np.log(1-H)))\n\n @staticmethod\n def derivative(X, Y, W):\n \"\"\"\n Calculate derivative for logarithmic error method.\n\n PARAMETERS\n ==========\n\n X:ndarray(dtype=float,ndim=1)\n input vector\n Y:ndarray(dtype=float)\n output vector\n W:ndarray(dtype=float)\n Weights\n\n RETURNS\n =======\n\n array of derivates\n \"\"\"\n M = X.shape[0]\n sigmoid = Sigmoid()\n H = sigmoid.activation(np.dot(X, W).T)\n return (1/M)*(np.dot(X.T, (H-Y).T))\n\n\nclass AbsoluteError():\n \"\"\"\n Calculate Absolute Error.\n \"\"\"\n\n @staticmethod\n def loss(X, Y, W):\n \"\"\"\n Calculate loss by absolute error method.\n\n PARAMETERS\n ==========\n\n X:ndarray(dtype=float,ndim=1)\n input vector\n Y:ndarray(dtype=float)\n output vector\n W:ndarray(dtype=float)\n Weights\n\n RETURNS\n =======\n\n array of absolute losses\n \"\"\"\n M = X.shape[0]\n return np.sum(np.absolute(np.dot(X, W).T - Y)) / M\n\n @staticmethod\n def derivative(X, Y, W):\n \"\"\"\n Calculate derivative for absolute error method.\n\n PARAMETERS\n ==========\n\n X:ndarray(dtype=float,ndim=1)\n input vector\n 
Y:ndarray(dtype=float)\n output vector\n W:ndarray(dtype=float)\n Weights\n\n RETURNS\n =======\n\n array of derivates\n \"\"\"\n M = X.shape[0]\n AbsError = (np.dot(X, W).T-Y)\n return np.dot(\n np.divide(\n AbsError,\n np.absolute(AbsError),\n out=np.zeros_like(AbsError),\n where=(np.absolute(AbsError)) != 0),\n X\n ).T/M\n\n\nclass CosineSimilarity():\n \"\"\"\n Calculate Similarity between actual value and similarity value.\n \"\"\"\n\n @staticmethod\n def loss(X, Y, W):\n \"\"\"\n Calculate error by cosine similarity method\n\n PARAMETERS\n ==========\n\n X:ndarray(dtype=float,ndim=1)\n input vector\n Y:ndarray(dtype=float)\n output vector\n W:ndarray(dtype=float)\n Weights\n\n RETURNS\n =======\n\n Percentage of error in the actural value and predicted value\n \"\"\"\n sigmoid = Sigmoid()\n H = sigmoid.activation(np.dot(X, W).T)\n DP = np.sum(np.dot(H, Y))\n S = DP/((np.sum(np.square(H))**(0.5))*(np.sum(np.square(Y))**(0.5)))\n dissimilarity = 1-S\n return dissimilarity*(np.sum(np.square(Y))**(0.5))\n\n\nclass log_cosh:\n\n @staticmethod\n def logcosh_loss(X, Y):\n \"\"\"\n Calculate Error by log cosh method\n\n PARAMETERS\n ==========\n\n X: ndarray(dtype=float,ndim=1)\n Actual values\n Y: ndarray (dtpye=float,ndim=1)\n Predicted values\n\n RETURNS\n =======\n\n Logarithm of the hyperbolic cosine of the prediction error\n \"\"\"\n p = np.cosh(Y - X)\n loss = np.log(p)\n error = np.sum(loss)\n return error\n\n @staticmethod\n def derivative_logcosh(X, Y):\n \"\"\"\n Calculate the derivative of \"log cosh\" loss method\n\n PARAMETERS\n ==========\n\n X: ndarray(dtype=float,ndim=1)\n Actual values\n Y: ndarray (dtpye=float,ndim=1)\n Predicted values\n\n RETURNS\n =======\n\n Derivative of Log cosh prediction error\n \"\"\"\n t = np.tanh(Y-X)\n derivative = np.sum(t)\n return derivative\n\n\nclass Huber():\n \"\"\"\n Calculate Huber loss.\n \"\"\"\n\n @staticmethod\n def loss(X, Y, delta=1.0):\n\n \"\"\"\n Calculate loss by Huber method.\n\n PARAMETERS\n ==========\n\n X:ndarray(dtype=float,ndim=1)\n input vector\n Y:ndarray(dtype=float)\n output vector\n\n RETURNS\n =======\n\n array of Huber losses\n \"\"\"\n return np.where(np.abs(X-Y) <= delta,\n 0.5*(X-Y)**2,\n delta*(np.abs(X-Y)-0.5*delta))\n\n @staticmethod\n def derivative(X, Y, delta=1.0):\n\n \"\"\"\n Calculate derivative for Huber method.\n\n PARAMETERS\n ==========\n\n X:ndarray(dtype=float,ndim=1)\n input vector\n Y:ndarray(dtype=float)\n output vector\n\n RETURNS\n =======\n\n array of derivates\n \"\"\"\n\n return np.where(np.abs(X - Y) <= delta, X - Y, delta * np.sign(X - Y))\n" ]
[ [ "numpy.square", "numpy.zeros_like", "numpy.dot", "numpy.log", "numpy.cosh", "numpy.sum", "numpy.tanh", "numpy.sign", "numpy.power", "numpy.abs", "numpy.absolute" ] ]
theoseo/Online-Realtime-Action-Recognition-based-on-OpenPose
[ "19f51a4b083943d550c9573307be21880bd5b920" ]
[ "Pose/pose_visualizer.py" ]
[ "# -*- coding: UTF-8 -*-\nimport cv2 as cv\nimport numpy as np\nimport tensorflow as tf\nfrom .coco_format import CocoPart, CocoColors, CocoPairsRender\nfrom .pose_estimator import estimate\n\n\nclass TfPoseVisualizer:\n # the thickness of showing skeleton\n Thickness_ratio = 2\n\n def __init__(self, graph_path, target_size=(368, 368)):\n self.target_size = target_size\n # load graph\n with tf.gfile.GFile(graph_path, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n\n self.graph = tf.get_default_graph()\n tf.import_graph_def(graph_def, name='TfPoseEstimator')\n self.persistent_sess = tf.Session(graph=self.graph)\n\n self.tensor_image = self.graph.get_tensor_by_name('TfPoseEstimator/image:0')\n self.tensor_output = self.graph.get_tensor_by_name('TfPoseEstimator/Openpose/concat_stage7:0')\n self.heatMat = self.pafMat = None\n\n @staticmethod\n def draw_pose_rgb(npimg, humans, imgcopy=False):\n if imgcopy:\n npimg = np.copy(npimg)\n image_h, image_w = npimg.shape[:2]\n joints, bboxes, xcenter = [], [], []\n\n # for record and get dataset\n record_joints_norm = []\n\n for human in humans:\n xs, ys, centers = [], [], {}\n # 将所有关节点绘制到图像上\n for i in range(CocoPart.Background.value):\n if i not in human.body_parts.keys():\n\n # 对于缺失的数据,补0\n record_joints_norm += [0.0, 0.0]\n continue\n\n body_part = human.body_parts[i]\n center_x = body_part.x * image_w + 0.5\n center_y = body_part.y * image_h + 0.5\n center = (int(center_x), int(center_y))\n centers[i] = center\n\n record_joints_norm += [round(center_x/1280, 2), round(center_y/720, 2)]\n\n xs.append(center[0])\n ys.append(center[1])\n # 绘制关节点\n cv.circle(npimg, center, 3, CocoColors[i], thickness=TfPoseVisualizer.Thickness_ratio * 2,\n lineType=8, shift=0)\n # 将属于同一人的关节点按照各个部位相连\n for pair_order, pair in enumerate(CocoPairsRender):\n if pair[0] not in human.body_parts.keys() or pair[1] not in human.body_parts.keys():\n continue\n cv.line(npimg, centers[pair[0]], centers[pair[1]], CocoColors[pair_order],\n thickness=TfPoseVisualizer.Thickness_ratio, lineType=8, shift=0)\n\n # 根据每个人的关节点信息生成ROI区域\n tl_x = min(xs)\n tl_y = min(ys)\n width = max(xs) - min(xs)\n height = max(ys) - min(ys)\n bboxes.append([tl_x, tl_y, width, height])\n\n # 记录每一帧的所有关节点\n joints.append(centers)\n\n # 记录coco的1号点作为xcenter\n if 1 in centers:\n xcenter.append(centers[1][0])\n return npimg, joints, bboxes, xcenter, record_joints_norm\n\n @staticmethod\n def draw_pose_only(npimg, humans):\n image_h, image_w = npimg.shape[:2]\n back_ground = np.ones((image_h, image_w), dtype=np.uint8)\n back_ground = cv.cvtColor(back_ground, cv.COLOR_GRAY2BGR)\n back_ground[:, :, :] = 0 # Black background\n result = TfPoseVisualizer.draw_pose_rgb(back_ground, humans)\n return result\n\n def inference(self, npimg):\n if npimg is None:\n raise Exception('The frame does not exist.')\n\n rois = []\n infos = []\n # _get_scaled_img\n if npimg.shape[:2] != (self.target_size[1], self.target_size[0]):\n # resize\n npimg = cv.resize(npimg, self.target_size)\n rois.extend([npimg])\n infos.extend([(0.0, 0.0, 1.0, 1.0)])\n\n output = self.persistent_sess.run(self.tensor_output, feed_dict={self.tensor_image: rois})\n\n heat_mats = output[:, :, :, :19]\n paf_mats = output[:, :, :, 19:]\n\n output_h, output_w = output.shape[1:3]\n max_ratio_w = max_ratio_h = 10000.0\n for info in infos:\n max_ratio_w = min(max_ratio_w, info[2])\n max_ratio_h = min(max_ratio_h, info[3])\n mat_w, mat_h = int(output_w / max_ratio_w), int(output_h / max_ratio_h)\n\n resized_heat_mat = 
np.zeros((mat_h, mat_w, 19), dtype=np.float32)\n resized_paf_mat = np.zeros((mat_h, mat_w, 38), dtype=np.float32)\n resized_cnt_mat = np.zeros((mat_h, mat_w, 1), dtype=np.float32)\n resized_cnt_mat += 1e-12\n\n for heatMat, pafMat, info in zip(heat_mats, paf_mats, infos):\n w, h = int(info[2] * mat_w), int(info[3] * mat_h)\n heatMat = cv.resize(heatMat, (w, h))\n pafMat = cv.resize(pafMat, (w, h))\n x, y = int(info[0] * mat_w), int(info[1] * mat_h)\n # add up\n resized_heat_mat[max(0, y):y + h, max(0, x):x + w, :] = np.maximum(\n resized_heat_mat[max(0, y):y + h, max(0, x):x + w, :], heatMat[max(0, -y):, max(0, -x):, :])\n resized_paf_mat[max(0, y):y + h, max(0, x):x + w, :] += pafMat[max(0, -y):, max(0, -x):, :]\n resized_cnt_mat[max(0, y):y + h, max(0, x):x + w, :] += 1\n\n self.heatMat = resized_heat_mat\n self.pafMat = resized_paf_mat / (np.log(resized_cnt_mat) + 1)\n\n humans = estimate(self.heatMat, self.pafMat)\n return humans\n" ]
[ [ "numpy.zeros", "tensorflow.get_default_graph", "numpy.log", "tensorflow.GraphDef", "numpy.ones", "tensorflow.Session", "tensorflow.import_graph_def", "numpy.copy", "tensorflow.gfile.GFile" ] ]
nitrogl/CoSimul_Platform
[ "41848210deae900a69c5be61e84eeab645e15fc3" ]
[ "SimDSE/analysis.py" ]
[ "'''\nCreated on Jul. 23, 2019\nData analysis of the simulation results.\n\n@file analysis.py\n@author Evandro de Souza\n@date 2019.07.23 \n@version 0.1\n@company University of Alberta - Computing Science\n'''\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport sys\n\n\nphasor = 'Phasor_1_001'\nsmartmeter = 'Smartmeter_6_00061'\nestimator = 'Estimator_1'\n\n#--- select dataset file\nif len(sys.argv) > 1:\n storename = sys.argv[1]\nelse:\n storename = 'data/CollectorStore_Small.hd5'\n\n#--- Load data\nstore = pd.HDFStore(storename)\ndf = store['Collector']\nstore.close()\n\n#--- select sets\ndf_sets = []\ndf_names = []\nmax_t = 0\nfor col in df:\n df_sets.append(df[col])\n df_names.append(col)\n if (max_t < max(df[col]['t'])): max_t = max(df[col]['t'])\n\n\nfig, axs = plt.subplots(7)\nfig.set_size_inches(11, 18.5)\nplt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=None)\nplt.xlabel('Time (ms)')\n\nfor i in range(len(df_sets)):\n if ((df_names[i]).find(phasor) != -1):\n #--- Plot Voltage Magnitude\n axs[0].set_title(df_names[i] + \" - Voltage Magnitude - 3 Phases\")\n VA = []\n VB = []\n VC = []\n keys = (df_sets[i]['v'][0]).keys()\n for j in range(len(df_sets[i]['v'])):\n VA.append(df_sets[i]['v'][j]['VA'][0])\n VB.append(df_sets[i]['v'][j]['VB'][0])\n VC.append(df_sets[i]['v'][j]['VC'][0])\n axs[0].plot(df_sets[i]['t'], VA, '.')\n axs[0].plot(df_sets[i]['t'], VB, '.')\n axs[0].plot(df_sets[i]['t'], VC, '.')\n axs[0].set_xlim(-5, max_t+5)\n axs[0].grid(b=True, which='both', axis='both')\n #--- Plot Voltage Angle\n axs[1].set_title(df_names[i] + \" - Voltage Angle - 3 Phases\")\n VA = []\n VB = []\n VC = []\n keys = (df_sets[i]['v'][0]).keys()\n for j in range(len(df_sets[i]['v'])):\n VA.append(df_sets[i]['v'][j]['VA'][1])\n VB.append(df_sets[i]['v'][j]['VB'][1])\n VC.append(df_sets[i]['v'][j]['VC'][1])\n axs[1].plot(df_sets[i]['t'], VA, '.')\n axs[1].plot(df_sets[i]['t'], VB, '.')\n axs[1].plot(df_sets[i]['t'], VC, '.')\n axs[1].set_xlim(-5, max_t+5)\n axs[1].grid(b=True, which='both', axis='both') \n \n #--- Plot Current Magnitude\n axs[2].set_title(df_names[i] + \" - Current Magnitude - 3 Phases\")\n IA = []\n IB = []\n IC = []\n keys = (df_sets[i]['v'][0]).keys()\n for j in range(len(df_sets[i]['v'])):\n IA.append(df_sets[i]['v'][j]['IA'][0])\n IB.append(df_sets[i]['v'][j]['IB'][0])\n IC.append(df_sets[i]['v'][j]['IC'][0])\n axs[2].plot(df_sets[i]['t'], IA, '.')\n axs[2].plot(df_sets[i]['t'], IB, '.')\n axs[2].plot(df_sets[i]['t'], IC, '.')\n axs[2].set_xlim(-5, max_t+5)\n axs[2].grid(b=True, which='both', axis='both') \n \n #--- Plot Current Angle\n axs[3].set_title(df_names[i] + \" - Current Angle - 3 Phases\")\n IA = []\n IB = []\n IC = []\n keys = (df_sets[i]['v'][0]).keys()\n for j in range(len(df_sets[i]['v'])):\n IA.append(df_sets[i]['v'][j]['IA'][1])\n IB.append(df_sets[i]['v'][j]['IB'][1])\n IC.append(df_sets[i]['v'][j]['IC'][1])\n axs[3].plot(df_sets[i]['t'], IA, '.')\n axs[3].plot(df_sets[i]['t'], IB, '.')\n axs[3].plot(df_sets[i]['t'], IC, '.')\n axs[3].set_xlim(-5, max_t+5)\n axs[3].grid(b=True, which='both', axis='both') \n \n if ((df_names[i]).find(smartmeter) != -1):\n #--- Plot Real Power\n gr_title = df_names[i] + \" - SM Real Power P - Phases:\"\n PA = []\n PB = []\n PC = []\n keys = (df_sets[i]['v'][0]).keys()\n for j in range(len(df_sets[i]['v'])):\n if 'SPA' in (df_sets[i]['v'][j]).keys():\n PA.append(df_sets[i]['v'][j]['SPA'])\n if 'SPB' in (df_sets[i]['v'][j]).keys(): \n PB.append(df_sets[i]['v'][j]['SPB'])\n if 
'SPC' in (df_sets[i]['v'][j]).keys():\n PC.append(df_sets[i]['v'][j]['SPC'])\n if(len(df_sets[i]['t']) == len(PA)):\n if 'SPA' in (df_sets[i]['v'][j]).keys():\n axs[4].plot(df_sets[i]['t'], PA, '.')\n if(len(df_sets[i]['t']) == len(PB)): \n if 'SPB' in (df_sets[i]['v'][j]).keys(): \n axs[4].plot(df_sets[i]['t'], PB, '.')\n if(len(df_sets[i]['t']) == len(PC)):\n if 'SPC' in (df_sets[i]['v'][j]).keys():\n axs[4].plot(df_sets[i]['t'], PC, '.')\n \n if (len(PA) > 0):\n gr_title = gr_title + ' A'\n if (len(PB) > 0):\n gr_title = gr_title + ' B'\n if (len(PC) > 0):\n gr_title = gr_title + ' C' \n axs[4].set_title(gr_title) \n axs[4].set_xlim(-5, max_t+5)\n axs[4].grid(b=True, which='both', axis='both') \n \n #--- Plot Voltage Magnitude\n gr_title = df_names[i] + \" - SM Voltage Magnitude - Phases:\"\n VA = []\n VB = []\n VC = []\n keys = (df_sets[i]['v'][0]).keys()\n for j in range(len(df_sets[i]['v'])):\n if 'VA' in (df_sets[i]['v'][j]).keys():\n VA.append(df_sets[i]['v'][j]['VA'])\n if 'VB' in (df_sets[i]['v'][j]).keys(): \n VB.append(df_sets[i]['v'][j]['VB'])\n if 'VC' in (df_sets[i]['v'][j]).keys(): \n VC.append(df_sets[i]['v'][j]['VC'])\n if 'VA' in (df_sets[i]['v'][j]).keys():\n axs[5].plot(df_sets[i]['t'], VA, '.')\n if 'VB' in (df_sets[i]['v'][j]).keys():\n axs[5].plot(df_sets[i]['t'], VB, '.')\n if 'VC' in (df_sets[i]['v'][j]).keys():\n axs[5].plot(df_sets[i]['t'], VC, '.')\n \n if (len(VA) > 0):\n gr_title = gr_title + ' A'\n if (len(VB) > 0):\n gr_title = gr_title + ' B'\n if (len(VC) > 0):\n gr_title = gr_title + ' C' \n axs[5].set_title(gr_title)\n axs[5].set_xlim(-5, max_t+5)\n axs[5].grid(b=True, which='both', axis='both') \n\n #--- Plot Amount of received messages in aperiod of time\n if ((df_names[i]).find(estimator) != -1):\n axs[6].set_title(df_names[i])\n axs[6].plot(df_sets[i]['t'], df_sets[i]['v'], '.')\n axs[6].set_xlim(-5, max_t+5)\n axs[6].grid(b=True, which='both', axis='both')\n \n\nplt.tight_layout()\nfig.savefig('data/Monitor_Small.png', dpi=100)" ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.subplots", "matplotlib.pyplot.tight_layout", "pandas.HDFStore", "matplotlib.pyplot.subplots_adjust" ] ]
guillefix/neural-tangents
[ "f22fd3c6b0ee3757b34d2beefe4a7ee4a6cc82d6" ]
[ "examples/function_space.py" ]
[ "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"An example comparing training a neural network with the NTK dynamics.\n\nIn this example, we train a neural network on a small subset of MNIST using an\nMSE loss and SGD. We compare this training with the analytic function space\nprediction using the NTK. Data is loaded using tensorflow datasets.\n\"\"\"\n\nfrom absl import app\nfrom absl import flags\nfrom jax import random\nfrom jax.api import grad\nfrom jax.api import jit\nfrom jax.experimental import optimizers\nimport jax.numpy as np\nimport neural_tangents as nt\nfrom neural_tangents import stax\nfrom examples import datasets\nfrom examples import util\n\n\nflags.DEFINE_float('learning_rate', 1.0,\n 'Learning rate to use during training.')\nflags.DEFINE_integer('train_size', 128,\n 'Dataset size to use for training.')\nflags.DEFINE_integer('test_size', 128,\n 'Dataset size to use for testing.')\nflags.DEFINE_float('train_time', 1000.0,\n 'Continuous time denoting duration of training.')\n\n\nFLAGS = flags.FLAGS\n\nFLAGS = {}\nFLAGS[\"learning_rate\"] = 1.0\nFLAGS[\"train_size\"] = 128\nFLAGS[\"test_size\"] = 128\nFLAGS[\"train_time\"] = 1000.0\nclass Struct:\n def __init__(self, **entries):\n self.__dict__.update(entries)\nFLAGS=Struct(**FLAGS)\n\n#%%\n\ndef main(unused_argv):\n # Build data pipelines.\n print('Loading data.')\n x_train, y_train, x_test, y_test = \\\n datasets.get_dataset('mnist', FLAGS.train_size, FLAGS.test_size)\n\n # Build the network\n init_fn, apply_fn, _ = stax.serial(\n stax.Dense(2048, 1., 0.05),\n # stax.Erf(),\n stax.Relu(),\n stax.Dense(2048, 1., 0.05),\n # stax.Erf(),\n stax.Relu(),\n stax.Dense(10, 1., 0.05))\n\n key = random.PRNGKey(0)\n _, params = init_fn(key, (-1, 784))\n\n # params\n\n # Create and initialize an optimizer.\n opt_init, opt_apply, get_params = optimizers.sgd(FLAGS.learning_rate)\n state = opt_init(params)\n # state\n\n\n # Create an mse loss function and a gradient function.\n loss = lambda fx, y_hat: 0.5 * np.mean((fx - y_hat) ** 2)\n grad_loss = jit(grad(lambda params, x, y: loss(apply_fn(params, x), y)))\n\n # Create an MSE predictor to solve the NTK equation in function space.\n ntk = nt.batch(nt.empirical_ntk_fn(apply_fn), batch_size=4, device_count=0)\n g_dd = ntk(x_train, None, params)\n g_td = ntk(x_test, x_train, params)\n predictor = nt.predict.gradient_descent_mse(g_dd, y_train, g_td)\n # g_dd.shape\n\n m = FLAGS.train_size\n print(m)\n n = m*10\n m_test = FLAGS.test_size\n n_test = m_test*10\n # g_td.shape\n # predictor\n # g_dd\n # type(g_dd)\n # g_dd.shape\n theta = g_dd.transpose((0,2,1,3)).reshape(n,n)\n theta_test = ntk(x_test, None, params).transpose((0,2,1,3)).reshape(n_test,n_test)\n theta_tilde = g_td.transpose((0,2,1,3)).reshape(n_test,n)\n #NNGP\n K = nt.empirical_nngp_fn(apply_fn)(x_train,None,params)\n K = np.kron(theta,np.eye(10))\n K_test = nt.empirical_nngp_fn(apply_fn)(x_test,None,params)\n K_test = np.kron(theta_test,np.eye(10))\n K_tilde = 
nt.empirical_nngp_fn(apply_fn)(x_test,x_train,params)\n K_tilde = np.kron(theta_tilde,np.eye(10))\n\n decay_matrix = np.eye(n)-scipy.linalg.expm(-t*theta)\n Sigma = K + np.matmul(decay_matrix, np.matmul(K, np.matmul(np.linalg.inv(theta), np.matmul(decay_matrix, theta))) - 2*K)\n\n # K.shape\n theta\n # alpha = np.matmul(np.linalg.inv(K),np.matmul(theta,np.linalg.inv(theta)))\n # y_train\n # alpha = np.matmul(np.linalg.inv(K), y_train.reshape(1280))\n # Sigma = K + np.matmul()\n # K = theta\n sigma_noise = 1.0\n Y = y_train.reshape(n)\n alpha = np.matmul(np.linalg.inv(np.eye(n)*(sigma_noise**2)+K),Y)\n # cov = np.linalg.inv(np.linalg.inv(K)+np.eye(n)/(sigma_noise**2))\n # covi = np.linalg.inv(cov)\n # covi = np.linalg.inv(K)+np.eye(n)/(sigma_noise**2)\n # print(covi)\n # np.linalg.det(K)\n eigs = np.linalg.eigh(K)[0]\n logdetcoviK = np.sum(np.log((eigs+sigma_noise**2) /sigma_noise**2))\n # coviK = np.matmul(covi,K)\n # coviK = np.eye(n) + K/(sigma_noise**2)\n # coviK\n # covi\n # np.linalg.det()\n # KL = 0.5*np.log(np.linalg.det(coviK)) + 0.5*np.trace(np.linalg.inv(coviK)) + 0.5*np.matmul(alpha.T,np.matmul(K,alpha)) - n/2\n KL = 0.5*logdetcoviK + 0.5*np.trace(np.linalg.inv(coviK)) + 0.5*np.matmul(alpha.T,np.matmul(K,alpha)) - n/2\n print(KL)\n\n delta = 2**-10\n bound = (KL+2*np.log(m)+1-np.log(delta))/m\n bound = 1-np.exp(-bound)\n bound\n print(\"bound\", bound)\n\n import numpy\n bigK = numpy.zeros((n+n_test,n+n_test))\n bigK\n bigK[0:n,0:n] = K\n bigK[0:n,n:] = theta_tilde.T\n bigK[n:,0:n] = theta_tilde\n bigK[n:,n:] = theta_test\n init_ntk_f = numpy.random.multivariate_normal(np.zeros(n+n_test),bigK)\n fx_train = init_ntk_f[:n].reshape(m,10)\n fx_test = init_ntk_f[n:].reshape(m_test,10)\n\n # Get initial values of the network in function space.\n # fx_train = apply_fn(params, x_train)\n # fx_test = apply_fn(params, x_test)\n\n # Train the network.\n train_steps = int(FLAGS.train_time // FLAGS.learning_rate)\n print('Training for {} steps'.format(train_steps))\n\n for i in range(train_steps):\n params = get_params(state)\n state = opt_apply(i, grad_loss(params, x_train, y_train), state)\n\n # Get predictions from analytic computation.\n print('Computing analytic prediction.')\n # fx_train, fx_test = predictor(FLAGS.train_time, fx_train, fx_test)\n fx_train, fx_test = predictor(FLAGS.train_time, fx_train, fx_test)\n\n # Print out summary data comparing the linear / nonlinear model.\n util.print_summary('train', y_train, apply_fn(params, x_train), fx_train, loss)\n util.print_summary('test', y_test, apply_fn(params, x_test), fx_test, loss)\n\nif __name__ == '__main__':\n app.run(main)\n" ]
[ [ "numpy.zeros" ] ]
Praneethp09/Geo_Test
[ "746323b43e651f1357f27fc2c0c7f5cdd89d5a1a" ]
[ "trimmed_match/design/tests/matched_pairs_rmse_test.py" ]
[ "# Copyright 2020 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n# Lint: python3\n\"\"\"Tests for ads.amt.geox.trimmed_match.design.\"\"\"\nfrom absl.testing import parameterized\nimport pandas as pd\n\nfrom trimmed_match.design import common_classes\nfrom trimmed_match.design import matched_pairs_rmse\nimport unittest\n\nGeoLevelData = common_classes.GeoLevelData\nGeoXType = common_classes.GeoXType\nGeoLevelPotentialOutcomes = common_classes.GeoLevelPotentialOutcomes\nMatchedPairsRMSE = matched_pairs_rmse.MatchedPairsRMSE\n\n\nclass ConstructPotentialOutcomesTest(unittest.TestCase):\n\n def setUp(self):\n super().setUp()\n self._geox_eval_data = pd.DataFrame({\n \"geo\": [1, 2],\n \"response\": [10, 20],\n \"spend\": [10, 20]\n })\n self._budget = 30\n self._hypothesized_iroas = 1\n\n def testGoDark(self):\n potential_outcomes = matched_pairs_rmse._construct_potential_outcomes(\n GeoXType.GO_DARK, self._geox_eval_data,\n (self._budget * 2.0 / self._geox_eval_data.spend.sum()),\n self._hypothesized_iroas)\n expected = {\n 1:\n GeoLevelPotentialOutcomes(\n controlled=GeoLevelData(geo=1, response=10, spend=20),\n treated=GeoLevelData(geo=1, response=0, spend=0)),\n 2:\n GeoLevelPotentialOutcomes(\n controlled=GeoLevelData(geo=2, response=20, spend=40),\n treated=GeoLevelData(geo=2, response=0, spend=0))\n }\n self.assertDictEqual(expected, potential_outcomes)\n\n def testGoDarkWithHeavyUpControl(self):\n potential_outcomes = matched_pairs_rmse._construct_potential_outcomes(\n GeoXType.GO_DARK_TREATMENT_NOT_BAU_CONTROL, self._geox_eval_data,\n (self._budget * 2.0 / self._geox_eval_data.spend.sum()),\n self._hypothesized_iroas)\n expected = {\n 1:\n GeoLevelPotentialOutcomes(\n controlled=GeoLevelData(geo=1, response=20, spend=20),\n treated=GeoLevelData(geo=1, response=0, spend=0)),\n 2:\n GeoLevelPotentialOutcomes(\n controlled=GeoLevelData(geo=2, response=40, spend=40),\n treated=GeoLevelData(geo=2, response=0, spend=0))\n }\n self.assertDictEqual(expected, potential_outcomes)\n\n def testHeavyUp(self):\n potential_outcomes = matched_pairs_rmse._construct_potential_outcomes(\n GeoXType.HEAVY_UP, self._geox_eval_data,\n (self._budget * 2.0 / self._geox_eval_data.spend.sum()),\n self._hypothesized_iroas)\n expected = {\n 1:\n GeoLevelPotentialOutcomes(\n controlled=GeoLevelData(geo=1, response=10, spend=10),\n treated=GeoLevelData(geo=1, response=30.0, spend=30.0)),\n 2:\n GeoLevelPotentialOutcomes(\n controlled=GeoLevelData(geo=2, response=20, spend=20),\n treated=GeoLevelData(geo=2, response=60.0, spend=60.0))\n }\n self.assertDictEqual(expected, potential_outcomes)\n\n def testHeavyDown(self):\n potential_outcomes = matched_pairs_rmse._construct_potential_outcomes(\n GeoXType.HEAVY_DOWN, self._geox_eval_data, self._budget,\n self._hypothesized_iroas)\n expected = {\n 1:\n GeoLevelPotentialOutcomes(\n controlled=GeoLevelData(geo=1, response=10, spend=10),\n treated=GeoLevelData(geo=1, 
response=0.0, spend=0.0)),\n 2:\n GeoLevelPotentialOutcomes(\n controlled=GeoLevelData(geo=2, response=20, spend=20),\n treated=GeoLevelData(geo=2, response=0.0, spend=0.0))\n }\n self.assertDictEqual(expected, potential_outcomes)\n\n def testHoldBack(self):\n potential_outcomes = matched_pairs_rmse._construct_potential_outcomes(\n GeoXType.HOLD_BACK, self._geox_eval_data,\n (self._budget * 2.0 / self._geox_eval_data.spend.sum()),\n self._hypothesized_iroas)\n expected = {\n 1:\n GeoLevelPotentialOutcomes(\n controlled=GeoLevelData(geo=1, response=10, spend=0.0),\n treated=GeoLevelData(geo=1, response=30.0, spend=20.0)),\n 2:\n GeoLevelPotentialOutcomes(\n controlled=GeoLevelData(geo=2, response=20, spend=0.0),\n treated=GeoLevelData(geo=2, response=60.0, spend=40.0))\n }\n self.assertDictEqual(expected, potential_outcomes)\n\n def testUnknownGeoXType(self):\n \"\"\"Checks an error is raised if the GeoX type is unknown.\"\"\"\n with self.assertRaisesRegex(ValueError, \"Unknown geox_type: \\'UNKNOWN\\'\"):\n matched_pairs_rmse._construct_potential_outcomes(\n \"UNKNOWN\", self._geox_eval_data,\n (self._budget * 2.0 / self._geox_eval_data.spend.sum()),\n self._hypothesized_iroas)\n\n\nclass IsPairedTest(unittest.TestCase):\n\n def setUp(self):\n super().setUp()\n self._ordered_list_one = [1, 1, 2, 2, 3, 3]\n self._ordered_list_two = [1, 2, 2, 2, 3, 3]\n\n def testIsPaired(self):\n self.assertTrue(matched_pairs_rmse._is_paired(self._ordered_list_one))\n\n def testIsNotPaired(self):\n self.assertFalse(matched_pairs_rmse._is_paired(self._ordered_list_two))\n\n\nclass MatchedPairsRMSETest(unittest.TestCase):\n\n def setUp(self):\n super().setUp()\n self._geo_pairs_eval_data = pd.DataFrame({\n \"geo\": [1, 2, 3, 4],\n \"pair\": [1, 1, 2, 2],\n \"response\": [10, 20, 30, 40],\n \"spend\": [10, 20, 30, 40]\n })\n self._perfect_geo_pairs_eval_data = pd.DataFrame({\n \"geo\": [1, 2, 3, 4],\n \"pair\": [1, 1, 2, 2],\n \"response\": [10, 10, 30, 30],\n \"spend\": [10, 10, 30, 30]\n })\n self._budget = 10\n self._hypothesized_iroas = 1\n\n # TODO(b/147698415): adding a more complex test example here\n\n def AssertEqualGeoLevelData(self, outcome1: GeoLevelData,\n outcome2: GeoLevelData):\n \"\"\"Checks whether two GeoLevelDatas are equal.\"\"\"\n self.assertEqual(outcome1.geo, outcome2.geo)\n self.assertEqual(outcome1.response, outcome2.response)\n self.assertEqual(outcome1.spend, outcome2.spend)\n\n def testHypothesizedIroasNegative(self):\n \"\"\"Checks an error is raised if the hypothesized iROAS is negative.\"\"\"\n with self.assertRaisesRegex(ValueError, \"iROAS must be positive, got -1.0\"):\n MatchedPairsRMSE(GeoXType.GO_DARK, self._geo_pairs_eval_data,\n self._budget, -1.0)\n\n def testGeosNotUnique(self):\n \"\"\"Checks an error is raised if geos are duplicated.\"\"\"\n geo_pairs_eval_data = self._geo_pairs_eval_data.copy()\n geo_pairs_eval_data.loc[geo_pairs_eval_data[\"geo\"] == 3, \"geo\"] = 1\n with self.assertRaisesRegex(ValueError,\n \"Geos are not unique in geo_pairs_eval_data\"):\n MatchedPairsRMSE(GeoXType.GO_DARK, geo_pairs_eval_data, self._budget,\n self._hypothesized_iroas)\n\n def testGeosNotPairedProperly(self):\n \"\"\"Checks an error is raised if geos are not paired properly.\"\"\"\n geo_pairs_eval_data = self._geo_pairs_eval_data.copy()\n geo_pairs_eval_data.loc[geo_pairs_eval_data[\"geo\"] == 3, \"pair\"] = 1\n with self.assertRaisesRegex(\n KeyError, \"Geos in geo_pairs_eval_data are not paired properly\"):\n MatchedPairsRMSE(GeoXType.GO_DARK, geo_pairs_eval_data, 
self._budget,\n self._hypothesized_iroas)\n\n def testSimulateGeoXDataRandomization(self):\n \"\"\"Checks randomization within the pair.\"\"\"\n mpr = MatchedPairsRMSE(GeoXType.GO_DARK, self._geo_pairs_eval_data,\n self._budget, self._hypothesized_iroas)\n geox_data = mpr._simulate_geox_data(0)\n for pair, value in geox_data.items():\n expected = mpr._paired_geos[pair].values()\n self.assertSetEqual(\n set(expected), set([value.controlled.geo, value.treated.geo]))\n\n def testSimulatedGeoXDataValue(self):\n \"\"\"Checks the data accuracy.\"\"\"\n for geox_type in GeoXType:\n if geox_type == GeoXType.CONTROL:\n continue\n mpr = MatchedPairsRMSE(geox_type, self._geo_pairs_eval_data, self._budget,\n self._hypothesized_iroas)\n geox_data = mpr._simulate_geox_data(0)\n for _, value in geox_data.items():\n treatment_geo = value.treated.geo\n control_geo = value.controlled.geo\n treatment_geo_outcome = mpr._potential_outcomes[treatment_geo].treated\n control_geo_outcome = mpr._potential_outcomes[control_geo].controlled\n self.AssertEqualGeoLevelData(treatment_geo_outcome, value.treated)\n self.AssertEqualGeoLevelData(control_geo_outcome, value.controlled)\n\n def testReportValueError(self):\n mpr = MatchedPairsRMSE(\n GeoXType.HOLD_BACK,\n self._geo_pairs_eval_data,\n self._budget,\n self._hypothesized_iroas,\n base_seed=1000)\n\n @parameterized.parameters((-0.1, -0.2), (0.5, 0.1), (0.25, 0.3))\n def _(self, max_trim_rate, trim_rate):\n with self.assertRaises(ValueError):\n mpr.report(1, max_trim_rate, trim_rate)\n\n def testReportPerfectiROAS(self):\n \"\"\"Checks the calculation with zero RMSE.\"\"\"\n for geox_type in GeoXType:\n if geox_type in [GeoXType.HOLD_BACK, GeoXType.CONTROL, GeoXType.GO_DARK]:\n continue\n mpr = MatchedPairsRMSE(\n geox_type,\n self._geo_pairs_eval_data,\n self._budget,\n self._hypothesized_iroas,\n base_seed=1000)\n (report, _) = mpr.report(num_simulations=100, trim_rate=0.0)\n self.assertEqual(0.0, report)\n\n def testReportPerfectPairs(self):\n \"\"\"Checks the calculation with perfect pairs.\"\"\"\n for geox_type in GeoXType:\n if geox_type == GeoXType.CONTROL:\n continue\n mpr = MatchedPairsRMSE(\n geox_type,\n self._perfect_geo_pairs_eval_data,\n self._budget,\n 0.0,\n base_seed=1000)\n report, _ = mpr.report(num_simulations=100, trim_rate=0.0)\n self.assertEqual(0.0, report)\n\n def testReportNoisy(self):\n \"\"\"Checks the calculation with nonzero RMSE.\"\"\"\n mpr = MatchedPairsRMSE(\n GeoXType.HOLD_BACK,\n self._geo_pairs_eval_data,\n self._budget,\n self._hypothesized_iroas,\n base_seed=100000)\n (report, _) = mpr.report(num_simulations=100, trim_rate=0.0)\n self.assertAlmostEqual(1.5, report, delta=0.1)\n\n def testReportNoisyDifferentGeoOrder(self):\n \"\"\"Checks the calculation with nonzero RMSE when geo_pairs_eval_data order is changed.\"\"\"\n mpr = MatchedPairsRMSE(\n GeoXType.HOLD_BACK,\n self._geo_pairs_eval_data,\n self._budget,\n self._hypothesized_iroas,\n base_seed=100000)\n (report, _) = mpr.report(num_simulations=100)\n mpr_sorted = MatchedPairsRMSE(\n GeoXType.HOLD_BACK,\n self._geo_pairs_eval_data.sort_values(\n by=[\"pair\", \"geo\"], ascending=[True, False]),\n self._budget,\n self._hypothesized_iroas,\n base_seed=100000)\n (report_sorted, _) = mpr_sorted.report(num_simulations=100)\n\n self.assertAlmostEqual(\n abs(report - report_sorted) / report_sorted, 0, delta=0.00001)\n\n def testReportTrimmedPairs(self):\n \"\"\"Checks the reported trimmed pairs in a simulation.\"\"\"\n dataframe = pd.DataFrame({\n \"geo\": [1, 2, 3, 4, 5, 6, 7, 
8],\n \"response\": [10, 11, 20, 30, 30, 33, 40, 48],\n \"spend\": [1.0, 2.0, 3.0, 7.0, 3.0, 5.0, 4.0, 9.0],\n \"pair\": [1, 1, 2, 2, 3, 3, 4, 4],\n })\n base_seed = 1000\n trimmed_pairs = {\n GeoXType.GO_DARK: [2, 3],\n GeoXType.HOLD_BACK: [2, 3],\n GeoXType.HEAVY_UP: [3, 4],\n GeoXType.HEAVY_DOWN: [3, 4],\n GeoXType.GO_DARK_TREATMENT_NOT_BAU_CONTROL: [2, 3],\n }\n for geox_type in GeoXType:\n if geox_type == GeoXType.CONTROL:\n continue\n mpr = MatchedPairsRMSE(\n geox_type=geox_type,\n geo_pairs_eval_data=dataframe,\n budget=1.0,\n hypothesized_iroas=0.0,\n base_seed=base_seed)\n _, report = mpr.report(num_simulations=1, trim_rate=0.0)\n self.assertFalse(report.trimmed_pairs.values[0])\n _, report = mpr.report(\n num_simulations=1, trim_rate=0.25, max_trim_rate=0.25)\n\n self.assertCountEqual(report.trimmed_pairs.values[0],\n trimmed_pairs[geox_type])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "pandas.DataFrame" ] ]
mcclow12/chatbot
[ "63d3ca2102d30e9af7b4e4e8a43af3041e578ceb" ]
[ "deploy/recommender_app/app/recommender.py" ]
[ "from heapq import nlargest\n\nimport numpy as np\nfrom pyxdameraulevenshtein import damerau_levenshtein_distance\n#\nimport pickle\n#\n\n\nclass Recommender:\n def __init__(self):\n\n self.V = np.load('./V.npy')\n self.title_to_id = pickle.load(open('title_to_id.p', 'rb'))\n self.movieId_to_column = pickle.load(open('movieId_to_column.p', 'rb'))\n self.column_to_movieId = pickle.load(open('column_to_movieId.p', 'rb'))\n self.id_to_title = pickle.load(open('id_to_title.p', 'rb'))\n self.movies = pickle.load(open('movies.p', 'rb'))\n self.genre_dict = pickle.load(open('genre_dict.p', 'rb'))\n\n\n def make_recommendations(self, movie_ratings):\n query_ratings = []\n input_columns = []\n for movie, rating in movie_ratings.items():\n movieId = self.title_to_id[movie]\n movie_column = self.movieId_to_column[movieId]\n input_columns.append(movie_column)\n\n for j in range(self.V.shape[1]):\n v_query = self.V[:, j]\n query_rating = 0\n for movie, rating in movie_ratings.items():\n movieId = self.title_to_id[movie]\n movie_column = self.movieId_to_column[movieId]\n v_seen = self.V[:, movie_column]\n sim = self._get_similarity(v_query, v_seen, j, movie_column, rating)\n query_rating += rating*sim\n\n query_ratings.append(query_rating)\n\n recommended_columns = nlargest(\n 15,\n [\n (rating, movie_column)\n for movie_column, rating in enumerate(query_ratings)\n if movie_column not in input_columns\n ],\n )\n\n recommended_movies = [self.column_to_movieId[r[1]] for r in recommended_columns]\n recommended_movies = [self.id_to_title[r] for r in recommended_movies]\n return recommended_movies\n\n def _get_similarity(self, v_query, v_seen, query_column, seen_column, rating):\n cos_dist = (\n v_query.dot(v_seen)\n / np.linalg.norm(v_query)\n / np.linalg.norm(v_seen)\n )\n if rating == 1:\n common_genres = self.genre_dict[query_column].intersection(\n self.genre_dict[seen_column]\n )\n scale = 0.7 + 0.3 * min((len(common_genres), 2))\n cos_dist *= scale\n\n return cos_dist\n\n #Should movie search into another module\n def find_movie_matches(self, input_str):\n input_str = input_str.lower()\n input_title = input_str.split('\"')[1].strip()\n input_title_split = input_title.split()\n matches, edit1, edit2, edit3 = [], [], [], []\n for title in self.movies:\n no_date_title = title[:-7].lower().strip()\n readable_title = self.make_readable(no_date_title)\n if input_title in [no_date_title, readable_title]:\n return [title]\n flag = True\n for word in input_title_split:\n if word not in no_date_title:\n flag = False\n break\n if flag:\n matches.append(title)\n if \"Terminator, The\" in title:\n print(input_title, readable_title)\n edit_distance = damerau_levenshtein_distance(input_title, readable_title)\n if edit_distance==1:\n edit1.append(title)\n elif edit_distance==2:\n edit2.append(title)\n elif edit_distance==3:\n edit3.append(title)\n if len(matches)>0:\n return matches\n else:\n edit_distances = edit1 + edit2 + edit3\n return edit_distances[:6]\n\n def make_readable(self, input_str):\n \"\"\"Transforms titles like 'Terminator, The' -> 'The Terminator'\"\"\"\n comma_split = input_str.split(',')\n if len(comma_split)>1 and ' ' not in comma_split[-1].strip():\n return \"\".join([comma_split[-1].strip(), ' ', *comma_split[:-1]])\n return input_str\n \n\n\n\n" ]
[ [ "numpy.linalg.norm", "numpy.load" ] ]
Wayne-Zen/Microbiome-Pipeline
[ "9b188975eb873312c90012dba1d2d1ca44ce44ed" ]
[ "my-data/preprocess_for_galaxy.py" ]
[ "import os\nimport sys\nimport pandas as pd\nfrom itertools import islice\n\n#trim primer\n#change header\n#count sequence number\ndef preprocess_for_galaxy(meta_data_dir, input_dir, output_dir):\n meta_data = pd.read_csv(os.path.join(meta_data_dir, 'meta_data.txt') , sep='\\t')\n \n cnt_list = []\n for i in meta_data.index:\n # forward\n cnt1 = 0\n with open(os.path.join(input_dir, meta_data.ix[i, 'OriginalForwardFileName'])) as fi, \\\n open(os.path.join(output_dir, meta_data.ix[i, 'GalaxyForwardFileName']), 'w') as fo:\n while True:\n next_n = list(islice(fi, 4))\n if not next_n:\n break\n cnt1 += 1\n fo.write('@{}_{}\\n'.format(meta_data.ix[i, '#SampleID'], cnt1))\n fo.write(next_n[1][len(meta_data.ix[i, 'LinkerPrimerSequence']):])\n fo.write(next_n[2])\n fo.write(next_n[3][len(meta_data.ix[i, 'LinkerPrimerSequence']):])\n # reverse\n cnt2 = 0\n with open(input_dir + '/' + meta_data.ix[i, 'OriginalReverseFileName']) as fi, \\\n open(output_dir + '/' + meta_data.ix[i, 'GalaxyReverseFileName'], 'w') as fo:\n while True:\n next_n = list(islice(fi, 4))\n if not next_n:\n break\n cnt2 += 1\n fo.write('@{}_{}\\n'.format(meta_data.ix[i, '#SampleID'], cnt2))\n fo.write(next_n[1][len(meta_data.ix[i, 'ReversePrimer']):])\n fo.write(next_n[2])\n fo.write(next_n[3][len(meta_data.ix[i, 'ReversePrimer']):])\n \n if cnt1 != cnt2:\n raise Exception(\"ERROR: forward sequence number is not equal to reverse sequence number\")\n cnt_list.append(cnt1)\n \n # build sequence_count.txt\n cnt_df = pd.concat([meta_data[['#SampleID']], pd.DataFrame({'Original': cnt_list})], axis=1)\n cnt_df.to_csv(os.path.join(meta_data_dir, 'original_sequence_count.txt'), sep='\\t', index=False)\n \nif __name__ == \"__main__\":\n meta_data_dir = os.path.join('projects', sys.argv[1], 'meta_data')\n input_dir = os.path.join('projects', sys.argv[1], 'sequence_data_original')\n output_dir = os.path.join('projects', sys.argv[1], 'sequence_data_galaxy')\n preprocess_for_galaxy(meta_data_dir, input_dir, output_dir)\n \n" ]
[ [ "pandas.DataFrame" ] ]
xiaofengShi/Gans
[ "03938440f5f83da22602a0b3b78d689f310c1a95" ]
[ "pytorch_sketch2paints/util/image_pool.py" ]
[ "import random\nimport torch\n\n\"\"\" \nThis program desigined to make the imagepool\nthe return_images contains many images, each image is the generated image and the num is pool size\nreturn is a fake image but maybe not generated by this batch input image\n\"\"\"\nclass ImagePool():\n def __init__(self, pool_size):\n self.pool_size = pool_size\n if self.pool_size > 0:\n self.num_imgs = 0\n self.images = []\n\n def query(self, images):\n if self.pool_size == 0:\n return images\n return_images = []\n \n for image in images:\n image = torch.unsqueeze(image.data, 0)\n if self.num_imgs < self.pool_size:\n self.num_imgs = self.num_imgs + 1\n self.images.append(image)\n return_images.append(image)\n else:\n p = random.uniform(0, 1)\n if p > 0.5:\n random_id = random.randint(0, self.pool_size - 1) # randint is inclusive\n tmp = self.images[random_id].clone()\n self.images[random_id] = image\n return_images.append(tmp)\n else:\n return_images.append(image)\n return_images = torch.cat(return_images, 0)\n return return_images\n" ]
[ [ "torch.cat", "torch.unsqueeze" ] ]
chessking94/LichessLastMonth
[ "06ac8b0e826088ffcf02d4aa611b75523fb5e906" ]
[ "DownloadLastMonthLichess.py" ]
[ "import pyodbc as sql\nimport pandas as pd\nimport datetime as dt\nfrom urllib import request, error\nimport requests\nimport os\nimport json\n\ndef main(): \n my_games_flag = 0 # 0/1 value; if 0 use below user, if 1 use file\n\n conn = sql.connect('Driver={ODBC Driver 17 for SQL Server};Server=HUNT-PC1;Database=ChessAnalysis;Trusted_Connection=yes;') \n if my_games_flag == 1: \n qry_text = \"SELECT ISNULL(LastName, '') + ISNULL(FirstName, '') AS PlayerName, Username FROM UsernameXRef WHERE EEHFlag = 1 AND Source = 'Lichess'\"\n else:\n qry_text = \"SELECT ISNULL(LastName, '') + ISNULL(FirstName, '') AS PlayerName, Username FROM UsernameXRef WHERE EEHFlag = 0 AND Source = 'Lichess' AND DownloadFlag = 1\"\n users = pd.read_sql(qry_text, conn).values.tolist()\n rec_ct = len(users)\n if rec_ct == 0:\n conn.close()\n print('No users selected to download!')\n quit()\n conn.close()\n \n today = dt.date.today()\n first = today.replace(day=1)\n lastmonth = first - dt.timedelta(days=1)\n start_dte = dt.datetime(year=lastmonth.year, month=lastmonth.month, day=1, hour=0, minute=0, second=0)\n end_dte = dt.datetime(year=today.year, month=today.month, day=1, hour=0, minute=0, second=0)\n # because I'm lazy I'll hard-code the milli/micro/nanoseconds\n utc_start = str(int(start_dte.replace(tzinfo=dt.timezone.utc).timestamp())) + '000'\n utc_end = str(int(end_dte.replace(tzinfo=dt.timezone.utc).timestamp())) + '000'\n yyyy = lastmonth.strftime('%Y')\n mm = lastmonth.strftime('%m')\n\n dload_path = r'C:\\Users\\eehunt\\Documents\\Chess\\Scripts\\TempProcessing'\n for i in users:\n # get auth token\n fpath = r'C:\\Users\\eehunt\\Repository'\n fname = 'keys.json'\n with open(os.path.join(fpath, fname), 'r') as f:\n json_data = json.load(f)\n token_value = json_data.get('LichessAPIToken')\n\n dload_url = 'https://lichess.org/api/games/user/' + i[1] + '?since=' + utc_start + '&until=' + utc_end\n dload_name = i[1] + '_' + str(yyyy) + str(mm) + '.pgn'\n dload_file = os.path.join(dload_path, dload_name)\n hdr = {'Authorization': 'Bearer ' + token_value}\n # add error handling if url is bad\n with requests.get(dload_url, headers=hdr, stream=True) as resp:\n with open(dload_file, 'wb') as f:\n for chunk in resp.iter_content(chunk_size=8196):\n f.write(chunk)\n\n # verify files exist to continue processing\n file_list = [f for f in os.listdir(dload_path) if os.path.isfile(os.path.join(dload_path, f))]\n if len(file_list) > 0:\n # merge and clean pgn\n if rec_ct == 1:\n merge_name = dload_name\n clean_name = 'Lichess_' + users[0][1] + '_' + str(yyyy) + str(mm) + '.pgn'\n else:\n merge_name = 'LichessMerged_Multiple_' + str(yyyy) + str(mm) + '.pgn'\n clean_name = 'Lichess_Multiple_' + str(yyyy) + str(mm) + '.pgn'\n cmd_text = 'copy /B *.pgn ' + merge_name # /B will avoid the random extra char at the end\n if os.getcwd != dload_path:\n os.chdir(dload_path)\n os.system('cmd /C ' + cmd_text)\n cmd_text = 'pgn-extract -C -N -V -D -pl2 --quiet --nosetuptags --output ' + clean_name + ' ' + merge_name\n if os.getcwd != dload_path:\n os.chdir(dload_path)\n os.system('cmd /C ' + cmd_text)\n\n # delete old files\n dir_files = [f for f in os.listdir(dload_path) if os.path.isfile(os.path.join(dload_path, f))]\n for filename in dir_files:\n if filename != clean_name:\n fname_relpath = os.path.join(dload_path, filename)\n os.remove(fname_relpath)\n\n\nif __name__ == '__main__':\n main()" ]
[ [ "pandas.read_sql" ] ]
odednoam/Adafruit_Blinka_Displayio
[ "3329de4221e8edbfd235e01c826528eccc1779bb" ]
[ "displayio/display.py" ]
[ "# SPDX-FileCopyrightText: 2020 Melissa LeBlanc-Williams for Adafruit Industries\n#\n# SPDX-License-Identifier: MIT\n\n\"\"\"\n`displayio.display`\n================================================================================\n\ndisplayio for Blinka\n\n**Software and Dependencies:**\n\n* Adafruit Blinka:\n https://github.com/adafruit/Adafruit_Blinka/releases\n\n* Author(s): Melissa LeBlanc-Williams\n\n\"\"\"\n\nimport time\nimport struct\nimport threading\nimport digitalio\nfrom PIL import Image\nimport numpy\nfrom recordclass import recordclass\nfrom displayio.colorconverter import ColorConverter\n\n__version__ = \"0.0.0-auto.0\"\n__repo__ = \"https://github.com/adafruit/Adafruit_Blinka_displayio.git\"\n\nRectangle = recordclass(\"Rectangle\", \"x1 y1 x2 y2\")\ndisplays = []\n\nBACKLIGHT_IN_OUT = 1\nBACKLIGHT_PWM = 2\n\n\nclass Display:\n # pylint: disable=too-many-instance-attributes\n \"\"\"This initializes a display and connects it into CircuitPython. Unlike other objects\n in CircuitPython, Display objects live until ``displayio.release_displays()`` is called.\n This is done so that CircuitPython can use the display itself.\n\n Most people should not use this class directly. Use a specific display driver instead\n that will contain the initialization sequence at minimum.\n \"\"\"\n\n def __init__(\n self,\n display_bus,\n init_sequence,\n *,\n width,\n height,\n colstart=0,\n rowstart=0,\n rotation=0,\n color_depth=16,\n grayscale=False,\n pixels_in_byte_share_row=True,\n bytes_per_cell=1,\n reverse_pixels_in_byte=False,\n set_column_command=0x2A,\n set_row_command=0x2B,\n write_ram_command=0x2C,\n set_vertical_scroll=0,\n backlight_pin=None,\n brightness_command=None,\n brightness=1.0,\n auto_brightness=False,\n single_byte_bounds=False,\n data_as_commands=False,\n auto_refresh=True,\n native_frames_per_second=60\n ):\n # pylint: disable=unused-argument,too-many-locals\n \"\"\"Create a Display object on the given display bus (`displayio.FourWire` or\n `displayio.ParallelBus`).\n\n The ``init_sequence`` is bitpacked to minimize the ram impact. Every command begins\n with a command byte followed by a byte to determine the parameter count and if a\n delay is need after. When the top bit of the second byte is 1, the next byte will be\n the delay time in milliseconds. The remaining 7 bits are the parameter count\n excluding any delay byte. The third through final bytes are the remaining command\n parameters. The next byte will begin a new command definition. Here is a portion of\n ILI9341 init code:\n\n .. code-block:: python\n\n init_sequence = (\n b\"\\\\xE1\\\\x0F\\\\x00\\\\x0E\\\\x14\\\\x03\\\\x11\\\\x07\\\\x31\\\n\\\\xC1\\\\x48\\\\x08\\\\x0F\\\\x0C\\\\x31\\\\x36\\\\x0F\"\n b\"\\\\x11\\\\x80\\\\x78\" # Exit Sleep then delay 0x78 (120ms)\n b\"\\\\x29\\\\x80\\\\x78\" # Display on then delay 0x78 (120ms)\n )\n display = displayio.Display(display_bus, init_sequence, width=320, height=240)\n\n The first command is 0xE1 with 15 (0x0F) parameters following. The second and third\n are 0x11 and 0x29 respectively with delays (0x80) of 120ms (0x78) and no parameters.\n Multiple byte literals (b”“) are merged together on load. 
The parens are needed to\n allow byte literals on subsequent lines.\n\n The initialization sequence should always leave the display memory access inline with\n the scan of the display to minimize tearing artifacts.\n \"\"\"\n print(\"Initializing display, color_depth\", color_depth)\n print(\"Initializing display, colstart\", colstart, \"rowstart\", rowstart)\n self._bus = display_bus\n self._set_column_command = set_column_command\n self._set_row_command = set_row_command\n self._write_ram_command = write_ram_command\n self._brightness_command = brightness_command\n self._data_as_commands = data_as_commands\n self._single_byte_bounds = single_byte_bounds\n self._width = width\n self._height = height\n self._colstart = colstart\n self._rowstart = rowstart\n self._rotation = rotation\n self._auto_brightness = auto_brightness\n self._brightness = 1.0\n self._auto_refresh = auto_refresh\n self._initialize(init_sequence)\n self._buffer = Image.new(\"RGB\" if not grayscale else \"L\", (width, height))\n self._subrectangles = []\n self._bounds_encoding = \">BB\" if single_byte_bounds else \">HH\"\n self._current_group = None\n self._color_depth = color_depth\n displays.append(self)\n self._refresh_thread = None\n if self._auto_refresh:\n self.auto_refresh = True\n self._colorconverter = ColorConverter()\n\n self._backlight_type = None\n if backlight_pin is not None:\n try:\n from pulseio import PWMOut # pylint: disable=import-outside-toplevel\n\n # 100Hz looks decent and doesn't keep the CPU too busy\n self._backlight = PWMOut(backlight_pin, frequency=100, duty_cycle=0)\n self._backlight_type = BACKLIGHT_PWM\n except ImportError:\n # PWMOut not implemented on this platform\n pass\n if self._backlight_type is None:\n self._backlight_type = BACKLIGHT_IN_OUT\n self._backlight = digitalio.DigitalInOut(backlight_pin)\n self._backlight.switch_to_output()\n self.brightness = brightness\n\n def _initialize(self, init_sequence):\n i = 0\n while i < len(init_sequence):\n command = init_sequence[i]\n data_size = init_sequence[i + 1]\n delay = (data_size & 0x80) > 0\n data_size &= ~0x80\n\n self._write(command, init_sequence[i + 2 : i + 2 + data_size])\n delay_time_ms = 10\n if delay:\n data_size += 1\n delay_time_ms = init_sequence[i + 1 + data_size]\n if delay_time_ms == 255:\n delay_time_ms = 500\n time.sleep(delay_time_ms / 1000)\n i += 2 + data_size\n\n def _write(self, command, data):\n self._bus.begin_transaction()\n if self._data_as_commands:\n if command is not None:\n self._bus.send(True, bytes([command]), toggle_every_byte=True)\n self._bus.send(command is not None, data)\n else:\n self._bus.send(True, bytes([command]), toggle_every_byte=True)\n self._bus.send(False, data)\n self._bus.end_transaction()\n\n def _release(self):\n self._bus._release() # pylint: disable=protected-access\n self._bus = None\n\n def show(self, group):\n \"\"\"Switches to displaying the given group of layers. When group is None, the\n default CircuitPython terminal will be shown.\n \"\"\"\n self._current_group = group\n\n def refresh(self, *, target_frames_per_second=60, minimum_frames_per_second=1):\n # pylint: disable=unused-argument\n \"\"\"When auto refresh is off, waits for the target frame rate and then refreshes the\n display, returning True. 
If the call has taken too long since the last refresh call\n for the given target frame rate, then the refresh returns False immediately without\n updating the screen to hopefully help getting caught up.\n\n If the time since the last successful refresh is below the minimum frame rate, then\n an exception will be raised. Set minimum_frames_per_second to 0 to disable.\n\n When auto refresh is on, updates the display immediately. (The display will also\n update without calls to this.)\n \"\"\"\n self._subrectangles = []\n\n # Go through groups and and add each to buffer\n if self._current_group is not None:\n buffer = Image.new(\"RGBA\", (self._width, self._height))\n # Recursively have everything draw to the image\n self._current_group._fill_area(buffer) # pylint: disable=protected-access\n # save image to buffer (or probably refresh buffer so we can compare)\n self._buffer.paste(buffer)\n\n if self._current_group is not None:\n # Eventually calculate dirty rectangles here\n self._subrectangles.append(Rectangle(0, 0, self._width, self._height))\n\n for area in self._subrectangles:\n self._refresh_display_area(area)\n\n def _refresh_loop(self):\n while self._auto_refresh:\n self.refresh()\n\n def _refresh_display_area(self, rectangle):\n \"\"\"Loop through dirty rectangles and redraw that area.\"\"\"\n\n if self._color_depth == 16:\n img = self._buffer.convert(\"RGB\").crop(rectangle)\n img = img.rotate(self._rotation, expand=True)\n\n display_rectangle = self._apply_rotation(rectangle)\n img = img.crop(self._clip(display_rectangle))\n data = numpy.array(img).astype(\"uint16\")\n pixel_data = (\n (data[:, :, 0] & 0xF0) | ((data[:, :, 1] & 0x0F)>> 4)\n )\n\n pixels = bytes(\n numpy.dstack(color).flatten().tolist()\n )\n elif self._color_depth == 4:\n img = self._buffer.convert(\"L\").crop(rectangle)\n img = img.rotate(self._rotation, expand=True)\n\n display_rectangle = self._apply_rotation(rectangle)\n img = img.crop(self._clip(display_rectangle))\n data = numpy.flipud(numpy.array(img).astype(\"uint8\"))\n pixel_data = (\n (data[::2] & 0xF0)>>4 | ((data[1::2] & 0xF0)>> 0)\n )\n\n pixels = bytes(\n numpy.dstack(pixel_data).flatten().tolist()\n )\n else:\n raise ValueError(\"Unsupported color depth\")\n\n self._write(\n self._set_column_command,\n self._encode_pos(\n display_rectangle.x1 + self._colstart,\n display_rectangle.x2 + self._colstart - 1,\n ),\n )\n\n self._write(\n self._set_row_command,\n self._encode_pos(\n display_rectangle.y1 + self._rowstart,\n display_rectangle.y2 + self._rowstart - 1,\n ),\n )\n\n if self._data_as_commands:\n self._write(None, pixels)\n else:\n self._write(self._write_ram_command, pixels)\n\n def _clip(self, rectangle):\n if self._rotation in (90, 270):\n width, height = self._height, self._width\n else:\n width, height = self._width, self._height\n\n if rectangle.x1 < 0:\n rectangle.x1 = 0\n if rectangle.y1 < 0:\n rectangle.y1 = 0\n if rectangle.x2 > width:\n rectangle.x2 = width\n if rectangle.y2 > height:\n rectangle.y2 = height\n\n return rectangle\n\n def _apply_rotation(self, rectangle):\n \"\"\"Adjust the rectangle coordinates based on rotation\"\"\"\n if self._rotation == 90:\n return Rectangle(\n self._height - rectangle.y2,\n rectangle.x1,\n self._height - rectangle.y1,\n rectangle.x2,\n )\n if self._rotation == 180:\n return Rectangle(\n self._width - rectangle.x2,\n self._height - rectangle.y2,\n self._width - rectangle.x1,\n self._height - rectangle.y1,\n )\n if self._rotation == 270:\n return Rectangle(\n rectangle.y1,\n self._width - 
rectangle.x2,\n rectangle.y2,\n self._width - rectangle.x1,\n )\n return rectangle\n\n def _encode_pos(self, x, y):\n \"\"\"Encode a postion into bytes.\"\"\"\n return struct.pack(self._bounds_encoding, x, y)\n\n def fill_row(self, y, buffer):\n \"\"\"Extract the pixels from a single row\"\"\"\n for x in range(0, self._width):\n _rgb_565 = self._colorconverter.convert(self._buffer.getpixel((x, y)))\n buffer[x * 2] = (_rgb_565 >> 8) & 0xFF\n buffer[x * 2 + 1] = _rgb_565 & 0xFF\n return buffer\n\n @property\n def auto_refresh(self):\n \"\"\"True when the display is refreshed automatically.\"\"\"\n return self._auto_refresh\n\n @auto_refresh.setter\n def auto_refresh(self, value):\n self._auto_refresh = value\n if self._refresh_thread is None:\n self._refresh_thread = threading.Thread(\n target=self._refresh_loop, daemon=True\n )\n if value and not self._refresh_thread.is_alive():\n # Start the thread\n self._refresh_thread.start()\n elif not value and self._refresh_thread.is_alive():\n # Stop the thread\n self._refresh_thread.join()\n\n @property\n def brightness(self):\n \"\"\"The brightness of the display as a float. 0.0 is off and 1.0 is full `brightness`.\n When `auto_brightness` is True, the value of `brightness` will change automatically.\n If `brightness` is set, `auto_brightness` will be disabled and will be set to False.\n \"\"\"\n return self._brightness\n\n @brightness.setter\n def brightness(self, value):\n if 0 <= float(value) <= 1.0:\n self._brightness = value\n if self._backlight_type == BACKLIGHT_IN_OUT:\n self._backlight.value = round(self._brightness)\n elif self._backlight_type == BACKLIGHT_PWM:\n self._backlight.duty_cycle = self._brightness * 65535\n elif self._brightness_command is not None:\n self._write(self._brightness_command, round(value * 255))\n else:\n raise ValueError(\"Brightness must be between 0.0 and 1.0\")\n\n @property\n def auto_brightness(self):\n \"\"\"True when the display brightness is adjusted automatically, based on an ambient\n light sensor or other method. Note that some displays may have this set to True by\n default, but not actually implement automatic brightness adjustment.\n `auto_brightness` is set to False if `brightness` is set manually.\n \"\"\"\n return self._auto_brightness\n\n @auto_brightness.setter\n def auto_brightness(self, value):\n self._auto_brightness = value\n\n @property\n def width(self):\n \"\"\"Display Width\"\"\"\n return self._width\n\n @property\n def height(self):\n \"\"\"Display Height\"\"\"\n return self._height\n\n @property\n def rotation(self):\n \"\"\"The rotation of the display as an int in degrees.\"\"\"\n return self._rotation\n\n @rotation.setter\n def rotation(self, value):\n if value not in (0, 90, 180, 270):\n raise ValueError(\"Rotation must be 0/90/180/270\")\n self._rotation = value\n\n @property\n def bus(self):\n \"\"\"Current Display Bus\"\"\"\n return self._bus\n" ]
[ [ "numpy.dstack", "numpy.array" ] ]
BrunoSanchez/capsule_N1
[ "a5ee3b74afc27de1a954ae2f9f96c278a4723226" ]
[ "tests/test_plot.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# test_plot.py\n#\n# Copyright 2017 Bruno S <[email protected]>\n#\n# This file is part of ProperImage (https://github.com/toros-astro/ProperImage)\n# License: BSD-3-Clause\n# Full Text: https://github.com/toros-astro/ProperImage/blob/master/LICENSE.txt\n#\n\n\"\"\"\ntest_plot module from ProperImage\nfor analysis of astronomical images\n\nWritten by Bruno SANCHEZ\n\nPhD of Astromoy - UNC\[email protected]\n\nInstituto de Astronomia Teorica y Experimental (IATE) UNC\nCordoba - Argentina\n\nOf 301\n\"\"\"\n\n# el siguiente código va a ser escrito como funciones. Esto se trata mas que\n# nada por que los plots son mucho mas fáciles de probar con esta forma.\n# lo ideal es qur todos los tests sigan esta forma si usamos pytest.\n\n# =============================================================================\n# IMPORTS\n# =============================================================================\n\nfrom unittest import mock\n\nfrom astropy.stats import sigma_clipped_stats\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.testing.decorators import check_figures_equal\n\nimport numpy as np\n\nfrom properimage import plot\n\nimport pytest\n\n# =============================================================================\n# TEST PRIMES\n# =============================================================================\n\n\[email protected](\n \"test_input, expected\", [(9, 3), (45045, 143), (3, 3), (1, 1)]\n)\ndef test_primes(test_input, expected):\n assert plot.primes(test_input) == expected\n\n\n# =============================================================================\n# TEST API\n# =============================================================================\n\n\n@check_figures_equal()\ndef test_plot_default(random_simage, fig_test, fig_ref):\n img = random_simage\n\n # fig test\n test_ax = fig_test.subplots()\n img.plot(ax=test_ax)\n\n # expected\n exp_ax = fig_ref.subplots()\n img.plot.imshow(ax=exp_ax)\n\n\ndef test_plot_invalid_plot(random_simage):\n img = random_simage\n with pytest.raises(ValueError):\n img.plot(\"_foo\")\n\n with pytest.raises(ValueError):\n img.plot(\"foo\")\n\n with pytest.raises(ValueError):\n img.plot(\"si\")\n\n\n# =============================================================================\n# imshow\n# =============================================================================\n\n\ndef test_plot_imshow_default_ax(random_simage):\n img = random_simage\n ax = img.plot.imshow()\n assert ax is plt.gca()\n\n\n@check_figures_equal()\ndef test_plot_imshow(random_simage, fig_test, fig_ref):\n img = random_simage\n\n # fig test\n test_ax = fig_test.subplots()\n img.plot.imshow(ax=test_ax)\n\n # expected\n exp_ax = fig_ref.subplots()\n exp_ax.imshow(img.data, origin=\"lower\")\n exp_ax.set_title(f\"SingleImage {img.data.shape}\")\n\n\n@check_figures_equal()\ndef test_plot_imshow_str(random_simage, fig_test, fig_ref):\n img = random_simage\n\n # fig test\n test_ax = fig_test.subplots()\n img.plot(\"imshow\", ax=test_ax)\n\n # expected\n exp_ax = fig_ref.subplots()\n img.plot.imshow(ax=exp_ax)\n\n\n# =============================================================================\n# TEST auto_psf\n# =============================================================================\n\n\ndef test_plot_autopsf_default_axes(random_4psf_simage):\n simg = random_4psf_simage\n\n a_fields, psf_basis = simg.get_variable_psf(inf_loss=0.15)\n axs = simg.plot.autopsf(inf_loss=0.15)\n\n assert np.size(axs) >= len(psf_basis)\n\n\ndef 
test_plot_autopsf_too_few_axis(random_4psf_simage):\n simg = random_4psf_simage\n with pytest.raises(ValueError):\n simg.plot.autopsf(inf_loss=0.15, axs=[plt.gca()])\n\n\n@check_figures_equal()\ndef test_plot_autopsf(random_4psf_simage, fig_test, fig_ref):\n simg = random_4psf_simage\n\n # expected\n a_fields, psf_basis = simg.get_variable_psf(inf_loss=0.15)\n\n xsh, ysh = psf_basis[0].shape\n\n N = len(psf_basis)\n p = plot.primes(N)\n\n if N == 2:\n subplots = (1, 2)\n if N == 3:\n subplots = (1, 3)\n elif p == N:\n subplots = (round(np.sqrt(N)), round(np.sqrt(N) + 1))\n else:\n rows = N // p\n rows += N % p\n subplots = (rows, p)\n\n height = plot.DEFAULT_HEIGHT * subplots[0]\n width = plot.DEFAULT_WIDTH * subplots[1]\n\n fig_ref.set_size_inches(w=width, h=height)\n exp_axs = fig_ref.subplots(*subplots)\n\n kwargs = {\"interpolation\": \"none\"}\n cmap_kw = {\"shrink\": 0.85}\n iso_kw = {\"colors\": \"black\", \"alpha\": 0.5}\n\n title_tpl = r\"$\\sum p_{j:d} = {sum:4.3e}$\"\n for idx, psf_basis, ax in zip(range(N), psf_basis, np.ravel(exp_axs)):\n\n img = ax.imshow(psf_basis, **kwargs)\n title = title_tpl.format(j=idx + 1, sum=np.sum(psf_basis))\n ax.set_title(title)\n\n fig_ref.colorbar(img, ax=ax, **cmap_kw)\n\n ax.contour(np.arange(xsh), np.arange(ysh), psf_basis, **iso_kw)\n\n # fig test\n fig_test.set_size_inches(w=width, h=height)\n test_axs = fig_test.subplots(*subplots)\n simg.plot.autopsf(axs=test_axs, inf_loss=0.15, iso=True)\n\n\n@check_figures_equal()\ndef test_plot_autopsf_str(random_4psf_simage, fig_test, fig_ref):\n simg = random_4psf_simage\n\n with mock.patch(\"matplotlib.pyplot.gcf\", return_value=fig_test):\n simg.plot(\"autopsf\", inf_loss=0.15, iso=True)\n\n with mock.patch(\"matplotlib.pyplot.gcf\", return_value=fig_ref):\n simg.plot.autopsf(inf_loss=0.15, iso=True)\n\n\n# =============================================================================\n# TEST auto_psf_coef\n# =============================================================================\n\n\ndef test_plot_autopsf_coef_no_coef(random_4psf_simage):\n simg = random_4psf_simage\n\n with pytest.raises(plot.NoDataToPlot):\n simg.plot.autopsf_coef(inf_loss=1.0)\n\n\ndef test_plot_autopsf_coef_default_axes(random_4psf_simage):\n simg = random_4psf_simage\n\n a_fields, psf_basis = simg.get_variable_psf(inf_loss=0.15)\n axs = simg.plot.autopsf_coef(inf_loss=0.15)\n\n assert np.size(axs) >= len(a_fields)\n\n\ndef test_plot_autopsf_coef_too_few_axis(random_4psf_simage):\n simg = random_4psf_simage\n with pytest.raises(ValueError):\n simg.plot.autopsf_coef(inf_loss=0.15, axs=[plt.gca()])\n\n\n@check_figures_equal()\ndef test_plot_autopsf_coef(random_4psf_simage, fig_test, fig_ref):\n simg = random_4psf_simage\n\n # expected\n a_fields, psf_basis = simg.get_variable_psf(inf_loss=0.15)\n x, y = simg.get_afield_domain()\n\n # here we plot\n N = len(a_fields) # axis needed\n p = plot.primes(N)\n\n if N == 2:\n subplots = (1, 2)\n if N == 3:\n subplots = (1, 3)\n elif p == N:\n subplots = (round(np.sqrt(N)), round(np.sqrt(N) + 1))\n else:\n rows = int((N // p) + (N % p))\n subplots = (rows, p)\n\n width = plot.DEFAULT_WIDTH * subplots[0]\n height = plot.DEFAULT_HEIGHT * subplots[1]\n\n fig_ref.set_size_inches(w=width, h=height)\n exp_axs = fig_ref.subplots(*subplots)\n\n cmap_kw = {\"shrink\": 0.75, \"aspect\": 30}\n\n title_tpl = r\"$a_{j}$,$\\sum a_{j}={sum:4.3e}$\"\n for idx, a_field, ax in zip(range(N), a_fields, np.ravel(exp_axs)):\n\n a = a_field(x, y)\n mean, med, std = sigma_clipped_stats(a)\n\n img = 
ax.imshow(a, vmax=med + 2 * std, vmin=med - 2 * std)\n fig_ref.colorbar(img, ax=ax, **cmap_kw)\n\n title = title_tpl.format(j=idx + 1, sum=np.sqrt(np.sum(a ** 2)))\n ax.set_title(title)\n\n # fig test\n fig_test.set_size_inches(w=width, h=height)\n test_axs = fig_test.subplots(*subplots)\n simg.plot.autopsf_coef(axs=test_axs, inf_loss=0.15)\n\n\n@check_figures_equal()\ndef test_plot_autopsf_coef_str(random_4psf_simage, fig_test, fig_ref):\n simg = random_4psf_simage\n\n with mock.patch(\"matplotlib.pyplot.gcf\", return_value=fig_test):\n simg.plot(\"autopsf_coef\", inf_loss=0.15)\n\n with mock.patch(\"matplotlib.pyplot.gcf\", return_value=fig_ref):\n simg.plot.autopsf_coef(inf_loss=0.15)\n" ]
[ [ "matplotlib.testing.decorators.check_figures_equal", "numpy.sum", "numpy.ravel", "numpy.arange", "numpy.size", "numpy.sqrt", "matplotlib.pyplot.gca" ] ]
collinskatie/mlmi4_MAML_reproduce
[ "3cbda95ca380de11fce63d961de19847752b526c" ]
[ "Multi_Dim_Sine/data.py" ]
[ "\n'''\nData loaders for sinusoid regression with multiple dimensional sines\nHeavily borrowed and modified from: https://github.com/AdrienLE/ANIML/blob/master/ANIML.ipynb\n'''\n\nimport numpy as np\nimport torch\nfrom math import pi as PI\n\n# using parameters from original MAML Section 5.1 (https://arxiv.org/pdf/1703.03400.pdf)\namp_min=0.1\namp_max=5.0\nphase_min=0\nphase_max=PI\n\nclass SineWaveTask_multi:\n '''\n Multi-dimensional sine wave generator\n Note, structure and code are from https://github.com/AdrienLE/ANIML/blob/master/ANIML.ipynb\n Custom modifications have been made for scaling to more dimensions, but base is from cited link\n '''\n def __init__(self,dimensions=20):\n self.dimensions = dimensions\n self.a = []\n self.b = []\n for dim in range(self.dimensions):\n self.a.append(np.random.uniform(amp_min, amp_max))\n self.b.append(np.random.uniform(phase_min, phase_max))\n self.train_x = None\n \n def f(self, x,a,b):\n return a * np.sin(x + b)\n \n def training_set(self, size=10, force_new=False):\n if self.train_x is None and not force_new:\n self.train_x = np.random.uniform(-5, 5, size)\n x = self.train_x\n\n elif not force_new:\n x = self.train_x\n else:\n x = np.random.uniform(-5, 5, size)\n\n y = self.f(x,self.a[0],self.b[0])[:,None]\n\n for dim in range(self.dimensions-1):\n y = np.concatenate((y,self.f(x,self.a[dim+1],self.b[dim+1])[:,None]),axis=-1)\n\n return torch.Tensor(x[:,None]), torch.Tensor(y)\n \n def test_set(self, size=50):\n x = np.linspace(-5, 5, size)\n y = self.f(x,self.a[0],self.b[0])[:,None]\n\n for dim in range(self.dimensions-1):\n y = np.concatenate((y,self.f(x,self.a[dim+1],self.b[dim+1])[:,None]),axis=-1)\n\n return torch.Tensor(x[:,None]), torch.Tensor(y)" ]
[ [ "numpy.linspace", "numpy.random.uniform", "numpy.sin", "torch.Tensor" ] ]
TowKnee0/Osu-Mania-Bot
[ "7d027121368788c4dca3dbf2a0b59b71f94b7984" ]
[ "main.py" ]
[ "import cv2\nimport numpy as np\nimport time\nimport key_press\nimport mss\n\nfrom typing import Tuple, List\n\n\nclass OsuManiaBot(object):\n \"\"\" A simple bot for playing Osu mania. Success rate near 100% using the provided skin.\n\n Instance Attributes:\n - bbox: coordinates in the form (x1, y1, x2, y2) of where the bot will look.\n this should ideally have a height of 1-5 pixels as close to the line where\n notes disappear as possible and width of the exact play area.\n - columns: the number of columns of the map\n - _col_states: the current state of each column: pressed or released\n - _codes: mapping of column number to keyboard key. Keybinds should be set\n to 1, 2, 3, 4... for columns, otherwise change the codes. Can\n be found here: https://gist.github.com/dretax/fe37b8baf55bc30e9d63\n - _inplay: whether the bot is currently active\n\n \"\"\"\n\n def __init__(self, bbox: Tuple[int, int, int, int], columns=4):\n self.bbox = bbox\n self.columns = columns\n self._col_states = self._initialize_states()\n self._codes = {0: 0x02,\n 1: 0x03,\n 2: 0x04,\n 3: 0x05,\n 4: 0x06,\n 5: 0x07,\n 6: 0x08,\n 7: 0x09,\n 8: 0x10,\n 9: 0x11}\n self._inplay = False\n\n def _initialize_states(self):\n \"\"\"Return a list of length columns with default values set to\n False representing key not pressed.\n \"\"\"\n\n return [False for _ in range(self.columns)]\n\n def _slice_columns(self, screen: np.array):\n \"\"\"Takes in a 2d array of pixels and slices by column. Returns a list of the\n columns.\n A tolerance value is added to account for self.bbox not being perfect width.\n\n Preconditions:\n - screen should have same number of elements on every row\n \"\"\"\n\n width = len(screen[0]) // self.columns\n tolerance = round(width * 0.2)\n\n cols = [[] for _ in range(self.columns)]\n\n for row in screen:\n for i in range(len(cols)):\n cols[i].extend(row[i * width + tolerance: (i + 1) * width - tolerance])\n\n return cols\n\n def _handle_press(self, cols: List[np.array]):\n \"\"\"Takes in list of pixel sliced by column and handles keypresses based on pixel data.\n\n If all pixels in the column are white and the key is not currently pressed, it will press\n the key corresponding to the column.\n\n If there is one pixel in a column that is not white and they key is currently pressed, it\n will release the corresponding key.\n\n Preconditions:\n - columns pixel data is binary\n \"\"\"\n\n for i, col in enumerate(cols):\n if np.all(col) and not self._col_states[i]:\n key_press.PressKey(self._codes[i])\n self._col_states[i] = True\n elif not np.all(col) and self._col_states[i]:\n key_press.ReleaseKey(self._codes[i])\n self._col_states[i] = False\n\n def test_region(self, bbox: Tuple[int, int, int, int]):\n \"\"\"Displays the screen based on given bbox.\n\n Use this to find the correct bbox before using the bot. 
Ideally, the bbox should be\n 1-5 pixels tall as close to the horizontal line where notes disappear as possible\n and exact width of play area.\n \"\"\"\n\n with mss.mss() as sct:\n\n while True:\n screen = np.array(sct.grab(bbox))\n gray = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)\n retv, binary = cv2.threshold(gray, 40, 255, cv2.THRESH_BINARY)\n\n cv2.imshow('Find the region', binary)\n\n if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n cv2.destroyAllWindows()\n break\n\n def run(self):\n \"\"\"Runs the bot.\n\n \"\"\"\n\n self._inplay = True\n last = time.time()\n\n with mss.mss() as snap:\n\n while self._inplay:\n\n screen = np.array(snap.grab(self.bbox))\n gray = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)\n retv, binary = cv2.threshold(gray, 40, 255, cv2.THRESH_BINARY)\n\n columns = self._slice_columns(binary)\n\n self._handle_press(columns)\n\n print(time.time() - last)\n last = time.time()\n\n if cv2.waitKey(3) & 0xFF == ord('q'):\n cv2.destroyAllWindows()\n break\n\n# if __name__ == '__main__':\n\ntime.sleep(2)\nbbox4 = (250, 574, 510, 575)\nbbox7 = (225, 574, 575, 575)\nbot = OsuManiaBot(bbox7, columns=7)\n# bot.test_region(bbox7)\nbot.run()\n" ]
[ [ "numpy.all" ] ]
Artisan-Lab/SMTimer
[ "8e0bbb854afd360dcc61d6b098c4ae8931bae14c" ]
[ "dgl_treelstm/util.py" ]
[ "import torch as th\n\n\ndef extract_root(batch, device, logits):\n g = batch.graph\n root_idx = []\n result_idx = 0\n for idx in g.batch_num_nodes:\n root_idx.append(result_idx)\n result_idx = result_idx + idx\n root_idx = th.LongTensor(root_idx).to(device)\n batch_label = th.index_select(batch.label, 0, root_idx)\n if logits.shape[0] != g.batch_size:\n logits = th.index_select(logits, 0, root_idx)\n return batch_label, logits\n" ]
[ [ "torch.index_select", "torch.LongTensor" ] ]
oplatek/ALI
[ "193b666f62236fa1837613beb807d9dcdf978ce6" ]
[ "experiments/semi_supervised_svhn.py" ]
[ "import argparse\n\nimport numpy\nfrom fuel.datasets import H5PYDataset\nfrom fuel.schemes import ShuffledScheme, ShuffledExampleScheme\nfrom fuel.streams import DataStream\nfrom sklearn.svm import LinearSVC\n\n\ndef main(dataset_path, use_c, log_min, log_max, num_steps):\n train_set = H5PYDataset(\n dataset_path, which_sets=('train',), sources=('features', 'targets'),\n subset=slice(0, 63257), load_in_memory=True)\n train_stream = DataStream.default_stream(\n train_set,\n iteration_scheme=ShuffledExampleScheme(train_set.num_examples))\n\n def get_class_balanced_batch(iterator):\n train_features = [[] for _ in range(10)]\n train_targets = [[] for _ in range(10)]\n batch_size = 0\n while batch_size < 1000:\n f, t = next(iterator)\n t = t[0]\n if len(train_features[t]) < 100:\n train_features[t].append(f)\n train_targets[t].append(t)\n batch_size += 1\n train_features = numpy.vstack(sum(train_features, []))\n train_targets = numpy.vstack(sum(train_targets, []))\n return train_features, train_targets\n\n train_features, train_targets = get_class_balanced_batch(\n train_stream.get_epoch_iterator())\n\n valid_set = H5PYDataset(\n dataset_path, which_sets=('train',), sources=('features', 'targets'),\n subset=slice(63257, 73257), load_in_memory=True)\n valid_features, valid_targets = valid_set.data_sources\n\n test_set = H5PYDataset(\n dataset_path, which_sets=('test',), sources=('features', 'targets'),\n load_in_memory=True)\n test_features, test_targets = test_set.data_sources\n\n if use_c is None:\n best_error_rate = 1.0\n best_C = None\n for log_C in numpy.linspace(log_min, log_max, num_steps):\n C = numpy.exp(log_C)\n svm = LinearSVC(C=C)\n svm.fit(train_features, train_targets.ravel())\n error_rate = 1 - numpy.mean(\n [svm.score(valid_features[1000 * i: 1000 * (i + 1)],\n valid_targets[1000 * i: 1000 * (i + 1)].ravel())\n for i in range(10)])\n if error_rate < best_error_rate:\n best_error_rate = error_rate\n best_C = C\n print('C = {}, validation error rate = {} '.format(C, error_rate) +\n '(best is {}, {})'.format(best_C, best_error_rate))\n else:\n best_C = use_c\n\n error_rates = []\n for _ in range(10):\n train_features, train_targets = get_class_balanced_batch(\n train_stream.get_epoch_iterator())\n svm = LinearSVC(C=best_C)\n svm.fit(train_features, train_targets.ravel())\n error_rates.append(1 - numpy.mean(\n [svm.score(valid_features[1000 * i: 1000 * (i + 1)],\n valid_targets[1000 * i: 1000 * (i + 1)].ravel())\n for i in range(10)]))\n\n print('Validation error rate = {} +- {} '.format(numpy.mean(error_rates),\n numpy.std(error_rates)))\n\n error_rates = []\n for _ in range(100):\n train_features, train_targets = get_class_balanced_batch(\n train_stream.get_epoch_iterator())\n svm = LinearSVC(C=best_C)\n svm.fit(train_features, train_targets.ravel())\n s = 1000 * numpy.sum(\n [svm.score(test_features[1000 * i: 1000 * (i + 1)],\n test_targets[1000 * i: 1000 * (i + 1)].ravel())\n for i in range(26)])\n s += 32 * svm.score(test_features[-32:], test_targets[-32:].ravel())\n s = s / 26032.0\n error_rates.append(1 - s)\n\n print('Test error rate = {} +- {} '.format(numpy.mean(error_rates),\n numpy.std(error_rates)))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"ALI-based semi-supervised \"\n \"training on SVHN\")\n parser.add_argument(\"dataset_path\", type=str,\n help=\"path to the saved main loop\")\n parser.add_argument(\"--use-c\", type=float, default=None,\n help=\"evaluate using a specific C value\")\n parser.add_argument(\"--log-min\", 
type=float, default=-20,\n help=\"minimum C value in log-space\")\n parser.add_argument(\"--log-max\", type=float, default=20,\n help=\"maximum C value in log-space\")\n parser.add_argument(\"--num-steps\", type=int, default=50,\n help=\"number of values to try\")\n args = parser.parse_args()\n main(args.dataset_path, args.use_c, args.log_min, args.log_max,\n args.num_steps)\n" ]
[ [ "numpy.exp", "numpy.mean", "numpy.std", "numpy.linspace", "sklearn.svm.LinearSVC" ] ]
combofish/chips-get
[ "6005f24d09edda3f1f54c6603205b2f854ec3b3f" ]
[ "Python3/performance/get-pi.py" ]
[ "import sys\nimport numpy as np\nimport random\nimport time\n\ndef estimate_nbr_points_in_quarter_circle(nbr_estimates):\n nbr_trials_in_quarter_unit_circle = 0\n\n for step in range(int(nbr_estimates)):\n x = random.uniform(0,1)\n y = random.uniform(0,1)\n is_in_unit_circle = pow(x,2) + pow(y,2) <= 1.0\n nbr_trials_in_quarter_unit_circle += is_in_unit_circle\n\n return nbr_trials_in_quarter_unit_circle\n\ndef estimate_nbr_points_in_quarter_circle_numpy(nbr_samples):\n np.random.seed()\n xs = np.random.uniform(0, 1, nbr_samples)\n ys = np.random.uniform(0, 1, nbr_samples)\n estimate_inside_quarter_unit_circle = pow(xs, 2) + pow(ys, 2) <= 1\n nbr_trials_in_quarter_unit_circle = np.sum(estimate_inside_quarter_unit_circle)\n\n return nbr_trials_in_quarter_unit_circle\n\nif __name__ == '__main__':\n # arg = 100000\n arg = int(sys.argv[1])\n t1 = time.time()\n pi_estimate = estimate_nbr_points_in_quarter_circle(arg) / arg * 4\n t2 = time.time()\n pi_estimate_numpy = estimate_nbr_points_in_quarter_circle_numpy(arg) / arg * 4\n t3 = time.time()\n \n print(\"pi_estimate = \",pi_estimate, \" time cost : \", t2 - t1)\n print(\"pi_estimate_numpy = \", pi_estimate_numpy, \" time cost : \", t3 - t2)\n" ]
[ [ "numpy.random.seed", "numpy.sum", "numpy.random.uniform" ] ]
LukasBommes/PV-Drone-Inspect
[ "af07a5e5690326837d1e9b26bdbb32f5582e89fd" ]
[ "tests/unit/test_gps.py" ]
[ "import unittest\nimport numpy as np\nfrom hypothesis import given\nimport hypothesis.strategies as some\nimport hypothesis.extra.numpy as some_np\n\nfrom extractor.gps import gps_to_ltp, gps_from_ltp, \\\n interpolate_gps\n\n\nclass TestGps(unittest.TestCase):\n \n @given(\n some_np.arrays(\n dtype=np.float, \n shape=some.tuples(\n some.integers(min_value=1, max_value=5), # 1..5 rows\n some.integers(min_value=3, max_value=3) # 3 columns\n ), \n elements=some.floats(-90, 90)\n )\n )\n def test_gps_to_ltp_consistency(self, gps): \n gps_ltp, origin = gps_to_ltp(gps)\n gps_recovered = gps_from_ltp(gps_ltp, origin)\n\n self.assertTrue(\n np.allclose(\n (gps[0, 1], gps[0, 0], gps[0, 2]), \n origin\n )\n )\n self.assertTrue(\n np.allclose(\n gps, \n gps_recovered\n )\n )\n\n\n def test_gps_interpolation(self):\n gps = np.array([\n [10., 10., 10.], \n [10., 10., 10.], \n [10., 10., 10.],\n [10., 10., 10.],\n [12., 15., 8.],\n [12., 15., 8.],\n [12., 15., 8.],\n [12., 15., 8.],\n [15., 20., 5.],\n [15., 20., 5.],\n [15., 20., 5.],\n [15., 20., 5.],\n [20., 25., 10.],\n [20., 25., 10.],\n [20., 25., 10.],\n [20., 25., 10.]\n ])\n\n gps_interpolated_gt = np.array([\n [10. , 10. , 10. ],\n [10.5 , 11.25, 9.5 ],\n [11. , 12.5 , 9. ],\n [11.5 , 13.75, 8.5 ],\n [12. , 15. , 8. ],\n [12.75, 16.25, 7.25],\n [13.5 , 17.5 , 6.5 ],\n [14.25, 18.75, 5.75],\n [15. , 20. , 5. ],\n [16.25, 21.25, 6.25],\n [17.5 , 22.5 , 7.5 ],\n [18.75, 23.75, 8.75],\n [20. , 25. , 10. ],\n [20. , 25. , 10. ],\n [20. , 25. , 10. ],\n [20. , 25. , 10. ]\n ])\n \n gps_interpolated = interpolate_gps(gps) \n self.assertTrue(\n np.allclose(\n gps_interpolated_gt, \n gps_interpolated\n )\n )" ]
[ [ "numpy.allclose", "numpy.array" ] ]
akshayka/gavel
[ "40a22a725f2e70478483e98c9b07c6fc588e0c40" ]
[ "scheduler/policies/max_min_fairness.py" ]
[ "import os, sys\nsys.path.append(os.path.dirname(os.path.realpath(__file__)))\n\nimport cvxpy as cp\nimport numpy as np\n\nfrom policy import Policy, PolicyWithPacking\nfrom proportional import ProportionalPolicy\n\nclass MaxMinFairnessPolicy(Policy):\n\n def __init__(self, solver):\n self._name = 'MaxMinFairness'\n self._max_min_fairness_perf_policy = \\\n MaxMinFairnessPolicyWithPerf(solver)\n\n def get_allocation(self, unflattened_throughputs, scale_factors,\n priority_weights, cluster_spec):\n throughputs, index = super().flatten(unflattened_throughputs,\n cluster_spec)\n if throughputs is None: return None\n (job_ids, worker_types) = index\n\n new_unflattened_throughputs = {}\n for job_id in unflattened_throughputs:\n new_unflattened_throughputs[job_id] = {}\n for worker_type in unflattened_throughputs[job_id]:\n new_unflattened_throughputs[job_id][worker_type] = 1.0\n\n return self._max_min_fairness_perf_policy.get_allocation(\n new_unflattened_throughputs, scale_factors, priority_weights,\n cluster_spec)\n\n\nclass MaxMinFairnessPolicyWithPerf(Policy):\n\n def __init__(self, solver):\n Policy.__init__(self, solver)\n self._name = 'MaxMinFairness_Perf'\n self._proportional_policy = ProportionalPolicy()\n\n def get_allocation(self, unflattened_throughputs, scale_factors,\n unflattened_priority_weights, cluster_spec):\n throughputs, index = super().flatten(unflattened_throughputs,\n cluster_spec)\n if throughputs is None: return None\n (m, n) = throughputs.shape\n (job_ids, worker_types) = index\n\n # Row i of scale_factors_array is the scale_factor of job i\n # repeated len(worker_types) times.\n scale_factors_array = self.scale_factors_array(\n scale_factors, job_ids, m, n)\n\n priority_weights = np.array(\n [1. / unflattened_priority_weights[job_id]\n for job_id in job_ids])\n\n proportional_throughputs = self._proportional_policy.get_throughputs(\n throughputs, index, cluster_spec)\n priority_weights = np.multiply(priority_weights.reshape((m, 1)),\n 1.0 / proportional_throughputs.reshape((m, 1)))\n\n x = cp.Variable(throughputs.shape)\n # Multiply throughputs by scale_factors to ensure that scale_factor\n # is taken into account while allocating times to different jobs.\n # A job run on 1 GPU should receive `scale_factor` more time than\n # a job run on `scale_factor` GPUs if throughputs are equal.\n objective = cp.Maximize(\n cp.min(cp.sum(cp.multiply(\n np.multiply(throughputs * priority_weights.reshape((m, 1)),\n scale_factors_array), x), axis=1)))\n # Make sure that the allocation can fit in the cluster.\n constraints = self.get_base_constraints(x, scale_factors_array)\n cvxprob = cp.Problem(objective, constraints)\n result = cvxprob.solve(solver=self._solver)\n\n if cvxprob.status != \"optimal\":\n print('WARNING: Allocation returned by policy not optimal!')\n\n return super().unflatten(x.value.clip(min=0.0).clip(max=1.0), index)\n\n\nclass MaxMinFairnessPolicyWithPacking(PolicyWithPacking):\n\n def __init__(self, solver):\n PolicyWithPacking.__init__(self, solver)\n self._name = 'MaxMinFairness_Packing'\n self._proportional_policy = ProportionalPolicy()\n\n def get_allocation_using_job_type_throughputs(\n self, unflattened_throughputs, job_id_to_job_type_key,\n scale_factors, unflattened_priority_weights, cluster_spec):\n job_ids = sorted(job_id_to_job_type_key.keys())\n if len(job_ids) == 0:\n return None\n job_type_keys = sorted(unflattened_throughputs.keys())\n worker_types = sorted(cluster_spec.keys())\n num_workers = \\\n [cluster_spec[worker_type] for worker_type in 
worker_types]\n\n # Create a map from job type to list of job indexes.\n job_type_key_to_job_idx = {}\n for i, job_id in enumerate(job_ids):\n job_type_key = job_id_to_job_type_key[job_id]\n if job_type_key not in job_type_key_to_job_idx:\n job_type_key_to_job_idx[job_type_key] = []\n job_type_key_to_job_idx[job_type_key].append(i)\n\n # Num jobs.\n n = len(job_ids)\n # Num job_types.\n a = len(unflattened_throughputs.keys())\n # Num worker_types.\n m = len(worker_types)\n # Num varibles per job.\n num_vars_per_job = 1 + a\n\n # Set up scale factors.\n flattened_scale_factors = \\\n np.reshape([scale_factors[job_id] for job_id in job_ids], (n, 1))\n scale_factors_array = np.tile(flattened_scale_factors,\n (1, num_vars_per_job * m))\n\n # Set up flattened job type throughputs.\n flattened_throughputs = np.zeros(shape=(a, (1 + a) * m),\n dtype=np.float32)\n for i, job_type_key in enumerate(job_type_keys):\n for k, worker_type in enumerate(worker_types):\n for j, other_job_type_key in enumerate([None] + job_type_keys):\n if j > 0 and other_job_type_key[1] != job_type_key[1]:\n flattened_throughputs[i,k*(1+a)+j] = 0.0\n else:\n flattened_throughputs[i,k*(1+a)+j] = \\\n unflattened_throughputs[job_type_key][worker_type][other_job_type_key]\n\n # Set up masks to avoid double-counting allocation values when\n # computing constraint that the sum of allocation values of each\n # worker type must be <= the number of workers of that worker type.\n # TODO: Change this if we ever consider combinations larger than pairs.\n masks = np.full(shape=(n, num_vars_per_job), fill_value=0.5)\n masks[:,0] = 1.0\n\n # Allocation matrix.\n x = cp.Variable((n, num_vars_per_job * m))\n\n constraints = [\n # All allocation values must be >= 0.\n x >= 0,\n # The sum of allocation values for each job must be <= 1.\n cp.sum(x, axis=1) <= 1\n ]\n\n # The sum of allocation values for each worker type must be <=\n # the number of workers of that type.\n per_worker_type_allocations = []\n for i in range(m):\n relevant_vars = \\\n x[:,i*num_vars_per_job:(i+1)*num_vars_per_job]\n relevant_scale_factors = \\\n scale_factors_array[:,i*num_vars_per_job:(i+1)*num_vars_per_job]\n per_worker_type_allocations.append(\n cp.sum(cp.multiply(relevant_vars,\n cp.multiply(relevant_scale_factors,\n masks))))\n constraints.append(\n cp.hstack(per_worker_type_allocations) <= num_workers)\n\n # Set the following constraints:\n # for all job type pairs a, b:\n # sum of allocation of all jobs of type a paired with type b ==\n # sum of allocation of all jobs of type b paired with type a\n lhs = []\n rhs = []\n for i, job_type_key_0 in enumerate(job_type_keys):\n for j, job_type_key_1 in enumerate(job_type_keys):\n if j <= i:\n continue\n elif job_type_key_0[1] != job_type_key_1[1]:\n continue\n\n # Retrieve the list of jobs of each type.\n job_type_0_jobs = job_type_key_to_job_idx[job_type_key_0]\n job_type_1_jobs = job_type_key_to_job_idx[job_type_key_1]\n\n for k in range(m):\n job_type_0_mask = np.zeros(x.shape)\n job_type_1_mask = np.zeros(x.shape)\n\n # Allocation of job_type_0 jobs when paired with job_type_1\n for job_idx in job_type_0_jobs:\n offset = k * num_vars_per_job + 1 + j\n job_type_0_mask[job_idx,offset] = 1\n\n # Allocation of job_type_1 jobs when paired with job_type_0\n for job_idx in job_type_1_jobs:\n offset = k * num_vars_per_job + 1 + i\n job_type_1_mask[job_idx,offset] = 1\n\n lhs.append(cp.sum(x[job_type_0_mask == 1]))\n rhs.append(cp.sum(x[job_type_1_mask == 1]))\n\n assert (len(lhs) == len(rhs))\n if len(lhs) > 0:\n 
constraints.append(cp.hstack(lhs) == cp.hstack(rhs))\n\n # Add constraints to make all variables of the form i-A where job i\n # is of job type A equal.\n for i, job_type_key in enumerate(job_type_keys):\n for k in range(m):\n same_job_type_vars = []\n job_type_jobs = job_type_key_to_job_idx[job_type_key]\n\n # Find all variables for job-job_type pairs where the job\n # types match.\n offset = k * num_vars_per_job + 1 + i\n for job_idx in job_type_jobs:\n same_job_type_vars.append(x[job_idx, offset])\n\n # Constrain the variables to all be equal.\n c = cp.Variable()\n constraints.append(cp.hstack(same_job_type_vars) == c)\n\n throughputs_no_packed_jobs = np.zeros((len(job_ids), len(worker_types)))\n for i, job_id in enumerate(job_ids):\n job_type_key = job_id_to_job_type_key[job_id]\n for j, worker_type in enumerate(worker_types):\n throughputs_no_packed_jobs[i, j] = \\\n unflattened_throughputs[job_type_key][worker_type][None]\n proportional_throughputs = self._proportional_policy.get_throughputs(\n throughputs_no_packed_jobs,\n (job_ids, worker_types),\n cluster_spec)\n\n # Allocation coefficients.\n all_coefficients = np.zeros((n, num_vars_per_job * m))\n for i, job_id in enumerate(job_ids):\n job_type_key = job_id_to_job_type_key[job_id]\n job_type_idx = job_type_keys.index(job_type_key)\n if len(job_type_key_to_job_idx[job_type_key]) == 1:\n for k, worker_type in enumerate(worker_types):\n offset = k * num_vars_per_job + 1 + job_type_idx\n constraints.append(x[i,offset] == 0.0)\n proportional_throughput = proportional_throughputs[i]\n all_coefficients[i] = \\\n np.multiply(flattened_throughputs[job_type_idx],\n scale_factors_array[i]) /\\\n (unflattened_priority_weights[job_id] * proportional_throughput)\n objective = \\\n cp.Maximize(cp.min(cp.sum(cp.multiply(all_coefficients, x),\n axis=1)))\n\n cvxprob = cp.Problem(objective, constraints)\n result = cvxprob.solve(solver=self._solver)\n\n if cvxprob.status != \"optimal\":\n print('WARNING: Allocation returned by policy not optimal!')\n\n allocation = x.value.clip(min=0.0).clip(max=1.0)\n\n # Unflatten allocation.\n unflattened_allocation = {}\n for i, job_id in enumerate(job_ids):\n unflattened_allocation[job_id] = {}\n for j, worker_type in enumerate(worker_types):\n unflattened_allocation[job_id][worker_type] = {}\n for k, job_type_key in enumerate([None] + job_type_keys):\n unflattened_allocation[job_id][worker_type][job_type_key] = \\\n allocation[i, j * num_vars_per_job + k]\n\n return self.convert_job_type_allocation(unflattened_allocation,\n job_id_to_job_type_key)\n\n def get_allocation(self, unflattened_throughputs, scale_factors,\n unflattened_priority_weights, cluster_spec):\n all_throughputs, index = \\\n self.flatten(d=unflattened_throughputs,\n cluster_spec=cluster_spec,\n priority_weights=unflattened_priority_weights)\n if all_throughputs is None or len(all_throughputs) == 0: return None\n (m, n) = all_throughputs[0].shape\n (job_ids, single_job_ids, worker_types, relevant_combinations) = index\n x = cp.Variable((m, n))\n\n # Row i of scale_factors_array is the scale_factor of job\n # combination i repeated len(worker_types) times.\n scale_factors_array = self.scale_factors_array(\n scale_factors, job_ids, m, n)\n\n throughputs_no_packed_jobs = np.zeros((len(single_job_ids), n))\n for i, single_job_id in enumerate(single_job_ids):\n for j, worker_type in enumerate(worker_types):\n throughputs_no_packed_jobs[i, j] = \\\n unflattened_throughputs[single_job_id][worker_type]\n proportional_throughputs = 
self._proportional_policy.get_throughputs(\n throughputs_no_packed_jobs,\n (single_job_ids, worker_types),\n cluster_spec)\n\n objective_terms = []\n # Multiply throughputs by scale_factors to ensure that scale_factor\n # is taken into account while allocating times to different jobs.\n # A job run on 1 GPU should receive `scale_factor` more time than\n # a job run on `scale_factor` GPUs.\n import scipy.sparse as sp\n idx = []\n tputs = []\n # compute the obejctive in a vectorized fashion\n for i in range(len(all_throughputs)):\n indexes = relevant_combinations[single_job_ids[i]]\n idx += indexes\n proportional_throughput = float(proportional_throughputs[i])\n curr_throughputs = np.multiply(\n all_throughputs[i][indexes],\n scale_factors_array[indexes]) / proportional_throughput\n tputs.append(curr_throughputs)\n\n tputs = sp.csc_matrix(np.vstack(tputs))\n indexed_vars = x[idx]\n realized_tputs = cp.multiply(tputs, indexed_vars)\n # reshape so that the sum of each row gives the throughput\n realized_tputs_mat = cp.reshape(realized_tputs,\n (len(all_throughputs),\n int(np.prod(realized_tputs.shape) / len(all_throughputs))),\n order='C')\n\n objective_fn = cp.min(cp.sum(realized_tputs_mat, axis=1))\n\n objective = cp.Maximize(objective_fn)\n\n # Make sure the allocation can fit in the cluster.\n constraints = self.get_base_constraints(x, single_job_ids,\n scale_factors_array,\n relevant_combinations)\n\n # Explicitly constrain all allocation values with an effective scale\n # factor of 0 to be 0.\n # NOTE: This is not strictly necessary because these allocation values\n # do not affect the optimal allocation for nonzero scale factor\n # combinations.\n for i in range(m):\n for j in range(n):\n if scale_factors_array[i,j] == 0:\n constraints.append(x[i,j] == 0)\n cvxprob = cp.Problem(objective, constraints)\n if self._solver == 'SCS':\n # anderson acceleration is sometimes unstable, and adds\n # significant overhead\n kwargs = {'acceleration_lookback': 0}\n else:\n kwargs = {}\n\n result = cvxprob.solve(solver=self._solver, **kwargs)\n\n if cvxprob.status != \"optimal\":\n print('WARNING: Allocation returned by policy not optimal!')\n\n return self.unflatten(x.value.clip(min=0.0).clip(max=1.0), index)\n" ]
[ [ "numpy.full", "numpy.array", "numpy.reshape", "numpy.zeros", "numpy.tile", "numpy.multiply", "numpy.prod", "numpy.vstack" ] ]
ArabicaCapitalOne/arabica_backend
[ "8810f2907ed2fff2adb033119a42a046bec68240" ]
[ "predict_algorithm.py" ]
[ "# This is the prediction algorithm to foresee the comparative consumption, impletement by TensorFlow\nimport tensorflow as tf\nimport json, csv\nimport output\n\nmonth = []\namount = []\nwith open('data.csv') as datafile:\n\treader = csv.DictReader(datafile)\n\tfor row in reader:\n\t\tmonth.append(float(row['month']))\n\t\tamount.append(float(row['amount']))\n\n# Model parameters\na = tf.Variable([1.], tf.float32)\nb = tf.Variable([1.], tf.float32)\nc = tf.Variable([1.], tf.float32)\n# Model input and output\nx = tf.placeholder(tf.float32)\n# linear_model = a*x**2 + b*x + c\nlinear_model = a*x + b\ny = tf.placeholder(tf.float32)\n# loss\nloss = tf.reduce_sum(tf.square(linear_model - y)) # sum of the squares\n# optimizer\noptimizer = tf.train.GradientDescentOptimizer(0.01)\ntrain = optimizer.minimize(loss)\n# training loop\ninit = tf.global_variables_initializer()\nsess = tf.Session()\nsess.run(init)\nfor i in range(1000):\n # sess.run(train, {x:month, y:amount})\n sess.run(train, {x:month, y:amount})\n\n# evaluate training accuracy\n# curr_a, curr_b, curr_c, curr_loss = sess.run([a, b, c, loss], {x:[1,2,3,4], y:[9,6,4,2]})\n# print(\"a: %s b: %s c: %s loss: %s\"%(curr_a, curr_b, curr_c, curr_loss))\ncurr_a, curr_b, curr_loss = sess.run([a, b, loss], {x:month, y:amount})\nprint(\"a: %s b: %s loss: %s\"%(curr_a, curr_b, curr_loss))\n" ]
[ [ "tensorflow.Session", "tensorflow.Variable", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.square", "tensorflow.train.GradientDescentOptimizer" ] ]
mszhanyi/onnxruntime
[ "6f85d3e5c81c919022ac4a77e5a051da8518b15d", "6f85d3e5c81c919022ac4a77e5a051da8518b15d" ]
[ "orttraining/orttraining/test/python/orttraining_test_ortmodule_deepspeed_zero_stage_1.py", "onnxruntime/test/providers/cpu/rnn/LSTM.py" ]
[ "\"\"\"Test for a simple ORTModule using the high-level DeepSpeed API.\n\nTo run on the local GPU(s):\n\n```\n$ pip install deepspeed\n$ deepspeed orttraining_test_ortmodule_deepspeed_zero_stage_1.py \\\n --deepspeed_config=orttraining_test_ortmodule_deepspeed_zero_stage_1_config.json\n```\n\"\"\"\nimport argparse\nimport torch\nimport time\nfrom torchvision import datasets, transforms\nimport torch.distributed as dist\n\nimport onnxruntime\nfrom onnxruntime.training.ortmodule import ORTModule, DebugOptions, LogLevel\n\nimport deepspeed\n\n\nclass NeuralNet(torch.nn.Module):\n def __init__(self, input_size, hidden_size, num_classes):\n super(NeuralNet, self).__init__()\n\n self.fc1 = torch.nn.Linear(input_size, hidden_size)\n self.relu = torch.nn.ReLU()\n self.fc2 = torch.nn.Linear(hidden_size, num_classes)\n\n def forward(self, input1):\n out = self.fc1(input1)\n out = self.relu(out)\n out = self.fc2(out)\n return out\n\n\ndef train(args, model, device, optimizer, loss_fn, train_loader, epoch):\n print(\n \"\\n======== Epoch {:} / {:} with batch size {:} ========\".format(\n epoch + 1, args.epochs, model.train_batch_size()\n )\n )\n model.train()\n # Measure how long the training epoch takes.\n t0 = time.time()\n start_time = t0\n\n # Reset the total loss for this epoch.\n total_loss = 0\n\n for iteration, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n data = data.reshape(data.shape[0], -1).half()\n\n optimizer.zero_grad()\n probability = model(data)\n\n if args.view_graphs:\n import torchviz\n\n pytorch_backward_graph = torchviz.make_dot(probability, params=dict(list(model.named_parameters())))\n pytorch_backward_graph.view()\n\n loss = loss_fn(probability, target)\n # Accumulate the training loss over all of the batches so that we can\n # calculate the average loss at the end. 
`loss` is a Tensor containing a\n # single value; the `.item()` function just returns the Python value\n # from the tensor.\n total_loss += loss.item()\n\n model.backward(loss)\n model.step()\n\n # Stats\n if iteration % args.log_interval == 0:\n curr_time = time.time()\n elapsed_time = curr_time - start_time\n print(\n \"[{:5}/{:5} ({:2.0f}%)]\\tLoss: {:.6f}\\tExecution time: {:.4f}\".format(\n iteration * len(data),\n len(train_loader.dataset),\n 100.0 * iteration / len(train_loader),\n loss,\n elapsed_time,\n )\n )\n start_time = curr_time\n\n # Calculate the average loss over the training data.\n avg_train_loss = total_loss / len(train_loader)\n\n epoch_time = time.time() - t0\n print(\"\\n Average training loss: {0:.2f}\".format(avg_train_loss))\n print(\" Training epoch took: {:.4f}s\".format(epoch_time))\n return epoch_time\n\n\ndef test(args, model, device, loss_fn, test_loader):\n model.eval()\n\n t0 = time.time()\n\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n data = data.reshape(data.shape[0], -1).half()\n output = model(data)\n\n # Stats\n test_loss += loss_fn(output, target, False).item()\n pred = output.argmax(dim=1, keepdim=True)\n correct += pred.eq(target.view_as(pred)).sum().item()\n test_loss /= len(test_loader.dataset)\n print(\n \"\\nTest set: Batch size: {:}, Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n\".format(\n args.test_batch_size,\n test_loss,\n correct,\n len(test_loader.dataset),\n 100.0 * correct / len(test_loader.dataset),\n )\n )\n\n # Report the final accuracy for this validation run.\n epoch_time = time.time() - t0\n print(\" Accuracy: {0:.2f}\".format(float(correct) / len(test_loader.dataset)))\n print(\" Validation took: {:.4f}s\".format(epoch_time))\n return epoch_time\n\n\ndef my_loss(x, target, is_train=True):\n if is_train:\n return torch.nn.CrossEntropyLoss()(x, target)\n else:\n return torch.nn.CrossEntropyLoss(reduction=\"sum\")(x, target)\n\n\ndef main():\n # Training settings\n parser = argparse.ArgumentParser(description=\"PyTorch MNIST Example\")\n parser.add_argument(\n \"--train-steps\",\n type=int,\n default=-1,\n metavar=\"N\",\n help=\"number of steps to train. 
Set -1 to run through whole dataset (default: -1)\",\n )\n parser.add_argument(\"--lr\", type=float, default=0.01, metavar=\"LR\", help=\"learning rate (default: 0.01)\")\n parser.add_argument(\n \"--batch-size\", type=int, default=32, metavar=\"N\", help=\"input batch size for training (default: 32)\"\n )\n parser.add_argument(\n \"--test-batch-size\", type=int, default=64, metavar=\"N\", help=\"input batch size for testing (default: 64)\"\n )\n parser.add_argument(\"--no-cuda\", action=\"store_true\", default=False, help=\"disables CUDA training\")\n parser.add_argument(\"--seed\", type=int, default=42, metavar=\"S\", help=\"random seed (default: 42)\")\n parser.add_argument(\"--pytorch-only\", action=\"store_true\", default=False, help=\"disables ONNX Runtime training\")\n parser.add_argument(\n \"--log-interval\",\n type=int,\n default=300,\n metavar=\"N\",\n help=\"how many batches to wait before logging training status (default: 300)\",\n )\n parser.add_argument(\"--view-graphs\", action=\"store_true\", default=False, help=\"views forward and backward graphs\")\n parser.add_argument(\n \"--export-onnx-graphs\", action=\"store_true\", default=False, help=\"export ONNX graphs to current directory\"\n )\n parser.add_argument(\"--epochs\", type=int, default=10, metavar=\"N\", help=\"number of epochs to train (default: 10)\")\n parser.add_argument(\n \"--log-level\",\n choices=[\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\", \"CRITICAL\"],\n default=\"WARNING\",\n help=\"Log level (default: WARNING)\",\n )\n parser.add_argument(\"--data-dir\", type=str, default=\"./mnist\", help=\"Path to the mnist data directory\")\n\n # DeepSpeed-related settings\n parser.add_argument(\"--local_rank\", type=int, required=True, help=\"local rank passed from distributed launcher\")\n parser = deepspeed.add_config_arguments(parser)\n\n args = parser.parse_args()\n\n # Common setup\n torch.manual_seed(args.seed)\n onnxruntime.set_seed(args.seed)\n\n # TODO: CUDA support is broken due to copying from PyTorch into ORT\n if not args.no_cuda and torch.cuda.is_available():\n device = \"cuda:\" + str(args.local_rank)\n else:\n device = \"cpu\"\n\n ## Data loader\n\n dist.init_process_group(backend=\"nccl\")\n if args.local_rank == 0:\n # download only once on rank 0\n datasets.MNIST(args.data_dir, download=True)\n dist.barrier()\n train_set = datasets.MNIST(\n args.data_dir,\n train=True,\n transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]),\n )\n\n test_loader = None\n if args.test_batch_size > 0:\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST(\n args.data_dir,\n train=False,\n transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]),\n ),\n batch_size=args.test_batch_size,\n shuffle=True,\n )\n\n # Model architecture\n model = NeuralNet(input_size=784, hidden_size=500, num_classes=10).to(device)\n if not args.pytorch_only:\n print(\"Training MNIST on ORTModule....\")\n\n # Set log level\n log_level_mapping = {\n \"DEBUG\": LogLevel.VERBOSE,\n \"INFO\": LogLevel.INFO,\n \"WARNING\": LogLevel.WARNING,\n \"ERROR\": LogLevel.ERROR,\n \"CRITICAL\": LogLevel.FATAL,\n }\n log_level = log_level_mapping.get(args.log_level.upper(), None)\n if not isinstance(log_level, LogLevel):\n raise ValueError(\"Invalid log level: %s\" % args.log_level)\n debug_options = DebugOptions(log_level=log_level, save_onnx=args.export_onnx_graphs, onnx_prefix=\"MNIST\")\n\n model = ORTModule(model, debug_options)\n\n else:\n print(\"Training 
MNIST on vanilla PyTorch....\")\n\n model, optimizer, train_loader, _ = deepspeed.initialize(\n args=args,\n model=model,\n model_parameters=[p for p in model.parameters() if p.requires_grad],\n training_data=train_set,\n )\n\n # Train loop\n total_training_time, total_test_time, epoch_0_training = 0, 0, 0\n for epoch in range(0, args.epochs):\n total_training_time += train(args, model, device, optimizer, my_loss, train_loader, epoch)\n if not args.pytorch_only and epoch == 0:\n epoch_0_training = total_training_time\n if args.test_batch_size > 0:\n total_test_time += test(args, model, device, my_loss, test_loader)\n\n print(\"\\n======== Global stats ========\")\n if not args.pytorch_only:\n estimated_export = 0\n if args.epochs > 1:\n estimated_export = epoch_0_training - (total_training_time - epoch_0_training) / (args.epochs - 1)\n print(\" Estimated ONNX export took: {:.4f}s\".format(estimated_export))\n else:\n print(\" Estimated ONNX export took: Estimate available when epochs > 1 only\")\n print(\" Accumulated training without export took: {:.4f}s\".format(total_training_time - estimated_export))\n print(\" Accumulated training took: {:.4f}s\".format(total_training_time))\n print(\" Accumulated validation took: {:.4f}s\".format(total_test_time))\n\n\nif __name__ == \"__main__\":\n main()\n", "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom typing import Any, Tuple\n\nimport numpy as np # type: ignore\n\n# import onnx\n# from ..base import Base\n# from . import expect\n\nDebugOutput = True\nnp.set_printoptions(suppress=True) # , precision=16, floatmode='maxprec')\n\n\ndef print_with_shape(name, a, force_output=False):\n if force_output or DebugOutput:\n print(name + \" [shape: \", a.shape, \"]\\n\", a)\n\n\ndef print_results(Y, Y_h, Y_c):\n print(\"*************************\")\n print_with_shape(\"Y\", Y, True)\n print(\"---------\")\n print_with_shape(\"Y_h\", Y_h, True)\n print(\"---------\")\n print_with_shape(\"Y_c\", Y_c, True)\n print(\"*************************\")\n\n\nclass LSTM_Helper:\n def __init__(self, **params): # type: (*Any) -> None\n\n required_inputs = [\"X\", \"W\", \"R\"]\n for i in required_inputs:\n assert i in params, \"Missing Required Input: {0}\".format(i)\n\n X = params[\"X\"]\n W = params[\"W\"]\n R = params[\"R\"]\n\n num_directions = W.shape[0]\n sequence_length = X.shape[0]\n batch_size = X.shape[1]\n hidden_size = R.shape[-1]\n\n B = (\n params[\"B\"]\n if \"B\" in params\n else np.zeros(num_directions * 8 * hidden_size).reshape(num_directions, 8 * hidden_size)\n )\n P = (\n params[\"P\"]\n if \"P\" in params\n else np.zeros(num_directions * 3 * hidden_size).reshape(num_directions, 3 * hidden_size)\n )\n h_0 = (\n params[\"initial_h\"]\n if \"initial_h\" in params\n else np.zeros((num_directions, batch_size, hidden_size)).reshape(num_directions, batch_size, hidden_size)\n )\n c_0 = (\n params[\"initial_c\"]\n if \"initial_c\" in params\n else np.zeros((num_directions, batch_size, hidden_size)).reshape(num_directions, batch_size, hidden_size)\n )\n\n f = params[\"f\"] if \"f\" in params else ActivationFuncs.sigmoid\n g = params[\"g\"] if \"g\" in params else ActivationFuncs.tanh\n h = params[\"h\"] if \"h\" in params else ActivationFuncs.tanh\n input_forget = params[\"input_forget\"] if \"input_forget\" in params else False\n clip = params[\"clip\"] if \"clip\" in params else 9999.0\n\n self.direction = 
params[\"direction\"] if \"direction\" in params else \"forward\"\n\n if num_directions == 1:\n if self.direction == \"forward\":\n self.one = OneDirectionLSTM(X, W, R, B, P, h_0, c_0, f, g, h, input_forget, clip)\n else:\n # flip input so we process in reverse\n self.one = OneDirectionLSTM(np.flip(X, 0), W, R, B, P, h_0, c_0, f, g, h, input_forget, clip)\n\n self.two = None\n\n else:\n # split the inputs which have per direction rows\n Wfw, Wbw = np.vsplit(W, 2)\n Rfw, Rbw = np.vsplit(R, 2)\n Bfw, Bbw = np.vsplit(B, 2)\n Pfw, Pbw = np.vsplit(P, 2)\n h_0fw, h_0bw = np.vsplit(h_0, 2)\n c_0fw, c_0bw = np.vsplit(c_0, 2)\n\n self.one = OneDirectionLSTM(X, Wfw, Rfw, Bfw, Pfw, h_0fw, c_0fw, f, g, h, input_forget, clip)\n self.two = OneDirectionLSTM(\n np.flip(X, 0),\n Wbw,\n Rbw,\n Bbw,\n Pfw,\n h_0bw,\n c_0fw,\n f,\n g,\n h,\n input_forget,\n clip,\n )\n\n def run(self):\n\n if self.direction == \"bidirectional\":\n f_output, f_Y_h, f_Y_c = self.one.execute()\n r_output, r_Y_h, r_Y_c = self.two.execute()\n\n # flip reverse output it matches the original input order\n r_output_orig_input_order = np.flip(r_output, 0)\n\n # create merged output by merging the forward and reverse rows for seq_length\n # 0 rows, 2 directions, batch size, hidden_size\n seq_length = f_output.shape[0]\n batch_size = f_output.shape[2]\n hidden_size = f_output.shape[3]\n\n output = np.empty((0, 2, batch_size, hidden_size), np.float32)\n # Y_h = np.empty((0, 2, batch_size, hidden_size), np.float32)\n # Y_c = np.empty((0, 2, hidden_size, hidden_size), np.float32)\n for x in range(0, seq_length):\n output = np.append(output, f_output[x])\n output = np.append(output, r_output_orig_input_order[x])\n\n output = output.reshape(seq_length, 2, batch_size, hidden_size)\n\n Y_h = np.append(f_Y_h, r_Y_h)\n Y_c = np.append(f_Y_c, r_Y_c)\n\n else:\n output, Y_h, Y_c = self.one.execute()\n if self.direction == \"reverse\":\n # flip so it's back in the original order of the inputs\n output = np.flip(output, 0)\n\n return output, Y_h, Y_c\n\n\nclass ActivationFuncs:\n @staticmethod\n def sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\n @staticmethod\n def tanh(x):\n return np.tanh(x)\n\n\nclass OneDirectionLSTM:\n def __init__(\n self,\n X,\n W,\n R,\n B,\n P,\n initial_h,\n initial_c,\n f=ActivationFuncs.sigmoid,\n g=ActivationFuncs.tanh,\n h=ActivationFuncs.tanh,\n input_forget=False,\n clip=9999.0,\n ):\n\n self.X = X\n # remove num_directions axis for W, R, B, P, H_0, C_0\n self.W = np.squeeze(W, axis=0)\n self.R = np.squeeze(R, axis=0)\n self.B = np.squeeze(B, axis=0)\n self.P = np.squeeze(P, axis=0)\n self.h_0 = np.squeeze(initial_h, axis=0)\n self.c_0 = np.squeeze(initial_c, axis=0)\n\n print_with_shape(\"X\", self.X)\n print_with_shape(\"W\", self.W)\n print_with_shape(\"R\", self.R)\n print_with_shape(\"B\", self.B)\n print_with_shape(\"P\", self.P)\n print_with_shape(\"h_0\", self.h_0)\n print_with_shape(\"c_0\", self.c_0)\n\n self.f = f\n self.g = g\n self.h = h\n self.input_forget = input_forget\n self.clip = clip\n\n def execute(self): # type: () -> Tuple[np.ndarray, np.ndarray]\n\n [p_i, p_o, p_f] = np.split(self.P, 3)\n h_list = []\n\n H_t = self.h_0\n C_t = self.c_0\n\n for x in np.split(self.X, self.X.shape[0], axis=0):\n print_with_shape(\"Xt1\", x)\n\n # gates = np.dot(x, np.transpose(self.W)) + np.dot(H_t, np.transpose(self.R)) + np.add(*np.split(self.B, 2))\n\n print_with_shape(\"W^T\", np.transpose(self.W))\n # t0 == t-1, t1 == current\n Xt1_W = np.dot(x, np.transpose(self.W))\n print_with_shape(\"Xt1_W^T\", Xt1_W)\n 
Ht0_R = np.dot(H_t, np.transpose(self.R))\n print_with_shape(\"Ht-1*R\", Ht0_R)\n WbRb = np.add(*np.split(self.B, 2))\n print_with_shape(\"Wb + Rb\", WbRb)\n gates = Xt1_W + Ht0_R + WbRb\n\n # input to it, ft, ct, ot\n it_in, ot_in, ft_in, ct_in = np.split(gates, 4, -1)\n print_with_shape(\"it_in\", it_in)\n print_with_shape(\"ot_in\", ot_in)\n print_with_shape(\"ft_in\", ft_in)\n print_with_shape(\"ct_in\", ct_in)\n\n i = self.f(np.clip((it_in + p_i * C_t), -self.clip, self.clip))\n if self.input_forget:\n f = 1.0 - i # this is what ONNXRuntime does\n else:\n f = self.f(np.clip((ft_in + p_f * C_t), -self.clip, self.clip))\n c = self.g(np.clip(ct_in, -self.clip, self.clip))\n C = f * C_t + i * c\n o = self.f(np.clip((ot_in + p_o * C), -self.clip, self.clip))\n H = o * self.h(C)\n h_list.append(H)\n H_t = H\n C_t = C\n\n print_with_shape(\"i\", i)\n print_with_shape(\"f\", f)\n print_with_shape(\"c\", c)\n print_with_shape(\"o\", o)\n print_with_shape(\"C\", C)\n print_with_shape(\"H\", i)\n\n concatenated = np.concatenate(h_list)\n output = np.expand_dims(concatenated, 1)\n return output, h_list[-1], C\n\n\nclass LSTM: # Base):\n @staticmethod\n def SimpleWeightsNoBiasTwoRows(direction): # type: () -> None\n\n print(LSTM.SimpleWeightsNoBiasTwoRows.__name__ + \" direction=\" + direction)\n\n seq_length = 2\n batch_size = 2\n input_size = 1\n hidden_size = 3\n number_of_gates = 4\n\n input = np.array([[[1.0], [2.0]], [[10.0], [11.0]]]).astype(np.float32)\n\n W = (\n np.array([0.1, 0.2, 0.3, 0.4, 1, 2, 3, 4, 10, 11, 12, 13])\n .astype(np.float32)\n .reshape(1, number_of_gates * hidden_size, input_size)\n )\n\n weight_scale = 0.1\n R = weight_scale * np.ones((1, number_of_gates * hidden_size, hidden_size)).astype(np.float32)\n\n if direction == \"bidirectional\":\n W = W = np.tile(W, (2, 1)).reshape(2, number_of_gates * hidden_size, input_size)\n R = R = np.tile(R, (2, 1)).reshape(2, number_of_gates * hidden_size, hidden_size)\n\n lstm = LSTM_Helper(X=input, W=W, R=R, direction=direction)\n\n Y, Y_h, Y_c = lstm.run()\n # expect(node, inputs=[input, W, R], outputs=[Y_h.astype(np.float32)], name='test_lstm_defaults')\n print_results(Y, Y_h, Y_c)\n\n @staticmethod\n def LargeBatchWithClip(clip):\n\n print(LSTM.LargeBatchWithClip.__name__ + \" clip=\" + str(clip))\n\n seq_length = 2\n batch_size = 32\n input_size = 1\n hidden_size = 3\n number_of_gates = 4\n\n # sequentialvalues from 1 to 32\n input = (\n np.array(range(1, seq_length * batch_size + 1, 1))\n .astype(np.float32)\n .reshape(seq_length, batch_size, input_size)\n )\n\n W = (\n np.array([0.1, 0.2, 0.3, 0.4, 1, 2, 3, 4, 10, 11, 12, 13])\n .astype(np.float32)\n .reshape(1, number_of_gates * hidden_size, input_size)\n )\n\n weight_scale = 0.1\n R = weight_scale * np.ones((1, number_of_gates * hidden_size, hidden_size)).astype(np.float32)\n\n lstm = LSTM_Helper(X=input, W=W, R=R, clip=clip)\n\n Y, Y_h, Y_c = lstm.run()\n print_results(Y, Y_h, Y_c)\n\n @staticmethod\n def BatchParallelFalseSeqLengthGreaterThanOne():\n print(LSTM.BatchParallelFalseSeqLengthGreaterThanOne.__name__)\n\n seq_length = 2\n batch_size = 1\n input_size = 1\n hidden_size = 2\n number_of_gates = 4\n\n input = np.array([1, 2]).astype(np.float32).reshape(seq_length, batch_size, input_size)\n\n W = (\n np.array([0.1, 0.2, 0.3, 0.4, 1, 2, 3, 4])\n .astype(np.float32)\n .reshape(1, number_of_gates * hidden_size, input_size)\n )\n\n weight_scale = 0.1\n R = weight_scale * np.ones((1, number_of_gates * hidden_size, hidden_size)).astype(np.float32)\n\n lstm = 
LSTM_Helper(X=input, W=W, R=R)\n\n Y, Y_h, Y_c = lstm.run()\n print_results(Y, Y_h, Y_c)\n\n @staticmethod\n def export_initial_bias(): # type: () -> None\n\n print(LSTM.export_initial_bias.__name__)\n\n input = np.array([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]).astype(np.float32)\n\n input_size = 3\n hidden_size = 4\n weight_scale = 0.1\n custom_bias = 0.1\n number_of_gates = 4\n\n # node = onnx.helper.make_node(\n # 'LSTM',\n # inputs=['X', 'W', 'R', 'B'],\n # outputs=['', 'Y'],\n # hidden_size=hidden_size\n # )\n\n W = weight_scale * np.ones((1, number_of_gates * hidden_size, input_size)).astype(np.float32)\n R = weight_scale * np.ones((1, number_of_gates * hidden_size, hidden_size)).astype(np.float32)\n\n # Adding custom bias\n W_B = custom_bias * np.ones((1, number_of_gates * hidden_size)).astype(np.float32)\n R_B = np.zeros((1, number_of_gates * hidden_size)).astype(np.float32)\n B = np.concatenate((W_B, R_B), 1)\n\n lstm = LSTM_Helper(X=input, W=W, R=R, B=B)\n Y, Y_h, Y_c = lstm.run()\n print_results(Y, Y_h, Y_c)\n # expect(node, inputs=[input, W, R, B], outputs=[Y_h.astype(np.float32)], name='test_lstm_with_initial_bias')\n\n @staticmethod\n def export_peepholes(): # type: () -> None\n input = np.array([[[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]]]).astype(np.float32)\n\n input_size = 4\n hidden_size = 3\n weight_scale = 0.1\n number_of_gates = 4\n number_of_peepholes = 3\n\n # node = onnx.helper.make_node(\n # 'LSTM',\n # inputs=['X', 'W', 'R', 'B', 'sequence_lens', 'initial_h', 'initial_c', 'P'],\n # outputs=['', 'Y'],\n # hidden_size=hidden_size\n # )\n\n # Initializing Inputs\n W = weight_scale * np.ones((1, number_of_gates * hidden_size, input_size)).astype(np.float32)\n R = weight_scale * np.ones((1, number_of_gates * hidden_size, hidden_size)).astype(np.float32)\n B = np.zeros((1, 2 * number_of_gates * hidden_size)).astype(np.float32)\n seq_lens = np.repeat(input.shape[0], input.shape[1]).astype(np.int32)\n init_h = np.zeros((1, input.shape[1], hidden_size)).astype(np.float32)\n init_c = np.zeros((1, input.shape[1], hidden_size)).astype(np.float32)\n P = weight_scale * np.ones((1, number_of_peepholes * hidden_size)).astype(np.float32)\n\n lstm = LSTM_Helper(X=input, W=W, R=R, B=B, P=P, initial_c=init_c, initial_h=init_h)\n Y, Y_h, Y_c = lstm.run()\n print_results(Y, Y_h, Y_c)\n # expect(node, inputs=[input, W, R, B, seq_lens, init_h, init_c, P], outputs=[Y_h.astype(np.float32)],\n # name='test_lstm_with_peepholes')\n\n\nclass ONNXRuntimeTestContext:\n\n hidden_size = 2\n input_size = 2\n\n @staticmethod\n def OneDirectionWeights():\n\n num_directions = 1\n hidden_size = ONNXRuntimeTestContext.hidden_size\n input_size = ONNXRuntimeTestContext.input_size\n\n W = (\n np.array(\n [\n -0.494659,\n 0.0453352,\n -0.487793,\n 0.417264,\n -0.0175329,\n 0.489074,\n -0.446013,\n 0.414029,\n -0.0091708,\n -0.255364,\n -0.106952,\n -0.266717,\n -0.0888852,\n -0.428709,\n -0.283349,\n 0.208792,\n ]\n )\n .reshape(num_directions, 4 * hidden_size, input_size)\n .astype(np.float32)\n )\n\n R = (\n np.array(\n [\n 0.146626,\n -0.0620289,\n -0.0815302,\n 0.100482,\n -0.219535,\n -0.306635,\n -0.28515,\n -0.314112,\n -0.228172,\n 0.405972,\n 0.31576,\n 0.281487,\n -0.394864,\n 0.42111,\n -0.386624,\n -0.390225,\n ]\n )\n .reshape(num_directions, 4 * hidden_size, hidden_size)\n .astype(np.float32)\n )\n\n P = (\n np.array([0.2345, 0.5235, 0.4378, 0.3475, 0.8927, 0.3456])\n .reshape(num_directions, 3 * hidden_size)\n .astype(np.float32)\n )\n\n # // [8*hidden]\n B = (\n np.array(\n 
[\n 0.381619,\n 0.0323954,\n -0.14449,\n 0.420804,\n -0.258721,\n 0.45056,\n -0.250755,\n 0.0967895,\n # peephole bias\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n ]\n )\n .reshape(num_directions, 8 * hidden_size)\n .astype(np.float32)\n )\n\n return W, R, B, P\n\n @staticmethod\n def BidirectionalWeights():\n\n hidden_size = ONNXRuntimeTestContext.hidden_size\n input_size = ONNXRuntimeTestContext.input_size\n\n W1, R1, B1, P1 = ONNXRuntimeTestContext.OneDirectionWeights()\n\n W = np.tile(W1, (2, 1)).reshape(2, 4 * hidden_size, input_size)\n R = np.tile(R1, (2, 1)).reshape(2, 4 * hidden_size, hidden_size)\n B = np.tile(B1, (2, 1))\n P = np.tile(P1, (2, 1))\n\n return W, R, B, P\n\n @staticmethod\n def DefaultInput():\n seq_length = 2\n batch_size = 1\n input_size = 2\n\n input = (\n np.array([-0.455351, -0.276391, -0.185934, -0.269585])\n .reshape(seq_length, batch_size, input_size)\n .astype(np.float32)\n )\n\n return input\n\n\nclass ONNXRuntimeUnitTests:\n @staticmethod\n def ONNXRuntime_TestLSTMBidirectionalBasic():\n print(ONNXRuntimeUnitTests.ONNXRuntime_TestLSTMBidirectionalBasic.__name__)\n\n input = ONNXRuntimeTestContext.DefaultInput()\n W, R, B, P = ONNXRuntimeTestContext.BidirectionalWeights()\n lstm = LSTM_Helper(X=input, W=W, R=R, B=B, P=P, direction=\"bidirectional\")\n Y, Y_h, Y_c = lstm.run()\n print_results(Y, Y_h, Y_c)\n\n @staticmethod\n def ONNXRuntime_TestLSTMForwardNoBiasUsePeepholes():\n print(ONNXRuntimeUnitTests.ONNXRuntime_TestLSTMForwardNoBiasUsePeepholes.__name__)\n input = ONNXRuntimeTestContext.DefaultInput()\n W, R, B, P = ONNXRuntimeTestContext.OneDirectionWeights()\n lstm = LSTM_Helper(X=input, W=W, R=R, P=P) # no bias\n Y, Y_h, Y_c = lstm.run()\n print_results(Y, Y_h, Y_c)\n\n @staticmethod\n def ONNXRuntime_TestLSTMForwardInputForget():\n print(ONNXRuntimeUnitTests.ONNXRuntime_TestLSTMForwardInputForget.__name__)\n\n input = ONNXRuntimeTestContext.DefaultInput()\n W, R, B, P = ONNXRuntimeTestContext.OneDirectionWeights()\n lstm = LSTM_Helper(X=input, W=W, R=R, B=B, P=P, input_forget=True)\n Y, Y_h, Y_c = lstm.run()\n print_results(Y, Y_h, Y_c)\n\n @staticmethod\n def ONNXRuntime_TestLSTMForwardClip():\n print(ONNXRuntimeUnitTests.ONNXRuntime_TestLSTMForwardClip.__name__)\n\n input = ONNXRuntimeTestContext.DefaultInput()\n W, R, B, P = ONNXRuntimeTestContext.OneDirectionWeights()\n lstm = LSTM_Helper(X=input, W=W, R=R, B=B, P=P, clip=0.1)\n Y, Y_h, Y_c = lstm.run()\n print_results(Y, Y_h, Y_c)\n\n @staticmethod\n def ONNXRuntime_TestLSTMBackward():\n print(ONNXRuntimeUnitTests.ONNXRuntime_TestLSTMBackward.__name__)\n\n input = ONNXRuntimeTestContext.DefaultInput()\n W, R, B, P = ONNXRuntimeTestContext.OneDirectionWeights()\n lstm = LSTM_Helper(X=input, W=W, R=R, B=B, P=P, direction=\"reverse\")\n Y, Y_h, Y_c = lstm.run()\n print_results(Y, Y_h, Y_c)\n\n @staticmethod\n def ONNXRuntime_TestLSTMForwardHiddenState():\n print(ONNXRuntimeUnitTests.ONNXRuntime_TestLSTMForwardHiddenState.__name__)\n\n input = ONNXRuntimeTestContext.DefaultInput()\n W, R, B, P = ONNXRuntimeTestContext.OneDirectionWeights()\n initial_h = np.array([0.34, 0.72]).reshape(1, 1, 2).astype(np.float32)\n lstm = LSTM_Helper(X=input, W=W, R=R, B=B, initial_h=initial_h)\n Y, Y_h, Y_c = lstm.run()\n print_results(Y, Y_h, Y_c)\n\n @staticmethod\n def ONNXRuntime_TestLSTMForwardCellState():\n print(ONNXRuntimeUnitTests.ONNXRuntime_TestLSTMForwardCellState.__name__)\n\n input = ONNXRuntimeTestContext.DefaultInput()\n W, R, B, P = ONNXRuntimeTestContext.OneDirectionWeights()\n 
initial_h = np.array([0.34, 0.72]).reshape(1, 1, 2).astype(np.float32)\n initial_c = np.array([0.63, 0.21]).reshape(1, 1, 2).astype(np.float32)\n lstm = LSTM_Helper(X=input, W=W, R=R, B=B, initial_h=initial_h, initial_c=initial_c)\n Y, Y_h, Y_c = lstm.run()\n print_results(Y, Y_h, Y_c)\n\n @staticmethod\n def ONNXRuntime_TestLSTMActivation():\n\n print(ONNXRuntimeUnitTests.ONNXRuntime_TestLSTMActivation.__name__)\n\n input = ONNXRuntimeTestContext.DefaultInput()\n W, R, B, P = ONNXRuntimeTestContext.OneDirectionWeights()\n lstm = LSTM_Helper(\n X=input,\n W=W,\n R=R,\n B=B,\n f=ActivationFuncs.tanh,\n g=ActivationFuncs.sigmoid,\n h=ActivationFuncs.tanh,\n )\n Y, Y_h, Y_c = lstm.run()\n print_results(Y, Y_h, Y_c)\n\n @staticmethod\n def ONNXRuntime_TestLSTMBatchReallocation():\n\n print(ONNXRuntimeUnitTests.ONNXRuntime_TestLSTMBatchReallocation.__name__)\n seq_length = 2\n batch_size = 1\n input_size = 2\n\n input = ONNXRuntimeTestContext.DefaultInput()\n W, R, B, P = ONNXRuntimeTestContext.OneDirectionWeights()\n lstm = LSTM_Helper(\n X=input,\n W=W,\n R=R,\n B=B,\n f=ActivationFuncs.tanh,\n g=ActivationFuncs.sigmoid,\n h=ActivationFuncs.tanh,\n )\n Y, Y_h, Y_c = lstm.run()\n print_results(Y, Y_h, Y_c)\n print(\"===============\")\n\n batch_size = 3\n input = (\n np.array(\n [\n -0.455351,\n -0.476391,\n -0.555351,\n -0.376391,\n -0.655351,\n -0.276391,\n -0.185934,\n -0.869585,\n -0.285934,\n -0.769585,\n -0.385934,\n -0.669585,\n ]\n )\n .reshape(seq_length, batch_size, input_size)\n .astype(np.float32)\n )\n\n W, R, B, P = ONNXRuntimeTestContext.OneDirectionWeights()\n lstm = LSTM_Helper(\n X=input,\n W=W,\n R=R,\n B=B,\n f=ActivationFuncs.tanh,\n g=ActivationFuncs.sigmoid,\n h=ActivationFuncs.tanh,\n )\n Y, Y_h, Y_c = lstm.run()\n print_results(Y, Y_h, Y_c)\n\n @staticmethod\n def ONNXRuntime_TestLSTMOutputWrite():\n\n print(ONNXRuntimeUnitTests.ONNXRuntime_TestLSTMOutputWrite.__name__)\n seq_length = 2\n batch_size = 1\n input_size = 2\n\n input = ONNXRuntimeTestContext.DefaultInput()\n W, R, B, P = ONNXRuntimeTestContext.BidirectionalWeights()\n lstm = LSTM_Helper(\n X=input,\n W=W,\n R=R,\n B=B,\n direction=\"bidirectional\",\n f=ActivationFuncs.tanh,\n g=ActivationFuncs.sigmoid,\n h=ActivationFuncs.tanh,\n )\n Y, Y_h, Y_c = lstm.run()\n print_results(Y, Y_h, Y_c)\n\n print(\"===============\")\n\n batch_size = 3\n input = (\n np.array(\n [\n -0.455351,\n -0.776391,\n -0.355351,\n -0.576391,\n -0.255351,\n -0.376391,\n -0.185934,\n -0.169585,\n -0.285934,\n -0.469585,\n -0.385934,\n -0.669585,\n ]\n )\n .reshape(seq_length, batch_size, input_size)\n .astype(np.float32)\n )\n\n W, R, B, P = ONNXRuntimeTestContext.BidirectionalWeights()\n lstm = LSTM_Helper(\n X=input,\n W=W,\n R=R,\n B=B,\n direction=\"bidirectional\",\n f=ActivationFuncs.tanh,\n g=ActivationFuncs.sigmoid,\n h=ActivationFuncs.tanh,\n )\n Y, Y_h, Y_c = lstm.run()\n print_results(Y, Y_h, Y_c)\n\n\nDebugOutput = False\nLSTM.SimpleWeightsNoBiasTwoRows(\"forward\")\nLSTM.SimpleWeightsNoBiasTwoRows(\"reverse\")\nLSTM.SimpleWeightsNoBiasTwoRows(\"bidirectional\")\nLSTM.LargeBatchWithClip(99999.0) # too large to affect 
output\nLSTM.LargeBatchWithClip(4.0)\nLSTM.BatchParallelFalseSeqLengthGreaterThanOne()\nONNXRuntimeUnitTests.ONNXRuntime_TestLSTMBidirectionalBasic()\nONNXRuntimeUnitTests.ONNXRuntime_TestLSTMForwardNoBiasUsePeepholes()\nONNXRuntimeUnitTests.ONNXRuntime_TestLSTMForwardInputForget()\nONNXRuntimeUnitTests.ONNXRuntime_TestLSTMForwardClip()\nONNXRuntimeUnitTests.ONNXRuntime_TestLSTMBackward()\nONNXRuntimeUnitTests.ONNXRuntime_TestLSTMForwardHiddenState()\nONNXRuntimeUnitTests.ONNXRuntime_TestLSTMForwardCellState()\nONNXRuntimeUnitTests.ONNXRuntime_TestLSTMActivation()\nONNXRuntimeUnitTests.ONNXRuntime_TestLSTMBatchReallocation()\nONNXRuntimeUnitTests.ONNXRuntime_TestLSTMOutputWrite()\n" ]
[ [ "torch.nn.Linear", "torch.distributed.init_process_group", "torch.no_grad", "torch.manual_seed", "torch.nn.ReLU", "torch.cuda.is_available", "torch.distributed.barrier", "torch.nn.CrossEntropyLoss" ], [ "numpy.concatenate", "numpy.clip", "numpy.array", "numpy.empty", "numpy.vsplit", "numpy.zeros", "numpy.set_printoptions", "numpy.ones", "numpy.tile", "numpy.exp", "numpy.split", "numpy.tanh", "numpy.transpose", "numpy.flip", "numpy.append", "numpy.repeat", "numpy.squeeze", "numpy.expand_dims" ] ]
PerryXDeng/project_punyslayer
[ "79529b020ca56a5473dbb85ac7155bc03dc5023a", "79529b020ca56a5473dbb85ac7155bc03dc5023a" ]
[ "feature_cluster_algos.py", "legacy_code/tf_mnist/generate_datasets.py" ]
[ "import pickle\nimport os\nimport numpy as np\nimport cupy as cp\nimport tensorflow as tf\nimport cv2 as cv\n\nfrom generate_datasets import try_draw_single_font\nfrom unicode_info.database import generate_data_for_experiment\nfrom cluster_metrics import calculate_mean_iou, calculate_mean_precision\n\nimport time\nimport datetime\nimport tqdm\n\n\ndef generate_features_dict_file_path(save_dir: str, features_dict_file=\"features_dict_file.pkl\"):\n return os.path.join(save_dir, features_dict_file)\n\n\ndef generate_codepoints_cluster_map_file_path(save_dir: str): return os.path.join(save_dir,\n \"codepoints_cluster_map.pkl\")\n\n\ndef generate_cluster_codepoints_map_file_path(save_dir: str): return os.path.join(save_dir,\n \"codepoints_cluster_map.pkl\")\n\n\nclass EfficientNetFeatureExtractor:\n def __init__(self, model_path: str, batch_size: int, save_dir: str, multifont_mapping_path: str):\n self.mp = model_path\n self.bs = batch_size\n self.sd = save_dir\n self.mmp = multifont_mapping_path\n\n # Returns: A dictionary - Keys are all supported unicode characters, Values are minimum supported font for that unicode\n def generate_minimum_used_fonts_dict(self, unicode_supported_fonts_drawn_dict) -> dict:\n num_unicodes = len(unicode_supported_fonts_drawn_dict)\n minimum_font_dict = {}\n for unicode, font_feature_dict in unicode_supported_fonts_drawn_dict.items():\n supported_fonts = font_feature_dict.keys()\n for font in supported_fonts:\n if font not in minimum_font_dict:\n minimum_font_dict[font] = [unicode]\n else:\n minimum_font_dict[font].append(unicode)\n font_unicodes_list = list(minimum_font_dict.items())\n minimum_used_fonts_dict = {}\n last_update = []\n copied_font_unicodes_list = font_unicodes_list.copy()\n while len(minimum_used_fonts_dict) < num_unicodes:\n max_len_index = -1\n max_len = 0\n for char_ in range(len(copied_font_unicodes_list)):\n copied_font_unicodes_list[char_] = (copied_font_unicodes_list[char_][0],\n [i for i in copied_font_unicodes_list[char_][1] if\n i not in last_update])\n font_len = len(copied_font_unicodes_list[char_][1])\n if font_len > max_len:\n max_len = font_len\n max_len_index = char_\n most_supported_font = copied_font_unicodes_list[max_len_index]\n for unicode in most_supported_font[1]:\n assert unicode not in minimum_used_fonts_dict\n minimum_used_fonts_dict[unicode] = most_supported_font[0]\n last_update = most_supported_font[1]\n del copied_font_unicodes_list[max_len_index]\n return minimum_used_fonts_dict\n\n def _load_model_load_data_and_extract_features(self, model_path: str, batch_size: int,\n multifont_mapping_path: str) -> dict:\n # Load model + weights\n json_file = open(os.path.join(model_path, 'model.json'), 'r')\n loaded_model_json = json_file.read()\n json_file.close()\n loaded_model = tf.keras.models.model_from_json(loaded_model_json)\n loaded_model.load_weights(os.path.join(model_path, 'model.h5'))\n\n # Load model input info\n model_info_file = open(os.path.join(model_path, 'model_info.pkl'), 'rb')\n model_info_dict = pickle.load(model_info_file)\n img_size, font_size = model_info_dict['img_size'], model_info_dict['font_size']\n empty_image = np.full((img_size, img_size), 255)\n\n # Load multifont mapping dict\n multifont_mapping_file = open(multifont_mapping_path, 'rb')\n unicode_font_mapping_dict = pickle.load(multifont_mapping_file)\n\n if os.path.exists('unicode_supported_fonts_drawn_dict.pkl'):\n unicode_supported_fonts_drawn_dict_file = open('unicode_supported_fonts_drawn_dict.pkl', 'rb')\n 
unicode_supported_fonts_drawn_dict = pickle.load(unicode_supported_fonts_drawn_dict_file)\n else:\n unicode_supported_fonts_drawn_dict = {}\n for unicode_point in unicode_font_mapping_dict.keys():\n print(unicode_point)\n for font_path in unicode_font_mapping_dict[unicode_point]:\n unicode_drawn = try_draw_single_font(unicode_point, font_path, empty_image, img_size, font_size,\n \"./fonts\",\n transform_img=False)\n if not (unicode_drawn == empty_image).all():\n unicode_drawn_preprocessed = (\n (cv.cvtColor(unicode_drawn, cv.COLOR_GRAY2RGB) - (255 / 2)) / (255 / 2)).astype(np.float32)\n if unicode_point not in unicode_supported_fonts_drawn_dict:\n unicode_supported_fonts_drawn_dict[unicode_point] = {font_path: unicode_drawn_preprocessed}\n else:\n unicode_supported_fonts_drawn_dict[unicode_point][font_path] = unicode_drawn_preprocessed\n with open('unicode_supported_fonts_drawn_dict.pkl', 'wb+') as f:\n pickle.dump(unicode_supported_fonts_drawn_dict, f)\n print(len(unicode_supported_fonts_drawn_dict))\n\n if not os.path.exists('unicode_minimum_font_dict.pkl'):\n minimum_used_fonts_dict = self.generate_minimum_used_fonts_dict(unicode_supported_fonts_drawn_dict)\n with open('unicode_minimum_font_dict.pkl', 'wb+') as f:\n pickle.dump(minimum_used_fonts_dict, f)\n else:\n minimum_used_fonts_dict_file = open('unicode_minimum_font_dict.pkl', 'rb')\n minimum_used_fonts_dict = pickle.load(minimum_used_fonts_dict_file)\n print(len(minimum_used_fonts_dict))\n\n unicode_batch = {}\n unicode_feature_vectors_dict = {}\n for unicode_point in unicode_supported_fonts_drawn_dict.keys():\n unicode_batch[unicode_point] = unicode_supported_fonts_drawn_dict[unicode_point][\n minimum_used_fonts_dict[unicode_point]]\n if len(unicode_batch) == batch_size:\n unicode_batch_forward = loaded_model.predict(tf.convert_to_tensor(list(unicode_batch.values())))\n unicode_batch = dict(zip(unicode_batch.keys(), unicode_batch_forward))\n unicode_feature_vectors_dict.update(unicode_batch)\n unicode_batch = {}\n if len(unicode_batch) > 0:\n unicode_batch_forward = loaded_model.predict(tf.convert_to_tensor(list(unicode_batch.values())))\n unicode_batch = dict(zip(unicode_batch.keys(), unicode_batch_forward))\n unicode_feature_vectors_dict.update(unicode_batch)\n print(len(unicode_feature_vectors_dict))\n return unicode_feature_vectors_dict\n\n def extract_and_save_features(self):\n features_dict = self._load_model_load_data_and_extract_features(self.mp, self.bs, self.mmp)\n with open(generate_features_dict_file_path(self.sd), 'wb+') as f:\n pickle.dump(features_dict, f)\n\n\ndef cosine_similarity_matrix_cpu(features: np.ndarray) -> np.ndarray:\n start_time = time.time()\n n, k = features.shape\n\n a_ = features.reshape((n, 1, 1, k))\n b_ = features.reshape((1, n, k, 1))\n # [n, n]\n dot_products = np.matmul(a_, b_).reshape((n, n))\n\n # [n]\n norms = np.linalg.norm(features, axis=1)\n\n norms_a = norms.reshape((n, 1))\n norms_b = norms.reshape((1, n))\n # [n, n]\n norms_prod = np.multiply(norms_a, norms_b)\n cosine_similarity = dot_products / norms_prod\n\n elapsed_seconds = time.time() - start_time\n print(\"Time spent on similarity matrix: \" + str(datetime.timedelta(seconds=elapsed_seconds)))\n return cosine_similarity\n\n\ndef cosine_similarity_matrix_gpu_stream(features_cpu: np.ndarray, batch_size: int) -> np.ndarray:\n start_time = time.time()\n n, k = features_cpu.shape\n\n # [n, k] cpu-> gpu\n features = cp.asarray(features_cpu)\n\n # [n] gpu\n norms_gpu = cp.linalg.norm(features, axis=1)\n norms_a = 
norms_gpu.reshape((n, 1))\n norms_b = norms_gpu.reshape((1, n))\n\n\n # [n, k] gpu\n a_ = features.reshape((n, 1, 1, k))\n b_ = features.reshape((1, n, k, 1))\n\n cosine_similarities = []\n\n num_batches = n // batch_size\n for batch_i in range(num_batches):\n start = batch_i * batch_size\n end = (batch_i + 1) * batch_size\n if end > n: end = n\n norm_a_batch = norms_a[start:end]\n a_batch = a_[start:end] = a_[start:end]\n norms_prod_batch = cp.multiply(norm_a_batch, norms_b)\n dot_products_batch = cp.matmul(a_batch, b_).reshape((batch_size, n))\n cosine_similarities.append(cp.asnumpy(dot_products_batch / norms_prod_batch).astype(np.float16))\n cosine_similarities = np.concatenate(cosine_similarities)\n\n elapsed_seconds = time.time() - start_time\n print(\"Time spent on similarity matrix: \" + str(datetime.timedelta(seconds=elapsed_seconds)))\n return cosine_similarities\n\n\nclass _AbstractFeatureClusterer:\n def __init__(self, save_dir: str):\n self.sd = save_dir\n with open(generate_features_dict_file_path(self.sd), 'rb') as f:\n self.features_dict = pickle.load(f)\n\n def cluster_features_into_equivalence_classes(self, features_dict: dict) -> (dict, dict):\n \"\"\"\n *\n :param features_dict: keys are codepoint integers, values are numpy arrays of identical dimension\n :return: mapping of codepoints to cluster id, mapping of cluster ids to lists of codepoints\n \"\"\"\n raise NotImplementedError\n\n def find_and_save_equivalence_classes(self):\n codepoints_cluster_map, cluster_codepoints_map = self.cluster_features_into_equivalence_classes(\n self.features_dict)\n with open(generate_codepoints_cluster_map_file_path(self.sd), 'wb+') as f:\n pickle.dump(codepoints_cluster_map, f)\n with open(generate_cluster_codepoints_map_file_path(self.sd), 'wb+') as f:\n pickle.dump(cluster_codepoints_map, f)\n\n\ndef _dfs_traverse(adj_mat: np.ndarray, visited: np.ndarray, node: int, trace: list):\n visited[node] = True\n trace.append(node)\n neighbors_indices = np.nonzero(adj_mat[node])[0]\n for i in neighbors_indices:\n if not visited[i]:\n _dfs_traverse(adj_mat, visited, i, trace)\n\n\ndef _find_nontrivial_components_from_adjacency_matrix(adj_mat: np.ndarray) -> list:\n \"\"\"\n a simple algorithm to iterate through all unvisited nodes and DFS through their neighbors to find components\n ignores components with only one node\n :param adj_mat: *\n :return: list of lists of node ids, corresponding to connected components\n \"\"\"\n n, _ = adj_mat.shape\n visited = np.zeros(n, dtype=np.bool)\n connected_components = []\n for node in range(n):\n if not visited[node]:\n trace = []\n _dfs_traverse(adj_mat, visited, node, trace)\n if len(trace) > 0:\n connected_components.append(trace)\n return connected_components\n\n\nclass _AbstractGraphClusterer(_AbstractFeatureClusterer):\n \"\"\"\n cluster unicodes\n 1. compute adjacency matrix from pairwise comparison\n 2. 
find connected components with more than one nodes\n \"\"\"\n\n def __init__(self, save_dir: str):\n super().__init__(save_dir)\n\n def _generate_adjacency_matrix(self, features: np.ndarray):\n raise NotImplementedError\n\n def cluster_features_into_equivalence_classes(self, features_dict: dict) -> (dict, dict):\n \"\"\"\n *\n :param features_dict: keys are codepoint integers, values are numpy arrays of identical dimension\n :return: mapping of codepoints to cluster id, a mapping of cluster ids to lists of codepoints\n \"\"\"\n # cpu [n]\n ordered_codepoints = list(features_dict.keys())\n ordered_features = np.stack(list(features_dict.values()))\n adj_mat = self._generate_adjacency_matrix(ordered_features)\n connected_components = _find_nontrivial_components_from_adjacency_matrix(adj_mat)\n codepoints_cluster_map = {}\n cluster_codepoints_map = {}\n for cluster_id, component in enumerate(connected_components):\n cluster_codepoints_map[cluster_id] = []\n for node in component:\n codepoint = ordered_codepoints[node]\n codepoints_cluster_map[codepoint] = cluster_id\n cluster_codepoints_map[cluster_id].append(codepoint)\n for cluster_id_neg, codepoint in enumerate(ordered_codepoints):\n if codepoint not in codepoints_cluster_map:\n cluster_id = -1 * cluster_id_neg\n codepoints_cluster_map[codepoint] = cluster_id\n cluster_codepoints_map[cluster_id] = [codepoint]\n return codepoints_cluster_map, cluster_codepoints_map\n\n\nclass CosineSimGraphClustererCPU(_AbstractGraphClusterer):\n def __init__(self, save_dir: str, threshold: float, epsilon: float):\n super().__init__(save_dir)\n self.threshold = threshold\n self.epsilon = epsilon\n\n def _generate_adjacency_matrix(self, features: np.ndarray):\n self.cosine_similarity = cosine_similarity_matrix_cpu(features)\n adjacency_matrix = (self.cosine_similarity > self.threshold)\n return adjacency_matrix\n\n\ndef greedy_clique_cluster_heuristic(features_dict: dict, target_mean: float, target_std: float,\n target_threshold: float):\n \"\"\"\n Ground Truth Mean: 0.767601996660232\n Ground Truth Std: 0.0935437240793059\n Optimized parameters: .72,.01,.94\n \"\"\"\n ordered_codepoints = list(features_dict.keys())\n ordered_features = np.stack(list(features_dict.values()))\n similarity_matrix = cosine_similarity_matrix_cpu(ordered_features)\n\n start_time = time.time()\n cluster_id_indices_map = {}\n for node_index in range(len(ordered_codepoints)):\n added_to_existing_cluster = False\n for cluster_id, cluster_indices in cluster_id_indices_map.items():\n edges = similarity_matrix[node_index, cluster_indices]\n if (edges > target_threshold).all():\n cluster_id_indices_map[cluster_id].append(node_index)\n added_to_existing_cluster = True\n break\n if not added_to_existing_cluster:\n cluster_id_indices_map[len(cluster_id_indices_map)] = [node_index]\n print(\"Time spent on finding cliques: \" + str(datetime.timedelta(seconds=time.time() - start_time)))\n\n start_time = time.time()\n id_upperbound = len(cluster_id_indices_map)\n for cluster_id in range(id_upperbound):\n if cluster_id not in cluster_id_indices_map: continue\n cluster_indices = cluster_id_indices_map[cluster_id]\n for merge_candidate_id in range(cluster_id + 1, id_upperbound):\n if merge_candidate_id not in cluster_id_indices_map: continue\n candidate_indices = cluster_id_indices_map[merge_candidate_id]\n # https://stackoverflow.com/questions/22927181/selecting-specific-rows-and-columns-from-numpy-array\n edges = similarity_matrix[np.ix_(cluster_indices, candidate_indices)]\n edges_mean = 
np.mean(edges)\n edges_std = np.std(edges)\n if edges_mean > target_mean and edges_std < target_std:\n cluster_id_indices_map[cluster_id].extend(candidate_indices)\n del cluster_id_indices_map[merge_candidate_id]\n print(\"Time spent on merging clusters: \" + str(datetime.timedelta(seconds=time.time() - start_time)))\n predicted_cluster_codepoints_map = {cluster_id: [ordered_codepoints[index] for index in indices]\n for cluster_id, indices in cluster_id_indices_map.items()}\n predicted_codepoints_cluster_map = {ordered_codepoints[index]: cluster_id\n for cluster_id, indices in cluster_id_indices_map.items()\n for index in indices}\n return predicted_codepoints_cluster_map, predicted_cluster_codepoints_map\n\n\ndef component_into_cliques_heuristic(features_dict: dict, target_mean: float, target_std: float,\n target_threshold: float):\n ordered_codepoints = list(features_dict.keys())\n ordered_features = np.stack(list(features_dict.values()))\n adj_mat = None\n connected_components = _find_nontrivial_components_from_adjacency_matrix(adj_mat)\n return\n\n\ndef baseline_heuristic(features_dict: dict):\n predicted_codepoints_cluster_map = {}\n predicted_cluster_codepoints_map = {}\n for cluster_id, codepoint in enumerate(features_dict.keys()):\n predicted_codepoints_cluster_map[codepoint] = cluster_id\n predicted_cluster_codepoints_map[cluster_id] = [codepoint]\n return predicted_codepoints_cluster_map, predicted_cluster_codepoints_map\n\n\ndef _test_dfs_components_finder():\n import matplotlib.pyplot as plt\n import networkx as nx\n\n def show_graph_with_labels(adjacency_matrix):\n n, _ = adjacency_matrix.shape\n rows, cols = np.where(adjacency_matrix == 1)\n edges = zip(rows.tolist(), cols.tolist())\n gr = nx.Graph()\n gr.add_edges_from(edges)\n nx.draw_networkx(gr, node_size=250, labels={i: str(i) for i in range(n)}, with_labels=True)\n plt.show()\n\n adj_mat = np.zeros(shape=[10, 10], dtype=np.bool)\n for i in range(10):\n adj_mat[i, i] = True\n # component 1: 1, 2, 3\n adj_mat[1, 3] = True\n adj_mat[3, 1] = True\n adj_mat[1, 2] = True\n adj_mat[2, 1] = True\n adj_mat[2, 3] = True\n adj_mat[3, 2] = True\n # component 2: 4, 5\n adj_mat[4, 5] = True\n adj_mat[5, 4] = True\n # component 3: 6, 7, 8, 9\n adj_mat[6, 7] = True\n adj_mat[7, 6] = True\n adj_mat[7, 8] = True\n adj_mat[8, 7] = True\n adj_mat[8, 9] = True\n adj_mat[9, 8] = True\n print(_find_nontrivial_components_from_adjacency_matrix(adj_mat))\n show_graph_with_labels(adj_mat)\n\n\ndef run_dfs_on_consortium(num_random_additions: int = 0):\n supported_consortium_feature_vectors, ground_truth_consoritium_codepoints_map = generate_data_for_experiment(\n num_random_additions)\n Cluster_Algo = CosineSimGraphClustererCPU(save_dir=\"./\", threshold=.92, epsilon=1e-5)\n predicted_codepoints_cluster_map, predicted_cluster_codepoints_map = Cluster_Algo.cluster_features_into_equivalence_classes(\n features_dict=supported_consortium_feature_vectors)\n mean_IOU = calculate_mean_iou(predicted_codepoints_cluster_map, predicted_cluster_codepoints_map,\n ground_truth_consoritium_codepoints_map)\n mean_precision = calculate_mean_precision(predicted_codepoints_cluster_map, predicted_cluster_codepoints_map,\n ground_truth_consoritium_codepoints_map)\n print(f\"Mean IOU: \" + str(mean_IOU))\n print(f\"Mean precision: \" + str(mean_precision))\n\n\ndef run_clique_on_consortium(num_random_additions: int = 0):\n supported_consortium_feature_vectors, ground_truth_consoritium_codepoints_map = generate_data_for_experiment(\n num_random_additions)\n 
predicted_codepoints_cluster_map, predicted_cluster_codepoints_map = \\\n greedy_clique_cluster_heuristic(supported_consortium_feature_vectors, 0.72, 0.01, 0.94)\n mean_IOU = calculate_mean_iou(predicted_codepoints_cluster_map, predicted_cluster_codepoints_map,\n ground_truth_consoritium_codepoints_map)\n mean_precision = calculate_mean_precision(predicted_codepoints_cluster_map, predicted_cluster_codepoints_map,\n ground_truth_consoritium_codepoints_map)\n print(f\"Mean IOU: \" + str(mean_IOU))\n print(f\"Mean precision: \" + str(mean_precision))\n\n\ndef run_baseline_on_consortium(num_random_additions: int = 0):\n supported_consortium_feature_vectors, ground_truth_consoritium_codepoints_map = generate_data_for_experiment(\n num_random_additions)\n predicted_codepoints_cluster_map, predicted_cluster_codepoints_map = baseline_heuristic(supported_consortium_feature_vectors)\n mean_IOU = calculate_mean_iou(predicted_codepoints_cluster_map, predicted_cluster_codepoints_map,\n ground_truth_consoritium_codepoints_map)\n mean_precision = calculate_mean_precision(predicted_codepoints_cluster_map, predicted_cluster_codepoints_map,\n ground_truth_consoritium_codepoints_map)\n print(f\"Mean IOU: \" + str(mean_IOU))\n print(f\"Mean precision: \" + str(mean_precision))\n\n\ndef generate_sim_mat_float16_gpu():\n supported_unicode_feature_vectors = pickle.load(open('features_dict_file.pkl', 'rb'))\n ordered_features = np.stack(list(supported_unicode_feature_vectors.values()))\n similarity_matrix = cosine_similarity_matrix_gpu_stream(ordered_features, batch_size=1000)\n np.save('sim_mat_float16.npy', similarity_matrix)\n\n\ndef load_sim_mat_float16():\n return np.load('sim_mat_float16.npy')\n\n\nif __name__ == \"__main__\":\n # _test_dfs_components_finder()\n generate_sim_mat_float16_gpu()\n # import argparse\n # parser = argparse.ArgumentParser()\n # parser.add_argument('-hc', '--heuristic_choice', action='store', type=str, default='dfs')\n # parser.add_argument('-nra', '--num_rand_add', action='store', type=int, default=0)\n # args = parser.parse_args()\n # num_rand_add = args.num_rand_add\n # hc = args.heuristic_choice\n #\n # t0 = time.time()\n # print(\"Heuristic: \" + hc)\n # print(\"Random Additions: \" + str(num_rand_add))\n #\n # if hc == \"clique\":\n # run_clique_on_consortium(num_rand_add)\n # elif hc == \"dfs\":\n # run_dfs_on_consortium(num_rand_add)\n # elif hc == \"baseline\":\n # run_baseline_on_consortium(num_rand_add)\n # print(\"Total Elapsed Time: \" + str(datetime.timedelta(seconds=time.time() - t0)))\n", "import tensorflow as tf\nimport numpy as np\nimport random\n\n\ndef create_pairs(x, digit_indices):\n random.seed(0)\n num_classes = 10\n pairs = []\n labels = []\n # number of examples in class with in examples\n n = min([len(digit_indices[d]) for d in range(num_classes)]) - 1\n for d in range(num_classes):\n for i in range(n):\n z1, z2 = digit_indices[d][i], digit_indices[d][i + 1]\n pairs += [[x[z1], x[z2]]]\t# pair of data of the same class\n inc = random.randrange(1, num_classes)\n dn = (d + inc) % num_classes# random class\n z1, z2 = digit_indices[d][i], digit_indices[dn][i]\n pairs += [[x[z1], x[z2]]] # pair of data of two different class\n labels += [1, 0] # two consecutive pairs\n return np.array(pairs), np.array(labels)\n\n\ndef create_triplets(x_train, digit_indices):\n random.seed(0)\n num_classes = 10\n # number of examples in class with in examples\n n = min([len(digit_indices[d]) for d in range(num_classes)])\n total_size = [num_classes * n, 28, 28]\n anchors = 
np.empty(total_size, dtype=np.uint8)\n positives = np.empty(total_size, dtype=np.uint8)\n negatives = np.empty(total_size, dtype=np.uint8)\n index = 0\n for c in range(num_classes):\n for i in range(n):\n anchor = digit_indices[c][i]\n positive = digit_indices[c][random.randrange(0, n)]\n random_class = (c + random.randrange(1, num_classes)) % num_classes# random class\n negative = digit_indices[random_class][i]\n anchors[index] = x_train[anchor]\n positives[index] = x_train[positive]\n negatives[index] = x_train[negative]\n index += 1\n return anchors, positives, negatives\n\n\ndef compile_datasets():\n mnist = tf.keras.datasets.mnist\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n # x train 60k * 28 * 28\n # y train 10k * 28 * 28\n num_classes = 10\n\n # Split training labels by class, information contains y_train index\n digit_indices = [np.where(y_train == i)[0] for i in range(num_classes)]\n # Around 100k training examples. Each one is a pair, each is 28x28\n train_pairs, train_pairs_labels = create_pairs(x_train, digit_indices)\n\n # Split test labels by class, information contains y_test index\n digit_indices = [np.where(y_test == i)[0] for i in range(num_classes)]\n # Around 18k training examples. Each one is a pair, each is 28x28\n test_pairs, test_pairs_labels = create_pairs(x_test, digit_indices)\n\n # 108400 pairs of training data\n # 17820 pairs of testing data\n size_train = train_pairs.shape[0]\n size_test = test_pairs.shape[0]\n\n # Separate pairs of training examples\n x_1_train = np.reshape(train_pairs[:, 0], (size_train, 28, 28, 1))\n x_2_train = np.reshape(train_pairs[:, 1], (size_train, 28, 28, 1))\n\n y_train = np.reshape(train_pairs_labels, (size_train, 1))\n\n # Separate pairs of testing examples\n x_1_test = np.reshape(test_pairs[:, 0], (size_test, 28, 28, 1))\n x_2_test = np.reshape(test_pairs[:, 1], (size_test, 28, 28, 1))\n\n y_test = np.reshape(test_pairs_labels, (size_test, 1))\n\n return x_1_train, x_2_train, y_train, x_1_test, x_2_test, y_test\n\n\ndef compile_triplet_datasets():\n mnist = tf.keras.datasets.mnist\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n # x train 60k * 28 * 28\n # y train 10k * 28 * 28\n num_classes = 10\n\n # Split training labels by class, information contains y_train index\n digit_indices = [np.where(y_train == i)[0] for i in range(num_classes)]\n # Around 100k training examples. Each one is a pair, each is 28x28\n anchors, positives, negatives = create_triplets(x_train, digit_indices)\n\n # Split test labels by class, information contains y_test index\n digit_indices = [np.where(y_test == i)[0] for i in range(num_classes)]\n # Around 18k training examples. 
Each one is a pair, each is 28x28\n test_pairs, test_pairs_labels = create_pairs(x_test, digit_indices)\n\n # 108400 pairs of training data\n # 17820 pairs of testing data\n size_test = test_pairs.shape[0]\n\n append_channel = lambda ndarray: np.reshape(ndarray, [ndarray.shape[0], ndarray.shape[1], ndarray.shape[2], 1])\n anchors, positives, negatives = append_channel(anchors), append_channel(positives), append_channel(negatives)\n\n # Separate pairs of testing examples\n x_1_test = append_channel(test_pairs[:, 0])\n x_2_test = append_channel(test_pairs[:, 1])\n\n y_test = np.reshape(test_pairs_labels, (size_test, 1))\n\n return anchors, positives, negatives, x_1_test, x_2_test, y_test\n\n\ndef print_default_dtypes():\n x_1_train, x_2_train, y_train, x_1_test, x_2_test, y_test = compile_datasets()\n ndarrays = [x_1_train, x_2_train, y_train, x_1_test, x_2_test, y_test]\n for ndarray in ndarrays:\n print(ndarray.dtype)\n print(ndarray.shape)\n # uint8\n # (108400, 28, 28, 1)\n # uint8\n # (108400, 28, 28, 1)\n # int64\n # (108400, 1)\n # uint8\n # (17820, 28, 28, 1)\n # uint8\n # (17820, 28, 28, 1)\n # int64\n # (17820, 1)\n\ndef print_default_triplets_dtypes():\n anchors, positives, negatives, x_1_test, x_2_test, y_test = compile_triplet_datasets()\n ndarrays = [anchors, positives, negatives, x_1_test, x_2_test, y_test]\n for ndarray in ndarrays:\n print(ndarray.dtype)\n print(ndarray.shape)\n # uint8\n # (54210, 28, 28, 1)\n # uint8\n # (54210, 28, 28, 1)\n # uint8\n # (54210, 28, 28, 1)\n # uint8\n # (17820, 28, 28, 1)\n # uint8\n # (17820, 28, 28, 1)\n # int64\n # (17820, 1)\n\n\ndef compile_transformed_float32_datasets():\n x_1_train, x_2_train, y_train, x_1_test, x_2_test, y_test = compile_datasets()\n x_1_train = x_1_train.astype(np.float32)\n x_2_train = x_2_train.astype(np.float32)\n y_train = y_train.astype(np.float32)\n x_1_test = x_1_test.astype(np.float32)\n x_2_test = x_2_test.astype(np.float32)\n y_test = y_test.astype(np.float32)\n x_1_train = (x_1_train - 255 / 2) / 255\n x_2_train = (x_2_train - 255 / 2) / 255\n x_1_test = (x_1_test - 255 / 2) / 255\n x_2_test = (x_2_test - 255 / 2) / 255\n return x_1_train, x_2_train, y_train, x_1_test, x_2_test, y_test\n\n\nif __name__ == \"__main__\":\n print_default_triplets_dtypes()\n" ]
[ [ "numpy.concatenate", "numpy.full", "numpy.linalg.norm", "numpy.matmul", "numpy.zeros", "numpy.load", "numpy.save", "numpy.nonzero", "numpy.multiply", "numpy.where", "numpy.mean", "numpy.std", "numpy.ix_", "matplotlib.pyplot.show", "tensorflow.keras.models.model_from_json" ], [ "numpy.where", "numpy.array", "numpy.empty", "numpy.reshape" ] ]
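The clustering code in this entry thresholds a cosine-similarity matrix of glyph feature vectors and then keeps connected components with more than one node as equivalence classes. A small self-contained sketch of that idea in plain numpy (the 2-D toy features, the 0.92 threshold and the helper names are illustrative, not taken from the repository):

    import numpy as np

    def cosine_similarity_matrix(features, epsilon=1e-8):
        # Row-normalize, then a single matrix product gives all pairwise cosines.
        normed = features / (np.linalg.norm(features, axis=1, keepdims=True) + epsilon)
        return normed @ normed.T

    def nontrivial_components(adj):
        # Iterative DFS over a boolean adjacency matrix; keep components with >1 node.
        n = adj.shape[0]
        seen = np.zeros(n, dtype=bool)
        components = []
        for start in range(n):
            if seen[start]:
                continue
            seen[start] = True
            stack, comp = [start], []
            while stack:
                node = stack.pop()
                comp.append(node)
                for nbr in np.nonzero(adj[node])[0]:
                    if not seen[nbr]:
                        seen[nbr] = True
                        stack.append(nbr)
            if len(comp) > 1:
                components.append(sorted(comp))
        return components

    # Toy "glyph" features: two tight clusters plus one outlier.
    feats = np.array([[1.0, 0.0], [0.99, 0.05], [0.0, 1.0], [0.05, 0.99], [-1.0, 0.2]])
    adj = cosine_similarity_matrix(feats) > 0.92
    print(nontrivial_components(adj))  # expected: [[0, 1], [2, 3]]

The explicit stack avoids recursion, so a large similarity graph does not run into Python's recursion limit.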
YoushaaMurhij/VOTR_Inference
[ "dbf6ddc66f2644c1897a4307e30e2ae781ebacf6" ]
[ "VOTR/pcdet/utils/loss_utils.py" ]
[ "import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom . import box_utils\n\n\nclass SigmoidFocalClassificationLoss(nn.Module):\n \"\"\"\n Sigmoid focal cross entropy loss.\n \"\"\"\n\n def __init__(self, gamma: float = 2.0, alpha: float = 0.25):\n \"\"\"\n Args:\n gamma: Weighting parameter to balance loss for hard and easy examples.\n alpha: Weighting parameter to balance loss for positive and negative examples.\n \"\"\"\n super(SigmoidFocalClassificationLoss, self).__init__()\n self.alpha = alpha\n self.gamma = gamma\n\n @staticmethod\n def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):\n \"\"\" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:\n max(x, 0) - x * z + log(1 + exp(-abs(x))) in\n https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits\n\n Args:\n input: (B, #anchors, #classes) float tensor.\n Predicted logits for each class\n target: (B, #anchors, #classes) float tensor.\n One-hot encoded classification targets\n\n Returns:\n loss: (B, #anchors, #classes) float tensor.\n Sigmoid cross entropy loss without reduction\n \"\"\"\n loss = torch.clamp(input, min=0) - input * target + \\\n torch.log1p(torch.exp(-torch.abs(input)))\n return loss\n\n def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):\n \"\"\"\n Args:\n input: (B, #anchors, #classes) float tensor.\n Predicted logits for each class\n target: (B, #anchors, #classes) float tensor.\n One-hot encoded classification targets\n weights: (B, #anchors) float tensor.\n Anchor-wise weights.\n\n Returns:\n weighted_loss: (B, #anchors, #classes) float tensor after weighting.\n \"\"\"\n pred_sigmoid = torch.sigmoid(input)\n alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)\n pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid\n focal_weight = alpha_weight * torch.pow(pt, self.gamma)\n\n bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)\n\n loss = focal_weight * bce_loss\n\n if weights.shape.__len__() == 2 or \\\n (weights.shape.__len__() == 1 and target.shape.__len__() == 2):\n weights = weights.unsqueeze(-1)\n\n assert weights.shape.__len__() == loss.shape.__len__()\n\n return loss * weights\n\n\nclass WeightedSmoothL1Loss(nn.Module):\n \"\"\"\n Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss\n https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py\n | 0.5 * x ** 2 / beta if abs(x) < beta\n smoothl1(x) = |\n | abs(x) - 0.5 * beta otherwise,\n where x = input - target.\n \"\"\"\n def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):\n \"\"\"\n Args:\n beta: Scalar float.\n L1 to L2 change point.\n For beta values < 1e-5, L1 loss is computed.\n code_weights: (#codes) float list if not None.\n Code-wise weights.\n \"\"\"\n super(WeightedSmoothL1Loss, self).__init__()\n self.beta = beta\n if code_weights is not None:\n self.code_weights = np.array(code_weights, dtype=np.float32)\n self.code_weights = torch.from_numpy(self.code_weights).cuda()\n\n @staticmethod\n def smooth_l1_loss(diff, beta):\n if beta < 1e-5:\n loss = torch.abs(diff)\n else:\n n = torch.abs(diff)\n loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)\n\n return loss\n\n def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):\n \"\"\"\n Args:\n input: (B, #anchors, #codes) float tensor.\n Ecoded predicted locations of objects.\n target: (B, 
#anchors, #codes) float tensor.\n Regression targets.\n weights: (B, #anchors) float tensor if not None.\n\n Returns:\n loss: (B, #anchors) float tensor.\n Weighted smooth l1 loss without reduction.\n \"\"\"\n target = torch.where(torch.isnan(target), input, target) # ignore nan targets\n\n diff = input - target\n # code-wise weighting\n if self.code_weights is not None:\n diff = diff * self.code_weights.view(1, 1, -1)\n\n loss = self.smooth_l1_loss(diff, self.beta)\n\n # anchor-wise weighting\n if weights is not None:\n assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]\n loss = loss * weights.unsqueeze(-1)\n\n return loss\n\n\nclass WeightedL1Loss(nn.Module):\n def __init__(self, code_weights: list = None):\n \"\"\"\n Args:\n code_weights: (#codes) float list if not None.\n Code-wise weights.\n \"\"\"\n super(WeightedL1Loss, self).__init__()\n if code_weights is not None:\n self.code_weights = np.array(code_weights, dtype=np.float32)\n self.code_weights = torch.from_numpy(self.code_weights).cuda()\n\n def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):\n \"\"\"\n Args:\n input: (B, #anchors, #codes) float tensor.\n Ecoded predicted locations of objects.\n target: (B, #anchors, #codes) float tensor.\n Regression targets.\n weights: (B, #anchors) float tensor if not None.\n\n Returns:\n loss: (B, #anchors) float tensor.\n Weighted smooth l1 loss without reduction.\n \"\"\"\n target = torch.where(torch.isnan(target), input, target) # ignore nan targets\n\n diff = input - target\n # code-wise weighting\n if self.code_weights is not None:\n diff = diff * self.code_weights.view(1, 1, -1)\n\n loss = torch.abs(diff)\n\n # anchor-wise weighting\n if weights is not None:\n assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]\n loss = loss * weights.unsqueeze(-1)\n\n return loss\n\n\nclass WeightedCrossEntropyLoss(nn.Module):\n \"\"\"\n Transform input to fit the fomation of PyTorch offical cross entropy loss\n with anchor-wise weighting.\n \"\"\"\n def __init__(self):\n super(WeightedCrossEntropyLoss, self).__init__()\n\n def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):\n \"\"\"\n Args:\n input: (B, #anchors, #classes) float tensor.\n Predited logits for each class.\n target: (B, #anchors, #classes) float tensor.\n One-hot classification targets.\n weights: (B, #anchors) float tensor.\n Anchor-wise weights.\n\n Returns:\n loss: (B, #anchors) float tensor.\n Weighted cross entropy loss without reduction\n \"\"\"\n input = input.permute(0, 2, 1)\n target = target.argmax(dim=-1)\n loss = F.cross_entropy(input, target, reduction='none') * weights\n return loss\n\n\ndef get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):\n \"\"\"\n Args:\n pred_bbox3d: (N, 7) float Tensor.\n gt_bbox3d: (N, 7) float Tensor.\n\n Returns:\n corner_loss: (N) float Tensor.\n \"\"\"\n assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]\n\n pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)\n gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)\n\n gt_bbox3d_flip = gt_bbox3d.clone()\n gt_bbox3d_flip[:, 6] += np.pi\n gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)\n # (N, 8)\n corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),\n torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))\n # (N, 8)\n corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)\n\n return corner_loss.mean(dim=1)\n\nclass 
CenterNetFocalLoss(nn.Module):\n \"\"\"nn.Module warpper for focal loss\"\"\"\n def __init__(self):\n super(CenterNetFocalLoss, self).__init__()\n\n def _neg_loss(self, pred, gt):\n \"\"\" Modified focal loss. Exactly the same as CornerNet.\n Runs faster and costs a little bit more memory\n Arguments:\n pred (batch x c x h x w)\n gt_regr (batch x c x h x w)\n \"\"\"\n pos_inds = gt.eq(1).float()\n neg_inds = gt.lt(1).float()\n\n neg_weights = torch.pow(1 - gt, 4)\n\n loss = 0\n\n pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds\n neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds\n\n num_pos = pos_inds.float().sum()\n\n pos_loss = pos_loss.sum()\n neg_loss = neg_loss.sum()\n\n if num_pos == 0:\n loss = loss - neg_loss\n else:\n loss = loss - (pos_loss + neg_loss) / num_pos\n return loss\n\n def forward(self, out, target):\n return self._neg_loss(out, target)\n\ndef _gather_feat(feat, ind, mask=None):\n dim = feat.size(2)\n ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)\n feat = feat.gather(1, ind)\n if mask is not None:\n mask = mask.unsqueeze(2).expand_as(feat)\n feat = feat[mask]\n feat = feat.view(-1, dim)\n return feat\n\ndef _transpose_and_gather_feat(feat, ind):\n feat = feat.permute(0, 2, 3, 1).contiguous()\n feat = feat.view(feat.size(0), -1, feat.size(3)).contiguous()\n feat = _gather_feat(feat, ind)\n return feat.contiguous()\n\nclass CenterNetRegLoss(nn.Module):\n \"\"\"Regression loss for an output tensor\n Arguments:\n output (batch x dim x h x w)\n mask (batch x max_objects)\n ind (batch x max_objects)\n target (batch x max_objects x dim)\n \"\"\"\n\n def __init__(self):\n super(CenterNetRegLoss, self).__init__()\n\n def _reg_loss(self, regr, gt_regr, mask):\n \"\"\" L1 regression loss\n Arguments:\n regr (batch x max_objects x dim)\n gt_regr (batch x max_objects x dim)\n mask (batch x max_objects)\n \"\"\"\n num = mask.float().sum()\n mask = mask.unsqueeze(2).expand_as(gt_regr).float()\n isnotnan = (~ torch.isnan(gt_regr)).float()\n mask *= isnotnan\n regr = regr * mask\n gt_regr = gt_regr * mask\n\n loss = torch.abs(regr - gt_regr)\n loss = loss.transpose(2, 0).contiguous()\n\n loss = torch.sum(loss, dim=2)\n loss = torch.sum(loss, dim=1)\n\n loss = loss / (num + 1e-4)\n return loss\n\n def forward(self, output, mask, ind, target):\n pred = _transpose_and_gather_feat(output, ind)\n loss = self._reg_loss(pred, target, mask)\n return loss\n\nclass CenterNetSmoothRegLoss(nn.Module):\n \"\"\"Regression loss for an output tensor\n Arguments:\n output (batch x dim x h x w)\n mask (batch x max_objects)\n ind (batch x max_objects)\n target (batch x max_objects x dim)\n \"\"\"\n\n def __init__(self):\n super(CenterNetSmoothRegLoss, self).__init__()\n\n def _smooth_reg_loss(self, regr, gt_regr, mask, sigma=3):\n \"\"\" L1 regression loss\n Arguments:\n regr (batch x max_objects x dim)\n gt_regr (batch x max_objects x dim)\n mask (batch x max_objects)\n \"\"\"\n num = mask.float().sum()\n mask = mask.unsqueeze(2).expand_as(gt_regr).float()\n isnotnan = (~ torch.isnan(gt_regr)).float()\n mask *= isnotnan\n regr = regr * mask\n gt_regr = gt_regr * mask\n\n abs_diff = torch.abs(regr - gt_regr)\n\n abs_diff_lt_1 = torch.le(abs_diff, 1 / (sigma ** 2)).type_as(abs_diff)\n\n loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * sigma, 2) + (\n abs_diff - 0.5 / (sigma ** 2)\n ) * (1.0 - abs_diff_lt_1)\n\n loss = loss.transpose(2, 0).contiguous()\n\n loss = torch.sum(loss, dim=2)\n loss = torch.sum(loss, dim=1)\n\n loss = loss / (num + 1e-4)\n 
return loss\n\n def forward(self, output, mask, ind, target, sin_loss):\n assert sin_loss is False\n pred = _transpose_and_gather_feat(output, ind)\n loss = self._smooth_reg_loss(pred, target, mask)\n return loss\n" ]
[ [ "torch.sigmoid", "numpy.array", "torch.isnan", "torch.norm", "torch.le", "torch.pow", "torch.clamp", "torch.abs", "torch.from_numpy", "torch.nn.functional.cross_entropy", "torch.log", "torch.where", "torch.sum" ] ]
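SigmoidFocalClassificationLoss above computes sigmoid cross-entropy in the numerically stable form max(x, 0) - x*z + log(1 + exp(-|x|)) and then applies alpha and (1 - p_t)^gamma modulation. A standalone sanity check with made-up shapes (not tied to the VOTR pipeline) that the stable form matches PyTorch's built-in BCE-with-logits and that shows the focal weighting:

    import torch
    import torch.nn.functional as F

    torch.manual_seed(0)
    logits = torch.randn(2, 4, 3)                          # (batch, anchors, classes)
    targets = torch.randint(0, 2, logits.shape).float()    # one-hot style targets

    # Stable sigmoid cross-entropy: max(x, 0) - x*z + log(1 + exp(-|x|)).
    bce_manual = torch.clamp(logits, min=0) - logits * targets \
                 + torch.log1p(torch.exp(-torch.abs(logits)))
    bce_builtin = F.binary_cross_entropy_with_logits(logits, targets, reduction="none")
    print(torch.allclose(bce_manual, bce_builtin, atol=1e-6))  # expected: True

    # Focal modulation: down-weight easy examples, as in the class above.
    alpha, gamma = 0.25, 2.0
    p = torch.sigmoid(logits)
    p_t = targets * (1 - p) + (1 - targets) * p              # distance of the prediction from the target
    alpha_w = targets * alpha + (1 - targets) * (1 - alpha)
    focal = alpha_w * p_t ** gamma * bce_manual
    print(focal.shape)                                       # torch.Size([2, 4, 3])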
hatimwen/CV_Project_wenht
[ "4463797193107fdaab8394770dd382c9c5ca09ee" ]
[ "utils/mixup.py" ]
[ "import numpy as np\nimport torch\nimport torch.nn as nn\n\ndef mixup(batch_x, batch_y, alpha=0.5, use_cuda=True):\n    '''\n    batch_x: batch of input samples, shape=[batch_size,channels,width,height]\n    batch_y: batch of labels, shape=[batch_size]\n    alpha: parameter of the beta distribution that generates lam; 0.5 usually works well\n    use_cuda: whether to use cuda\n    \n    returns:\n    mixed inputs, pairs of targets, and lam\n    '''\n    \n    if alpha > 0:\n        # alpha=0.5 makes lam likely to land close to 0 or 1\n        lam = np.random.beta(alpha, alpha)\n    else:\n        lam = 1\n    batch_size = batch_x.size()[0]\n    if use_cuda:\n        index = torch.randperm(batch_size).cuda()\n    else:\n        index = torch.randperm(batch_size)  # shuffled indices over the batch\n    \n    # Build the mixed batch; a sample can be mixed with one of the same class (even the same image) or of a different class\n    mixed_batchx = lam * batch_x + (1 - lam) * batch_x[index, :]\n    \n    \"\"\"\n    Example:\n    Suppose batch_x.shape=[2,3,112,112] and batch_size=2.\n    If index=[0,1], then mixed_batchx=lam*[[0,1],3,112,112]+(1-lam)*[[0,1],3,112,112]=[[0,1],3,112,112], i.e. each image is mixed with itself.\n    If index=[1,0], then mixed_batchx=lam*[[0,1],3,112,112]+(1-lam)*[[1,0],3,112,112]=[batch_size,3,112,112], i.e. the two images are mixed with each other.\n    \"\"\"\n    batch_ya, batch_yb = batch_y, batch_y[index]\n    return mixed_batchx, batch_ya, batch_yb, lam" ]
[ [ "numpy.random.beta", "torch.randperm" ] ]
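mixup above returns the mixed inputs together with both label vectors and the mixing coefficient lam; the usual companion step is to take the same convex combination of two loss terms. A hedged usage sketch with a toy linear classifier (the model, optimizer and random data are placeholders, and the mixup shown here is just a condensed restatement of the function in this entry):

    import numpy as np
    import torch
    import torch.nn as nn

    def mixup(batch_x, batch_y, alpha=0.5, use_cuda=False):
        # Condensed version of the function above.
        lam = float(np.random.beta(alpha, alpha)) if alpha > 0 else 1.0
        index = torch.randperm(batch_x.size(0))
        if use_cuda:
            index = index.cuda()
        mixed_x = lam * batch_x + (1 - lam) * batch_x[index, :]
        return mixed_x, batch_y, batch_y[index], lam

    model = nn.Linear(10, 3)                      # toy classifier
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

    x = torch.randn(8, 10)
    y = torch.randint(0, 3, (8,))

    mixed_x, y_a, y_b, lam = mixup(x, y, alpha=0.5)
    logits = model(mixed_x)
    # Mixup loss: the same convex combination applied to the two label sets.
    loss = lam * criterion(logits, y_a) + (1 - lam) * criterion(logits, y_b)
    loss.backward()
    optimizer.step()
    print(float(loss))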
youweiliang/norm_robustness
[ "a258ae64bb5cef1feeb120e3ec088bd311853643" ]
[ "AT/train_adv_cifar10_wd.py" ]
[ "from __future__ import print_function\nimport os\nimport pickle\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\n\n# from models.wideresnet import *\nfrom models import *\nfrom losses import alp_loss, pgd_loss, trades_loss\n\nfrom lip.add_lip import bind_lip\nfrom lip.recorder import Recorder\n\nfrom auto_attack.autoattack import AutoAttack\n\n\ndef train(args, model, device, train_loader, optimizer, recorder, epoch):\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n\n optimizer.zero_grad()\n\n # calculate robust loss\n loss = LOSS[args.loss](model=model,\n x_natural=data,\n y=target,\n optimizer=optimizer,\n step_size=args.step_size,\n epsilon=args.epsilon,\n perturb_steps=args.num_steps,\n beta=args.beta,\n loss=args.loss,\n distance=args.distance,\n m = args.m,\n s = args.s)\n loss.backward()\n\n lipc, all_lip = model.calc_lip()\n\n recorder.record('lip_sum', lipc)\n recorder.record('lip', all_lip)\n\n optimizer.step()\n\n # print progress\n # if batch_idx % args.log_interval == 0:\n # print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n # epoch, batch_idx * len(data), len(train_loader.dataset),\n # 100. * batch_idx / len(train_loader), loss.item()))\n\n\ndef eval_train(model, device, train_loader, recorder):\n model.eval()\n train_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in train_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n train_loss += F.cross_entropy(output, target, size_average=False).item()\n pred = output.max(1, keepdim=True)[1]\n correct += pred.eq(target.view_as(pred)).sum().item()\n train_loss /= len(train_loader.dataset)\n print('Training: loss: {:.4f}, Acc: {}/{} ({:.0f}%)'.format(\n train_loss, correct, len(train_loader.dataset),\n 100. * correct / len(train_loader.dataset)), end=' | ')\n training_accuracy = correct / len(train_loader.dataset)\n\n recorder.record('train_acc', training_accuracy)\n recorder.record_train(train_loss)\n\n return train_loss, training_accuracy\n\n\ndef eval_test(model, device, test_loader, recorder):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n test_loss += F.cross_entropy(output, target, size_average=False).item()\n pred = output.max(1, keepdim=True)[1]\n correct += pred.eq(target.view_as(pred)).sum().item()\n test_loss /= len(test_loader.dataset)\n print('Test: loss: {:.4f}, Acc: {}/{} ({:.0f}%)'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. 
* correct / len(test_loader.dataset)))\n \n test_accuracy = correct / len(test_loader.dataset)\n\n recorder.record('test_acc', test_accuracy)\n recorder.record_test(test_loss)\n\n return test_loss, test_accuracy\n\n\ndef adjust_learning_rate(optimizer, epoch):\n \"\"\"decrease the learning rate\"\"\"\n lr = args.lr\n if epoch >= 75:\n lr = args.lr * 0.1\n if epoch >= 90:\n lr = args.lr * 0.01\n if epoch >= 100:\n lr = args.lr * 0.001\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\ndef main():\n # init model, ResNet18() can be also used here for training\n if args.loss == 'alp' or args.loss == 'trades' or args.loss == 'pgd':\n print(\"normalize False\")\n model = nets[args.model]().to(device)\n else:\n print(\"normalize True\")\n model = nets[args.model](use_FNandWN=True).to(device)\n \n bind_lip(model, norm='1-norm', verbose=False)\n\n recorder = Recorder(f'{name}')\n\n optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)\n\n for epoch in range(1, args.epochs + 1):\n # adjust learning rate for SGD\n print(f'Epoch: {epoch:3d}', end=' ')\n adjust_learning_rate(optimizer, epoch)\n\n # adversarial training\n train(args, model, device, train_loader, optimizer, recorder, epoch)\n\n # evaluation on natural examples\n # print('==============')\n eval_train(model, device, train_loader, recorder)\n eval_test(model, device, test_loader, recorder)\n # print('==============')\n\n # save checkpoint\n if (epoch >= args.start_freq) and (epoch % args.save_freq == 0):\n torch.save(model.state_dict(),\n os.path.join(model_dir, f'{name}-epoch{epoch}.pt'))\n\n recorder.step()\n\n torch.save(model.state_dict(), os.path.join(model_dir, f'{name}.pt'))\n\n with open(f'{log_dir}/{name}_record.pkl', 'wb') as file:\n pickle.dump(recorder, file)\n\n recorder.draw('lip_sum')\n recorder.draw_many('lip')\n\n recorder.draw('train_acc')\n recorder.draw('test_acc')\n\n adversary = AutoAttack(model, norm='Linf', eps=8/255, version='standard', verbose=False)\n adversary.attacks_to_run = ['apgd-ce', 'apgd-t']\n\n model.eval()\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(testloader):\n inputs, targets = inputs.to(device), targets.to(device)\n # print(inputs.max(), inputs.min())\n\n x_adv, robust_accuracy = adversary.run_standard_evaluation(inputs, targets, bs=128)\n print(f'robust_accuracy: {robust_accuracy}')\n break\n\n recorder.record('robust_accuracy', robust_accuracy)\n\n with open(f'{log_dir}/{name}_record.pkl', 'wb') as file:\n pickle.dump(recorder, file)\n\nif __name__ == '__main__':\n nets = {\n 'vgg': VGG,\n 'regnet': RegNetX_200MF,\n 'resnet': ResNet18,\n 'preact_resnet': PreActResNet18,\n 'googlenet': GoogLeNet,\n 'densenet': DenseNet121,\n 'resnetxt': ResNeXt29_2x64d,\n 'mobilenet': MobileNet,\n 'mobilenet2': MobileNetV2,\n 'dpn': DPN92,\n 'shefflenet': ShuffleNetG2,\n 'senet': SENet18,\n 'shefflenet2': ShuffleNetV2,\n 'efficientnet': EfficientNetB0\n }\n\n models = [key for key, value in nets.items()]\n\n\n parser = argparse.ArgumentParser(description='Adversarial Training')\n parser.add_argument('--batch-size', type=int, default=128, metavar='N',\n help='input batch size for training (default: 128)')\n parser.add_argument('--test-batch-size', type=int, default=128, metavar='N',\n help='input batch size for testing (default: 128)')\n parser.add_argument('--epochs', type=int, default=120, metavar='N',\n help='number of epochs to train')\n parser.add_argument('--weight-decay', '--wd', default=0.,\n 
type=float, metavar='W')\n parser.add_argument('--lr', type=float, default=0.1, metavar='LR',\n help='learning rate')\n parser.add_argument('--momentum', type=float, default=0.9, metavar='M',\n help='SGD momentum')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--epsilon', default=0.031,\n help='perturbation')\n parser.add_argument('--num-steps', default=10,\n help='perturb number of steps')\n parser.add_argument('--step-size', default=0.007,\n help='perturb step size')\n parser.add_argument('--beta', type = float, default=1.0)\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n parser.add_argument('--log-interval', type=int, default=100, metavar='N',\n help='how many batches to wait before logging training status')\n parser.add_argument('--snap-epoch', type=int, default=5, metavar='N',\n help='how many batches to test') \n parser.add_argument('--model', default='resnet', type=str, \n choices=models, help='model to use')\n parser.add_argument('--save-freq', default=10, type=int, metavar='N',\n help='save frequency')\n parser.add_argument('--start-freq', default=1, type=int, metavar='N',\n help='start point')\n parser.add_argument('--loss', default='pgd', type=str, \n choices=['pgd', 'pgd_he', 'alp', 'alp_he', 'trades', 'trades_he'])\n parser.add_argument('--distance', default='l_inf', type=str, help='distance')\n parser.add_argument('--m', default=0.2, type=float, help='angular margin')\n parser.add_argument('--s', default=15.0, type=float, help='s value')\n parser.add_argument('--gpu_id', default='0', type=str, help='gpu id to use')\n\n args = parser.parse_args()\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu_id\n\n model_dir = \"checkpoint_wd//\" + args.loss\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n\n log_dir = './log_wd'\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n torch.manual_seed(args.seed)\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n kwargs = {'num_workers': 2, 'pin_memory': True} if use_cuda else {}\n\n # setup data loader\n transform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n ])\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n ])\n trainset = torchvision.datasets.CIFAR10(root='../data', train=True, download=True, transform=transform_train)\n train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, **kwargs)\n testset = torchvision.datasets.CIFAR10(root='../data', train=False, download=True, transform=transform_test)\n test_loader = torch.utils.data.DataLoader(testset, batch_size=args.test_batch_size, shuffle=False, **kwargs)\n testloader = torch.utils.data.DataLoader(testset, batch_size=128*8, shuffle=True, **kwargs)\n LOSS= {\n 'pgd': pgd_loss,\n 'pgd_he': pgd_loss,\n 'alp': alp_loss,\n 'alp_he': alp_loss,\n 'trades': trades_loss,\n 'trades_he': trades_loss,\n }\n\n for los in ['alp']:\n args.loss = los\n name = f'{args.model}_{args.loss}_wd'\n print(f'Using {name}')\n main()\n" ]
[ [ "torch.device", "torch.no_grad", "torch.manual_seed", "torch.nn.functional.cross_entropy", "torch.cuda.is_available", "torch.utils.data.DataLoader" ] ]
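The training script above delegates adversarial example generation to pgd_loss / trades_loss from its losses module, which is not included in this entry. For orientation, a generic L-infinity PGD loop in textbook form (a sketch only: the toy model, step sizes and random data are illustrative, and this is not claimed to match the repository's implementation):

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    def pgd_attack(model, x, y, epsilon=8 / 255, step_size=2 / 255, num_steps=10):
        # Start from a random point inside the eps-ball, then ascend the loss.
        x_adv = x.detach() + torch.empty_like(x).uniform_(-epsilon, epsilon)
        x_adv = torch.clamp(x_adv, 0.0, 1.0)
        for _ in range(num_steps):
            x_adv.requires_grad_(True)
            loss = F.cross_entropy(model(x_adv), y)
            grad = torch.autograd.grad(loss, x_adv)[0]
            x_adv = x_adv.detach() + step_size * grad.sign()
            x_adv = torch.min(torch.max(x_adv, x - epsilon), x + epsilon)  # project back into the eps-ball
            x_adv = torch.clamp(x_adv, 0.0, 1.0)                           # stay inside the valid image range
        return x_adv

    model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10))  # toy stand-in for ResNet18
    x = torch.rand(4, 3, 32, 32)
    y = torch.randint(0, 10, (4,))
    x_adv = pgd_attack(model, x, y)
    print((x_adv - x).abs().max() <= 8 / 255 + 1e-6)  # perturbation stays within the eps-ball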
rangsimanketkaew/moleview
[ "686161680284fe81f5d406d4119a45764ab6e14d" ]
[ "moleview/src/draw.py" ]
[ "\"\"\"\r\nMIT License\r\n\r\nCopyright (c) 2020-2021 Rangsiman Ketkaew\r\n\r\nPermission is hereby granted, free of charge, to any person obtaining a copy\r\nof this software and associated documentation files (the \"Software\"), to deal\r\nin the Software without restriction, including without limitation the rights\r\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\ncopies of the Software, and to permit persons to whom the Software is\r\nfurnished to do so, subject to the following conditions:\r\n\r\nThe above copyright notice and this permission notice shall be included in all\r\ncopies or substantial portions of the Software.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\nSOFTWARE.\r\n\"\"\"\r\n\r\nfrom matplotlib import pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\r\n\r\nfrom .atom import check_atom, check_radii, check_color, find_bonds\r\n\r\n\r\nclass DrawComplex:\r\n \"\"\"\r\n Display 3D structure of octahedral complex with label for each atoms.\r\n\r\n Parameters\r\n ----------\r\n atom : list, None\r\n Atomic symbols of octahedral structure.\r\n coord : list, array, tuple, bool, None\r\n Atomic coordinates of octahedral structure.\r\n cutoff_global : int or float\r\n Global cutoff for screening bonds.\r\n Default value is 2.0.\r\n cutoff_hydrogen : int or float\r\n Cutoff for screening hydrogen bonds.\r\n Default value is 1.2.\r\n\r\n Examples\r\n --------\r\n >>> atom = ['Fe', 'N', 'N', 'N', 'O', 'O', 'O']\r\n >>> coord = [[2.298354000, 5.161785000, 7.971898000],\r\n [1.885657000, 4.804777000, 6.183726000],\r\n [1.747515000, 6.960963000, 7.932784000],\r\n [4.094380000, 5.807257000, 7.588689000],\r\n [0.539005000, 4.482809000, 8.460004000],\r\n [2.812425000, 3.266553000, 8.131637000],\r\n [2.886404000, 5.392925000, 9.848966000]]\r\n >>> test = DrawComplex(atom=atom, coord=coord)\r\n >>> test.add_atom()\r\n >>> test.add_bond()\r\n >>> test.add_legend()\r\n >>> test.show_plot()\r\n\r\n \"\"\"\r\n\r\n def __init__(self, atom=None, coord=None, cutoff_global=2.0, cutoff_hydrogen=1.2):\r\n self.atom = atom\r\n self.coord = coord\r\n self.cutoff_global = cutoff_global\r\n self.cutoff_hydrogen = cutoff_hydrogen\r\n\r\n if self.atom is None:\r\n raise TypeError(\"atom is not specified\")\r\n if self.coord is None:\r\n raise TypeError(\"coord is not specified\")\r\n\r\n self.title_name = \"Display Complex\"\r\n self.title_size = \"12\"\r\n self.label_size = \"10\"\r\n self.show_title = True\r\n self.show_axis = True\r\n self.show_grid = True\r\n\r\n self.atoms_pair = []\r\n self.bond_list = None\r\n\r\n self.start_plot()\r\n\r\n def start_plot(self):\r\n \"\"\"\r\n Introduce figure to plot.\r\n\r\n \"\"\"\r\n self.fig = plt.figure()\r\n self.ax = Axes3D(self.fig)\r\n\r\n self.ax.set_title(\"Full complex\", fontsize=\"12\")\r\n # ax = fig.add_subplot(111, projection='3d')\r\n\r\n def add_atom(self):\r\n \"\"\"\r\n Add all atoms to show in figure.\r\n\r\n \"\"\"\r\n for i in range(len(self.coord)):\r\n # Determine atomic number\r\n n = check_atom(self.atom[i])\r\n 
self.ax.scatter(\r\n self.coord[i][0],\r\n self.coord[i][1],\r\n self.coord[i][2],\r\n marker=\"o\",\r\n linewidths=0.5,\r\n edgecolors=\"black\",\r\n color=check_color(n),\r\n label=f\"{self.atom[i]}\",\r\n s=check_radii(n) * 300,\r\n )\r\n\r\n def add_symbol(self):\r\n \"\"\"\r\n Add symbol of atoms to show in figure.\r\n\r\n \"\"\"\r\n for j in range(len(self.atom)):\r\n self.ax.text(\r\n self.coord[j][0] + 0.1,\r\n self.coord[j][1] + 0.1,\r\n self.coord[j][2] + 0.1,\r\n f\"{self.atom[j]},{j}\",\r\n fontsize=9,\r\n )\r\n\r\n def add_bond(self):\r\n \"\"\"\r\n Calculate bond distance, screen bond, and add them to show in figure.\r\n\r\n See Also\r\n --------\r\n octadist.src.util.find_bonds : Find atomic bonds.\r\n\r\n \"\"\"\r\n _, self.bond_list = find_bonds(self.atom, self.coord, self.cutoff_global, self.cutoff_hydrogen)\r\n\r\n for i in range(len(self.bond_list)):\r\n get_atoms = self.bond_list[i]\r\n x, y, z = zip(*get_atoms)\r\n atoms = list(zip(x, y, z))\r\n self.atoms_pair.append(atoms)\r\n\r\n for i in range(len(self.atoms_pair)):\r\n merge = list(zip(self.atoms_pair[i][0], self.atoms_pair[i][1]))\r\n x, y, z = merge\r\n self.ax.plot(x, y, z, \"k-\", color=\"black\", linewidth=2)\r\n\r\n def add_legend(self):\r\n \"\"\"\r\n Add atoms legend to show in figure.\r\n\r\n References\r\n ----------\r\n 1. Remove duplicate labels in legend.\r\n Ref: https://stackoverflow.com/a/26550501/6596684.\r\n\r\n 2. Fix size of point in legend.\r\n Ref: https://stackoverflow.com/a/24707567/6596684.\r\n\r\n \"\"\"\r\n # remove duplicate labels\r\n handles, labels = self.ax.get_legend_handles_labels()\r\n handle_list, label_list = [], []\r\n\r\n for handle, label in zip(handles, labels):\r\n if label not in label_list:\r\n handle_list.append(handle)\r\n label_list.append(label)\r\n leg = plt.legend(handle_list, label_list, loc=\"lower left\", scatterpoints=1, fontsize=12)\r\n\r\n # fix size of point in legend\r\n for i in range(len(leg.legendHandles)):\r\n leg.legendHandles[i]._sizes = [90]\r\n\r\n def config_plot(self, show_title=True, show_axis=True, show_grid=True, **kwargs):\r\n \"\"\"\r\n Setting configuration for figure.\r\n\r\n Parameters\r\n ----------\r\n show_title : bool\r\n If True, show title of figure.\r\n If False, not show title of figure.\r\n show_axis : bool\r\n If True, show axis of figure.\r\n If False, not show axis of figure.\r\n show_grid : bool\r\n If True, show grid of figure.\r\n If False, not show grid of figure.\r\n kwargs : dict (optional)\r\n title_name : title name of figure.\r\n title_size : text size of title.\r\n label_size : text size of axis labels.\r\n\r\n \"\"\"\r\n title_name_user = kwargs.get(\"title_name\")\r\n self.title_size = kwargs.get(\"title_size\")\r\n self.label_size = kwargs.get(\"label_size\")\r\n self.show_title = show_title\r\n self.show_axis = show_axis\r\n self.show_grid = show_grid\r\n\r\n if title_name_user is not None:\r\n self.ax.set_title(title_name_user)\r\n\r\n if self.title_size is not None:\r\n if title_name_user is None:\r\n title_name_user = self.title_name\r\n self.ax.set_title(title_name_user, fontsize=self.title_size)\r\n\r\n if self.label_size is not None:\r\n self.ax.set_xlabel(r\"X\", fontsize=self.label_size)\r\n self.ax.set_ylabel(r\"Y\", fontsize=self.label_size)\r\n self.ax.set_zlabel(r\"Z\", fontsize=self.label_size)\r\n\r\n if not self.show_title:\r\n self.ax.set_title(\"\")\r\n if not self.show_axis:\r\n plt.axis(\"off\")\r\n if not self.show_grid:\r\n self.ax.grid(False)\r\n\r\n def save_img(self, 
save=\"Complex_saved_by_OctaDist\", file=\"png\"):\r\n \"\"\"\r\n Save figure as an image.\r\n\r\n Parameters\r\n ----------\r\n save : str\r\n Name of image file.\r\n Default value is \"Complex_saved_by_OctaDist\".\r\n file : file\r\n Image type.\r\n Default value is \"png\".\r\n\r\n \"\"\"\r\n plt.savefig(f\"{save}.{file}\")\r\n\r\n def show_plot(self):\r\n \"\"\"\r\n Show plot.\r\n\r\n \"\"\"\r\n plt.show()\r\n" ]
[ [ "matplotlib.pyplot.savefig", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "matplotlib.pyplot.show", "matplotlib.pyplot.axis" ] ]
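add_bond above calls find_bonds, which keeps atom pairs whose interatomic distance falls below a cutoff. A minimal numpy sketch of that screening step (the helper name, toy coordinates and the 2.0 Angstrom cutoff are illustrative; the real find_bonds also handles hydrogens and element radii):

    import numpy as np

    def screen_bonds(coord, cutoff=2.0):
        # Return index pairs (i, j), i < j, whose distance is below the cutoff.
        coord = np.asarray(coord, dtype=float)
        diff = coord[:, None, :] - coord[None, :, :]   # pairwise displacement vectors
        dist = np.linalg.norm(diff, axis=-1)           # pairwise distances
        i, j = np.triu_indices(len(coord), k=1)        # unique pairs only
        mask = dist[i, j] < cutoff
        return [(int(a), int(b)) for a, b in zip(i[mask], j[mask])]

    # Octahedral-ish toy complex: metal at the origin, six ligand atoms ~1.9 away.
    coord = [[0, 0, 0],
             [1.9, 0, 0], [-1.9, 0, 0],
             [0, 1.9, 0], [0, -1.9, 0],
             [0, 0, 1.9], [0, 0, -1.9]]
    print(screen_bonds(coord))  # expected: [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6)]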
google-research/reverse-engineering-neural-networks
[ "5449da47db051cb073786615b2de52c9fdade319" ]
[ "renn/data/datasets.py" ]
[ "\"\"\"Datasets.\"\"\"\n\nimport tensorflow_datasets as tfds\nimport tensorflow_text as text\nimport tensorflow as tf\n\nimport os\n\nfrom renn import utils\nfrom renn.data.tokenizers import load_tokenizer, SEP, tensor_punctuation_separator\nfrom renn.data import data_utils\n\n__all__ = [\n 'ag_news', 'goemotions', 'imdb', 'snli', 'tokenize_fun', 'mnist', 'yelp',\n 'dbpedia', 'amazon'\n]\n\n\ndef pipeline(dset, preprocess_fun=utils.identity, filter_fn=None, bufsize=1024):\n \"\"\"Common (standard) dataset pipeline.\n Preprocesses the data, filters it (if a filter function is specified), caches it, and shuffles it.\n\n Note: Does not batch\"\"\"\n\n # Apply custom preprocessing.\n dset = dset.map(preprocess_fun)\n\n # Apply custom filter.\n if filter_fn is not None:\n dset = dset.filter(filter_fn)\n\n # Cache and shuffle.\n dset = dset.cache().shuffle(buffer_size=bufsize)\n\n return dset\n\n\ndef tokenize_fun(tokenizer):\n \"\"\"Standard text processing function.\"\"\"\n wsp = text.WhitespaceTokenizer()\n return utils.compose(tokenizer.tokenize, wsp.tokenize, text.case_fold_utf8)\n\n\ndef tokenize_w_punctuation(tokenizer):\n \"\"\"Text processing function which splits off punctuation.\"\"\"\n wsp = text.WhitespaceTokenizer()\n return utils.compose(tokenizer.tokenize, wsp.tokenize,\n tensor_punctuation_separator, text.case_fold_utf8)\n\n\ndef padded_batch(dset, batch_size, sequence_length, label_shape=()):\n \"\"\"Pads examples to a fixed length, and collects them into batches.\"\"\"\n\n # We assume the dataset contains inputs, labels, and an index.\n padded_shapes = {\n 'inputs': (sequence_length,),\n 'labels': label_shape,\n 'index': (),\n }\n\n # Filter out examples longer than sequence length.\n dset = dset.filter(lambda d: d['index'] <= sequence_length)\n\n # Pad remaining examples to the sequence length.\n dset = dset.padded_batch(batch_size, padded_shapes)\n\n return dset\n\n\ndef filter_pad_batch(dset, batch_size, field_lengths, padded_shapes):\n \"\"\"Pads examples to a fixed length and collects them into batches.\n Unlike padded_batch(), no assumption is made as to the fields in each\n example\"\"\"\n\n # Filter out examples longer than the desired lengths\n for field, length in field_lengths.items():\n dset = dset.filter(lambda d: d[field] <= length)\n\n dset = dset.padded_batch(batch_size, padded_shapes)\n\n return dset\n\n\ndef load_tfds(name, split, preprocess_fun, filter_fn=None, data_dir=None):\n \"\"\"Helper that loads a text classification dataset\n from tensorflow_datasets\"\"\"\n\n # Load raw dataset.\n dset = tfds.load(name, split=split, data_dir=data_dir)\n\n # Apply common dataset pipeline.\n dset = pipeline(dset, preprocess_fun=preprocess_fun, filter_fn=filter_fn)\n\n return dset\n\n\ndef load_csv(name, split, preprocess_fun, filter_fn=None, data_dir=None):\n \"\"\"Helper that loads a text classification dataset\n from a csv file\"\"\"\n # Load raw dataset.\n\n output_types = {'text': tf.string, 'label': tf.int64}\n output_shapes = {'text': (), 'label': ()}\n\n filename = os.path.join(data_dir, f'{split}.csv')\n row_parser = data_utils.PARSERS[name]\n dset_iterator = lambda: data_utils.readfile(filename, row_parser)\n dset = tf.data.Dataset.from_generator(dset_iterator, output_types,\n output_shapes)\n\n # Apply common dataset pipeline.\n dset = pipeline(dset, preprocess_fun=preprocess_fun, filter_fn=filter_fn)\n\n return dset\n\n\ndef paracrawl(language_pair,\n vocab_files,\n sequence_length,\n batch_size=64,\n transform_fn=utils.identity,\n filter_fn=None,\n 
data_dir=None):\n \"\"\"Loads a paracrawl translation dataset from TFDS.\n\n Arguments:\n language_pair: str, e.g. 'ende', specifying both languages.\n vocab_files: List[str], vocab filenames for each language.\n \"\"\"\n\n PARACRAWL_LANGUAGE_PAIRS = [\n 'enbg', 'encs', 'enda', 'ende', 'enel', 'enes', 'enet', 'enfi', 'enfr',\n 'enga', 'enhr', 'enhu', 'enit', 'enlt', 'enlv', 'enmt', 'ennl', 'enpl',\n 'enpt', 'enro', 'ensk', 'ensl', 'ensv'\n ]\n\n if language_pair not in PARACRAWL_LANGUAGE_PAIRS:\n raise ValueError(f'language_pair must be one of {PARACRAWL_LANGUAGE_PAIRS}')\n languages = [language_pair[:2], language_pair[2:]]\n\n tokenizer_list = [\n tokenize_w_punctuation(load_tokenizer(f)) for f in vocab_files\n ]\n tokenizer_dict = dict(zip(languages, tokenizer_list))\n\n def _preprocess(d):\n tokens = {l: tokenizer_dict[l](d[l]).flat_values for l in languages}\n for l in languages:\n tokens.update({f'{l}_index': tf.size(tokens[l])})\n tokens.update({f'{l}_orig': d[l]})\n return transform_fn(tokens)\n\n dataset = tfds.load(\n f'para_crawl/{language_pair}',\n split='train', # para_crawl only has a train split\n data_dir=data_dir)\n\n dset = pipeline(dataset, preprocess_fun=_preprocess, filter_fn=filter_fn)\n\n # Filter out examples longer than sequence length.\n for l in languages:\n dset = dset.filter(lambda d: d[f'{l}_index'] <= sequence_length)\n\n # We assume the dataset contains inputs, labels, and an index.\n padded_shapes = {}\n for l in languages:\n padded_shapes[f'{l}_index'] = ()\n padded_shapes[f'{l}_orig'] = ()\n padded_shapes[l] = (sequence_length,)\n\n # Pad remaining examples to the sequence length.\n dset = dset.padded_batch(batch_size, padded_shapes)\n\n return dset, tokenizer_dict\n\n\ndef ag_news(split,\n vocab_file,\n sequence_length=100,\n batch_size=64,\n transform_fn=utils.identity,\n filter_fn=None,\n data_dir=None):\n \"\"\"Loads the ag news dataset.\"\"\"\n tokenize = tokenize_fun(load_tokenizer(vocab_file))\n\n def _preprocess(d):\n \"\"\"Applies tokenization.\"\"\"\n tokens = tokenize(d['description']).flat_values # Note: we ignore 'title'\n preprocessed = {\n 'inputs': tokens,\n 'labels': d['label'],\n 'index': tf.size(tokens),\n }\n return transform_fn(preprocessed)\n\n # Load dataset.\n dset = load_tfds('ag_news_subset',\n split,\n _preprocess,\n filter_fn,\n data_dir=data_dir)\n\n # Pad remaining examples to the sequence length.\n dset = padded_batch(dset, batch_size, sequence_length)\n\n return dset\n\n\ndef goemotions(split,\n vocab_file,\n sequence_length=50,\n batch_size=64,\n emotions=None,\n transform=utils.identity,\n filter_fn=None,\n data_dir=None):\n \"\"\"Loads the goemotions dataset.\"\"\"\n tokenize = tokenize_fun(load_tokenizer(vocab_file))\n\n if emotions is None: # Use all emotions.\n emotions = ('admiration', 'amusement', 'anger', 'annoyance', 'approval',\n 'caring', 'confusion', 'curiosity', 'desire', 'disappointment',\n 'disapproval', 'disgust', 'embarrassment', 'excitement', 'fear',\n 'gratitude', 'grief', 'joy', 'love', 'nervousness', 'neutral',\n 'optimism', 'pride', 'realization', 'relief', 'remorse',\n 'sadness', 'surprise')\n\n def _preprocess(d):\n tokens = tokenize(d['comment_text']).flat_values\n index = tf.size(tokens)\n labels = tf.convert_to_tensor([d[e] for e in emotions], dtype=tf.int64)\n preprocessed = {\n 'inputs': tokens,\n 'labels': labels,\n 'index': index,\n }\n return transform(preprocessed)\n\n # Load dataset.\n dset = load_tfds('goemotions',\n split,\n _preprocess,\n filter_fn,\n data_dir=data_dir)\n\n # Pad 
remaining examples to the sequence length.\n dset = padded_batch(dset,\n batch_size,\n sequence_length,\n label_shape=(len(emotions),))\n\n return dset\n\n\ndef imdb(split,\n vocab_file,\n sequence_length=1000,\n batch_size=64,\n transform=utils.identity,\n filter_fn=None,\n data_dir=None):\n \"\"\"Loads the imdb reviews dataset.\"\"\"\n tokenize = tokenize_fun(load_tokenizer(vocab_file))\n\n def _preprocess(d):\n \"\"\"Applies tokenization.\"\"\"\n tokens = tokenize(d['text']).flat_values\n preprocessed = {\n 'inputs': tokens,\n 'labels': d['label'],\n 'index': tf.size(tokens),\n }\n return transform(preprocessed)\n\n # Load dataset.\n dset = load_tfds('imdb_reviews',\n split,\n _preprocess,\n filter_fn,\n data_dir=data_dir)\n\n # Pad remaining examples to the sequence length.\n dset = padded_batch(dset, batch_size, sequence_length)\n\n return dset\n\n\ndef yelp(split,\n num_classes,\n vocab_file,\n sequence_length=1000,\n batch_size=64,\n transform=utils.identity,\n filter_fn=None,\n data_dir=None):\n \"\"\"Loads the yelp reviews dataset.\"\"\"\n tokenize = tokenize_fun(load_tokenizer(vocab_file))\n\n if data_dir is None:\n raise ValueError('Yelp dataset requires data_dir to be provided.')\n\n label_conversion = data_utils.sentiment_relabel(num_classes)\n\n def _preprocess(d):\n \"\"\"Applies tokenization, and\n transforms the yelp labels according to the\n specified number of classes\"\"\"\n\n tokens = tokenize(d['text']).flat_values\n preprocessed = {\n 'inputs': tokens,\n 'labels': label_conversion(d['label']),\n 'index': tf.size(tokens),\n }\n\n return transform(preprocessed)\n\n filter_fn = lambda x: x['labels'] != -1\n\n # Load dataset.\n dset = load_csv('yelp', split, _preprocess, filter_fn, data_dir=data_dir)\n\n # Pad remaining examples to the sequence length.\n dset = padded_batch(dset, batch_size, sequence_length)\n\n return dset\n\n\ndef amazon(split,\n num_classes,\n vocab_file,\n sequence_length=250,\n batch_size=64,\n transform=utils.identity,\n filter_fn=None,\n data_dir=None):\n \"\"\"Loads the yelp reviews dataset.\"\"\"\n tokenize = tokenize_fun(load_tokenizer(vocab_file))\n\n if data_dir is None:\n raise ValueError('Amazon dataset requires data_dir to be provided.')\n\n label_conversion = data_utils.sentiment_relabel(num_classes)\n\n def _preprocess(d):\n \"\"\"Applies tokenization, and\n transforms the Amazon labels according to the\n specified number of classes\"\"\"\n\n tokens = tokenize(d['text']).flat_values\n preprocessed = {\n 'inputs': tokens,\n 'labels': label_conversion(d['label']),\n 'index': tf.size(tokens),\n }\n\n return transform(preprocessed)\n\n filter_fn = lambda x: x['labels'] != -1\n\n # Load dataset.\n dset = load_csv('amazon', split, _preprocess, filter_fn, data_dir=data_dir)\n\n # Pad remaining examples to the sequence length.\n dset = padded_batch(dset, batch_size, sequence_length)\n\n return dset\n\n\ndef dbpedia(split,\n num_classes,\n vocab_file,\n sequence_length=1000,\n batch_size=64,\n transform=utils.identity,\n filter_fn=None,\n data_dir=None):\n \"\"\"Loads the dpedia text classification dataset.\"\"\"\n tokenize = tokenize_fun(load_tokenizer(vocab_file))\n\n if data_dir is None:\n raise ValueError('DBPedia dataset requires data_dir to be provided.')\n\n def _preprocess(d):\n \"\"\"Applies tokenization, and\n transforms the dbpedia labels according to the\n specified number of classes\n\n For a given number of classes, the classes with\n labels below that number are kept, and all other classes\n are removed.\n\n So, e.g., num_classes = 
4, would keep classes 0,1,2,3\"\"\"\n\n def relabel(label):\n if label <= num_classes:\n # in DBPedia csv file, labels are\n # given as 1, 2, ...\n return label - 1\n else:\n return tf.constant(-1, dtype=tf.int64)\n\n tokens = tokenize(d['text']).flat_values\n preprocessed = {\n 'inputs': tokens,\n 'labels': relabel(d['label']),\n 'index': tf.size(tokens),\n }\n\n return transform(preprocessed)\n\n filter_fn = lambda x: x['labels'] != -1\n\n # Load dataset.\n dset = load_csv('dbpedia', split, _preprocess, filter_fn, data_dir=data_dir)\n\n # Pad remaining examples to the sequence length.\n dset = padded_batch(dset, batch_size, sequence_length)\n\n return dset\n\n\ndef snli_sep(split,\n vocab_file,\n hypothesis_length=40,\n premise_length=40,\n batch_size=64,\n transform=utils.identity,\n filter_fn=None,\n data_dir=None):\n \"\"\"Loads the SNLI dataset, with hypothesis and premise\n separated as two different fields \"\"\"\n tokenize = tokenize_fun(load_tokenizer(vocab_file))\n\n def _preprocess(d):\n \"\"\"Applies tokenization.\"\"\"\n hypothesis = tokenize(d['hypothesis']).flat_values\n premise = tokenize(d['premise']).flat_values\n return transform({\n 'hypothesis': hypothesis,\n 'premise': premise,\n 'hypothesis_index': tf.size(hypothesis),\n 'premise_index': tf.size(premise),\n 'labels': d['label'],\n })\n\n # Load dataset.\n dset = load_tfds('snli', split, _preprocess, filter_fn, data_dir=data_dir)\n\n # Pad remaining examples to the sequence length.\n field_lengths = {\n 'hypothesis_index': hypothesis_length,\n 'premise_index': premise_length\n }\n padded_shapes = {\n 'hypothesis': (hypothesis_length,),\n 'premise': (premise_length,),\n 'premise_index': (),\n 'hypothesis_index': (),\n 'labels': ()\n }\n dset = filter_pad_batch(dset, batch_size, field_lengths, padded_shapes)\n\n return dset\n\n\ndef snli(split,\n vocab_file,\n sequence_length=75,\n batch_size=64,\n transform=utils.identity,\n filter_fn=None,\n data_dir=None):\n \"\"\"Loads the SNLI dataset.\"\"\"\n tokenize = tokenize_fun(load_tokenizer(vocab_file))\n\n def _preprocess(d):\n \"\"\"Applies tokenization.\"\"\"\n hypothesis = tokenize(d['hypothesis']).flat_values\n premise = tokenize(d['premise']).flat_values\n sep = tokenize(SEP).flat_values\n tokens = tf.concat([hypothesis, sep, premise], axis=0)\n return transform({\n 'inputs': tokens,\n 'labels': d['label'],\n 'index': tf.size(tokens),\n })\n\n # Load dataset.\n dset = load_tfds('snli', split, _preprocess, filter_fn, data_dir=data_dir)\n\n # Pad remaining examples to the sequence length.\n dset = padded_batch(dset, batch_size, sequence_length)\n\n return dset\n\n\ndef mnist(split,\n order='row',\n batch_size=64,\n transform=utils.identity,\n filter_fn=None,\n data_dir=None,\n classes=None):\n \"\"\"Loads the serialized MNIST dataset.\n\n Args:\n classes - the subset of classes to keep.\n If None, all will be kept\"\"\"\n\n def _preprocess(example):\n image = tf.squeeze(example['image'])\n image = tf.cast(image, tf.float32) / 255.\n\n if order == 'col':\n image = tf.transpose(image, perm=[1, 0])\n\n return transform({'inputs': image, 'labels': example['label'], 'index': 28})\n\n # Load dataset.\n dset = tfds.load('mnist', data_dir=data_dir)[split]\n\n if classes is not None:\n # Filter out images without the proper label\n allowed_fn = _in_subset(classes)\n # Remap labels to be in range (0, number of classes)\n relabel_fn = _relabel_subset(classes)\n\n dset = dset.filter(allowed_fn).map(relabel_fn)\n\n dset = pipeline(dset, _preprocess, filter_fn)\n\n # Batch 
dataset.\n return dset.batch(batch_size)\n\n\ndef _relabel_subset(subclasses):\n \"\"\"Provides a function for relabeling classes.\n Example should contain key 'label' \"\"\"\n\n s = tf.constant(subclasses, dtype=tf.int64)\n\n def relabel(example):\n example.update({'label': tf.where(s == example['label'])[0][0]})\n return example\n\n return relabel\n\n\ndef _in_subset(subclasses):\n \"\"\"Provides a function for determining whether\n an example is in one of the provided subclasses.\n Expmle should contain a key 'label' \"\"\"\n\n s = tf.constant(subclasses, dtype=tf.int64)\n\n def in_subset(example):\n label = example['label']\n isallowed = tf.equal(s, label)\n reduced = tf.reduce_sum(tf.cast(isallowed, tf.float32))\n return tf.greater(reduced, tf.constant(0.))\n\n return in_subset\n" ]
[ [ "tensorflow.size", "tensorflow.convert_to_tensor", "tensorflow.concat", "tensorflow.where", "tensorflow.equal", "tensorflow.transpose", "tensorflow.data.Dataset.from_generator", "tensorflow.constant", "tensorflow.squeeze", "tensorflow.cast" ] ]
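padded_batch above first filters out examples whose 'index' (token count) exceeds sequence_length and then pads the rest to a fixed shape. A toy tf.data sketch of the same filter-then-pad pattern (the generator and its contents are made up; only the 'inputs'/'labels'/'index' field convention is borrowed from the file above):

    import tensorflow as tf

    def toy_examples():
        # Variable-length token sequences with a label and a length index.
        data = [([3, 7, 2], 0), ([5, 1], 1), ([9, 9, 9, 9, 9, 9], 0), ([4], 1)]
        for tokens, label in data:
            yield {"inputs": tokens, "labels": label, "index": len(tokens)}

    dset = tf.data.Dataset.from_generator(
        toy_examples,
        output_signature={
            "inputs": tf.TensorSpec(shape=(None,), dtype=tf.int64),
            "labels": tf.TensorSpec(shape=(), dtype=tf.int64),
            "index": tf.TensorSpec(shape=(), dtype=tf.int64),
        },
    )

    sequence_length, batch_size = 4, 2
    dset = dset.filter(lambda d: d["index"] <= sequence_length)   # drop over-long examples
    dset = dset.padded_batch(
        batch_size,
        padded_shapes={"inputs": (sequence_length,), "labels": (), "index": ()},
    )

    for batch in dset:
        print(batch["inputs"].numpy())  # rows zero-padded up to length 4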
mhauru/uhlmann-fidelities-from-tensor-networks
[ "cc30882db529fcc787418b172550ae91bd34dcba" ]
[ "plot_quench_magnetization.py" ]
[ "import numpy as np\nimport scipy.integrate as integrate\nimport pickle\nimport datetime\nimport sys\nimport matplotlib.cm as cm\nimport matplotlib.colors as colors\nimport matplotlib.gridspec as gridspec\nfrom matplotlib import pyplot as plt\nfrom ncon import ncon\nfrom tntools import datadispenser\n\nnoshow = \"noshow\" in sys.argv\npractice = \"practice\" in sys.argv\n\nmax_t = 20.0\nmax_x = 25\ncomplexion_timestep = 0.2\ninsertions = \"zz\"\nmax_x_plot = 25\n\ndatafilename = (\n \"quench_magnetization_data/{}expectations_{}_{}_{}.npy\"\n .format(insertions, complexion_timestep, max_t, max_x)\n)\nmax_t_step = int(np.ceil(max_t/complexion_timestep))\nt_steps = list(range(max_t_step+1))\nts = [complexion_timestep*t_step for t_step in t_steps]\nposes = list(range(-max_x, max_x+1))\n\nexpectations = np.load(datafilename)\nmax_t_step = expectations.shape[1]\nt_steps = list(range(max_t_step))\nts = np.array([complexion_timestep*t_step for t_step in t_steps])\n\nfig = plt.figure(figsize=(8,4))\n\n# Plot a heat map over both x and t\nimag_norm = np.linalg.norm(np.imag(expectations))/np.linalg.norm(expectations)\nif imag_norm < 1e-10:\n print(\"Taking the real part, since imags are zero.\")\n expectations = np.real(expectations)\nelse:\n print(\"Taking the abs, since imags are not zero.\")\n expectations = np.abs(expectations)\nax = fig.add_subplot(111)\nX = np.array([poses for t in ts]).transpose()\nY = np.array([ts for pos in poses])\nZ = expectations\n\nif max_x > max_x_plot:\n X = X[max_x-max_x_plot:-max_x+max_x_plot,:]\n Y = Y[max_x-max_x_plot:-max_x+max_x_plot,:]\n Z = Z[max_x-max_x_plot:-max_x+max_x_plot,:]\n\nim = ax.pcolormesh(X, Y, Z)\nax.set_xlabel(\"Position $x$\")\nax.set_ylabel(\"Time $t$\")\nfig.colorbar(im, orientation=\"horizontal\")\n# The same as above, but compared to the exact solution.\n\nplt.tight_layout()\nplt.savefig(\"quench_magnetization.pdf\")\nif not noshow:\n plt.show()\n\n" ]
[ [ "numpy.array", "numpy.ceil", "numpy.linalg.norm", "matplotlib.pyplot.savefig", "numpy.load", "numpy.real", "matplotlib.pyplot.figure", "matplotlib.pyplot.tight_layout", "numpy.abs", "matplotlib.pyplot.show", "numpy.imag" ] ]
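The plotting script builds the X and Y grids for pcolormesh from list comprehensions plus a transpose. An equivalent construction with np.meshgrid on stand-in data (the array sizes are arbitrary; the real values come from the saved .npy file):

    import numpy as np
    import matplotlib.pyplot as plt

    poses = np.arange(-5, 6)                  # positions x
    ts = 0.2 * np.arange(11)                  # times t
    Z = np.random.rand(len(poses), len(ts))   # stand-in for the expectation values

    # X[i, j] = poses[i] and Y[i, j] = ts[j], matching Z's (position, time) layout,
    # which is what the transposed list comprehensions in the script produce.
    X, Y = np.meshgrid(poses, ts, indexing="ij")

    fig, ax = plt.subplots()
    im = ax.pcolormesh(X, Y, Z, shading="auto")
    ax.set_xlabel("Position $x$")
    ax.set_ylabel("Time $t$")
    fig.colorbar(im, orientation="horizontal")
    plt.close(fig)  # shape/axes check only; no display needed here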
poketorena/deep-learning-from-scratch
[ "1142428f121e5c57499ccb61a679df39044835da" ]
[ "convolution.py" ]
[ "import numpy as np\nfrom common.util import im2col\n\n\nclass Convolution:\n    def __init__(self, W, b, stride=1, pad=0):\n        self.W = W\n        self.b = b\n        self.stride = stride\n        self.pad = pad\n\n    def forward(self, x):\n        FN, C, FH, FW = self.W.shape\n        N, C, H, W = x.shape\n        out_h = int(1 + (H + 2 * self.pad - FH) / self.stride)\n        out_w = int(1 + (W + 2 * self.pad - FW) / self.stride)\n\n        col = im2col(x, FH, FW, self.stride, self.pad)\n        col_W = self.W.reshape(FN, -1).T  # unroll the filters\n        out = np.dot(col, col_W) + self.b\n\n        out = out.reshape(N, out_h, out_w, -1).transpose(0, 3, 1, 2)\n\n        return out\n" ]
[ [ "numpy.dot" ] ]
mberr/rank-based-evaluation
[ "76a0847eecf4350d92783e9773d6fc6b6c69ca51" ]
[ "src/kgm/models/matching/gcn_align.py" ]
[ "# coding=utf-8\n\"\"\"\nImplementation of GCN-Align.\n\nThe paper introducing the model can be found at https://www.aclweb.org/anthology/D18-1032.pdf.\n\nThe authors' implementation can be found at https://github.com/1049451037/GCN-Align and they also refer to\nhttps://github.com/1049451037/HIN-Align for an improved implementation.\n\"\"\"\nimport logging\nfrom typing import Any, Mapping, Optional\n\nimport torch\nfrom torch import nn\n\nfrom .base import GraphBasedKGMatchingModel, IndependentSideMixin\nfrom ...data import KnowledgeGraphAlignmentDataset, MatchSideEnum, SIDES\nfrom ...data.reduction import DropRelationInformationKnowledgeGraphToGraphReduction, KnowledgeGraphToGraphReduction, target_normalization\nfrom ...modules.embeddings.base import Embedding, EmbeddingNormalizationMode, NodeEmbeddingInitMethod, get_embedding_pair\nfrom ...modules.embeddings.norm import EmbeddingNormalizationMethod\nfrom ...modules.graph import GCNBlock, IdentityMessageCreator, MessagePassingBlock, OnlyUpdate, SumAggregator\n\nlogger = logging.getLogger(name=__name__)\n\n\nclass GCNAlign(IndependentSideMixin, GraphBasedKGMatchingModel):\n \"\"\"GCN-Align model implementation.\"\"\"\n\n #: The node embeddings\n node_embeddings: Mapping[MatchSideEnum, Embedding]\n\n def __init__(\n self,\n dataset: KnowledgeGraphAlignmentDataset,\n reduction_cls: Optional[KnowledgeGraphToGraphReduction] = None,\n reduction_kwargs: Optional[Mapping[str, Any]] = None,\n embedding_dim: int = 200,\n activation_cls: nn.Module = nn.ReLU,\n n_layers: int = 2,\n use_conv_weights: bool = False,\n node_embedding_init_method: NodeEmbeddingInitMethod = NodeEmbeddingInitMethod.sqrt_total, # 'total', # 'individual'\n vertical_sharing: bool = True,\n node_embedding_dropout: Optional[float] = None,\n node_embedding_init_config: Optional[Mapping[str, Any]] = None,\n ):\n \"\"\"\n Initialize the model.\n\n :param dataset:\n The dataset.\n :param reduction_cls:\n The reduction strategy to obtain a (weighted) adjacency matrix from a knowledge graph.\n :param embedding_dim:\n The dimension of the node embedding.\n :param activation_cls:\n The non-linear activation to use between the message passing steps.\n :param n_layers:\n The number of layers.\n :param use_conv_weights:\n Whether to use convolution weights.\n :param node_embedding_init_method:\n The method used to initialize the node embeddings.\n :param vertical_sharing:\n Whether to use \"vertical weight sharing\", i.e. 
apply the same convolution weights for all layers.\n :param node_embedding_dropout:\n An optional dropout to use on the node embeddings.\n \"\"\"\n if reduction_cls is None:\n reduction_cls = DropRelationInformationKnowledgeGraphToGraphReduction\n reduction_kwargs = dict(\n normalization=target_normalization,\n )\n super().__init__(dataset=dataset, reduction_cls=reduction_cls, reduction_kwargs=reduction_kwargs)\n\n # node embeddings\n self.node_embeddings = get_embedding_pair(\n init=node_embedding_init_method,\n dataset=dataset,\n embedding_dim=embedding_dim,\n dropout=node_embedding_dropout,\n trainable=True,\n init_config=node_embedding_init_config,\n norm=EmbeddingNormalizationMethod.l2,\n normalization_mode=EmbeddingNormalizationMode.every_forward,\n )\n\n # GCN layers\n self.n_layers = n_layers\n self.use_conv_weights = use_conv_weights\n self.vertical_sharing = vertical_sharing\n blocks = []\n if use_conv_weights:\n if self.vertical_sharing:\n gcn_block = GCNBlock(input_dim=embedding_dim, output_dim=embedding_dim, use_bias=True)\n activation = activation_cls()\n for _ in range(n_layers):\n blocks.append(gcn_block)\n blocks.append(activation)\n else:\n for _ in range(n_layers):\n gcn_block = GCNBlock(input_dim=embedding_dim, output_dim=embedding_dim, use_bias=True)\n activation = activation_cls()\n blocks.append(gcn_block)\n blocks.append(activation)\n else:\n message_block = MessagePassingBlock(\n message_creator=IdentityMessageCreator(),\n message_aggregator=SumAggregator(),\n node_updater=OnlyUpdate(),\n )\n for _ in range(n_layers):\n blocks.append(message_block)\n activation = activation_cls()\n blocks.append(activation)\n side_to_modules = {\n side: nn.ModuleList(blocks)\n for side in SIDES\n }\n self.layers = nn.ModuleDict(modules=side_to_modules)\n\n # Initialize parameters\n self.reset_parameters()\n\n def _forward_side(\n self,\n side: MatchSideEnum,\n indices: Optional[torch.LongTensor] = None,\n ) -> torch.FloatTensor: # noqa: D102\n x = self.node_embeddings[side](indices=None)\n\n # Prepare message passing keyword arguments\n adjacency = self.reductions[side]()\n message_passing_kwargs = {\n 'source': adjacency.source,\n 'target': adjacency.target,\n 'edge_weights': adjacency.values,\n }\n\n # forward pass through all layers\n if side in self.layers.keys():\n layers = self.layers[side] if side in self.layers.keys() else []\n else:\n logger.warning('No layers for side %s', side)\n layers = []\n\n for layer in layers:\n if isinstance(layer, MessagePassingBlock):\n x = layer(x, **message_passing_kwargs)\n else:\n x = layer(x)\n\n # Select indices if requested\n if indices is not None:\n x = x[indices]\n\n return x\n" ]
[ [ "torch.nn.ModuleDict", "torch.nn.ModuleList" ] ]
stas00/pytorch-lightning
[ "84c507c4df5f5c336deb19ce7f70fa02329f39f6" ]
[ "pytorch_lightning/core/step_result.py" ]
[ "from typing import Optional, Dict, Union, Sequence, Callable, MutableMapping, Any\nfrom torch import Tensor\nimport torch\nfrom copy import copy\n\n\nclass Result(Dict):\n\n def __init__(\n self,\n minimize: Optional[Tensor] = None,\n early_stop_on: Optional[Tensor] = None,\n checkpoint_on: Union[Tensor, bool, None] = None,\n hiddens: Optional[Tensor] = None,\n ):\n\n super().__init__()\n\n if early_stop_on is not None:\n self.early_stop_on = early_stop_on\n if checkpoint_on is not None and checkpoint_on:\n self.checkpoint_on = checkpoint_on\n if hiddens is not None:\n self.hiddens = hiddens\n if minimize is not None:\n err = 'Minimize can only be used in training_step, training_step_end, training_epoch_end'\n self._assert_grad_tensor_metric('minimize', minimize, err)\n self.minimize = minimize\n\n if minimize is not None and checkpoint_on is None:\n self.checkpoint_on = minimize.detach()\n\n self['meta'] = {\n '_internal': {\n '_reduce_on_epoch': False\n }\n }\n\n def __getattr__(self, key: str) -> Any:\n try:\n if key == 'callback_metrics':\n return self.get_callback_metrics()\n elif key == 'batch_log_metrics':\n return self.get_batch_log_metrics()\n elif key == 'batch_pbar_metrics':\n return self.get_batch_pbar_metrics()\n elif key == 'epoch_log_metrics':\n return self.get_epoch_log_metrics()\n elif key == 'epoch_pbar_metrics':\n return self.get_epoch_pbar_metrics()\n else:\n return self[key]\n except KeyError:\n return None\n\n def __setattr__(self, key: str, val: Union[Tensor, Any]):\n # ensure reserve keys are tensors and detached\n if key in {'hiddens', 'checkpoint_on', 'early_stop_on'}:\n self._assert_tensor_metric(key, val)\n if val is not None and isinstance(val, torch.Tensor):\n val = val.detach()\n\n # ensure anything else that is a tensor is detached\n elif isinstance(val, torch.Tensor) and key != 'minimize':\n val = val.detach()\n\n self[key] = val\n\n def _assert_tensor_metric(self, name: str, potential_metric: Union[bool, Tensor, None, Any]):\n if potential_metric is not None and not isinstance(potential_metric, bool):\n assert isinstance(potential_metric, Tensor), f'{name} must be a torch.Tensor'\n\n def _assert_grad_tensor_metric(self, name: str, x: Union[torch.Tensor, Any], additional_err: str = ''):\n if x is not None:\n assert isinstance(x, Tensor), f'{name} must be a torch.Tensor'\n m = f'{name} must have a computational graph.'\n\n if additional_err:\n m += f' {additional_err}'\n assert x.grad_fn is not None, m\n\n def log(\n self,\n name: str,\n value: Any,\n prog_bar: bool = False,\n logger: bool = True,\n on_step: bool = False,\n on_epoch: bool = True,\n reduce_fx: Callable = torch.mean,\n enable_graph: bool = False,\n ):\n # no metrics should be logged with graphs\n if not enable_graph and isinstance(value, torch.Tensor):\n value = value.detach()\n\n if 'meta' not in self:\n self.__setitem__('meta', {})\n\n # if user requests both step and epoch, then we split the metric in two automatically\n # one will be logged per step. 
the other per epoch\n if on_step and on_epoch:\n # set step version\n step_name = f'step_{name}'\n self.__set_meta(step_name, value, prog_bar, logger, on_step=True, on_epoch=False, reduce_fx=reduce_fx)\n self.__setitem__(step_name, value)\n\n # set epoch version\n epoch_name = f'epoch_{name}'\n self.__set_meta(epoch_name, value, prog_bar, logger, on_step=False, on_epoch=True, reduce_fx=reduce_fx)\n self.__setitem__(epoch_name, value)\n else:\n self.__set_meta(name, value, prog_bar, logger, on_step, on_epoch, reduce_fx)\n\n # set the value\n self.__setitem__(name, value)\n\n def __set_meta(\n self,\n name: str,\n value: Any,\n prog_bar: bool,\n logger: bool,\n on_step: bool,\n on_epoch: bool,\n reduce_fx: Callable,\n ):\n # set the meta for the item\n meta_value = value\n meta = dict(\n prog_bar=prog_bar,\n logger=logger,\n on_step=on_step,\n on_epoch=on_epoch,\n reduce_fx=reduce_fx,\n value=meta_value\n )\n\n self['meta'][name] = meta\n\n # track whether any input requires reduction on epoch end\n _internal = self['meta']['_internal']\n _internal['_reduce_on_epoch'] = max(_internal['_reduce_on_epoch'], on_epoch)\n\n def get_callback_metrics(self) -> dict:\n result = {\n 'early_stop_on': self.early_stop_on,\n 'checkpoint_on': self.checkpoint_on\n }\n\n return result\n\n def get_batch_log_metrics(self) -> dict:\n \"\"\"\n Gets the metrics to log at the end of the batch step\n \"\"\"\n result = {}\n\n meta = self['meta']\n for k, options in meta.items():\n if k == '_internal':\n continue\n if options['logger'] and options['on_step']:\n result[k] = self[k]\n return result\n\n def get_epoch_log_metrics(self) -> dict:\n \"\"\"\n Gets the metrics to log at the end of the batch step\n \"\"\"\n result = {}\n\n meta = self['meta']\n for k, options in meta.items():\n if k == '_internal':\n continue\n if options['logger'] and options['on_epoch']:\n result[k] = self[k]\n return result\n\n def get_epoch_pbar_metrics(self):\n \"\"\"\n Gets the metrics to log at the end of the batch step\n \"\"\"\n result = {}\n\n meta = self['meta']\n for k, options in meta.items():\n if k == '_internal':\n continue\n if options['prog_bar'] and options['on_epoch']:\n result[k] = self[k]\n return result\n\n def get_batch_pbar_metrics(self):\n \"\"\"\n Gets the metrics to log at the end of the batch step\n \"\"\"\n result = {}\n\n meta = self['meta']\n for k, options in meta.items():\n if k == '_internal':\n continue\n if options['prog_bar'] and options['on_step']:\n result[k] = self[k]\n return result\n\n def detach(self):\n for k, v in self.items():\n if isinstance(v, torch.Tensor):\n self.__setitem__(k, v.detach())\n\n def __repr__(self):\n self_copy = self.copy()\n\n if 'meta' in self_copy:\n del self_copy['meta']\n\n return str(self_copy)\n\n def __str__(self):\n copy = self.copy()\n del copy['meta']\n\n return str(copy)\n\n def __copy__(self):\n newone = type(self)()\n for k, v in self.items():\n newone[k] = copy(v)\n return newone\n\n @classmethod\n def gather(cls, outputs):\n meta = outputs[0].get('meta')\n result = cls()\n result = recursive_gather(outputs, result)\n recursive_stack(result)\n\n if meta:\n result['meta'] = meta\n return result\n\n @classmethod\n def reduce_on_epoch_end(cls, outputs):\n meta = outputs[0]['meta']\n result = cls()\n result = recursive_gather(outputs, result)\n recursive_stack(result)\n\n for k, option in meta.items():\n if k == '_internal':\n continue\n\n if option['on_epoch']:\n fx = option['reduce_fx']\n result[k] = fx(result[k])\n\n result['meta'] = meta\n return result\n\n 
@property\n def should_reduce_on_epoch_end(self) -> bool:\n return self['meta']['_internal']['_reduce_on_epoch']\n\n\ndef recursive_gather(outputs: Sequence[dict], result: Optional[MutableMapping] = None) -> Optional[MutableMapping]:\n for out in outputs:\n if 'meta' in out:\n del out['meta']\n\n for k, v in out.items():\n if isinstance(v, dict):\n v = recursive_gather([v], result)\n\n if k not in result:\n result[k] = []\n\n result[k].append(v)\n\n return result\n\n\ndef recursive_stack(result: MutableMapping):\n for k, v in result.items():\n if isinstance(v, dict):\n recursive_stack(v)\n\n if isinstance(v, list) and len(v) > 0 and isinstance(v[0], torch.Tensor):\n v = torch.stack(v)\n result[k] = v\n\n\nclass TrainResult(Result):\n\n def __init__(\n self,\n minimize: Optional[Tensor] = None,\n early_stop_on: Tensor = None,\n checkpoint_on: Union[Tensor, bool] = None,\n hiddens: Optional[Tensor] = None,\n ):\n\n super().__init__(minimize, early_stop_on, checkpoint_on, hiddens)\n\n def log(\n self,\n name,\n value,\n prog_bar: bool = False,\n logger: bool = True,\n on_step: bool = True,\n on_epoch: bool = False,\n reduce_fx: Callable = torch.mean,\n enable_graph: bool = False,\n ):\n super().log(name, value, prog_bar, logger, on_step, on_epoch, reduce_fx, enable_graph)\n\n\nclass EvalResult(Result):\n\n def __init__(\n self,\n early_stop_on: Optional[Tensor] = None,\n checkpoint_on: Optional[Tensor] = None,\n hiddens: Optional[Tensor] = None,\n ):\n\n super().__init__(None, early_stop_on, checkpoint_on, hiddens)\n\n def log(\n self,\n name,\n value,\n prog_bar: bool = False,\n logger: bool = True,\n on_step: bool = False,\n on_epoch: bool = True,\n reduce_fx: Callable = torch.mean,\n enable_graph: bool = False,\n ):\n super().log(name, value, prog_bar, logger, on_step, on_epoch, reduce_fx, enable_graph)\n\n def get_callback_metrics(self) -> dict:\n result = {\n 'val_early_stop_on': self.early_stop_on,\n 'val_checkpoint_on': self.checkpoint_on\n }\n\n return result\n" ]
[ [ "torch.stack" ] ]
yurynamgung/faster-rcnn-pytorch
[ "6817ffdedfa8527ea06a93782ac37ebfdb3d33af" ]
[ "model/utils/generate_anchor.py" ]
[ "import math\nimport numpy as np\n\ndef generate_anchor(feature_height, feature_width, image_size, ratio=[0.5, 1, 2], anchor_size = [128, 256, 512]):\n #---------- debug\n assert len(image_size) == 2\n #----------\n anchor_base = []\n for ratio_t in ratio:\n for anchor_size_t in anchor_size:\n h = anchor_size_t*math.sqrt(ratio_t)\n w = anchor_size_t*math.sqrt(1/ratio_t)\n anchor_base.append([-h/2, -w/2, h/2, w/2])\n anchor_base = np.array(anchor_base) # default shape: [9,4]\n\n K = len(ratio) * len(anchor_size) # default: 9\n image_height = image_size[0]\n image_width = image_size[1]\n stride_x = image_height / feature_height\n stride_y = image_width / feature_width\n anchors = np.zeros([feature_height, feature_width, K, 4])\n for i in range(feature_height):\n for j in range(feature_width):\n x = i*stride_x + stride_x/2\n y = j*stride_y + stride_y/2\n shift = [x,y,x,y]\n anchors[i, j] = anchor_base+shift\n\n anchors = anchors.reshape([-1,4])\n #----------\n assert isinstance(anchors, np.ndarray)\n assert anchors.shape[0] == feature_height*feature_width*len(ratio)*len(anchor_size)\n assert anchors.shape[1] == 4\n #----------\n return anchors\n\n\nif __name__ == '__main__':\n anchor = generate_anchor(50, 50, (512,812.34))\n assert anchor.shape == (50*50*9,4)" ]
[ [ "numpy.array", "numpy.zeros" ] ]
sselab2021/FMS_human_3d-skeleton_by_Azure-Kinect
[ "99520fd78a32f467cf0ada3632984304a64d5cbd" ]
[ "03_Figures_of_paper/Fig_4_1.py" ]
[ "import pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib import font_manager\n\n# 01. start-Data preparation\n\n# To save the CSV files that recorded the frequency of no-body frames in 'Front' and 'Side' sensors\n# Front.csv and Side.csv are saved in dataset.\nnobody_csv_file_list = [r'G:\\Dataset of FMS\\Preprocessing files\\Statistics of frames without skeleton\\Front.csv',\n r'G:\\Dataset of FMS\\Preprocessing files\\Statistics of frames without skeleton\\Side.csv']\n\n# the path of saving the file of Fig_4_1.\npic_save_path = r'E:\\Fig_4_1.pdf'\n\n# Temporarily store the rate of no-body\nno_bodies_frames_rate_list = []\nfor i in range(len(nobody_csv_file_list)):\n df = pd.read_csv(nobody_csv_file_list[i])\n no_bodies_frames_rate_list.append((df[\"no_bodies_frames_rate\"]).tolist())\n\n# 01. end-Data preparation\n\n# 02. start-plot\n\n# Set global font and fontsize\nfont_manager.fontManager.addfont(r'C:\\Users\\Xingqingjun\\AppData\\Local\\Microsoft\\Windows\\Fonts\\Helvetica.ttf')\nplt.rcParams['font.sans-serif'] = 'Helvetica'\nplt.rcParams['font.size'] = '16'\n\nx_data = [i + 1 for i in range(1812)]\ny_data = no_bodies_frames_rate_list[0]\ny_data2 = no_bodies_frames_rate_list[1]\n\nfig = plt.figure(figsize=(7.2, 5.1))\n\n# Add auxiliary lines in plot.\nplt.axhline(y=0.01, ls=\"--\", c=\"#1B9F2E\", lw=0.5)\nplt.axvline(x=59, ymin=0, ymax=1 / 22, ls=\"--\", c=\"green\", lw=0.5)\nplt.axvline(x=347, ymin=0, ymax=1 / 22, ls=\"--\", c=\"green\", lw=0.5)\n\n# Key marker in plot.\npoint_x = [59, 347]\npoint_y = [0.01, 0.01]\npoint_colors = [\"#ff0000\", \"#1F77B4\"]\nplt.scatter(point_x, point_y, s=9, c=point_colors)\n\nplt.text(53, 0.05, \"(59, 0.01)\", fontsize=16)\nplt.text(341, 0.05, \"(347, 0.01)\", fontsize=16)\n\n# Remove the top and bottom borders\nax = plt.gca()\nax.spines['right'].set_color('none')\nax.spines['top'].set_color('none')\n\nln1, = plt.plot(x_data, y_data, color='red', linewidth=2.0)\nln2, = plt.plot(x_data, y_data2, linewidth=2.0)\n\nx = range(0, 1813, 300)\nplt.xticks(x)\nplt.xlim(-50, 1812)\nplt.xlabel(\"Number of episodes (N)\", labelpad=10.0)\nplt.ylabel(\"No-body frame ratios (%)\")\n\nplt.legend(handles=[ln1, ln2], labels=['Front', 'Side'])\n\n# plt.show()\nplt.savefig(pic_save_path, bbox_inches='tight', dpi=300)\n\n# 02. end-plot\n" ]
[ [ "matplotlib.pyplot.text", "matplotlib.pyplot.xlim", "matplotlib.pyplot.axhline", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "matplotlib.font_manager.fontManager.addfont", "matplotlib.pyplot.scatter", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.axvline", "matplotlib.pyplot.gca", "matplotlib.pyplot.xticks", "pandas.read_csv" ] ]
alantess/DDQN-BTC
[ "0fff185200dd1c16088dc322cbb7790b848c1e6d" ]
[ "agent.py" ]
[ "import numpy as np\r\nimport torch as T\r\nimport torch.nn as nn\r\nfrom networks import DDQN\r\nfrom memory import ReplayBuffer\r\nfrom sklearn.preprocessing import StandardScaler\r\n\r\nclass Agent(object):\r\n def __init__(self, lr, input_dims, n_actions,epsilon, batch_size,env,\r\n capacity=1000000, eps_dec=4.5e-7, fc1_dims = 512, fc2_dims=256,\r\n replace=1000, gamma=0.99,network_name='_eval'):\r\n self.input_dims = input_dims\r\n self.n_actions = n_actions\r\n self.batch_size = batch_size\r\n self.gamma = gamma\r\n self.eps_min = 0.01\r\n self.epsilon = epsilon\r\n self.env = env\r\n self.memory = ReplayBuffer(capacity, input_dims,n_actions)\r\n self.eps_dec = eps_dec\r\n self.replace = replace\r\n self.update_cntr = 0\r\n self.scaler = self._get_scaler(env)\r\n\r\n # Evaluate network\r\n self.q_eval = DDQN(lr=lr, input_dims=self.input_dims,n_actions=self.n_actions,fc1_dims=fc1_dims, fc2_dims=fc2_dims,network_name=network_name)\r\n # Training Network\r\n self.q_train = DDQN(lr=lr, input_dims=self.input_dims,n_actions=self.n_actions,fc1_dims=fc1_dims, fc2_dims=fc2_dims,network_name=network_name)\r\n\r\n # Normalize the observation\r\n def pick_action(self, obs):\r\n if np.random.random() > self.epsilon:\r\n obs = self.scaler.transform([obs])\r\n state = T.tensor([obs], dtype=T.float).to(self.q_eval.device)\r\n actions = self.q_train.forward(state)\r\n action = T.argmax(actions).item()\r\n else:\r\n action = self.env.sample_action()\r\n\r\n return action\r\n\r\n# For normalizing states -- _get_scaler(env)\r\n def _get_scaler(self, env):\r\n states = []\r\n for _ in range(self.env.n_steps):\r\n action = self.env.sample_action()\r\n state_, reward, done, _ = self.env.step(action)\r\n states.append(state_)\r\n if done:\r\n break\r\n scaler = StandardScaler()\r\n scaler.fit(states)\r\n return scaler\r\n\r\n\r\n\r\n def store_transition(self, state, action, reward, state_, done):\r\n state = self.scaler.transform([state])\r\n state_ = self.scaler.transform([state_])\r\n self.memory.store_transition(state, action, reward,state_,done)\r\n\r\n\r\n def update_target_network(self):\r\n if self.update_cntr % self.replace == 0:\r\n self.q_eval.load_state_dict(self.q_train.state_dict())\r\n\r\n\r\n def save(self):\r\n print('Saving...')\r\n self.q_eval.save()\r\n self.q_train.save()\r\n\r\n def load(self):\r\n print('Loading...')\r\n self.q_eval.load()\r\n self.q_train.load()\r\n # Normalize the states, create a function\r\n def learn(self):\r\n if self.memory.mem_cntr < self.batch_size:\r\n return\r\n\r\n states, actions, rewards, states_, done = self.memory.sample_buffer(self.batch_size)\r\n\r\n states = T.tensor(states, dtype=T.float).to(self.q_eval.device)\r\n actions = T.tensor(actions, dtype=T.int64).to(self.q_eval.device)\r\n rewards = T.tensor(rewards, dtype=T.float).to(self.q_eval.device)\r\n states_ =T.tensor(states_, dtype=T.float).to(self.q_eval.device)\r\n done = T.tensor(done, dtype=T.bool).to(self.q_eval.device)\r\n\r\n self.q_train.optimizer.zero_grad()\r\n self.update_target_network()\r\n\r\n\r\n indices = np.arange(self.batch_size)\r\n q_pred = (self.q_train.forward(states) * actions).sum(dim=1)\r\n q_next = self.q_eval.forward(states_)\r\n q_train = self.q_train.forward(states_)\r\n\r\n max_action = T.argmax(q_train,dim=1)\r\n q_next[done] = 0.0\r\n\r\n y = rewards + self.gamma*q_next[indices,max_action]\r\n\r\n loss = self.q_train.loss(y,q_pred).to(self.q_eval.device)\r\n loss.backward()\r\n\r\n self.q_train.optimizer.step()\r\n\r\n\r\n\r\n self.update_cntr += 1\r\n 
self.epsilon = self.epsilon - self.eps_dec if self.epsilon > self.eps_min else self.eps_min\r\n\r\n\r\n\r\n" ]
[ [ "sklearn.preprocessing.StandardScaler", "numpy.arange", "torch.tensor", "numpy.random.random", "torch.argmax" ] ]
GMLC-TDC/pesgm-2019-helics-tutorial
[ "7534cc5f208e558c5f1f9b4347e93f82065be3c9" ]
[ "data/case9/case9.py" ]
[ "# -*- coding: utf-8 -*-\n# Copyright (c) 1996-2015 PSERC. All rights reserved.\n# Use of this source code is governed by a BSD-style\n# license that can be found in the LICENSE file.\n\n\"\"\"Power flow data for 9 bus, 3 generator case.\n\"\"\"\n\nfrom numpy import array\n\n\ndef case9():\n \"\"\"Power flow data for 9 bus, 3 generator case.\n Please see L{caseformat} for details on the case file format.\n\n Based on data from Joe H. Chow's book, p. 70.\n\n @return: Power flow data for 9 bus, 3 generator case.\n \"\"\"\n ppc = {\"version\": \"2\"}\n\n ##----- Power Flow Data -----##\n ## system MVA base\n ppc[\"baseMVA\"] = 100.0\n\n ## bus data\n # bus_i type Pd Qd Gs Bs area Vm Va baseKV zone Vmax Vmin\n ppc[\"bus\"] = array(\n [\n [1, 3, 0, 0, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9],\n [2, 2, 0, 0, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9],\n [3, 2, 0, 0, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9],\n [4, 1, 0, 0, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9],\n [5, 1, 90, 30, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9],\n [6, 1, 0, 0, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9],\n [7, 1, 100, 35, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9],\n [8, 1, 0, 0, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9],\n [9, 1, 125, 50, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9],\n ]\n )\n\n ## generator data\n # bus, Pg, Qg, Qmax, Qmin, Vg, mBase, status, Pmax, Pmin, Pc1, Pc2,\n # Qc1min, Qc1max, Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30, ramp_q, apf\n ppc[\"gen\"] = array(\n [\n [1, 0, 0, 300, -300, 1, 100, 1, 250, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [2, 163, 0, 300, -300, 1, 100, 1, 300, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [3, 85, 0, 300, -300, 1, 100, 1, 270, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n ]\n )\n\n ## branch data\n # fbus, tbus, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax\n ppc[\"branch\"] = array(\n [\n [1, 4, 0, 0.0576, 0, 250, 250, 250, 0, 0, 1, -360, 360],\n [4, 5, 0.017, 0.092, 0.158, 250, 250, 250, 0, 0, 1, -360, 360],\n [5, 6, 0.039, 0.17, 0.358, 150, 150, 150, 0, 0, 1, -360, 360],\n [3, 6, 0, 0.0586, 0, 300, 300, 300, 0, 0, 1, -360, 360],\n [6, 7, 0.0119, 0.1008, 0.209, 150, 150, 150, 0, 0, 1, -360, 360],\n [7, 8, 0.0085, 0.072, 0.149, 250, 250, 250, 0, 0, 1, -360, 360],\n [8, 2, 0, 0.0625, 0, 250, 250, 250, 0, 0, 1, -360, 360],\n [8, 9, 0.032, 0.161, 0.306, 250, 250, 250, 0, 0, 1, -360, 360],\n [9, 4, 0.01, 0.085, 0.176, 250, 250, 250, 0, 0, 1, -360, 360],\n ]\n )\n\n ##----- OPF Data -----##\n ## area data\n # area refbus\n ppc[\"areas\"] = array([[1, 5]])\n\n ## generator cost data\n # 1 startup shutdown n x1 y1 ... xn yn\n # 2 startup shutdown n c(n-1) ... c0\n ppc[\"gencost\"] = array(\n [\n [2, 1500, 0, 3, 0.11, 5, 150],\n [2, 2000, 0, 3, 0.085, 1.2, 600],\n [2, 3000, 0, 3, 0.1225, 1, 335],\n ]\n )\n\n return ppc\n" ]
[ [ "numpy.array" ] ]
Connossor/mystic-bit
[ "f57f471d3d154560d23bc9eff17fd5b8f284684c" ]
[ "mysticbit/ml.py" ]
[ "\"\"\" Core ML functions\"\"\"\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor\nfrom sklearn.multioutput import MultiOutputRegressor\nfrom sklearn.pipeline import Pipeline, make_pipeline\nfrom sklearn.model_selection import LeavePGroupsOut\nfrom sklearn.preprocessing import RobustScaler\nfrom sklearn.cluster import KMeans\n\n\ndef create_facies(df_logs):\n \"\"\" Adds a facies column from clustering\"\"\"\n pipe = make_pipeline(RobustScaler(), KMeans(n_clusters=4))\n X = df_logs[['GR', 'RHOB', 'NPHI', 'DT']]\n cluster_id = pipe.fit_predict(X)\n df_logs['facies'] = cluster_id\n df_logs['facies'] = 'facies_' + df_logs['facies'].astype(str)\n return df_logs\n\n\ndef train_test_split(df_ml):\n \"\"\" Split log data into train and test by well ID \"\"\"\n\n test_wells = set(['B03', 'B05', 'B06'])\n train_wells = set(df_ml.HACKANAME.unique()) - test_wells\n print('Train well: ', train_wells)\n print('Test wells: ', test_wells)\n\n mask_train = df_ml.HACKANAME.isin(train_wells)\n df_ml_train = df_ml[mask_train]\n df_ml_test = df_ml[~mask_train]\n\n return df_ml_train, df_ml_test\n\n\ndef make_model(X_train, y_train, quantile=0.5):\n \"\"\" Returns a trained model \"\"\"\n\n model = MultiOutputRegressor(GradientBoostingRegressor(loss='quantile', alpha=quantile))\n model.fit(X_train, y_train)\n return model\n\n\ndef make_multiple_models(df_ml_train, X_cols, y_cols):\n \"\"\" Returns low, base and high trained models \"\"\"\n\n X_train = df_ml_train[X_cols]\n y_train = df_ml_train[y_cols]\n\n models = []\n models.append(['high', make_model(X_train, y_train, quantile=0.90)])\n models.append(['base', make_model(X_train, y_train, quantile=0.50)])\n models.append(['low', make_model(X_train, y_train, quantile=0.10)])\n\n return models\n\n\ndef make_predictions(models, df_ml, X_cols, y_cols):\n df_pred = df_ml.copy()\n \"\"\" Use trained models to make predictions, add on to df_ml as new columns \"\"\"\n\n X = df_pred[X_cols]\n\n for name, model in models:\n y_pred = model.predict(X)\n pred_cols = [c + '_pred_'+name for c in y_cols]\n df_pred[pred_cols] = pd.DataFrame(y_pred, index=df_pred.index)\n\n return df_pred" ]
[ [ "pandas.DataFrame", "sklearn.cluster.KMeans", "sklearn.ensemble.GradientBoostingRegressor", "sklearn.preprocessing.RobustScaler" ] ]
wilko77/blocklib
[ "0fb4ef62f2b9cd6d685fadf92801ca207079843d" ]
[ "blocklib/encoding.py" ]
[ "\"\"\"Class to implement privacy preserving encoding.\"\"\"\nimport hashlib\nimport numpy as np\nfrom typing import List, Set\n\n\ndef flip_bloom_filter(string: str, bf_len: int, num_hash_funct: int):\n \"\"\"\n Hash string and return indices of bits that have been flipped correspondingly.\n\n :param string: string: to be hashed and to flip bloom filter\n :param bf_len: int: length of bloom filter\n :param num_hash_funct: int: number of hash functions\n :return: bfset: a set of integers - indices that have been flipped to 1\n \"\"\"\n # config for hashing\n h1 = hashlib.sha1\n h2 = hashlib.md5\n\n sha_bytes = h1(string.encode('utf-8')).digest()\n md5_bytes = h2(string.encode('utf-8')).digest()\n int1 = int.from_bytes(sha_bytes, 'big') % bf_len\n int2 = int.from_bytes(md5_bytes, 'big') % bf_len\n\n # flip {num_hash_funct} times\n bfset = set()\n for i in range(num_hash_funct):\n gi = (int1 + i * int2) % bf_len\n bfset.add(gi)\n\n return bfset\n\n\ndef generate_bloom_filter(list_of_strs: List[str], bf_len: int, num_hash_funct: int):\n \"\"\"\n Generate a bloom filter given list of strings.\n\n :param return_cbf_index_sig_map:\n :param list_of_strs:\n :param bf_len:\n :param num_hash_funct:\n :return: bloom_filter_vector if return_cbf_index_sig_map is False else (bloom_filter_vector, cbf_index_sig_map)\n \"\"\"\n # go through each signature and generate bloom filter of it\n # -- we only store the set of index that flipped to 1\n candidate_bloom_filter = set() # type: Set[int]\n\n for signature in list_of_strs:\n bfset = flip_bloom_filter(signature, bf_len, num_hash_funct)\n # union indices that have been flipped 1 in candidate bf\n candidate_bloom_filter = candidate_bloom_filter.union(bfset)\n\n # massage the cbf into a numpy bool array from a set\n bloom_filter_vector = np.zeros(bf_len, dtype=bool)\n bloom_filter_vector[list(candidate_bloom_filter)] = True\n\n return bloom_filter_vector\n" ]
[ [ "numpy.zeros" ] ]
jameszou/unseenest
[ "24afc3dfb74c5d6ef49427f59213661dc411f929" ]
[ "src/unseen_est.py" ]
[ "from __future__ import division\nimport os, sys, random, cPickle, sets, subprocess, pandas, gzip, math, itertools\nimport numpy as np\nfrom operator import itemgetter\nfrom scipy.stats import binom_test\nfrom scipy.stats import chi2_contingency\nfrom scipy.stats import entropy\nfrom scipy.stats import poisson\nfrom scipy.stats import binom\nfrom scipy.stats import hypergeom\nimport statsmodels.api as sm\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport scipy.io\nfrom cvxopt import matrix, solvers\n\n# f is a list of fingerprint values\n# n_samples is the number of alleles in the cohort\ndef unseen_est(filename, n_samples):\n file = open(filename,'r')\n f = []\n for line in file:\n f.append(int(line.strip()))\n \n ########### BASIC CONSTANTS ###################\n gridFactor = 1.05\n maxLPIters = 1000\n xLPmax = len(f)/n_samples\n xLPmin = 1./(n_samples*100)\n N_max = 65000000\n #N_max = 650000000\n \n ########### SETTING UP THE LP ###################\n fLP = f + [0]*int(np.ceil(np.sqrt(len(f))))\n szLPf = len(fLP)\n xLP = xLPmin*np.power(gridFactor, np.arange(0, np.ceil(np.log(xLPmax/xLPmin)/np.log(gridFactor))+1))\n szLPx = np.max(xLP.shape)\n \n ## set up the objective function\n objf = np.zeros((1, szLPx + 2*szLPf))\n objf[0, np.arange(szLPx, szLPx + 2*szLPf, 2)] = 1./np.sqrt(np.array(fLP) + 1)\n objf[0, np.arange(szLPx+1, szLPx + 2*szLPf, 2)] = 1./np.sqrt(np.array(fLP) + 1)\n \n ## set up the inequality constraints corresponding to the moment matching\n ## first 2*szLPf are for moment matching, next szLPx+2*szLPf for >=0, last for <= N_max\n A = np.zeros((2*szLPf+szLPx+2*szLPf+1, szLPx+2*szLPf)) \n b = np.zeros((2*szLPf+szLPx+2*szLPf+1, 1))\n \n rv_list = [binom(n_samples, x) for x in xLP]\n # moment matching constraints\n for i in range(szLPf):\n A[2*i, np.arange(szLPx)] = [rv.pmf(i+1) for rv in rv_list]\n A[2*i+1, np.arange(szLPx)] = -A[2*i, np.arange(szLPx)]\n A[2*i, szLPx+2*i] = -1\n A[2*i+1, szLPx+2*i+1] = -1\n b[2*i, 0] = fLP[i]\n b[2*i+1, 0] = -fLP[i]\n \n # >= 0 constraints\n for i in range(szLPx+2*szLPf):\n A[i+2*szLPf,i] = -1\n b[i+2*szLPf,0] = 0\n \n # <= N_max constraint\n A[-1,range(szLPx)] = 1\n b[-1,0] = N_max\n \n \n ## set up the equality constraints\n Aeq = np.zeros((1, szLPx+2*szLPf))\n Aeq[0, range(szLPx)] = xLP\n beq = np.sum(np.array(f)*(1+np.arange(len(f))))/n_samples\n \n ########### RUNNING THE LP ###################\n \n solvers.options['show_progress'] = False\n \n ## rescaling for better conditioning\n for j in range(np.max(xLP.shape)):\n A[:,j] = A[:,j]/xLP[j]\n Aeq[0,j] = Aeq[0,j]/xLP[j]\n \n #return objf, A, b, szLPf, szLPx, xLP\n sol = solvers.lp(matrix(objf.T), matrix(A), matrix(b), matrix(Aeq), matrix(beq)) \n #res = linprog(list(objf[0]), A_ub = A, b_ub = list(b.T[0]), A_eq = Aeq, b_eq = [beq] , options = {'maxiter': maxLPIters})\n \n ## remove the scaling\n histx = np.array(sol['x'])[0:szLPx]\n histx = [histx[i]/xLP[i] for i in range(szLPx)]\n \n return np.array(histx), xLP\n\ndef write_output(histx, xLP, outname):\n out = open(outname, 'w')\n out.write('\\t'.join(['frequency', '# of variants'])+'\\n')\n for i in range(len(xLP)):\n out.write('\\t'.join([str(xLP[i]), str(histx[i,0])])+'\\n')\n out.close()\n \nif __name__ == '__main__':\n filename = sys.argv[1]\n n_alleles = int(sys.argv[2])\n outname = sys.argv[3]\n histx, xLP = unseen_est(filename, n_alleles)\n write_output(histx, xLP, outname)\n\n\n\n \n " ]
[ [ "numpy.max", "numpy.array", "numpy.zeros", "numpy.log", "numpy.arange", "scipy.stats.binom" ] ]
nhrade/advent-of-code
[ "96a60ebb1bb4dfd0d6aabc0d71c926580c86c7e0" ]
[ "day15/chiton.py" ]
[ "import numpy as np\nfrom heapq import heappush, heappop\nfrom dataclasses import dataclass, field\nimport os\n\n\n@dataclass(order=True)\nclass PosItem:\n priority: int\n pos: tuple[int, int] = field(compare=False)\n\n\npath = os.path.join(os.path.dirname(__file__), \"input.txt\")\n\n\ndef find_path(arr):\n pq = []\n visited = set()\n cost = np.zeros_like(arr, dtype=np.int32)\n cost.fill(2 ** 31 - 1)\n prev = np.zeros(shape=(cost.shape[0], cost.shape[1], 2), dtype=np.int32)\n cost[0, 0] = 0\n pq.append(PosItem(0, (0, 0)))\n\n while pq:\n item = heappop(pq)\n r, c = item.pos\n visited.add((r, c))\n\n if (\n (r + 1, c) not in visited\n and r < arr.shape[0] - 1\n and cost[r, c] + arr[r + 1, c] < cost[r + 1, c]\n ):\n cost[r + 1, c] = cost[r, c] + arr[r + 1, c]\n prev[r + 1, c, :] = [r, c]\n heappush(pq, PosItem(cost[r + 1, c], (r + 1, c)))\n if (\n (r, c + 1) not in visited\n and c < arr.shape[1] - 1\n and cost[r, c] + arr[r, c + 1] < cost[r, c + 1]\n ):\n cost[r, c + 1] = cost[r, c] + arr[r, c + 1]\n prev[r, c + 1, :] = [r, c]\n heappush(pq, PosItem(cost[r, c + 1], (r, c + 1)))\n return prev, cost\n\n\nif __name__ == \"__main__\":\n with open(path) as file:\n contents = file.read()\n arr = np.asarray(\n [[int(n) for n in line] for line in contents.split(\"\\n\")], dtype=np.int32\n )\n\n prev, cost = find_path(arr)\n print(f\"Lowest cost path is {cost[cost.shape[0]-1, cost.shape[1]-1]}\")\n" ]
[ [ "numpy.zeros_like", "numpy.zeros" ] ]
mdomarsaleem/kedro
[ "571bb8009b930e67c073115c23ae2d03942f6499" ]
[ "kedro/io/parquet_local.py" ]
[ "# Copyright 2018-2019 QuantumBlack Visual Analytics Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND\n# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS\n# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN\n# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n# The QuantumBlack Visual Analytics Limited (\"QuantumBlack\") name and logo\n# (either separately or in combination, \"QuantumBlack Trademarks\") are\n# trademarks of QuantumBlack. The License does not grant you any right or\n# license to the QuantumBlack Trademarks. You may not use the QuantumBlack\n# Trademarks or any confusingly similar mark as a trademark for your product,\n# or use the QuantumBlack Trademarks in any other manner that might cause\n# confusion in the marketplace, including but not limited to in advertising,\n# on websites, or on software.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"``ParquetLocalDataSet`` is a data set used to load and save\ndata to local parquet files. It uses the ``pyarrow`` implementation,\nwhich allows for multiple parts, compression, column selection etc.\n\nDocumentation on the PyArrow library features, compatibility\nlist and known caveats can also be found on their official guide at:\n\nhttps://arrow.apache.org/docs/python/index.html\n\"\"\"\n\nfrom pathlib import Path\nfrom typing import Any, Dict\n\nimport pandas as pd\n\nfrom kedro.io.core import AbstractVersionedDataSet, DataSetError, Version\n\n\nclass ParquetLocalDataSet(AbstractVersionedDataSet):\n \"\"\"``AbstractDataSet`` with functionality for handling local parquet files.\n\n Example:\n ::\n\n >>> from kedro.io import ParquetLocalDataSet\n >>> import pandas as pd\n >>>\n >>> data = pd.DataFrame({'col1': [1, 2], 'col2': [4, 5],\n >>> 'col3': [5, 6]})\n >>> data_set = ParquetLocalDataSet('myFile')\n >>> data_set.save(data)\n >>> loaded_data = data_set.load()\n >>> assert data.equals(loaded_data)\n \"\"\"\n\n # pylint: disable=too-many-arguments\n def __init__(\n self,\n filepath: str,\n engine: str = \"auto\",\n load_args: Dict[str, Any] = None,\n save_args: Dict[str, Any] = None,\n version: Version = None,\n ) -> None:\n \"\"\"Creates a new instance of ``ParquetLocalDataSet`` pointing to a\n concrete filepath.\n\n Args:\n filepath: Path to a parquet file or a metadata file of a multipart\n parquet collection or the directory of a multipart parquet.\n\n engine: The engine to use, one of: `auto`, `fastparquet`,\n `pyarrow`. 
If `auto`, then the default behavior is to try\n `pyarrow`, falling back to `fastparquet` if `pyarrow` is\n unavailable.\n\n load_args: Additional loading options `pyarrow`:\n https://arrow.apache.org/docs/python/generated/pyarrow.parquet.read_table.html\n or `fastparquet`:\n https://fastparquet.readthedocs.io/en/latest/api.html#fastparquet.ParquetFile.to_pandas\n\n save_args: Additional saving options for `pyarrow`:\n https://arrow.apache.org/docs/python/generated/pyarrow.Table.html#pyarrow.Table.from_pandas\n or `fastparquet`:\n https://fastparquet.readthedocs.io/en/latest/api.html#fastparquet.write\n\n version: If specified, should be an instance of\n ``kedro.io.core.Version``. If its ``load`` attribute is\n None, the latest version will be loaded. If its ``save``\n attribute is None, save version will be autogenerated.\n\n \"\"\"\n super().__init__(Path(filepath), version)\n default_save_args = {\"compression\": None} # type: Dict[str, Any]\n default_load_args = {} # type: Dict[str, Any]\n\n self._engine = engine\n self._load_args = (\n {**default_load_args, **load_args}\n if load_args is not None\n else default_load_args\n )\n self._save_args = (\n {**default_save_args, **save_args}\n if save_args is not None\n else default_save_args\n )\n\n def _describe(self) -> Dict[str, Any]:\n return dict(\n filepath=self._filepath,\n engine=self._engine,\n load_args=self._load_args,\n save_args=self._save_args,\n version=self._version,\n )\n\n def _load(self) -> pd.DataFrame:\n load_path = Path(self._get_load_path())\n return pd.read_parquet(load_path, engine=self._engine, **self._load_args)\n\n def _save(self, data: pd.DataFrame) -> None:\n save_path = Path(self._get_save_path())\n save_path.parent.mkdir(parents=True, exist_ok=True)\n data.to_parquet(save_path, engine=self._engine, **self._save_args)\n\n load_path = Path(self._get_load_path())\n self._check_paths_consistency(load_path.absolute(), save_path.absolute())\n\n def _exists(self) -> bool:\n try:\n path = self._get_load_path()\n except DataSetError:\n return False\n return Path(path).is_file()\n" ]
[ [ "pandas.read_parquet" ] ]
MistQue/kendryte-model-compiler
[ "36af917defb37880037fb84330ab995ed44311e1" ]
[ "model_loader/pb/layer_list_to_k210_layer.py" ]
[ "'''\n * Copyright 2018 Canaan Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n '''\n\nimport k210_layer\nimport numpy as np\nfrom . import tensor_list_to_layer_list\n\n\ndef make_k210_layer_from_tensor(sess, dataset, buffer, input_min, input_max, eight_bit_mode, range_from_batch, idx):\n pool_tensor_info = dict()\n pool_type_size_stride = None # bypass pool\n\n if isinstance(buffer[-1], tensor_list_to_layer_list.LayerConvolutional) \\\n or isinstance(buffer[-1], tensor_list_to_layer_list.LayerDepthwiseConvolutional):\n conv_layer = buffer.pop()\n\n input_shape = list(sess.run(conv_layer.tensor_conv_x, dataset).shape)\n conved_shape = list(sess.run(conv_layer.tensor_conv_y, dataset).shape)\n\n if conv_layer.tensor_conv_x.op.type == 'SpaceToBatchND':\n print('[warning] found SpaceToBatchND fix input_shape/')\n input_shape[1] = input_shape[1]-2\n input_shape[2] = input_shape[2]-2\n\n weights_min, weights_max, _ = range_from_batch(sess, conv_layer.tensor_conv_w, dataset, is_weights=True)\n conv_weights = conv_layer.weights\n conv_isdw = isinstance(conv_layer, tensor_list_to_layer_list.LayerDepthwiseConvolutional)\n conv_tensor_info = {'name': conv_layer.tensor_conv_y.name}\n\n if int(conv_layer.config['batch_normalize']) == 1:\n bn_mean_var_gamma_beta_epsilon = [\n conv_layer.batch_normalize_moving_mean,\n conv_layer.batch_normalize_moving_variance,\n conv_layer.batch_normalize_gamma,\n conv_layer.batch_normalize_beta,\n conv_layer.batch_normalize_epsilon\n ]\n bn_tensor_info = {'name': 'bn'}\n else:\n bias_shape = conv_layer.bias.shape\n bn_mean_var_gamma_beta_epsilon = [\n 0, 1, np.ones(bias_shape), conv_layer.bias, 0\n ]\n bn_tensor_info = {'name': 'bn'}\n\n tensor_act = conv_layer.tensor_activation\n act_min_y, act_max_y, _ = range_from_batch(sess, tensor_act, dataset)\n act_type = conv_layer.config['activation']\n act_tensor_info = {'name': tensor_act.name if tensor_act is not None else 'default_linear'}\n output_shape = tensor_act.shape\n else:\n raise ValueError('unsupported type seq: ', *[type(l) for l in buffer])\n\n if len(buffer) > 0 and isinstance(buffer[-1], tensor_list_to_layer_list.LayerPool):\n pool_layer = buffer.pop()\n assert (isinstance(pool_layer, tensor_list_to_layer_list.LayerPool))\n pool_size = pool_layer.config['size']\n pool_stride = pool_layer.config['stride']\n pool_type = pool_layer.tensor_pool.op.type\n\n if pool_size == 2 and pool_layer.tensor_pool.op.inputs[0].shape[3] % 2 != 0:\n if pool_layer.tensor_pool.op.get_attr('padding') == b'SAME':\n raise ValueError(\"at {} unsupport padding mode SAME of pooling with size == 2\" \\\n .format(pool_layer.tensor_pool.name))\n\n pool_type_size_stride = [pool_type, pool_size, pool_stride]\n pool_tensor_info = {'name': pool_layer.tensor_pool.op.name}\n\n return {\n 'iwo_minmax':[input_min, input_max, weights_min, weights_max, act_min_y, act_max_y],\n 'ico_shapes':[input_shape, conved_shape, output_shape],\n 'conv_weights_isdw':[conv_weights, conv_isdw],\n 
'bn_mean_var_gamma_beta_epsilon':bn_mean_var_gamma_beta_epsilon,\n 'act_type':act_type,\n 'pool_type_size_stride':pool_type_size_stride,\n 'eight_bit_mode':eight_bit_mode,\n 'cbap_tensor_info':[conv_tensor_info, bn_tensor_info, act_tensor_info, pool_tensor_info],\n 'idx':idx\n }\n\n\ndef gen_k210_layers(layers: [tensor_list_to_layer_list.LayerBase], sess, dataset, range_from_batch,\n eight_bit_mode=False, input_min=0, input_max=1, layer_start_idx=0):\n buffer = list(layers)\n buffer.reverse()\n kl_args_list = []\n\n net = buffer.pop()\n assert (isinstance(net, tensor_list_to_layer_list.LayerNet))\n\n while len(buffer) != 0:\n if len(kl_args_list) > 0:\n last_min = kl_args_list[-1]['iwo_minmax'][4]\n last_max = kl_args_list[-1]['iwo_minmax'][5]\n else:\n last_min = input_min\n last_max = input_max\n\n cur_k210_arg = make_k210_layer_from_tensor(\n sess=sess, dataset=dataset,\n buffer=buffer,\n input_min=last_min, input_max=last_max,\n eight_bit_mode=eight_bit_mode,\n range_from_batch=range_from_batch,\n idx=len(kl_args_list)+layer_start_idx\n )\n kl_args_list.append(cur_k210_arg)\n\n kl_args_fixed = k210_layer.k210_layer_post_fix(kl_args_list)\n kl_list = [k210_layer.K210Layer(**kl_args) for kl_args in kl_args_fixed]\n return kl_list\n" ]
[ [ "numpy.ones" ] ]
Johnzhjw/MOE-DGNAS
[ "3387808846fdb0dd8446117cef261b24c38d1702" ]
[ "CCMOEA.py" ]
[ "# -*- coding: utf-8 -*-\nimport numpy as np\nimport geatpy as ea # 导入geatpy库\nfrom sys import path as paths\nfrom os import path\nimport random\nfrom SurroModel import get_surrogate_model\n\npaths.append(path.split(path.split(path.realpath(__file__))[0])[0])\n\n\ndef remove_duplication(pop):\n x = pop.decoding()\n x_unique, ind = np.unique(x, return_index=True, axis=0)\n ind = np.sort(ind)\n pop = pop[ind]\n return pop, ind\n\n\nclass CCNSGA2_archive(ea.MoeaAlgorithm):\n\n def __init__(self, problem, population, grps, CC_flag, CC_type, surrog_flag, name_surrog, folder):\n ea.MoeaAlgorithm.__init__(self, problem, population) # 先调用父类构造方法\n if population.ChromNum != 1:\n raise RuntimeError('传入的种群对象必须是单染色体的种群类型。')\n self.name = 'CCNSGA2-archive'\n self.grps = grps\n if population.Encoding == 'BG':\n self.bit_lens = population.Field[0].astype(np.int32)\n self.bit_offs = np.cumsum(self.bit_lens).astype(np.int32)\n self.grps = []\n for grp in grps:\n tmp_grp = []\n for i in grp:\n for _ in range(self.bit_lens[i]):\n tmp_grp.append(self.bit_offs[i] - self.bit_lens[i] + _)\n self.grps.append(tmp_grp)\n self.rem_grps = []\n for _ in range(len(grps)):\n rem_idx = []\n for i, grp in enumerate(self.grps):\n if i != _:\n rem_idx += grp\n self.rem_grps.append(rem_idx)\n self.CC_flag = CC_flag\n self.CC_type = CC_type\n self.surrog_flag = surrog_flag\n self.name_surrog = name_surrog\n self.folder = folder\n if self.problem.M < 10:\n self.ndSort = ea.ndsortESS # 采用ENS_SS进行非支配排序\n else:\n self.ndSort = ea.ndsortTNS # 高维目标采用T_ENS进行非支配排序,速度一般会比ENS_SS要快\n self.selFunc = 'tour' # 选择方式,采用锦标赛选择\n '''\n if population.Encoding == 'P':\n self.recOper = ea.Xovpmx(XOVR=1) # 生成部分匹配交叉算子对象\n self.mutOper = ea.Mutinv(Pm=1) # 生成逆转变异算子对象\n elif population.Encoding == 'BG':\n self.recOper = ea.Xovud(XOVR=1) # 生成均匀交叉算子对象\n self.mutOper = ea.Mutbin(Pm=None) # 生成二进制变异算子对象,Pm设置为None时,具体数值取变异算子中Pm的默认值\n elif population.Encoding == 'RI':\n self.recOper = ea.Recsbx(XOVR=1, n=20) # 生成模拟二进制交叉算子对象\n self.mutOper = ea.Mutpolyn(Pm=1 / self.problem.Dim, DisI=20) # 生成多项式变异算子对象\n self.crsOper = ea.Xovbd(XOVR=0.5, Half_N=True) # 生成二项式分布交叉算子对象,这里的XOVR即为DE中的Cr\n else:\n raise RuntimeError('编码方式必须为''BG''、''RI''或''P''.')\n '''\n if population.Encoding == 'RI':\n self.mutOper = ea.Mutde(F=0.5) # 生成差分变异算子对象\n self.pmOper = ea.Mutpolyn(Pm=1 / self.problem.Dim, DisI=20) # 生成多项式变异算子对象\n self.crsOper1 = ea.Xovbd(XOVR=0.5, Half_N=True) # 生成二项式分布交叉算子对象,这里的XOVR即为DE中的Cr\n self.crsOper2 = ea.Xovbd(XOVR=0.5, Half_N=True) # 生成二项式分布交叉算子对象,这里的XOVR即为DE中的Cr\n self.xosp = ea.Xovsp(XOVR=1.0)\n self.xodp = ea.Xovdp(XOVR=1.0)\n self.gmOper = ea.Mutgau(Pm=1 / self.problem.Dim)\n elif population.Encoding == 'BG':\n self.recOper = ea.Xovud(XOVR=0.5) # 生成均匀交叉算子对象\n self.mutOper = ea.Mutbin(Pm=None) # 生成二进制变异算子对象,Pm设置为None时,具体数值取变异算子中Pm的默认值\n self.crsOper1 = ea.Xovbd(XOVR=0.5, Half_N=True) # 生成二项式分布交叉算子对象,这里的XOVR即为DE中的Cr\n self.crsOper2 = ea.Xovbd(XOVR=0.5, Half_N=True) # 生成二项式分布交叉算子对象,这里的XOVR即为DE中的Cr\n self.xosp = ea.Xovsp(XOVR=1.0)\n self.xodp = ea.Xovdp(XOVR=1.0)\n self.gmOper = ea.Mutgau(Pm=None)\n else:\n raise RuntimeError('编码方式必须为''BG''或''RI''.')\n self.NP = population.sizes\n self.MAXSIZE = 10 * population.sizes # 全局非支配解存档的大小限制,默认为10倍的种群个体数\n self.MAXSIZE_acc = population.sizes\n self.useSurrogate = False\n self.surrogate = None\n self.MAXSIZE_surrogate = 150\n self.gap_surrogate = 5\n self.ngn_surrogate = 100\n self.Pm_min = 1.0 / self.problem.Dim\n if population.Encoding == 'BG':\n self.Pm_min = 1.0 / sum(population.Field[0])\n self.Pm_max = 0.5 # 3 * 
self.Pm_min\n self.Pm_rng = self.Pm_min - self.Pm_min\n self.XOVR_max = 0.5\n self.XOVR_min = 0.1\n self.XOVR_rng = self.XOVR_max - self.XOVR_min\n self.Encoding = population.Encoding\n\n def call_aimFunc(self, pop):\n\n \"\"\"\n 使用注意:\n 本函数调用的目标函数形如:aimFunc(pop), (在自定义问题类中实现)。\n 其中pop为种群类的对象,代表一个种群,\n pop对象的Phen属性(即种群染色体的表现型)等价于种群所有个体的决策变量组成的矩阵。\n 若不符合上述规范,则请修改算法模板或自定义新算法模板。\n\n 描述:\n 该函数调用自定义问题类中自定义的目标函数aimFunc()得到种群所有个体的目标函数值组成的矩阵,\n 以及种群个体违反约束程度矩阵(假如在aimFunc()中构造了该矩阵的话)。\n 该函数不返回任何的返回值,求得的目标函数值矩阵保存在种群对象的ObjV属性中,\n 违反约束程度矩阵保存在种群对象的CV属性中。\n 例如:population为一个种群对象,则调用call_aimFunc(population)即可完成目标函数值的计算。\n 之后可通过population.ObjV得到求得的目标函数值,population.CV得到违反约束程度矩阵。\n\n 输入参数:\n pop : class <Population> - 种群对象。\n\n 输出参数:\n 无输出参数。\n\n \"\"\"\n\n # print(pop.sizes)\n pop.Phen = pop.decoding() # 染色体解码\n if self.problem is None:\n raise RuntimeError('error: problem has not been initialized. (算法模板中的问题对象未被初始化。)')\n if self.useSurrogate:\n if len(pop) > 0:\n self.problem.aimFunc_no_train(pop) # 调用问题类的aimFunc()\n acc_pred = self.surrogate.predict_values(pop.Phen)\n pop.ObjV[:, 0] = acc_pred.reshape(-1)\n else:\n self.problem.aimFunc(pop) # 调用问题类的aimFunc()\n self.evalsNum = self.evalsNum + pop.sizes if self.evalsNum is not None else pop.sizes # 更新评价次数\n # 格式检查\n if not isinstance(pop.ObjV, np.ndarray) or pop.ObjV.ndim != 2 or pop.ObjV.shape[0] != pop.sizes or \\\n pop.ObjV.shape[1] != self.problem.M:\n raise RuntimeError('error: ObjV is illegal. (目标函数值矩阵ObjV的数据格式不合法,请检查目标函数的计算。)')\n if pop.CV is not None:\n if not isinstance(pop.CV, np.ndarray) or pop.CV.ndim != 2 or pop.CV.shape[0] != pop.sizes:\n raise RuntimeError('error: CV is illegal. (违反约束程度矩阵CV的数据格式不合法,请检查CV的计算。)')\n\n def reinsertion(self, population, offspring, NUM, globalNDSet):\n\n \"\"\"\n 描述:\n 重插入个体产生新一代种群(采用父子合并选择的策略)。\n NUM为所需要保留到下一代的个体数目,globalNDSet为全局非支配解存档。\n \"\"\"\n\n # 父子两代合并\n population = population + offspring\n globalNDSet = population + globalNDSet # 将population与全局存档合并\n # 非支配排序分层\n [levels, criLevel] = self.ndSort(globalNDSet.ObjV, None, None, globalNDSet.CV, self.problem.maxormins)\n # 更新全局存档\n globalNDSet = globalNDSet[np.where(levels == 1)[0]]\n if globalNDSet.CV is not None: # CV不为None说明有设置约束条件\n globalNDSet = globalNDSet[np.where(np.all(globalNDSet.CV <= 0, 1))[0]] # 排除非可行解\n if globalNDSet.sizes > self.MAXSIZE:\n dis = ea.crowdis(globalNDSet.ObjV, np.ones(globalNDSet.sizes)) # 计算拥挤距离\n globalNDSet = globalNDSet[np.argsort(-dis)[:self.MAXSIZE]] # 根据拥挤距离选择符合个数限制的解保留在存档中\n # 选择个体保留到下一代\n levels = levels[: population.sizes] # 得到与population个体对应的levels\n dis = ea.crowdis(population.ObjV, levels) # 计算拥挤距离\n population.FitnV[:, 0] = np.argsort(np.lexsort(np.array([dis, -levels])), kind='mergesort') # 计算适应度\n chooseFlag = ea.selecting('dup', population.FitnV, NUM) # 调用低级选择算子dup进行基于适应度排序的选择,保留NUM个个体\n return population[chooseFlag], globalNDSet\n\n def reinsertion_update(self, population, archive_acc, NUM, globalNDSet):\n\n \"\"\"\n 描述:\n 重插入个体产生新一代种群(采用父子合并选择的策略)。\n NUM为所需要保留到下一代的个体数目,globalNDSet为全局非支配解存档。\n \"\"\"\n\n # 父子两代合并\n population = archive_acc\n globalNDSet = archive_acc # 将population与全局存档合并\n # 非支配排序分层\n [levels, criLevel] = self.ndSort(globalNDSet.ObjV, None, None, globalNDSet.CV, self.problem.maxormins)\n # 更新全局存档\n globalNDSet = globalNDSet[np.where(levels == 1)[0]]\n if globalNDSet.CV is not None: # CV不为None说明有设置约束条件\n globalNDSet = globalNDSet[np.where(np.all(globalNDSet.CV <= 0, 1))[0]] # 排除非可行解\n if globalNDSet.sizes > self.MAXSIZE or population.sizes > self.NP:\n if globalNDSet.sizes > self.MAXSIZE:\n dis = 
ea.crowdis(globalNDSet.ObjV, np.ones(globalNDSet.sizes)) # 计算拥挤距离\n globalNDSet = globalNDSet[np.argsort(-dis)[:self.MAXSIZE]] # 根据拥挤距离选择符合个数限制的解保留在存档中\n if population.sizes > NUM:\n dis = ea.crowdis(population.ObjV, np.ones(population.sizes)) # 计算拥挤距离\n population.FitnV = np.argsort(np.lexsort(np.array([dis, -levels])), kind='mergesort').reshape(-1,\n 1) # 计算适应度\n chooseFlag = ea.selecting('dup', population.FitnV, NUM) # 调用低级选择算子dup进行基于适应度排序的选择,保留NUM个个体\n population = population[chooseFlag]\n return population, globalNDSet\n\n def reinsertion_surrogate(self, population):\n\n \"\"\"\n 描述:\n surrogate 训练存档更新\n 重插入个体产生新一代种群(采用父子合并选择的策略)。\n NUM为所需要保留到下一代的个体数目。\n \"\"\"\n\n # 父子两代合并\n population, ind = remove_duplication(population)\n if population.sizes > self.MAXSIZE_surrogate:\n # 非支配排序分层\n [levels, criLevel] = self.ndSort(population.ObjV, None, None, population.CV, self.problem.maxormins)\n # 选择个体保留到下一代\n dis = ea.crowdis(population.ObjV, levels) # 计算拥挤距离\n population.FitnV[:, 0] = np.argsort(np.lexsort(np.array([dis, -levels])), kind='mergesort') # 计算适应度\n chooseFlag = ea.selecting('dup', population.FitnV,\n self.MAXSIZE_surrogate) # 调用低级选择算子dup进行基于适应度排序的选择,保留NUM个个体\n population = population[chooseFlag]\n return population\n\n def reinsertion_acc(self, pop_acc):\n\n \"\"\"\n 描述:\n acc 训练存档更新\n 重插入个体产生新一代种群(采用父子合并选择的策略)。\n NUM为所需要保留到下一代的个体数目。\n \"\"\"\n\n # 父子两代合并\n globalACCSet = pop_acc\n if globalACCSet.sizes > self.MAXSIZE_acc:\n globalACCSet.FitnV = globalACCSet.ObjV[:, 0].reshape(-1, 1)\n chooseFlag = ea.selecting('dup', globalACCSet.FitnV, self.MAXSIZE_acc)\n globalACCSet = globalACCSet[chooseFlag]\n return globalACCSet\n\n def run(self, prophetPop=None): # prophetPop为先知种群(即包含先验知识的种群)\n # ==========================初始化配置===========================\n population = self.population\n NIND = population.sizes\n self.initialization() # 初始化算法模板的一些动态参数\n # ===========================准备进化============================\n population.initChrom() # 初始化种群染色体矩阵\n self.call_aimFunc(population) # 计算种群的目标函数值\n # 插入先验知识(注意:这里不会对先知种群prophetPop的合法性进行检查,故应确保prophetPop是一个种群类且拥有合法的Chrom、ObjV、Phen等属性)\n if prophetPop is not None:\n population = (prophetPop + population)[:NIND] # 插入先知种群\n [levels, criLevel] = self.ndSort(population.ObjV, NIND, None, population.CV,\n self.problem.maxormins) # 对NIND个个体进行非支配分层\n dis = ea.crowdis(population.ObjV, levels) # 计算拥挤距离\n population.FitnV = np.argsort(np.lexsort(np.array([dis, -levels])), kind='mergesort').reshape(-1, 1) # 计算适应度\n globalNDSet = population[np.where(levels == 1)[0]] # 创建全局存档,该全局存档贯穿进化始终,随着进化不断更新\n if globalNDSet.CV is not None: # CV不为None说明有设置约束条件\n globalNDSet = globalNDSet[np.where(np.all(globalNDSet.CV <= 0, 1))[0]] # 排除非可行解\n population.save(self.folder + '/pop' + '%04d' % self.currentGen)\n globalNDSet.save(self.folder + '/nds' + '%04d' % self.currentGen)\n train_arc = population\n train_arc = self.reinsertion_surrogate(train_arc)\n train_arc.save(self.folder + '/train_arc' + '%04d' % self.currentGen)\n pop_acc, ind = remove_duplication(population)\n globalACCSet = self.reinsertion_acc(pop_acc)\n pop_acc.save(self.folder + '/pop_acc' + '%04d' % self.currentGen)\n globalACCSet.save(self.folder + '/glb_acc' + '%04d' % self.currentGen)\n # ===========================开始进化============================\n if self.surrog_flag:\n population, globalNDSet = self.evo_with_surrogate(population, NIND, globalNDSet,\n globalACCSet, train_arc, pop_acc)\n else:\n population, globalNDSet = self.evo_without_surrogate(population, NIND, globalNDSet,\n globalACCSet, train_arc, 
pop_acc)\n return self.finishing(population, globalNDSet) # 调用finishing完成后续工作并返回结果\n\n def evo_without_surrogate(self, population, NIND, globalNDSet, globalACCSet, train_arc, pop_acc):\n while self.terminated(population) == False:\n rate_gen = self.currentGen / self.MAXGEN\n XOVR = self.XOVR_max # - self.XOVR_rng * rate_gen\n Pm = self.Pm_min # + self.Pm_rng * (1 - rate_gen) * (1 - rate_gen)\n if self.Encoding == 'BG':\n self.recOper.XOVR = XOVR\n self.mutOper.Pm_min = Pm\n self.gmOper.Pm_min = Pm\n else:\n self.pmOper.Pm_min = Pm\n self.gmOper.Pm_min = Pm\n if True: # self.currentGen % self.gap_surrogate:\n if True: # self.currentGen % 2:\n population.FitnV = population.ObjV[:, 0].reshape(-1, 1) # 计算适应度\n globalACCSet.FitnV = globalACCSet.ObjV[:, 0].reshape(-1, 1) # 计算适应度\n if self.CC_flag:\n for _ in range(len(self.grps)):\n population.FitnV = population.ObjV[:, 0].reshape(-1, 1) # 计算适应度\n globalACCSet.FitnV = globalACCSet.ObjV[:, 0].reshape(-1, 1) # 计算适应度\n offspring = self.evo_one_grp(population, NIND, _, pop_acc, globalACCSet, False)\n all_offspring = offspring if _ == 0 else all_offspring + offspring\n population, globalNDSet = self.reinsertion(population, offspring, NIND, globalNDSet)\n offspring, ind = remove_duplication(all_offspring)\n else:\n offspring = self.evo_all_var_no_mut0(population, NIND, pop_acc, globalACCSet, False)\n population, globalNDSet = self.reinsertion(population, offspring, NIND, globalNDSet)\n pop_acc = pop_acc + offspring\n globalACCSet = self.reinsertion_acc(pop_acc)\n else:\n self.get_surrogate(train_arc)\n population_bkp, globalNDSet_bkp = population, globalNDSet\n for ig in range(self.ngn_surrogate):\n if ig % 2:\n population.FitnV = population.ObjV[:, 0].reshape(-1, 1) # 计算适应度\n globalACCSet.FitnV = globalACCSet.ObjV[:, 0].reshape(-1, 1) # 计算适应度\n if self.CC_flag:\n for _ in range(len(self.grps)):\n offspring = self.evo_one_grp(population, NIND, _, pop_acc, globalACCSet, True)\n population, globalNDSet = self.reinsertion(population, offspring, NIND, globalNDSet)\n else:\n offspring = self.evo_all_var_no_mut0(population, NIND, pop_acc, globalACCSet, True)\n population, globalNDSet = self.reinsertion(population, offspring, NIND, globalNDSet)\n not_duplicate = np.logical_not([any(all(x == a) for a in pop_acc.Phen) for x in population.Phen])\n offspring = population[not_duplicate]\n offspring, ind = remove_duplication(offspring)\n self.useSurrogate = False\n self.call_aimFunc(offspring)\n pop_acc = pop_acc + offspring\n globalACCSet = self.reinsertion_acc(pop_acc)\n population, globalNDSet = population_bkp, globalNDSet_bkp\n population, globalNDSet = self.reinsertion(population, offspring, NIND, globalNDSet)\n population.save(self.folder + '/pop' + '%04d' % self.currentGen)\n globalNDSet.save(self.folder + '/nds' + '%04d' % self.currentGen)\n train_arc = self.reinsertion_surrogate(train_arc + offspring)\n train_arc.save(self.folder + '/train_arc' + '%04d' % self.currentGen)\n pop_acc.save(self.folder + '/pop_acc' + '%04d' % self.currentGen)\n globalACCSet.save(self.folder + '/glb_acc' + '%04d' % self.currentGen)\n return population, globalNDSet\n\n def evo_with_surrogate(self, population, NIND, globalNDSet, globalACCSet, train_arc, pop_acc):\n while self.terminated(population) == False:\n rate_gen = self.currentGen / self.MAXGEN\n XOVR = self.XOVR_max # - self.XOVR_rng * rate_gen\n Pm = self.Pm_min # + self.Pm_rng * (1 - rate_gen) * (1 - rate_gen)\n if self.Encoding == 'BG':\n self.recOper.XOVR = XOVR\n self.mutOper.Pm_min = Pm\n self.gmOper.Pm_min = Pm\n 
else:\n self.pmOper.Pm_min = Pm\n self.gmOper.Pm_min = Pm\n if self.currentGen % self.gap_surrogate:\n population.FitnV = population.ObjV[:, 0].reshape(-1, 1) # compute fitness\n globalACCSet.FitnV = globalACCSet.ObjV[:, 0].reshape(-1, 1) # compute fitness\n if self.CC_flag:\n self.get_surrogate(train_arc)\n for _ in range(len(self.grps)):\n tmp_offspring = self.evo_one_grp(population, NIND, _, pop_acc, globalACCSet, True)\n offspring = tmp_offspring if _ == 0 else offspring + tmp_offspring\n offspring, ind = remove_duplication(offspring)\n offspring.FitnV = offspring.ObjV[:, 0].reshape(-1, 1) # compute fitness\n chooseFlag = ea.selecting('dup', offspring.FitnV, (NIND + 1) // 2)\n rem_ind = [_ for _ in [i for i in range(offspring.sizes)] if _ not in chooseFlag]\n pop_rem = offspring[rem_ind]\n if len(pop_acc) > 0:\n vrt_pred = self.surrogate.predict_variances(pop_rem.Phen)\n pop_rem.FitnV = vrt_pred.reshape(-1, 1) # compute fitness\n chooseFlag2 = ea.selecting('dup', pop_rem.FitnV, NIND // 2)\n chooseFlag = np.concatenate((chooseFlag, np.array(rem_ind)[chooseFlag2]))\n chooseFlag = np.unique(chooseFlag)\n offspring = offspring[chooseFlag]\n self.useSurrogate = False\n self.call_aimFunc(offspring) # evaluate objective values of the evolved individuals\n else:\n offspring = self.evo_all_var_no_mut0(population, NIND, pop_acc, globalACCSet, False)\n population, globalNDSet = self.reinsertion(population, offspring, NIND, globalNDSet)\n pop_acc = pop_acc + offspring\n globalACCSet = self.reinsertion_acc(pop_acc)\n else:\n self.get_surrogate(train_arc)\n population_bkp, globalNDSet_bkp = population, globalNDSet\n for ig in range(self.ngn_surrogate):\n if self.CC_flag:\n for _ in range(len(self.grps)):\n # if ig % 2:\n population.FitnV = population.ObjV[:, 0].reshape(-1, 1) # compute fitness\n globalACCSet.FitnV = globalACCSet.ObjV[:, 0].reshape(-1, 1) # compute fitness\n offspring = self.evo_one_grp(population, NIND, _, pop_acc, globalACCSet, True)\n population, globalNDSet = self.reinsertion(population, offspring, NIND, globalNDSet)\n else:\n # if ig % 2:\n population.FitnV = population.ObjV[:, 0].reshape(-1, 1) # compute fitness\n globalACCSet.FitnV = globalACCSet.ObjV[:, 0].reshape(-1, 1) # compute fitness\n offspring = self.evo_all_var_no_mut0(population, NIND, pop_acc, globalACCSet, True)\n population, globalNDSet = self.reinsertion(population, offspring, NIND, globalNDSet)\n not_duplicate = np.logical_not([any(all(x == a) for a in pop_acc.Phen) for x in population.Phen])\n offspring = population[not_duplicate]\n offspring, ind = remove_duplication(offspring)\n self.useSurrogate = False\n self.call_aimFunc(offspring)\n pop_acc = pop_acc + offspring\n globalACCSet = self.reinsertion_acc(pop_acc)\n population, globalNDSet = population_bkp, globalNDSet_bkp\n population, globalNDSet = self.reinsertion(population, offspring, NIND, globalNDSet)\n population.save(self.folder + '/pop' + '%04d' % self.currentGen)\n globalNDSet.save(self.folder + '/nds' + '%04d' % self.currentGen)\n train_arc = self.reinsertion_surrogate(train_arc + offspring)\n train_arc.save(self.folder + '/train_arc' + '%04d' % self.currentGen)\n pop_acc.save(self.folder + '/pop_acc' + '%04d' % self.currentGen)\n globalACCSet.save(self.folder + '/glb_acc' + '%04d' % self.currentGen)\n return population, globalNDSet\n\n def evo_one_grp(self, population, NIND, i_grp, pop_acc, globalACCSet, useSurrogate):\n _ = i_grp\n # select individuals to take part in evolution\n if self.Encoding == 'BG':\n offspring = population[ea.selecting('tour', population.FitnV, NIND)]\n offspring.Chrom = self.recOper.do(offspring.Chrom) # recombination\n else:\n r0 = ea.selecting('ecs', population.FitnV, NIND) # 
get base vector indices\n offspring = population.copy() # holds the offspring population\n offspring.Chrom = self.mutOper.do(offspring.Encoding, offspring.Chrom, offspring.Field, [r0]) # mutation\n if self.CC_type == 'CC_copy':\n offspring.Chrom[:, self.rem_grps[_]] = population.Chrom[:, self.rem_grps[_]]\n elif self.CC_type == 'CC_cross':\n tmp_pop01 = population[ea.selecting('tour', population.FitnV, NIND)]\n tmp_pop02 = population[ea.selecting('tour', population.FitnV, NIND)]\n tmp_Chrom = self.crsOper2.do(np.vstack([tmp_pop01.Chrom, tmp_pop02.Chrom])) # recombination\n tmp_Chrom = self.crsOper1.do(np.vstack([population.Chrom, tmp_Chrom])) # recombination\n offspring.Chrom[:, self.rem_grps[_]] = tmp_Chrom[:, self.rem_grps[_]]\n elif self.CC_type == 'CC_toBest':\n ind_candid = np.argsort(population.ObjV[:, 0])[:2].tolist()\n tmp_Chrom0 = population[random.choices(ind_candid, k=NIND)].Chrom\n offspring.Chrom[:, self.rem_grps[_]] = tmp_Chrom0[:, self.rem_grps[_]]\n if self.Encoding == 'BG':\n offspring.Chrom = self.mutOper.do(offspring.Encoding, offspring.Chrom, offspring.Field) # mutation\n else:\n offspring.Chrom = self.pmOper.do(offspring.Encoding, offspring.Chrom, offspring.Field) # mutation\n offspring.Phen = offspring.decoding()\n not_duplicate = np.logical_not([any(all(x == a) for a in pop_acc.Phen) for x in offspring.Phen])\n offspring = offspring[not_duplicate]\n offspring, ind = remove_duplication(offspring)\n self.useSurrogate = useSurrogate\n self.call_aimFunc(offspring) # evaluate objective values of the evolved individuals\n return offspring\n\n def evo_one_grp_no_mut(self, population, NIND, i_grp, pop_acc, globalACCSet, useSurrogate):\n _ = i_grp\n # select individuals to take part in evolution\n r0 = ea.selecting('ecs', population.FitnV, NIND) # get base vector indices\n offspring = population.copy() # holds the offspring population\n if self.CC_type == 'CC_copy':\n ind_candid = np.argsort(population.ObjV[:, 0])[:2].tolist()\n tmp_Chrom0 = population[random.choices(ind_candid, k=NIND)].Chrom\n offspring.Chrom[:, self.rem_grps[_]] = tmp_Chrom0[:, self.rem_grps[_]]\n elif self.CC_type == 'CC_cross':\n tmp_pop01 = population[ea.selecting('tour', population.FitnV, NIND)]\n tmp_pop02 = population[ea.selecting('tour', population.FitnV, NIND)]\n tmp_Chrom = self.crsOper2.do(np.vstack([tmp_pop01.Chrom, tmp_pop02.Chrom])) # recombination\n tmp_Chrom = self.crsOper1.do(np.vstack([population.Chrom, tmp_Chrom])) # recombination\n offspring.Chrom[:, self.rem_grps[_]] = tmp_Chrom[:, self.rem_grps[_]]\n elif self.CC_type == 'CC_toBest':\n tmp_pop = population[ea.selecting('ecs', population.FitnV, NIND)]\n offspring.Chrom[:, self.rem_grps[_]] = tmp_pop.Chrom[:, self.rem_grps[_]]\n offspring.Chrom = self.pmOper.do(offspring.Encoding, offspring.Chrom, offspring.Field) # mutation\n offspring.Phen = offspring.decoding()\n not_duplicate = np.logical_not([any(all(x == a) for a in pop_acc.Phen) for x in offspring.Phen])\n offspring = offspring[not_duplicate]\n offspring, ind = remove_duplication(offspring)\n self.useSurrogate = useSurrogate\n self.call_aimFunc(offspring) # evaluate objective values of the evolved individuals\n return offspring\n\n def evo_all_var_no_mut(self, population, NIND, pop_acc, globalACCSet, useSurrogate):\n if self.Encoding == 'BG':\n offspring = population[ea.selecting('tour', population.FitnV, NIND)]\n offspring.Chrom = self.recOper.do(offspring.Chrom) # recombination\n else:\n offspring = globalACCSet # [ea.selecting('ecs', globalACCSet.FitnV, NIND)]\n tmp_pop01 = population[ea.selecting('tour', population.FitnV, NIND)]\n tmp_pop02 = population[ea.selecting('tour', population.FitnV, NIND)]\n tmp_Chrom = self.crsOper2.do(np.vstack([tmp_pop01.Chrom, tmp_pop02.Chrom])) # recombination\n tmp_Chrom = self.crsOper1.do(np.vstack([offspring.Chrom, 
tmp_Chrom])) # recombination\n offspring.Chrom = self.pmOper.do(offspring.Encoding, tmp_Chrom, offspring.Field) # mutation\n if self.Encoding == 'BG':\n offspring.Chrom = self.mutOper.do(offspring.Encoding, offspring.Chrom, offspring.Field) # mutation\n else:\n offspring.Chrom = self.pmOper.do(offspring.Encoding, offspring.Chrom, offspring.Field) # mutation\n offspring.Phen = offspring.decoding()\n not_duplicate = np.logical_not([any(all(x == a) for a in pop_acc.Phen) for x in offspring.Phen])\n offspring = offspring[not_duplicate]\n offspring, ind = remove_duplication(offspring)\n self.useSurrogate = useSurrogate\n self.call_aimFunc(offspring) # evaluate objective values of the evolved individuals\n return offspring\n\n def evo_all_var_no_mut0(self, population, NIND, pop_acc, globalACCSet, useSurrogate):\n if self.Encoding == 'BG':\n offspring = population[ea.selecting('tour', population.FitnV, NIND)]\n offspring.Chrom = self.recOper.do(offspring.Chrom) # recombination\n else:\n offspring = population[ea.selecting('ecs', population.FitnV, NIND)]\n tmp_pop01 = population[ea.selecting('tour', population.FitnV, NIND)]\n tmp_pop02 = population[ea.selecting('tour', population.FitnV, NIND)]\n tmp_Chrom = self.crsOper2.do(np.vstack([tmp_pop01.Chrom, tmp_pop02.Chrom])) # recombination\n tmp_Chrom = self.crsOper1.do(np.vstack([offspring.Chrom, tmp_Chrom])) # recombination\n offspring.Chrom = self.pmOper.do(offspring.Encoding, tmp_Chrom, offspring.Field) # mutation\n if self.Encoding == 'BG':\n offspring.Chrom = self.mutOper.do(offspring.Encoding, offspring.Chrom, offspring.Field) # mutation\n else:\n offspring.Chrom = self.pmOper.do(offspring.Encoding, offspring.Chrom, offspring.Field) # mutation\n offspring.Phen = offspring.decoding()\n not_duplicate = np.logical_not([any(all(x == a) for a in pop_acc.Phen) for x in offspring.Phen])\n offspring = offspring[not_duplicate]\n offspring, ind = remove_duplication(offspring)\n self.useSurrogate = useSurrogate\n self.call_aimFunc(offspring) # evaluate objective values of the evolved individuals\n return offspring\n\n def evo_all_var_dp(self, population, NIND, pop_acc, globalACCSet, useSurrogate):\n offspring = globalACCSet.copy() # holds the offspring population\n tmp_Chrom = self.xodp.do(offspring.Chrom) # recombination\n tmp_Chrom = self.pmOper.do(offspring.Encoding, tmp_Chrom, offspring.Field) # mutation\n offspring.Chrom = self.gmOper.do(offspring.Encoding, tmp_Chrom, offspring.Field) # mutation\n offspring.Phen = offspring.decoding()\n not_duplicate = np.logical_not([any(all(x == a) for a in pop_acc.Phen) for x in offspring.Phen])\n offspring = offspring[not_duplicate]\n offspring, ind = remove_duplication(offspring)\n self.useSurrogate = useSurrogate\n self.call_aimFunc(offspring) # evaluate objective values of the evolved individuals\n return offspring\n\n def evo_all_var_sp(self, population, NIND, pop_acc, globalACCSet, useSurrogate):\n offspring = globalACCSet.copy() # holds the offspring population\n tmp_Chrom = self.xosp.do(offspring.Chrom) # recombination\n tmp_Chrom = self.pmOper.do(offspring.Encoding, tmp_Chrom, offspring.Field) # mutation\n offspring.Chrom = self.gmOper.do(offspring.Encoding, tmp_Chrom, offspring.Field) # mutation\n offspring.Phen = offspring.decoding()\n not_duplicate = np.logical_not([any(all(x == a) for a in pop_acc.Phen) for x in offspring.Phen])\n offspring = offspring[not_duplicate]\n offspring, ind = remove_duplication(offspring)\n self.useSurrogate = useSurrogate\n self.call_aimFunc(offspring) # evaluate objective values of the evolved individuals\n return offspring\n\n def get_surrogate(self, train_arc):\n if self.name_surrog in ['KRG_MIXINT']:\n xt = np.concatenate([self.problem.search_space.generate_surrogate_actions_4_solution(_)\n for _ in train_arc.Phen])\n self.surrogate = get_surrogate_model(self.name_surrog, xt, train_arc.ObjV[:, 
0],\n self.problem.maxN_layers,\n self.problem.Dim, self.problem.xlimits)\n else:\n self.surrogate = get_surrogate_model(self.name_surrog, train_arc.Phen, train_arc.ObjV[:, 0],\n self.problem.maxN_layers,\n self.problem.Dim, self.problem.xlimits)\n" ]
[ [ "numpy.array", "numpy.ones", "numpy.where", "numpy.all", "numpy.sort", "numpy.unique", "numpy.cumsum", "numpy.argsort", "numpy.vstack" ] ]
NimrodCarmon/isofit
[ "e9a04d328bd4171131f3ce8e8b4eadf9a6901747" ]
[ "isofit/utils/apply_oe.py" ]
[ "#! /usr/bin/env python3\n#\n# Authors: David R Thompson and Philip G. Brodrick\n#\n\nimport argparse\nimport os\nfrom os.path import join, exists, split, abspath\nfrom shutil import copyfile\nfrom datetime import datetime\nfrom spectral.io import envi\nimport logging\nimport json\nimport gdal\nimport numpy as np\nfrom sklearn import mixture\nimport subprocess\nfrom sys import platform\nfrom typing import List\n\nfrom isofit.utils import segment, extractions, empirical_line\nfrom isofit.core import isofit, common\n\nEPS = 1e-6\nCHUNKSIZE = 256\nSEGMENTATION_SIZE = 400\n\nUNCORRELATED_RADIOMETRIC_UNCERTAINTY = 0.02\n\nINVERSION_WINDOWS = [[400.0, 1300.0], [1450, 1780.0], [2050.0, 2450.0]]\n\n\ndef main():\n \"\"\" This is a helper script to apply OE over a flightline using the MODTRAN radiative transfer engine.\n\n The goal is to run isofit in a fairly 'standard' way, accounting for the types of variation that might be\n considered typical. For instance, we use the observation (obs) and location (loc) files to determine appropriate\n MODTRAN view-angle geometry look up tables, and provide a heuristic means of determing atmospheric water ranges.\n\n This code also proivdes the capicity for speedup through the empirical line solution.\n\n Args:\n input_radiance (str): radiance data cube [expected ENVI format]\n input_loc (str): location data cube, (Lon, Lat, Elevation) [expected ENVI format]\n input_obs (str): observation data cube, (path length, to-sensor azimuth, to-sensor zenith, to-sun azimuth,\n to-sun zenith, phase, slope, aspect, cosine i, UTC time) [expected ENVI format]\n working_directory (str): directory to stage multiple outputs, will contain subdirectories\n sensor (str): the sensor used for acquisition, will be used to set noise and datetime settings. choices are:\n [ang, avcl, neon, prism]\n copy_input_files (Optional, int): flag to choose to copy input_radiance, input_loc, and input_obs locally into\n the working_directory. 0 for no, 1 for yes. Default 0\n modtran_path (Optional, str): Location of MODTRAN utility, alternately set with MODTRAN_DIR environment variable\n wavelength_path (Optional, str): Location to get wavelength information from, if not specified the radiance\n header will be used\n surface_category (Optional, str): The type of isofit surface priors to use. Default is multicomponent_surface\n aerosol_climatology_path (Optional, str): Specific aerosol climatology information to use in MODTRAN,\n default None\n rdn_factors_path (Optional, str): Specify a radiometric correction factor, if desired. default None\n surface_path (Optional, str): Path to surface model - required if surface is multicomponent_surface (default\n above). Alternately set with ISOFIT_SURFACE_MODEL environment variable. default None\n channelized_uncertainty_path (Optional, str): path to a channelized uncertainty file. default None\n lut_config_file (Optional, str): Path to a look up table configuration file, which will override defaults\n chocies. default None\n logging_level (Optional, str): Logging level with which to run isofit. Default INFO\n log_file (Optional, str): File path to write isofit logs to. Default None\n n_cores (Optional, int): Number of cores to run isofit with. Substantial parallelism is available, and full\n runs will be very slow in serial. Suggested to max this out on the available system. Default 1\n presolve (Optional, int): Flag to use a presolve mode to estimate the available atmospheric water range. 
Runs\n a preliminary inversion over the image with a 1-D LUT of water vapor, and uses the resulting range (slightly\n expanded) to bound determine the full LUT. Advisable to only use with small cubes or in concert with the\n empirical_line setting, or a significant speed penalty will be incurred. Choices - 0 off, 1 on. Default 0\n empirical_line (Optional, int): Use an empirical line interpolation to run full inversions over only a subset\n of pixels, determined using a SLIC superpixel segmentation, and use a KDTREE Of local solutions to\n interpolate radiance->reflectance. Generally a good option if not trying to analyze the atmospheric state\n at fine scale resolution. Choices - 0 off, 1 on. Default 0\n ray_temp_dir (Optional, str): Location of temporary directory for ray parallelization engine. Default is\n '/tmp/ray'\n\n Reference:\n D.R. Thompson, A. Braverman,P.G. Brodrick, A. Candela, N. Carbon, R.N. Clark,D. Connelly, R.O. Green, R.F.\n Kokaly, L. Li, N. Mahowald, R.L. Miller, G.S. Okin, T.H.Painter, G.A. Swayze, M. Turmon, J. Susilouto, and\n D.S. Wettergreen. Quantifying Uncertainty for Remote Spectroscopy of Surface Composition. Remote Sensing of\n Environment, 2020. doi: https://doi.org/10.1016/j.rse.2020.111898.\n\n\n Returns:\n np.array\n\n \"\"\"\n # Parse arguments\n parser = argparse.ArgumentParser(description=\"Apply OE to a block of data.\")\n parser.add_argument('input_radiance', type=str)\n parser.add_argument('input_loc', type=str)\n parser.add_argument('input_obs', type=str)\n parser.add_argument('working_directory', type=str)\n parser.add_argument('sensor', type=str, choices=['ang', 'avcl', 'neon', 'prism'])\n parser.add_argument('--copy_input_files', type=int, choices=[0,1], default=0)\n parser.add_argument('--modtran_path', type=str)\n parser.add_argument('--wavelength_path', type=str)\n parser.add_argument('--surface_category', type=str, default=\"multicomponent_surface\")\n parser.add_argument('--aerosol_climatology_path', type=str, default=None)\n parser.add_argument('--rdn_factors_path', type=str)\n parser.add_argument('--surface_path', type=str)\n parser.add_argument('--channelized_uncertainty_path', type=str)\n parser.add_argument('--lut_config_file', type=str)\n parser.add_argument('--logging_level', type=str, default=\"INFO\")\n parser.add_argument('--log_file', type=str, default=None)\n parser.add_argument('--n_cores', type=int, default=1)\n parser.add_argument('--presolve', choices=[0,1], type=int, default=0)\n parser.add_argument('--empirical_line', choices=[0,1], type=int, default=0)\n parser.add_argument('--ray_temp_dir', type=str, default='/tmp/ray')\n\n args = parser.parse_args()\n\n if args.copy_input_files == 1:\n args.copy_input_files = True\n else:\n args.copy_input_files = False\n\n if args.log_file is None:\n logging.basicConfig(format='%(message)s', level=args.logging_level)\n else:\n logging.basicConfig(format='%(message)s', level=args.logging_level, filename=args.log_file)\n\n lut_params = LUTConfig(args.lut_config_file)\n\n paths = Pathnames(args)\n paths.make_directories()\n paths.stage_files()\n\n # Based on the sensor type, get appropriate year/month/day info fro intial condition.\n # We'll adjust for line length and UTC day overrun later\n if args.sensor == 'ang':\n # parse flightline ID (AVIRIS-NG assumptions)\n dt = datetime.strptime(paths.fid[3:], '%Y%m%dt%H%M%S')\n dayofyear = dt.timetuple().tm_yday\n elif args.sensor == 'avcl':\n # parse flightline ID (AVIRIS-CL assumptions)\n dt = 
datetime.strptime('20{}t000000'.format(paths.fid[1:7]), '%Y%m%dt%H%M%S')\n dayofyear = dt.timetuple().tm_yday\n elif args.sensor == 'neon':\n dt = datetime.strptime(paths.fid, 'NIS01_%Y%m%d_%H%M%S')\n dayofyear = dt.timetuple().tm_yday\n elif args.sensor == 'prism':\n dt = datetime.strptime(paths.fid[3:], '%Y%m%dt%H%M%S')\n dayofyear = dt.timetuple().tm_yday\n\n h_m_s, day_increment, mean_path_km, mean_to_sensor_azimuth, mean_to_sensor_zenith, valid, \\\n to_sensor_azimuth_lut_grid, to_sensor_zenith_lut_grid = get_metadata_from_obs(paths.obs_working_path, lut_params)\n\n if day_increment:\n dayofyear += 1\n\n gmtime = float(h_m_s[0] + h_m_s[1] / 60.)\n\n # get radiance file, wavelengths\n if args.wavelength_path:\n chn, wl, fwhm = np.loadtxt(args.wavelength_path).T\n else:\n radiance_dataset = envi.open(paths.radiance_working_path + '.hdr')\n wl = np.array([float(w) for w in radiance_dataset.metadata['wavelength']])\n if 'fwhm' in radiance_dataset.metadata:\n fwhm = np.array([float(f) for f in radiance_dataset.metadata['fwhm']])\n elif 'FWHM' in radiance_dataset.metadata:\n fwhm = np.array([float(f) for f in radiance_dataset.metadata['FWHM']])\n else:\n fwhm = np.ones(wl.shape) * (wl[1] - wl[0])\n\n # Close out radiance dataset to avoid potential confusion\n del radiance_dataset\n\n # Convert to microns if needed\n if wl[0] > 100:\n wl = wl / 1000.0\n fwhm = fwhm / 1000.0\n\n # write wavelength file\n wl_data = np.concatenate([np.arange(len(wl))[:, np.newaxis], wl[:, np.newaxis],\n fwhm[:, np.newaxis]], axis=1)\n np.savetxt(paths.wavelength_path, wl_data, delimiter=' ')\n\n mean_latitude, mean_longitude, mean_elevation_km, elevation_lut_grid = get_metadata_from_loc(paths.loc_working_path, lut_params)\n\n # Need a 180 - here, as this is already in MODTRAN convention\n mean_altitude_km = mean_elevation_km + np.cos(np.deg2rad(180 - mean_to_sensor_zenith)) * mean_path_km\n\n logging.info('Path (km): %f, 180 - To-sensor Zenith (deg): %f, To-sensor Azimuth (deg) : %f, Altitude: %6.2f km' %\n (mean_path_km, mean_to_sensor_zenith, mean_to_sensor_azimuth, mean_altitude_km))\n\n\n\n # Superpixel segmentation\n if args.empirical_line == 1:\n if not exists(paths.lbl_working_path) or not exists(paths.radiance_working_path):\n logging.info('Segmenting...')\n segment(spectra=(paths.radiance_working_path, paths.lbl_working_path),\n flag=-9999, npca=5, segsize=SEGMENTATION_SIZE, nchunk=CHUNKSIZE)\n\n # Extract input data per segment\n for inp, outp in [(paths.radiance_working_path, paths.rdn_subs_path),\n (paths.obs_working_path, paths.obs_subs_path),\n (paths.loc_working_path, paths.loc_subs_path)]:\n if not exists(outp):\n logging.info('Extracting ' + outp)\n extractions(inputfile=inp, labels=paths.lbl_working_path,\n output=outp, chunksize=CHUNKSIZE, flag=-9999)\n\n\n if args.presolve == 1:\n\n # write modtran presolve template\n write_modtran_template(atmosphere_type='ATM_MIDLAT_SUMMER', fid=paths.fid, altitude_km=mean_altitude_km,\n dayofyear=dayofyear, latitude=mean_latitude, longitude=mean_longitude,\n to_sensor_azimuth=mean_to_sensor_azimuth, to_sensor_zenith=mean_to_sensor_zenith,\n gmtime=gmtime, elevation_km=mean_elevation_km,\n output_file=paths.h2o_template_path, ihaze_type='AER_NONE')\n\n max_water = calc_modtran_max_water(paths)\n\n # run H2O grid as necessary\n if not exists(paths.h2o_subs_path + '.hdr') or not exists(paths.h2o_subs_path):\n # Write the presolve connfiguration file\n h2o_grid = np.linspace(0.01, max_water - 0.01, 10).round(2)\n logging.info('Pre-solve H2O grid: 
{}'.format(h2o_grid))\n logging.info('Writing H2O pre-solve configuration file.')\n build_presolve_config(paths, h2o_grid, args.n_cores, args.empirical_line == 1, args.surface_category)\n\n # Run modtran retrieval\n logging.info('Run ISOFIT initial guess')\n retrieval_h2o = isofit.Isofit(paths.h2o_config_path, level='INFO', logfile=args.log_file)\n retrieval_h2o.run()\n\n # clean up unneeded storage\n for to_rm in ['*r_k', '*t_k', '*tp7', '*wrn', '*psc', '*plt', '*7sc', '*acd']:\n cmd = 'rm ' + join(paths.lut_h2o_directory, to_rm)\n logging.info(cmd)\n os.system(cmd)\n else:\n logging.info('Existing h2o-presolve solutions found, using those.')\n\n h2o = envi.open(paths.h2o_subs_path + '.hdr')\n h2o_est = h2o.read_band(-1)[:].flatten()\n\n p05 = np.percentile(h2o_est[h2o_est > lut_params.h2o_min], 5)\n p95 = np.percentile(h2o_est[h2o_est > lut_params.h2o_min], 95)\n margin = (p95-p05) * 0.25\n h2o_lo = max(lut_params.h2o_min, p05 - margin)\n h2o_hi = min(max_water, max(lut_params.h2o_min, p95 + margin))\n h2o_lut_grid = np.linspace(h2o_lo, h2o_hi,\n lut_params.num_h2o_lut_elements)\n\n if (np.abs(h2o_lut_grid[-1] - h2o_lut_grid[0]) < 0.03):\n new_h2o_lut_grid = np.linspace(h2o_lut_grid[0] - 0.1* np.ceil(lut_params.num_h2o_lut_elements/2.), \n h2o_lut_grid[0] + 0.1*np.ceil(lut_params.num_h2o_lut_elements/2.), \n lut_params.num_h2o_lut_elements)\n logging.warning('Warning: h2o lut grid from presolve detected as {}-{}, which is very narrow. Expanding to {}-{}. Advised to check presolve solutions thoroughly.'.format(h2o_lut_grid[0],h2o_lut_grid[-1], new_h2o_lut_grid[0], new_h2o_lut_grid[-1]))\n h2o_lut_grid = new_h2o_lut_grid\n else:\n h2o_lut_grid = np.linspace(lut_params.default_h2o_lut_range[0], lut_params.default_h2o_lut_range[1],\n lut_params.num_h2o_lut_elements)\n\n\n logging.info('Full (non-aerosol) LUTs:\\nElevation: {}\\nTo-sensor azimuth: {}\\nTo-sensor zenith: {}\\nh2o-vis: {}:'.format(elevation_lut_grid, to_sensor_azimuth_lut_grid, to_sensor_zenith_lut_grid, h2o_lut_grid))\n\n logging.info(paths.state_subs_path)\n if not exists(paths.state_subs_path) or \\\n not exists(paths.uncert_subs_path) or \\\n not exists(paths.rfl_subs_path):\n\n write_modtran_template(atmosphere_type='ATM_MIDLAT_SUMMER', fid=paths.fid, altitude_km=mean_altitude_km,\n dayofyear=dayofyear, latitude=mean_latitude, longitude=mean_longitude,\n to_sensor_azimuth=mean_to_sensor_azimuth, to_sensor_zenith=mean_to_sensor_zenith,\n gmtime=gmtime, elevation_km=mean_elevation_km, output_file=paths.modtran_template_path)\n\n logging.info('Writing main configuration file.')\n build_main_config(paths, lut_params, h2o_lut_grid, elevation_lut_grid, to_sensor_azimuth_lut_grid,\n to_sensor_zenith_lut_grid, mean_latitude, mean_longitude, dt, \n args.empirical_line == 1, args.n_cores, args.surface_category)\n\n # Run modtran retrieval\n logging.info('Running ISOFIT with full LUT')\n retrieval_full = isofit.Isofit(paths.modtran_config_path, level='INFO', logfile=args.log_file)\n retrieval_full.run()\n\n # clean up unneeded storage\n for to_rm in ['*r_k', '*t_k', '*tp7', '*wrn', '*psc', '*plt', '*7sc', '*acd']:\n cmd = 'rm ' + join(paths.lut_modtran_directory, to_rm)\n logging.info(cmd)\n os.system(cmd)\n\n if not exists(paths.rfl_working_path) or not exists(paths.uncert_working_path):\n # Empirical line\n logging.info('Empirical line inference')\n empirical_line(reference_radiance_file=paths.rdn_subs_path,\n reference_reflectance_file=paths.rfl_subs_path,\n reference_uncertainty_file=paths.uncert_subs_path,\n 
reference_locations_file=paths.loc_subs_path,\n segmentation_file=paths.lbl_working_path,\n input_radiance_file=paths.radiance_working_path,\n input_locations_file=paths.loc_working_path,\n output_reflectance_file=paths.rfl_working_path,\n output_uncertainty_file=paths.uncert_working_path,\n isofit_config=paths.modtran_config_path)\n\n logging.info('Done.')\n\nclass Pathnames():\n \"\"\" Class to determine and hold the large number of relative and absolute paths that are needed for isofit and\n MODTRAN configuration files.\n\n Args:\n args: an argparse Namespace object with all inputs\n \"\"\"\n\n def __init__(self, args: argparse.Namespace):\n\n # Determine FID based on sensor name\n if args.sensor == 'ang':\n self.fid = split(args.input_radiance)[-1][:18]\n logging.info('Flightline ID: %s' % self.fid)\n elif args.sensor == 'prism':\n self.fid = split(args.input_radiance)[-1][:18]\n logging.info('Flightline ID: %s' % self.fid)\n elif args.sensor == 'avcl':\n self.fid = split(args.input_radiance)[-1][:16]\n logging.info('Flightline ID: %s' % self.fid)\n elif args.sensor == 'neon':\n self.fid = split(args.input_radiance)[-1][:21]\n\n # Names from inputs\n self.aerosol_climatology = args.aerosol_climatology_path\n self.input_radiance_file = args.input_radiance\n self.input_loc_file = args.input_loc\n self.input_obs_file = args.input_obs\n self.working_directory = abspath(args.working_directory)\n\n self.lut_modtran_directory = abspath(join(self.working_directory, 'lut_full/'))\n\n if args.surface_path:\n self.surface_path = args.surface_path\n else:\n self.surface_path = os.getenv('ISOFIT_SURFACE_MODEL')\n if self.surface_path is None:\n logging.info('No surface model defined')\n\n # set up some sub-directories\n self.lut_h2o_directory = abspath(join(self.working_directory, 'lut_h2o/'))\n self.config_directory = abspath(join(self.working_directory, 'config/'))\n self.data_directory = abspath(join(self.working_directory, 'data/'))\n self.input_data_directory = abspath(join(self.working_directory, 'input/'))\n self.output_directory = abspath(join(self.working_directory, 'output/'))\n\n\n # define all output names\n rdn_fname = self.fid + '_rdn'\n self.rfl_working_path = abspath(join(self.output_directory, rdn_fname.replace('_rdn', '_rfl')))\n self.uncert_working_path = abspath(join(self.output_directory, rdn_fname.replace('_rdn', '_uncert')))\n self.lbl_working_path = abspath(join(self.output_directory, rdn_fname.replace('_rdn', '_lbl')))\n self.state_working_path = abspath(join(self.output_directory, rdn_fname.replace('_rdn', '_state')))\n self.surface_working_path = abspath(join(self.data_directory, 'surface.mat'))\n\n if args.copy_input_files is True:\n self.radiance_working_path = abspath(join(self.input_data_directory, rdn_fname))\n self.obs_working_path = abspath(join(self.input_data_directory, self.fid + '_obs'))\n self.loc_working_path = abspath(join(self.input_data_directory, self.fid + '_loc'))\n else:\n self.radiance_working_path = self.input_radiance_file\n self.obs_working_path = self.input_obs_file\n self.loc_working_path = self.input_loc_file\n\n if args.channelized_uncertainty_path:\n self.input_channelized_uncertainty_path = args.channelized_uncertainty_path\n else:\n self.input_channelized_uncertainty_path = os.getenv('ISOFIT_CHANNELIZED_UNCERTAINTY')\n\n self.channelized_uncertainty_working_path = abspath(join(self.data_directory, 'channelized_uncertainty.txt'))\n\n self.rdn_subs_path = abspath(join(self.input_data_directory, self.fid + '_subs_rdn'))\n self.obs_subs_path = 
abspath(join(self.input_data_directory, self.fid + '_subs_obs'))\n self.loc_subs_path = abspath(join(self.input_data_directory, self.fid + '_subs_loc'))\n self.rfl_subs_path = abspath(join(self.output_directory, self.fid + '_subs_rfl'))\n self.atm_coeff_path = abspath(join(self.output_directory, self.fid + '_subs_atm'))\n self.state_subs_path = abspath(join(self.output_directory, self.fid + '_subs_state'))\n self.uncert_subs_path = abspath(join(self.output_directory, self.fid + '_subs_uncert'))\n self.h2o_subs_path = abspath(join(self.output_directory, self.fid + '_subs_h2o'))\n\n self.wavelength_path = abspath(join(self.data_directory, 'wavelengths.txt'))\n\n self.modtran_template_path = abspath(join(self.config_directory, self.fid + '_modtran_tpl.json'))\n self.h2o_template_path = abspath(join(self.config_directory, self.fid + '_h2o_tpl.json'))\n\n self.modtran_config_path = abspath(join(self.config_directory, self.fid + '_modtran.json'))\n self.h2o_config_path = abspath(join(self.config_directory, self.fid + '_h2o.json'))\n\n if args.modtran_path:\n self.modtran_path = args.modtran_path\n else:\n self.modtran_path = os.getenv('MODTRAN_DIR')\n\n # isofit file should live at isofit/isofit/core/isofit.py\n self.isofit_path = os.path.dirname(os.path.dirname(os.path.dirname(isofit.__file__)))\n\n if args.sensor == 'ang':\n self.noise_path = join(self.isofit_path, 'data', 'avirisng_noise.txt')\n elif args.sensor == 'avcl':\n self.noise_path = join(self.isofit_path, 'data', 'avirisc_noise.txt')\n else:\n self.noise_path = None\n logging.info('no noise path found, proceeding without')\n #quit()\n\n self.aerosol_tpl_path = join(self.isofit_path, 'data', 'aerosol_template.json')\n self.rdn_factors_path = args.rdn_factors_path\n\n self.ray_temp_dir = args.ray_temp_dir\n\n def make_directories(self):\n \"\"\" Build required subdirectories inside working_directory\n \"\"\"\n for dpath in [self.working_directory, self.lut_h2o_directory, self.lut_modtran_directory, self.config_directory,\n self.data_directory, self.input_data_directory, self.output_directory]:\n if not exists(dpath):\n os.mkdir(dpath)\n\n def stage_files(self):\n \"\"\" Stage data files by copying into working directory\n \"\"\"\n files_to_stage = [(self.input_radiance_file, self.radiance_working_path, True),\n (self.input_obs_file, self.obs_working_path, True),\n (self.input_loc_file, self.loc_working_path, True),\n (self.surface_path, self.surface_working_path, False)]\n\n if (self.input_channelized_uncertainty_path is not None):\n files_to_stage.append((self.input_channelized_uncertainty_path, self.channelized_uncertainty_working_path, False))\n else:\n self.channelized_uncertainty_working_path = None\n logging.info('No valid channelized uncertainty file found, proceeding without uncertainty')\n\n\n for src, dst, hasheader in files_to_stage:\n if not exists(dst):\n logging.info('Staging %s to %s' % (src, dst))\n copyfile(src, dst)\n if hasheader:\n copyfile(src + '.hdr', dst + '.hdr')\n\n\nclass SerialEncoder(json.JSONEncoder):\n \"\"\"Encoder for json to help ensure json objects can be passed to the workflow manager.\n \"\"\"\n\n def default(self, obj):\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n else:\n return super(SerialEncoder, self).default(obj)\n\n\nclass LUTConfig:\n \"\"\" A look up table class, containing default grid options. 
All properties may be overridden with the optional\n input configuration file path\n\n Args:\n lut_config_file: configuration file to override default values\n \"\"\"\n\n def __init__(self, lut_config_file: str = None):\n if lut_config_file is not None:\n with open(lut_config_file, 'r') as f:\n lut_config = json.load(f)\n\n self.num_elev_lut_elements = 1\n self.num_h2o_lut_elements = 3\n self.num_to_sensor_azimuth_lut_elements = 1\n self.num_to_sensor_zenith_lut_elements = 1\n\n # Setting any of these to '1' will also remove that aerosol from the statevector\n self.num_aerosol_1_lut_elements = 1\n self.num_aerosol_2_lut_elements = 3\n self.num_aerosol_3_lut_elements = 3\n\n self.aerosol_1_lut_range = [0.001, 0.5]\n self.aerosol_2_lut_range = [0.001, 0.5]\n self.aerosol_3_lut_range = [0.001, 0.5]\n\n self.h2o_min = 0.2\n\n self.zenith_min_spacing = 2\n\n self.default_h2o_lut_range = [0.05, 5]\n\n\n # overwrite anything that comes in from the config file\n if lut_config_file is not None:\n for key in lut_config:\n if key in self.__dict__:\n setattr(self, key, lut_config[key])\n\n\ndef load_climatology(config_path: str, latitude: float, longitude: float, acquisition_datetime: datetime,\n isofit_path: str, lut_params: LUTConfig) -> (np.array, np.array, np.array):\n \"\"\" Load climatology data, based on location and configuration\n\n Args:\n config_path: path to the base configuration directory for isofit\n latitude: latitude to set for the segment (mean of acquisition suggested)\n longitude: latitude to set for the segment (mean of acquisition suggested)\n acquisition_datetime: datetime to use for the segment( mean of acquisition suggested)\n isofit_path: base path to isofit installation (needed for data path references)\n lut_params: parameters to use to define lut grid\n\n :Returns\n tuple containing:\n aerosol_state_vector - A dictionary that defines the aerosol state vectors for isofit\n aerosol_lut_grid - A dictionary of the aerosol lookup table (lut) grid to be explored\n aerosol_model_path - A path to the location of the aerosol model to use with MODTRAN.\n\n \"\"\"\n\n aerosol_model_path = join(isofit_path, 'data', 'aerosol_model.txt')\n aerosol_state_vector = {}\n aerosol_lut_grid = {}\n aerosol_lut_ranges = [lut_params.aerosol_1_lut_range, lut_params.aerosol_2_lut_range, lut_params.aerosol_2_lut_range]\n num_aerosol_lut_elements = [lut_params.num_aerosol_1_lut_elements, lut_params.num_aerosol_2_lut_elements, lut_params.num_aerosol_3_lut_elements]\n for _a, alr in enumerate(aerosol_lut_ranges):\n if num_aerosol_lut_elements[_a] != 1:\n aerosol_state_vector['AERFRAC_{}'.format(_a)] = {\n \"bounds\": [float(alr[0]), float(alr[1])],\n \"scale\": 1,\n \"init\": float((alr[1] - alr[0]) / 10. + alr[0]),\n \"prior_sigma\": 10.0,\n \"prior_mean\": float((alr[1] - alr[0]) / 10. 
+ alr[0])}\n\n aerosol_lut = np.linspace(alr[0], alr[1], num_aerosol_lut_elements[_a])\n aerosol_lut_grid['AERFRAC_{}'.format(_a)] = [float(q) for q in aerosol_lut]\n\n logging.info('Loading Climatology')\n # If a configuration path has been provided, use it to get relevant info\n if config_path is not None:\n month = acquisition_datetime.timetuple().tm_mon\n year = acquisition_datetime.timetuple().tm_year\n with open(config_path, 'r') as fin:\n for case in json.load(fin)['cases']:\n match = True\n logging.info('matching', latitude, longitude, month, year)\n for criterion, interval in case['criteria'].items():\n logging.info(criterion, interval, '...')\n if criterion == 'latitude':\n if latitude < interval[0] or latitude > interval[1]:\n match = False\n if criterion == 'longitude':\n if longitude < interval[0] or longitude > interval[1]:\n match = False\n if criterion == 'month':\n if month < interval[0] or month > interval[1]:\n match = False\n if criterion == 'year':\n if year < interval[0] or year > interval[1]:\n match = False\n\n if match:\n aerosol_state_vector = case['aerosol_state_vector']\n aerosol_lut_grid = case['aerosol_lut_grid']\n aerosol_model_path = case['aerosol_mdl_path']\n break\n\n logging.info('Climatology Loaded. Aerosol State Vector:\\n{}\\nAerosol LUT Grid:\\n{}\\nAerosol model path:{}'.format(\n aerosol_state_vector, aerosol_lut_grid, aerosol_model_path))\n return aerosol_state_vector, aerosol_lut_grid, aerosol_model_path\n\n\ndef calc_modtran_max_water(paths: Pathnames) -> float:\n \"\"\"MODTRAN may put a ceiling on \"legal\" H2O concentrations. This function calculates that ceiling. The intended\n use is to make sure the LUT does not contain useless gridpoints above it.\n\n Args:\n paths: object containing references to all relevant file locations\n\n Returns:\n max_water - maximum MODTRAN H2OSTR value for provided obs conditions\n \"\"\"\n\n max_water = None\n # TODO: this is effectively redundant from the radiative_transfer->modtran. 
Either devise a way\n # to port in from there, or put in utils to reduce redundancy.\n xdir = {\n 'linux': 'linux',\n 'darwin': 'macos',\n 'windows': 'windows'\n }\n name = 'H2O_bound_test'\n filebase = os.path.join(paths.lut_h2o_directory, name)\n with open(paths.h2o_template_path, 'r') as f:\n bound_test_config = json.load(f)\n\n bound_test_config['MODTRAN'][0]['MODTRANINPUT']['NAME'] = name\n bound_test_config['MODTRAN'][0]['MODTRANINPUT']['ATMOSPHERE']['H2OSTR'] = 50\n with open(filebase + '.json', 'w') as fout:\n fout.write(json.dumps(bound_test_config, cls=SerialEncoder, indent=4, sort_keys=True))\n\n cwd = os.getcwd()\n os.chdir(paths.lut_h2o_directory)\n cmd = os.path.join(paths.modtran_path, 'bin', xdir[platform], 'mod6c_cons ' + filebase + '.json')\n try:\n subprocess.call(cmd, shell=True, timeout=10)\n except:\n pass\n os.chdir(cwd)\n\n with open(filebase + '.tp6', errors='ignore') as tp6file:\n for count, line in enumerate(tp6file):\n if 'The water column is being set to the maximum' in line:\n max_water = line.split(',')[1].strip()\n max_water = float(max_water.split(' ')[0])\n break\n\n if max_water is None:\n logging.error('Could not find MODTRAN H2O upper bound in file {}'.format(filebase + '.tp6'))\n raise KeyError('Could not find MODTRAN H2O upper bound')\n\n return max_water\n\n\ndef find_angular_seeds(angle_data_input: np.array, num_points: int, units: str = 'd') -> np.array:\n \"\"\" Find either angular data 'center points' (num_points = 1), or a lut set that spans\n angle variation in a systematic fashion.\n\n Args:\n angle_data_input: set of angle data to use to find center points\n num_points: the number of points to find - if 1, this will return a single point (the 'centerpoint'), if > 1,\n this will return a numpy array spanning the specified number of poitns\n units: specifies if data are in degrees (default) or radians\n\n :Returns:\n central_angle - angular data center point or lut set spanning space\n\n \"\"\"\n\n # Convert everything to radians so we don't have to track throughout\n if units == 'r':\n angle_data = np.rad2deg(angle_data_input)\n else:\n angle_data = angle_data_input.copy()\n\n spatial_data = np.hstack([np.cos(np.deg2rad(angle_data)).reshape(-1, 1),\n np.sin(np.deg2rad(angle_data)).reshape(-1, 1)])\n\n # find which quadrants have data\n quadrants = np.zeros((2,2))\n if np.any(np.logical_and(spatial_data[:,0] > 0, spatial_data[:,1] > 0 )):\n quadrants[1,0] = 1\n if np.any(np.logical_and(spatial_data[:,0] > 0, spatial_data[:,1] < 0 )):\n quadrants[1,1] += 1\n if np.any(np.logical_and(spatial_data[:,0] < 0, spatial_data[:,1] > 0 )):\n quadrants[0,0] += 1\n if np.any(np.logical_and(spatial_data[:,0] < 0, spatial_data[:,1] < 0 )):\n quadrants[0,1] += 1\n\n # Handle the case where angles are < 180 degrees apart\n if np.sum(quadrants) < 3 and num_points != 1:\n if (np.sum(quadrants[1,:]) == 2):\n # If angles cross the 0-degree line:\n angle_spread = np.linspace(np.min(angle_data+180), np.max(angle_data+180), num_points) - 180\n return angle_spread\n else:\n # Otherwise, just space things out:\n return np.linspace(np.min(angle_data), np.max(angle_data), num_points)\n else:\n # If we're greater than 180 degree spread, there's no universal answer. Try GMM.\n\n if num_points == 2:\n logging.warning('2 angle interpolation selected when angle divergence > 180. 
'\n 'At least 3 points are recommended')\n\n # We initialize the GMM with a static seed for repeatability across runs\n gmm = mixture.GaussianMixture(n_components=num_points, covariance_type='full',\n random_state=1)\n gmm.fit(spatial_data)\n central_angles = np.degrees(np.arctan2(gmm.means_[:,1], gmm.means_[:,0]))\n if (num_points == 1):\n return central_angles[0]\n\n ca_quadrants = np.zeros((2, 2))\n if np.any(np.logical_and(gmm.means_[:, 0] > 0, gmm.means_[:, 1] > 0)):\n ca_quadrants[1, 0] = 1\n elif np.any(np.logical_and(gmm.means_[:, 0] > 0, gmm.means_[:, 1] < 0)):\n ca_quadrants[1, 1] += 1\n elif np.any(np.logical_and(gmm.means_[:, 0] < 0, gmm.means_[:, 1] > 0)):\n ca_quadrants[0, 0] += 1\n elif np.any(np.logical_and(gmm.means_[:, 0] < 0, gmm.means_[:, 1] < 0)):\n ca_quadrants[0, 1] += 1\n\n if np.sum(ca_quadrants) < np.sum(quadrants):\n logging.warning('GMM angles {} span {} quadrants, while data spans {} quadrants'.format(central_angles,\n np.sum(ca_quadrants), np.sum(quadrants)))\n\n return central_angles\n\n\ndef get_metadata_from_obs(obs_file: str, lut_params: LUTConfig, trim_lines: int = 5,\n max_flight_duration_h: int = 8, nodata_value: float = -9999) -> \\\n (List, bool, float, float, float, np.array, List, List):\n \"\"\" Get metadata needed for complete runs from the observation file\n (bands: path length, to-sensor azimuth, to-sensor zenith, to-sun azimuth,\n to-sun zenith, phase, slope, aspect, cosine i, UTC time).\n\n Args:\n obs_file: file name to pull data from\n lut_params: parameters to use to define lut grid\n trim_lines: number of lines to ignore at beginning and end of file (good if lines contain values that are\n erroneous but not nodata\n max_flight_duration_h: maximum length of the current acquisition, used to check if we've lapped a UTC day\n nodata_value: value to ignore from location file\n\n :Returns:\n tuple containing:\n h_m_s - list of the mean-time hour, minute, and second within the line\n increment_day - indicator of whether the UTC day has been changed since the beginning of the line time\n mean_path_km - mean distance between sensor and ground in km for good data\n mean_to_sensor_azimuth - mean to-sensor-azimuth for good data\n mean_to_sensor_zenith_rad - mean to-sensor-zenith in radians for good data\n valid - boolean array indicating which pixels were NOT nodata\n to_sensor_azimuth_lut_grid - the to-sensor azimuth angle look up table for good data\n to_sensor_zenith_lut_grid - the to-sensor zenith look up table for good data\n \"\"\"\n obs_dataset = gdal.Open(obs_file, gdal.GA_ReadOnly)\n\n # Initialize values to populate\n valid = np.zeros((obs_dataset.RasterYSize, obs_dataset.RasterXSize), dtype=bool)\n\n path_km = np.zeros((obs_dataset.RasterYSize, obs_dataset.RasterXSize))\n to_sensor_azimuth = np.zeros((obs_dataset.RasterYSize, obs_dataset.RasterXSize))\n to_sensor_zenith = np.zeros((obs_dataset.RasterYSize, obs_dataset.RasterXSize))\n time = np.zeros((obs_dataset.RasterYSize, obs_dataset.RasterXSize))\n\n for line in range(obs_dataset.RasterYSize):\n\n # Read line in\n obs_line = obs_dataset.ReadAsArray(0, line, obs_dataset.RasterXSize, 1)\n\n # Populate valid\n valid[line,:] = np.logical_not(np.any(np.isclose(obs_line,nodata_value),axis=0))\n\n path_km[line,:] = obs_line[0, ...] 
/ 1000.\n to_sensor_azimuth[line,:] = obs_line[1, ...]\n to_sensor_zenith[line,:] = obs_line[2, ...]\n time[line,:] = obs_line[9, ...]\n\n if trim_lines != 0:\n actual_valid = valid.copy()\n valid[:trim_lines,:] = False\n valid[-trim_lines:,:] = False\n\n mean_path_km = np.mean(path_km[valid])\n del path_km\n\n mean_to_sensor_azimuth = find_angular_seeds(to_sensor_azimuth[valid], 1) % 360\n mean_to_sensor_zenith = 180 - find_angular_seeds(to_sensor_zenith[valid], 1)\n\n #geom_margin = EPS * 2.0\n if lut_params.num_to_sensor_zenith_lut_elements == 1:\n to_sensor_zenith_lut_grid = None\n else:\n to_sensor_zenith_lut_grid = np.sort(180 - find_angular_seeds(to_sensor_zenith[valid], lut_params.num_to_sensor_zenith_lut_elements))\n if (to_sensor_zenith_lut_grid[1] - to_sensor_zenith_lut_grid[0] < lut_params.zenith_min_spacing):\n to_sensor_zenith_lut_grid = None\n\n if lut_params.num_to_sensor_azimuth_lut_elements == 1:\n to_sensor_azimuth_lut_grid = None\n else:\n to_sensor_azimuth_lut_grid = np.sort(np.array([x % 360 for x in find_angular_seeds(to_sensor_azimuth[valid], lut_params.num_to_sensor_azimuth_lut_elements)]))\n\n del to_sensor_azimuth\n del to_sensor_zenith\n\n # Make time calculations\n mean_time = np.mean(time[valid])\n min_time = np.min(time[valid])\n max_time = np.max(time[valid])\n\n increment_day = False\n # UTC day crossover corner case\n if (max_time > 24 - max_flight_duration_h and\n min_time < max_flight_duration_h):\n time[np.logical_and(time < max_flight_duration_h,valid)] += 24\n mean_time = np.mean(time[valid])\n\n # This means the majority of the line was really in the next UTC day,\n # increment the line accordingly\n if (mean_time > 24):\n mean_time -= 24\n increment_day = True\n\n # Calculate hour, minute, second\n h_m_s = [np.floor(mean_time)]\n h_m_s.append(np.floor((mean_time - h_m_s[-1]) * 60))\n h_m_s.append(np.floor((mean_time - h_m_s[-2] - h_m_s[-1] / 60.) 
* 3600))\n\n if trim_lines != 0:\n valid = actual_valid\n\n return h_m_s, increment_day, mean_path_km, mean_to_sensor_azimuth, mean_to_sensor_zenith, valid, \\\n to_sensor_azimuth_lut_grid, to_sensor_zenith_lut_grid\n\n\ndef get_metadata_from_loc(loc_file: str, lut_params: LUTConfig, trim_lines: int = 5, nodata_value: float = -9999) -> \\\n (float, float, float, np.array):\n \"\"\" Get metadata needed for complete runs from the location file (bands long, lat, elev).\n\n Args:\n loc_file: file name to pull data from\n lut_params: parameters to use to define lut grid\n trim_lines: number of lines to ignore at beginning and end of file (good if lines contain values that are\n erroneous but not nodata\n nodata_value: value to ignore from location file\n\n :Returns:\n tuple containing:\n mean_latitude - mean latitude of good values from the location file\n mean_longitude - mean latitude of good values from the location file\n mean_elevation_km - mean ground estimate of good values from the location file\n elevation_lut_grid - the elevation look up table, based on globals and values from location file\n \"\"\"\n\n loc_dataset = gdal.Open(loc_file, gdal.GA_ReadOnly)\n\n loc_data = np.zeros((loc_dataset.RasterCount, loc_dataset.RasterYSize, loc_dataset.RasterXSize))\n for line in range(loc_dataset.RasterYSize):\n # Read line in\n loc_data[:,line:line+1,:] = loc_dataset.ReadAsArray(0, line, loc_dataset.RasterXSize, 1)\n\n valid = np.logical_not(np.any(loc_data == nodata_value,axis=0))\n if trim_lines != 0:\n valid[:trim_lines, :] = False\n valid[-trim_lines:, :] = False\n\n # Grab zensor position and orientation information\n mean_latitude = find_angular_seeds(loc_data[1,valid].flatten(),1)\n mean_longitude = find_angular_seeds(-1 * loc_data[0,valid].flatten(),1)\n\n mean_elevation_km = np.mean(loc_data[2,valid]) / 1000.0\n\n # make elevation grid\n if lut_params.num_elev_lut_elements == 1:\n elevation_lut_grid = None\n else:\n min_elev = np.min(loc_data[2, valid])/1000.\n max_elev = np.max(loc_data[2, valid])/1000.\n elevation_lut_grid = np.linspace(max(min_elev, EPS),\n max_elev,\n lut_params.num_elev_lut_elements)\n\n return mean_latitude, mean_longitude, mean_elevation_km, elevation_lut_grid\n\n\n\ndef build_presolve_config(paths: Pathnames, h2o_lut_grid: np.array, n_cores: int=-1,\n use_emp_line=0, surface_category=\"multicomponent_surface\"):\n \"\"\" Write an isofit config file for a presolve, with limited info.\n\n Args:\n paths: object containing references to all relevant file locations\n h2o_lut_grid: the water vapor look up table grid isofit should use for this solve\n n_cores: number of cores to use in processing\n \"\"\"\n\n # Determine number of spectra included in each retrieval. 
If we are\n # operating on segments, this will average down instrument noise\n if use_emp_line:\n spectra_per_inversion = SEGMENTATION_SIZE\n else: \n spectra_per_inversion = 1 \n\n radiative_transfer_config = {\n \"radiative_transfer_engines\": {\n \"vswir\": {\n \"engine_name\": 'modtran',\n \"lut_path\": paths.lut_h2o_directory,\n \"template_file\": paths.h2o_template_path,\n \"engine_base_dir\": paths.modtran_path,\n \"lut_names\": [\"H2OSTR\"],\n \"statevector_names\": [\"H2OSTR\"],\n }\n },\n \"statevector\": {\n \"H2OSTR\": {\n \"bounds\": [float(np.min(h2o_lut_grid)), float(np.max(h2o_lut_grid))],\n \"scale\": 0.01,\n \"init\": np.percentile(h2o_lut_grid,25),\n \"prior_sigma\": 100.0,\n \"prior_mean\": 1.5}\n },\n \"lut_grid\": {\n \"H2OSTR\": [float(x) for x in h2o_lut_grid],\n },\n \"unknowns\": {\n \"H2O_ABSCO\": 0.0\n }\n }\n\n # make isofit configuration\n isofit_config_h2o = {'ISOFIT_base': paths.isofit_path,\n 'output': {'estimated_state_file': paths.h2o_subs_path},\n 'input': {},\n 'forward_model': {\n 'instrument': {'wavelength_file': paths.wavelength_path,\n 'integrations': spectra_per_inversion,\n 'unknowns': {\n 'uncorrelated_radiometric_uncertainty': UNCORRELATED_RADIOMETRIC_UNCERTAINTY}},\n 'surface': {\"surface_category\": surface_category,\n 'surface_file': paths.surface_working_path,\n 'select_on_init': True},\n 'radiative_transfer': radiative_transfer_config},\n \"implementation\": {\n \"ray_temp_dir\": paths.ray_temp_dir,\n 'inversion': {'windows': INVERSION_WINDOWS},\n \"n_cores\": n_cores}\n }\n\n if paths.channelized_uncertainty_working_path is not None:\n isofit_config_h2o['forward_model']['unknowns'][\n 'channelized_radiometric_uncertainty_file'] = paths.channelized_uncertainty_working_path\n\n if paths.noise_path is not None:\n isofit_config_h2o['forward_model']['instrument']['parametric_noise_file'] = paths.noise_path\n else:\n isofit_config_h2o['forward_model']['instrument']['SNR'] = 1000\n\n if paths.rdn_factors_path:\n isofit_config_h2o['input']['radiometry_correction_file'] = paths.rdn_factors_path\n\n if use_emp_line == 1:\n isofit_config_h2o['input']['measured_radiance_file'] = paths.rdn_subs_path\n isofit_config_h2o['input']['loc_file'] = paths.loc_subs_path\n isofit_config_h2o['input']['obs_file'] = paths.obs_subs_path\n else:\n isofit_config_h2o['input']['measured_radiance_file'] = paths.radiance_working_path\n isofit_config_h2o['input']['loc_file'] = paths.loc_working_path\n isofit_config_h2o['input']['obs_file'] = paths.obs_working_path\n\n\n\n # write modtran_template\n with open(paths.h2o_config_path, 'w') as fout:\n fout.write(json.dumps(isofit_config_h2o, cls=SerialEncoder, indent=4, sort_keys=True))\n\n\ndef build_main_config(paths: Pathnames, lut_params: LUTConfig, h2o_lut_grid: np.array = None,\n elevation_lut_grid: np.array = None, to_sensor_azimuth_lut_grid: np.array = None,\n to_sensor_zenith_lut_grid: np.array = None, mean_latitude: float = None,\n mean_longitude: float = None, dt: datetime = None, use_emp_line: bool = True, \n n_cores: int = -1, surface_category='multicomponent_surface'):\n \"\"\" Write an isofit config file for the main solve, using the specified pathnames and all given info\n\n Args:\n paths: object containing references to all relevant file locations\n lut_params: configuration parameters for the lut grid\n h2o_lut_grid: the water vapor look up table grid isofit should use for this solve\n elevation_lut_grid: the ground elevation look up table grid isofit should use for this solve\n to_sensor_azimuth_lut_grid: the 
to-sensor azimuth angle look up table grid isofit should use for this solve\n to_sensor_zenith_lut_grid: the to-sensor zenith angle look up table grid isofit should use for this solve\n mean_latitude: the latitude isofit should use for this solve\n mean_longitude: the longitude isofit should use for this solve\n dt: the datetime object corresponding to this flightline to use for this solve\n use_emp_line: flag whether or not to set up for the empirical line estimation\n n_cores: the number of cores to use during processing\n\n \"\"\"\n\n # Determine number of spectra included in each retrieval. If we are\n # operating on segments, this will average down instrument noise\n if use_emp_line:\n spectra_per_inversion = SEGMENTATION_SIZE\n else: \n spectra_per_inversion = 1 \n\n radiative_transfer_config = {\n\n \"radiative_transfer_engines\": {\n \"vswir\": {\n \"engine_name\": 'modtran',\n \"lut_path\": paths.lut_modtran_directory,\n \"aerosol_template_file\": paths.aerosol_tpl_path,\n \"template_file\": paths.modtran_template_path,\n \"engine_base_dir\": paths.modtran_path,\n #lut_names - populated below\n #statevector_names - populated below\n }\n },\n \"statevector\": {\n \"H2OSTR\": {\n \"bounds\": [h2o_lut_grid[0], h2o_lut_grid[-1]],\n \"scale\": 0.01,\n \"init\": (h2o_lut_grid[1] + h2o_lut_grid[-1]) / 2.0,\n \"prior_sigma\": 100.0,\n \"prior_mean\": (h2o_lut_grid[1] + h2o_lut_grid[-1]) / 2.0,\n }\n },\n \"lut_grid\": {},\n \"unknowns\": {\n \"H2O_ABSCO\": 0.0\n }\n }\n if h2o_lut_grid is not None:\n radiative_transfer_config['lut_grid']['H2OSTR'] = [max(0.0, float(q)) for q in h2o_lut_grid]\n if elevation_lut_grid is not None:\n radiative_transfer_config['lut_grid']['GNDALT'] = [max(0.0, float(q)) for q in elevation_lut_grid]\n if to_sensor_azimuth_lut_grid is not None:\n radiative_transfer_config['lut_grid']['TRUEAZ'] = [float(q) for q in to_sensor_azimuth_lut_grid]\n if to_sensor_zenith_lut_grid is not None:\n radiative_transfer_config['lut_grid']['OBSZEN'] = [float(q) for q in to_sensor_zenith_lut_grid] # modtran convension\n\n # add aerosol elements from climatology\n aerosol_state_vector, aerosol_lut_grid, aerosol_model_path = \\\n load_climatology(paths.aerosol_climatology, mean_latitude, mean_longitude, dt,\n paths.isofit_path, lut_params=lut_params)\n radiative_transfer_config['statevector'].update(aerosol_state_vector)\n radiative_transfer_config['lut_grid'].update(aerosol_lut_grid)\n radiative_transfer_config['radiative_transfer_engines']['vswir']['aerosol_model_file'] = aerosol_model_path\n\n # MODTRAN should know about our whole LUT grid and all of our statevectors, so copy them in\n radiative_transfer_config['radiative_transfer_engines']['vswir']['statevector_names'] = list(radiative_transfer_config['statevector'].keys())\n radiative_transfer_config['radiative_transfer_engines']['vswir']['lut_names'] = list(radiative_transfer_config['lut_grid'].keys())\n\n # make isofit configuration\n isofit_config_modtran = {'ISOFIT_base': paths.isofit_path,\n 'input': {},\n 'output': {},\n 'forward_model': {\n 'instrument': {'wavelength_file': paths.wavelength_path,\n 'integrations': spectra_per_inversion,\n 'unknowns': {\n 'uncorrelated_radiometric_uncertainty': UNCORRELATED_RADIOMETRIC_UNCERTAINTY}},\n \"surface\": {\"surface_file\": paths.surface_working_path,\n \"surface_category\": surface_category,\n \"select_on_init\": True},\n \"radiative_transfer\": radiative_transfer_config},\n \"implementation\": {\n \"ray_temp_dir\": paths.ray_temp_dir,\n \"inversion\": {\"windows\": 
INVERSION_WINDOWS},\n \"n_cores\": n_cores}\n }\n\n if use_emp_line == 1:\n isofit_config_modtran['input']['measured_radiance_file'] = paths.rdn_subs_path\n isofit_config_modtran['input']['loc_file'] = paths.loc_subs_path\n isofit_config_modtran['input']['obs_file'] = paths.obs_subs_path\n isofit_config_modtran['output']['estimated_state_file'] = paths.state_subs_path\n isofit_config_modtran['output']['posterior_uncertainty_file'] = paths.uncert_subs_path\n isofit_config_modtran['output']['estimated_reflectance_file'] = paths.rfl_subs_path\n isofit_config_modtran['output']['atmospheric_coefficients_file'] = paths.atm_coeff_path\n else:\n isofit_config_modtran['input']['measured_radiance_file'] = paths.radiance_working_path\n isofit_config_modtran['input']['loc_file'] = paths.loc_working_path\n isofit_config_modtran['input']['obs_file'] = paths.obs_working_path\n isofit_config_modtran['output']['posterior_uncertainty_file'] = paths.uncert_working_path\n isofit_config_modtran['output']['estimated_reflectance_file'] = paths.rfl_working_path\n isofit_config_modtran['output']['estimated_state_file'] = paths.state_working_path\n\n if paths.channelized_uncertainty_working_path is not None:\n isofit_config_modtran['forward_model']['instrument']['unknowns'][\n 'channelized_radiometric_uncertainty_file'] = paths.channelized_uncertainty_working_path\n\n if paths.noise_path is not None:\n isofit_config_modtran['forward_model']['instrument']['parametric_noise_file'] = paths.noise_path\n else:\n isofit_config_modtran['forward_model']['instrument']['SNR'] = 1000\n\n if paths.rdn_factors_path:\n isofit_config_modtran['input']['radiometry_correction_file'] = \\\n paths.rdn_factors_path\n\n # write modtran_template\n with open(paths.modtran_config_path, 'w') as fout:\n fout.write(json.dumps(isofit_config_modtran, cls=SerialEncoder, indent=4, sort_keys=True))\n\n\ndef write_modtran_template(atmosphere_type: str, fid: str, altitude_km: float, dayofyear: int,\n latitude: float, longitude: float, to_sensor_azimuth: float, to_sensor_zenith: float,\n gmtime: float, elevation_km: float, output_file: str, ihaze_type: str = 'AER_RURAL'):\n \"\"\" Write a MODTRAN template file for use by isofit look up tables\n\n Args:\n atmosphere_type: label for the type of atmospheric profile to use in modtran\n fid: flight line id (name)\n altitude_km: altitude of the sensor in km\n dayofyear: the current day of the given year\n latitude: acquisition latitude\n longitude: acquisition longitude\n to_sensor_azimuth: azimuth view angle to the sensor, in degrees (AVIRIS convention)\n to_sensor_zenith: azimuth view angle to the sensor, in degrees (MODTRAN convention: 180 - AVIRIS convention)\n gmtime: greenwich mean time\n elevation_km: elevation of the land surface in km\n output_file: location to write the modtran template file to\n\n \"\"\"\n # make modtran configuration\n h2o_template = {\"MODTRAN\": [{\n \"MODTRANINPUT\": {\n \"NAME\": fid,\n \"DESCRIPTION\": \"\",\n \"CASE\": 0,\n \"RTOPTIONS\": {\n \"MODTRN\": \"RT_CORRK_FAST\",\n \"LYMOLC\": False,\n \"T_BEST\": False,\n \"IEMSCT\": \"RT_SOLAR_AND_THERMAL\",\n \"IMULT\": \"RT_DISORT\",\n \"DISALB\": False,\n \"NSTR\": 8,\n \"SOLCON\": 0.0\n },\n \"ATMOSPHERE\": {\n \"MODEL\": atmosphere_type,\n \"M1\": atmosphere_type,\n \"M2\": atmosphere_type,\n \"M3\": atmosphere_type,\n \"M4\": atmosphere_type,\n \"M5\": atmosphere_type,\n \"M6\": atmosphere_type,\n \"CO2MX\": 410.0,\n \"H2OSTR\": 1.0,\n \"H2OUNIT\": \"g\",\n \"O3STR\": 0.3,\n \"O3UNIT\": \"a\"\n },\n \"AEROSOLS\": 
{\"IHAZE\": ihaze_type},\n \"GEOMETRY\": {\n \"ITYPE\": 3,\n \"H1ALT\": altitude_km,\n \"IDAY\": dayofyear,\n \"IPARM\": 11,\n \"PARM1\": latitude,\n \"PARM2\": longitude,\n \"TRUEAZ\": to_sensor_azimuth,\n \"OBSZEN\": to_sensor_zenith,\n \"GMTIME\": gmtime\n },\n \"SURFACE\": {\n \"SURFTYPE\": \"REFL_LAMBER_MODEL\",\n \"GNDALT\": elevation_km,\n \"NSURF\": 1,\n \"SURFP\": {\"CSALB\": \"LAMB_CONST_0_PCT\"}\n },\n \"SPECTRAL\": {\n \"V1\": 340.0,\n \"V2\": 2520.0,\n \"DV\": 0.1,\n \"FWHM\": 0.1,\n \"YFLAG\": \"R\",\n \"XFLAG\": \"N\",\n \"FLAGS\": \"NT A \",\n \"BMNAME\": \"p1_2013\"\n },\n \"FILEOPTIONS\": {\n \"NOPRNT\": 2,\n \"CKPRNT\": True\n }\n }\n }]}\n\n # write modtran_template\n with open(output_file, 'w') as fout:\n fout.write(json.dumps(h2o_template, cls=SerialEncoder, indent=4, sort_keys=True))\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.isclose", "numpy.min", "numpy.mean", "numpy.deg2rad", "numpy.max", "numpy.logical_and", "numpy.savetxt", "numpy.zeros", "numpy.percentile", "numpy.loadtxt", "numpy.arctan2", "numpy.floor", "numpy.ceil", "numpy.sum", "numpy.rad2deg", "numpy.ones", "numpy.any", "sklearn.mixture.GaussianMixture", "numpy.abs", "numpy.linspace" ] ]
s0mya/CompilerGym
[ "8a4fd04cc28f47b6b710ea8fa54f526e994ca764" ]
[ "compiler_gym/views/observation_space_spec.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nimport json\nfrom typing import Callable, Optional, Union\n\nimport networkx as nx\nimport numpy as np\nfrom gym.spaces import Box, Space\n\nfrom compiler_gym.service.proto import Observation, ObservationSpace, ScalarRange\nfrom compiler_gym.spaces.scalar import Scalar\nfrom compiler_gym.spaces.sequence import Sequence\nfrom compiler_gym.util.gym_type_hints import ObservationType\n\n\ndef _json2nx(observation):\n json_data = json.loads(observation.string_value)\n return nx.readwrite.json_graph.node_link_graph(\n json_data, multigraph=True, directed=True\n )\n\n\ndef _scalar_range2tuple(sr: ScalarRange, defaults=(-np.inf, np.inf)):\n \"\"\"Convert a ScalarRange to a tuple of (min, max) bounds.\"\"\"\n return (\n sr.min.value if sr.HasField(\"min\") else defaults[0],\n sr.max.value if sr.HasField(\"max\") else defaults[1],\n )\n\n\nclass ObservationSpaceSpec:\n \"\"\"Specification of an observation space.\n\n :ivar id: The name of the observation space.\n :vartype id: str\n\n :ivar index: The index into the list of observation spaces that the service\n supports.\n :vartype index: int\n\n :ivar space: The space.\n :vartype space: Space\n\n :ivar deterministic: Whether the observation space is deterministic.\n :vartype deterministic: bool\n\n :ivar platform_dependent: Whether the observation values depend on the\n execution environment of the service.\n :vartype platform_dependent: bool\n\n :ivar default_value: A default observation. This value will be returned by\n :func:`CompilerEnv.step() <compiler_gym.envs.CompilerEnv.step>` if\n :func:`CompilerEnv.observation_space <compiler_gym.envs.CompilerEnv.observation_space>`\n is set and the service terminates.\n \"\"\"\n\n def __init__(\n self,\n id: str,\n index: int,\n space: Space,\n translate: Callable[[Union[ObservationType, Observation]], ObservationType],\n to_string: Callable[[ObservationType], str],\n deterministic: bool,\n platform_dependent: bool,\n default_value: ObservationType,\n ):\n \"\"\"Constructor. Don't call directly, use make_derived_space().\"\"\"\n self.id: str = id\n self.index: int = index\n self.space = space\n self.deterministic = deterministic\n self.platform_dependent = platform_dependent\n self.default_value = default_value\n self.translate = translate\n self.to_string = to_string\n\n def __hash__(self) -> int:\n # Quickly hash observation spaces by comparing the index into the list\n # of spaces returned by the environment. 
This means that you should not\n # hash between observation spaces from different environments as this\n # will cause collisions, e.g.\n #\n # # not okay:\n # >>> obs = set(env.observation.spaces).union(\n # other_env.observation.spaces\n # )\n #\n # If you want to hash between environments, consider using the string id\n # to identify the observation spaces.\n return self.index\n\n def __repr__(self) -> str:\n return f\"ObservationSpaceSpec({self.id})\"\n\n def __eq__(self, rhs) -> bool:\n \"\"\"Equality check.\"\"\"\n if not isinstance(rhs, ObservationSpaceSpec):\n return False\n return (\n self.id == rhs.id\n and self.index == rhs.index\n and self.space == rhs.space\n and self.platform_dependent == rhs.platform_dependent\n and self.deterministic == rhs.deterministic\n )\n\n @classmethod\n def from_proto(cls, index: int, proto: ObservationSpace):\n \"\"\"Construct a space from an ObservationSpace message.\"\"\"\n shape_type = proto.WhichOneof(\"shape\")\n\n def make_box(scalar_range_list, dtype, defaults):\n bounds = [_scalar_range2tuple(r, defaults) for r in scalar_range_list]\n return Box(\n low=np.array([b[0] for b in bounds], dtype=dtype),\n high=np.array([b[1] for b in bounds], dtype=dtype),\n dtype=dtype,\n )\n\n def make_scalar(scalar_range, dtype, defaults):\n scalar_range_tuple = _scalar_range2tuple(scalar_range, defaults)\n return Scalar(\n min=dtype(scalar_range_tuple[0]),\n max=dtype(scalar_range_tuple[1]),\n dtype=dtype,\n )\n\n def make_seq(size_range, dtype, defaults, scalar_range=None):\n return Sequence(\n size_range=_scalar_range2tuple(size_range, defaults),\n dtype=dtype,\n opaque_data_format=proto.opaque_data_format,\n scalar_range=scalar_range,\n )\n\n # Translate from protocol buffer specification to python. There are\n # three variables to derive:\n # (1) space: the gym.Space instance describing the space.\n # (2) translate: is a callback that translates from an Observation\n # message to a python type.\n # (3) to_string: is a callback that translates from a python type to a\n # string for printing.\n if proto.opaque_data_format == \"json://networkx/MultiDiGraph\":\n # TODO(cummins): Add a Graph space.\n space = make_seq(proto.string_size_range, str, (0, None))\n\n def translate(observation):\n return nx.readwrite.json_graph.node_link_graph(\n json.loads(observation.string_value), multigraph=True, directed=True\n )\n\n def to_string(observation):\n return json.dumps(\n nx.readwrite.json_graph.node_link_data(observation), indent=2\n )\n\n elif proto.opaque_data_format == \"json://\":\n space = make_seq(proto.string_size_range, str, (0, None))\n\n def translate(observation):\n return json.loads(observation.string_value)\n\n def to_string(observation):\n return json.dumps(observation, indent=2)\n\n elif shape_type == \"int64_range_list\":\n space = make_box(\n proto.int64_range_list.range,\n np.int64,\n (np.iinfo(np.int64).min, np.iinfo(np.int64).max),\n )\n\n def translate(observation):\n return np.array(observation.int64_list.value, dtype=np.int64)\n\n to_string = str\n elif shape_type == \"double_range_list\":\n space = make_box(\n proto.double_range_list.range, np.float64, (-np.inf, np.inf)\n )\n\n def translate(observation):\n return np.array(observation.double_list.value, dtype=np.float64)\n\n to_string = str\n elif shape_type == \"string_size_range\":\n space = make_seq(proto.string_size_range, str, (0, None))\n\n def translate(observation):\n return observation.string_value\n\n to_string = str\n elif shape_type == \"binary_size_range\":\n space = 
make_seq(proto.binary_size_range, bytes, (0, None))\n\n def translate(observation):\n return observation.binary_value\n\n to_string = str\n elif shape_type == \"scalar_int64_range\":\n space = make_scalar(\n proto.scalar_int64_range,\n int,\n (np.iinfo(np.int64).min, np.iinfo(np.int64).max),\n )\n\n def translate(observation):\n return int(observation.scalar_int64)\n\n to_string = str\n elif shape_type == \"scalar_double_range\":\n space = make_scalar(proto.scalar_double_range, float, (-np.inf, np.inf))\n\n def translate(observation):\n return float(observation.scalar_double)\n\n to_string = str\n elif shape_type == \"double_sequence\":\n space = make_seq(\n proto.double_sequence.length_range,\n np.float64,\n (-np.inf, np.inf),\n make_scalar(\n proto.double_sequence.scalar_range, np.float64, (-np.inf, np.inf)\n ),\n )\n\n def translate(observation):\n return np.array(observation.double_list.value, dtype=np.float64)\n\n to_string = str\n else:\n raise TypeError(\n f\"Unknown shape '{shape_type}' for ObservationSpace:\\n{proto}\"\n )\n\n return cls(\n id=proto.name,\n index=index,\n space=space,\n translate=translate,\n to_string=to_string,\n deterministic=proto.deterministic,\n platform_dependent=proto.platform_dependent,\n default_value=translate(proto.default_value),\n )\n\n def make_derived_space(\n self,\n id: str,\n translate: Callable[[ObservationType], ObservationType],\n space: Optional[Space] = None,\n deterministic: Optional[bool] = None,\n default_value: Optional[ObservationType] = None,\n platform_dependent: Optional[bool] = None,\n to_string: Callable[[ObservationType], str] = None,\n ) -> \"ObservationSpaceSpec\":\n \"\"\"Create a derived observation space.\n\n :param id: The name of the derived observation space.\n :param translate: A callback function to compute a derived observation\n from the base observation.\n :param space: The :code:`gym.Space` describing the observation space.\n :param deterministic: Whether the observation space is deterministic.\n If not provided, the value is inherited from the base observation\n space.\n :param default_value: The default value for the observation space. If\n not provided, the value is derived from the default value of the\n base observation space.\n :param platform_dependent: Whether the derived observation space is\n platform-dependent. If not provided, the value is inherited from\n the base observation space.\n :param to_string: A callback to convert and observation to a string\n representation. If not provided, the callback is inherited from the\n base observation space.\n :return: A new ObservationSpaceSpec.\n \"\"\"\n return ObservationSpaceSpec(\n id=id,\n index=self.index,\n space=space or self.space,\n translate=lambda observation: translate(self.translate(observation)),\n to_string=to_string or self.to_string,\n default_value=(\n translate(self.default_value)\n if default_value is None\n else default_value\n ),\n deterministic=(\n self.deterministic if deterministic is None else deterministic\n ),\n platform_dependent=(\n self.platform_dependent\n if platform_dependent is None\n else platform_dependent\n ),\n )\n" ]
[ [ "numpy.array", "numpy.iinfo" ] ]
ognpa/govhack2018
[ "a6e2260a13225b032b3518ba4a6a5de706a96833" ]
[ "src/app.py" ]
[ "#!flask/bin/python\nfrom flask import Flask\nimport pandas as pd\nimport requests\nfrom flask import send_file\nimport matplotlib\nmatplotlib.use(\"agg\")\nfrom flask import jsonify\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n#%matplotlib inline\napp = Flask(__name__)\n\n\nall_data_sets={'ato':'ato.xlsx',\n 'postcodes':'postcodes.csv'\n \n }\n\n\n\[email protected]('/ato/sz')\ndef atoSize():\n #ato=pd.read_excel('ato.xlsx',sheet_name='ATO Data')\n return str(ato.shape[0])\n\[email protected]('/ato/cols')\ndef atoCols():\n #ato=pd.read_excel('ato.xlsx',sheet_name='ATO Data')\n dataset_list = ' ,'.join(ato.columns)\n dataset=[]\n for i in ato.columns:\n dataset.append(i)\n return jsonify(dataset)\n \n \n\n\[email protected]('/ato/dtypes')\ndef atoDtypes():\n #ato=pd.read_excel('ato.xlsx',sheet_name='ATO Data')\n dtype_dict={}\n for i in ato.columns:\n dtype_dict[i]=str(ato[i].dtype)\n \n return jsonify(dtype_dict)\n\n\[email protected]('/list_all')\ndef listAllSupported():\n return jsonify(list(all_data_sets.keys()))\n\n#@app.route('/all')\n#def allDatasets():\n# #https://search.data.gov.au/search?q=electricity\n# r = requests.get('https://search.data.gov.au/search?q=electricity')\n# return r.text\n\n\[email protected]('/ato/graph2.0/<name>')\ndef atoGraph2View(name):\n return send_file(name+\".png\", mimetype='image/png')\n\n\n\[email protected]('/ato/graph2.0')\[email protected]('/ato/graph2.0/')\ndef atoGraph2():\n all_graphs=[]\n plt.figure(1)\n sns.countplot(ato['Income year']) \n plt.savefig(\"1.png\")\n plt.show()\n \n plt.figure(2)\n sns.pointplot(x=\"Income year\", y=\"Net tax\", data=ato)\n plt.savefig(\"3.png\")\n plt.show()\n\n \n\n all_graphs=['1.png','2.png']\n \n return str(len(all_graphs))\n\[email protected]('/ato/graph')\ndef atoGraph():\n # ato=pd.read_excel('ato.xlsx',sheet_name='ATO Data')\n g=sns.countplot(ato['Income year']).get_figure()\n g.savefig(\"output.png\")\n return send_file(\"output.png\", mimetype='image/png')\n\n\n\[email protected]('/')\ndef index():\n # ato=pd.read_excel('ato.xlsx',sheet_name='ATO Data')\n return \"hello world\"\n\n\nif __name__ == \"__main__\":\n ato=pd.read_excel('ato.xlsx',sheet_name='ATO Data')\n\n app.run(host=\"0.0.0.0\", port=80)\n \n" ]
[ [ "matplotlib.use", "matplotlib.pyplot.savefig", "pandas.read_excel", "matplotlib.pyplot.figure", "matplotlib.pyplot.show" ] ]
FMsunyh/pysot
[ "1581d98cd5b4f09a1f3a31e45076a039558567ba" ]
[ "training_dataset/vid/par_crop.py" ]
[ "from os.path import join, isdir\nfrom os import listdir, mkdir, makedirs\nimport cv2\nimport numpy as np\nimport glob\nimport xml.etree.ElementTree as ET\nfrom concurrent import futures\nimport sys\nimport time\n\nVID_base_path = '/home/syh/train_data/ILSVRC2015'\nann_base_path = join(VID_base_path, 'Annotations/VID/train/')\nsub_sets = sorted({'a', 'b', 'c', 'd', 'e'})\n\n\n# Print iterations progress (thanks StackOverflow)\ndef printProgress(iteration, total, prefix='', suffix='', decimals=1, barLength=100):\n \"\"\"\n Call in a loop to create terminal progress bar\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals in percent complete (Int)\n barLength - Optional : character length of bar (Int)\n \"\"\"\n formatStr = \"{0:.\" + str(decimals) + \"f}\"\n percents = formatStr.format(100 * (iteration / float(total)))\n filledLength = int(round(barLength * iteration / float(total)))\n bar = '' * filledLength + '-' * (barLength - filledLength)\n sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)),\n if iteration == total:\n sys.stdout.write('\\x1b[2K\\r')\n sys.stdout.flush()\n\n\ndef crop_hwc(image, bbox, out_sz, padding=(0, 0, 0)):\n a = (out_sz-1) / (bbox[2]-bbox[0])\n b = (out_sz-1) / (bbox[3]-bbox[1])\n c = -a * bbox[0]\n d = -b * bbox[1]\n mapping = np.array([[a, 0, c],\n [0, b, d]]).astype(np.float)\n crop = cv2.warpAffine(image, mapping, (out_sz, out_sz), borderMode=cv2.BORDER_CONSTANT, borderValue=padding)\n return crop\n\n\ndef pos_s_2_bbox(pos, s):\n return [pos[0]-s/2, pos[1]-s/2, pos[0]+s/2, pos[1]+s/2]\n\n\ndef crop_like_SiamFC(image, bbox, context_amount=0.5, exemplar_size=127, instanc_size=255, padding=(0, 0, 0)):\n target_pos = [(bbox[2]+bbox[0])/2., (bbox[3]+bbox[1])/2.]\n target_size = [bbox[2]-bbox[0], bbox[3]-bbox[1]]\n wc_z = target_size[1] + context_amount * sum(target_size)\n hc_z = target_size[0] + context_amount * sum(target_size)\n s_z = np.sqrt(wc_z * hc_z)\n scale_z = exemplar_size / s_z\n d_search = (instanc_size - exemplar_size) / 2\n pad = d_search / scale_z\n s_x = s_z + 2 * pad\n\n z = crop_hwc(image, pos_s_2_bbox(target_pos, s_z), exemplar_size, padding)\n x = crop_hwc(image, pos_s_2_bbox(target_pos, s_x), instanc_size, padding)\n return z, x\n\n\ndef crop_video(sub_set, video, crop_path, instanc_size):\n video_crop_base_path = join(crop_path, sub_set, video)\n if not isdir(video_crop_base_path): makedirs(video_crop_base_path)\n\n sub_set_base_path = join(ann_base_path, sub_set)\n xmls = sorted(glob.glob(join(sub_set_base_path, video, '*.xml')))\n for xml in xmls:\n xmltree = ET.parse(xml)\n # size = xmltree.findall('size')[0]\n # frame_sz = [int(it.text) for it in size]\n objects = xmltree.findall('object')\n objs = []\n filename = xmltree.findall('filename')[0].text\n\n im = cv2.imread(xml.replace('xml', 'JPEG').replace('Annotations', 'Data'))\n avg_chans = np.mean(im, axis=(0, 1))\n for object_iter in objects:\n trackid = int(object_iter.find('trackid').text)\n # name = (object_iter.find('name')).text\n bndbox = object_iter.find('bndbox')\n # occluded = int(object_iter.find('occluded').text)\n\n bbox = [int(bndbox.find('xmin').text), int(bndbox.find('ymin').text),\n int(bndbox.find('xmax').text), int(bndbox.find('ymax').text)]\n z, x = crop_like_SiamFC(im, bbox, instanc_size=instanc_size, padding=avg_chans)\n 
cv2.imwrite(join(video_crop_base_path, '{:06d}.{:02d}.z.jpg'.format(int(filename), trackid)), z)\n cv2.imwrite(join(video_crop_base_path, '{:06d}.{:02d}.x.jpg'.format(int(filename), trackid)), x)\n\n\ndef main(instanc_size=511, num_threads=24):\n crop_path = './crop{:d}'.format(instanc_size)\n if not isdir(crop_path): mkdir(crop_path)\n\n for sub_set in sub_sets:\n sub_set_base_path = join(ann_base_path, sub_set)\n videos = sorted(listdir(sub_set_base_path))\n n_videos = len(videos)\n with futures.ProcessPoolExecutor(max_workers=num_threads) as executor:\n fs = [executor.submit(crop_video, sub_set, video, crop_path, instanc_size) for video in videos]\n for i, f in enumerate(futures.as_completed(fs)):\n # Write progress to error so that it can be seen\n printProgress(i, n_videos, prefix=sub_set, suffix='Done ', barLength=40)\n\n\nif __name__ == '__main__':\n since = time.time()\n main(int(sys.argv[1]), int(sys.argv[2]))\n time_elapsed = time.time() - since\n print('Total complete in {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60))\n" ]
[ [ "numpy.array", "numpy.mean", "numpy.sqrt" ] ]
mahakbansal/ChessAlphaZero
[ "2b3f823fdc252d7fd32de0b5e4e53aece9082dd5" ]
[ "chess/tensorflow/NNet.py" ]
[ "import os\nimport shutil\nimport time\nimport random\nimport numpy as np\nimport math\nimport sys\nsys.path.append('../../')\nfrom utils import *\nfrom pytorch_classification.utils import Bar, AverageMeter\nfrom NeuralNet import NeuralNet\nfrom chess.pythonchess import chess as chess\n\n\nimport tensorflow as tf\nfrom .ChessNNet import ChessNNet as onnet\n\nargs = dotdict({\n 'lr': 0.001,\n 'dropout': 0.3,\n 'epochs': 10,\n 'batch_size': 64,\n 'num_channels': 512,\n})\n\nclass NNetWrapper(NeuralNet):\n def __init__(self, game):\n self.nnet = onnet(game, args)\n self.board_x, self.board_y = game.getBoardSize()\n self.action_size = game.getActionSize()\n\n self.sess = tf.Session(graph=self.nnet.graph)\n self.saver = None\n with tf.Session() as temp_sess:\n temp_sess.run(tf.global_variables_initializer())\n self.sess.run(tf.variables_initializer(self.nnet.graph.get_collection('variables')))\n\n def train(self, examples):\n \"\"\"\n examples: list of examples, each example is of form (board, pi, v)\n \"\"\"\n\n for epoch in range(args.epochs):\n print('EPOCH ::: ' + str(epoch+1))\n data_time = AverageMeter()\n batch_time = AverageMeter()\n pi_losses = AverageMeter()\n v_losses = AverageMeter()\n end = time.time()\n\n bar = Bar('Training Net', max=int(len(examples)/args.batch_size))\n batch_idx = 0\n\n # self.sess.run(tf.local_variables_initializer())\n while batch_idx < int(len(examples)/args.batch_size):\n sample_ids = np.random.randint(len(examples), size=args.batch_size)\n boards, pis, vs = list(zip(*[examples[i] for i in sample_ids]))\n\n # predict and compute gradient and do SGD step\n input_dict = {self.nnet.input_boards: boards, self.nnet.target_pis: pis, self.nnet.target_vs: vs, self.nnet.dropout: args.dropout, self.nnet.isTraining: True}\n\n # measure data loading time\n data_time.update(time.time() - end)\n\n # record loss\n self.sess.run(self.nnet.train_step, feed_dict=input_dict)\n pi_loss, v_loss = self.sess.run([self.nnet.loss_pi, self.nnet.loss_v], feed_dict=input_dict)\n pi_losses.update(pi_loss, len(boards))\n v_losses.update(v_loss, len(boards))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n batch_idx += 1\n\n # plot progress\n bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss_pi: {lpi:.4f} | Loss_v: {lv:.3f}'.format(\n batch=batch_idx,\n size=int(len(examples)/args.batch_size),\n data=data_time.avg,\n bt=batch_time.avg,\n total=bar.elapsed_td,\n eta=bar.eta_td,\n lpi=pi_losses.avg,\n lv=v_losses.avg,\n )\n bar.next()\n bar.finish()\n\n def bb2array(self, b): #board to vector of len 64\n \tx = np.zeros(64, dtype=np.int8)\n \t#print('Flipping: ', flip)\n \tfor pos in range(64) :\n \t\tpiece = b.piece_type_at(pos) #Gets the piece type at the given square. 
0==>blank,1,2,3,4,5,6\n \t\tif piece :\n \t\t\tcolor = int(bool(b.occupied_co[chess.BLACK] & chess.BB_SQUARES[pos])) #to check if piece is black or white\n \t\t\t#print ('piece: ', piece, 'b.occupied_co[chess.BLACK]: ', b.occupied_co[chess.BLACK], 'chess.BB_SQUARES[pos]: ', chess.BB_SQUARES[pos], 'color: ', color, 'pos: ', pos, '\\t', b.occupied_co[chess.BLACK] & chess.BB_SQUARES[pos])\n \t\t\tcol = int(pos % 8)\n \t\t\trow = int(pos / 8)\n \t#\t\tif flip:\n \t#\t\trow = 7-row\n \t#\t\tcolor = 1 - color\n \t\t\tx[row * 8 + col] = -piece if color else piece\n \tt = b.turn\n \tc = b.castling_rights\n \te = b.ep_square\n \th = b.halfmove_clock\n \tf = b.fullmove_number\n \treturn x\n def vector2matrix(self, x):\n y = np.reshape(x, (8,8))\n return y\n\n def predict(self, chessboard):\n \"\"\"\n board: np array with board\n \"\"\"\n\n #changing the chess-pgn board object to vector\n X = np.array([self.bb2array(chessboard)])\n #self.pieces = self.vector2matrix(X[0])\n #self.chessboard = board\n board = self.vector2matrix(X[0])\n # timing\n start = time.time()\n\n # preparing input\n board = board[np.newaxis, :, :]\n\n # run\n prob, v = self.sess.run([self.nnet.prob, self.nnet.v], feed_dict={self.nnet.input_boards: board, self.nnet.dropout: 0, self.nnet.isTraining: False})\n\n #print('PREDICTION TIME TAKEN : {0:03f}'.format(time.time()-start))\n return prob[0], v[0]\n\n def save_checkpoint(self, folder='checkpoint', filename='checkpoint.pth.tar'):\n filepath = os.path.join(folder, filename)\n if not os.path.exists(folder):\n print(\"Checkpoint Directory does not exist! Making directory {}\".format(folder))\n os.mkdir(folder)\n else:\n print(\"Checkpoint Directory exists! \")\n if self.saver == None:\n self.saver = tf.train.Saver(self.nnet.graph.get_collection('variables'))\n with self.nnet.graph.as_default():\n self.saver.save(self.sess, filepath)\n\n def load_checkpoint(self, folder='checkpoint', filename='checkpoint.pth.tar'):\n filepath = os.path.join(folder, filename)\n if not os.path.exists(filepath+'.meta'):\n raise(\"No model in path {}\".format(filepath))\n with self.nnet.graph.as_default():\n self.saver = tf.train.Saver()\n self.saver.restore(self.sess, filepath)\n" ]
[ [ "numpy.reshape", "numpy.zeros", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.global_variables_initializer" ] ]
simeonreusch/ztf_plan_obs
[ "fc2f6521a99d1386654400d18e8aea9dd1536e02" ]
[ "ztf_plan_obs/multiday_plan.py" ]
[ "#!/usr/bin/env python3\nimport matplotlib.pyplot as plt\nimport os\nfrom datetime import datetime, date\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom tqdm import tqdm\nfrom astropy.time import Time\nfrom astropy import units as u\nfrom ztf_plan_obs.plan import PlanObservation\nfrom ztf_plan_obs.plan import round_time, short_time\n\nNIGHTS = [1, 2, 3, 5, 7, 9]\nSHORT_NIGHTS = NIGHTS[1:]\nONE_FILTER_NIGHTS = NIGHTS[1:-1]\n\n\nclass MultiDayObservation:\n \"\"\" \"\"\"\n\n def __init__(\n self,\n name: str,\n ra: float = None,\n dec: float = None,\n startdate = None,\n verbose: bool = True,\n **kwargs,\n ):\n\n self.name = name\n self.ra = ra\n self.dec = dec\n\n today = date.today()\n now = datetime.now()\n\n if startdate is None:\n now_astropy = Time(str(now), format=\"iso\", scale=\"utc\", out_subfmt=\"date\")\n next_days = [(now_astropy + i - 1).value for i in NIGHTS]\n else:\n startdate_astropy = Time(str(startdate), format=\"iso\", scale=\"utc\", out_subfmt=\"date\")\n next_days = [(startdate_astropy + i - 1).value for i in NIGHTS]\n\n if self.ra is None:\n plan_initial = PlanObservation(\n name=name, date=str(today), alertsource=\"icecube\"\n )\n else:\n plan_initial = PlanObservation(\n name=name, date=str(today), ra=self.ra, dec=self.dec\n )\n\n ra = plan_initial.ra\n dec = plan_initial.dec\n\n observable = []\n g_band_start = []\n g_band_end = []\n r_band_start = []\n r_band_end = []\n\n pdf_outfile = os.path.join(name, f\"{name}_multiday.pdf\")\n\n with PdfPages(pdf_outfile) as pdf:\n for i, day in enumerate(tqdm(next_days)):\n if NIGHTS[i] not in SHORT_NIGHTS:\n plan = PlanObservation(\n name=name, date=day, ra=ra, dec=dec, verbose=False\n )\n else:\n if NIGHTS[i] in ONE_FILTER_NIGHTS:\n bands = [\"g\"]\n else:\n bands = [\"g\", \"r\"]\n plan = PlanObservation(\n name=name,\n date=day,\n ra=ra,\n dec=dec,\n observationlength=30,\n bands=bands,\n verbose=False,\n )\n\n observable.append(plan.observable)\n\n if observable:\n g_band_start.append(plan.g_band_recommended_time_start)\n g_band_end.append(plan.g_band_recommended_time_end)\n r_band_start.append(plan.r_band_recommended_time_start)\n r_band_end.append(plan.r_band_recommended_time_end)\n else:\n g_band_start.append(None)\n g_band_end.append(None)\n r_band_start.append(None)\n r_band_end.append(None)\n\n ax = plan.plot_target()\n plt.tight_layout()\n pdf.savefig()\n plt.close()\n\n self.summarytext = f\"\\nYour multi-day observation plan for {name}\\n\"\n\n self.summarytext += \"-------------------------------------------------\\n\"\n self.summarytext += \"g-band observations\\n\"\n for i, item in enumerate(g_band_start):\n if item is not None:\n if observable[i]:\n self.summarytext += f\"Night {NIGHTS[i]} {short_time(item.value)} - {short_time(g_band_end[i].value)}\\n\"\n else:\n self.summarytext += f\"Night {NIGHTS[i]} NOT OBSERVABLE\\n\"\n self.summarytext += \"-------------------------------------------------\\n\"\n\n self.summarytext += \"\\n-------------------------------------------------\\n\"\n self.summarytext += \"r-band observations\\n\"\n\n for i, item in enumerate(r_band_start):\n if NIGHTS[i] not in ONE_FILTER_NIGHTS:\n if item is not None:\n if observable[i]:\n self.summarytext += f\"Night {NIGHTS[i]} {short_time(item.value)} - {short_time(r_band_end[i].value)}\\n\"\n else:\n self.summarytext += f\"Night {NIGHTS[i]} NOT OBSERVABLE\\n\"\n self.summarytext += \"-------------------------------------------------\\n\\n\"\n\n\n def print_plan(self):\n print(self.summarytext)\n" ]
[ [ "matplotlib.pyplot.close", "matplotlib.backends.backend_pdf.PdfPages", "matplotlib.pyplot.tight_layout" ] ]
pingguokiller/hoenas
[ "8e4969e4270f665eda0dadde4f3c69379a0a2db0" ]
[ "models/archinfo.py" ]
[ "import numpy as np\nimport utils\nimport json\nfrom models.operations import candidateNameList\n\n\n# arch is represented by 2 parts: op and edge.\nclass ArchInfo(object):\n #\n def __init__(self):\n self.NODE_SIZE = 14\n self.OP_SIZE = len(candidateNameList)\n self.CELL_SIZE = 4\n self.EDGE_SIZE = 14\n\n #op info, -1 is the random code.\n self.normalOpArch = {\n 'randomNum': self.NODE_SIZE,\n 'op': -1*np.ones(self.NODE_SIZE, dtype=np.int8)\n }\n self.reduceOpArch = {\n 'randomNum': self.NODE_SIZE,\n 'op': -1*np.ones(self.NODE_SIZE, dtype=np.int8)\n }\n\n # edge info\n self.normalEdgeArch = {\n 'randomNum': self.CELL_SIZE,\n 'edge': [],\n }\n self.reduceEdgeArch = {\n 'randomNum': self.CELL_SIZE,\n 'edge': [],\n }\n tmpEdgeSize = 2\n for cellIndex in range(self.CELL_SIZE):\n if cellIndex == 0:\n nodeEdgeInfo = {\n 'randomType': False,\n 'edge': np.ones(tmpEdgeSize, dtype=np.int8)\n }\n else:\n nodeEdgeInfo = {\n 'randomType': True,\n 'edge': -1*np.ones(tmpEdgeSize, dtype=np.int8)\n }\n self.normalEdgeArch['edge'].append(nodeEdgeInfo.copy())\n self.reduceEdgeArch['edge'].append(nodeEdgeInfo.copy())\n tmpEdgeSize += 1\n\n self.performance = {\n 'testacc': 0,\n 'latency': 0,\n 'sparsity': 0,\n 'accdrop': 0\n }\n\n # json to string\n def tostring(self):\n jsonInfo = {\n 'normalOpArch': {\n 'randomNum': self.normalOpArch['randomNum'],\n 'op': self.normalOpArch['op'].tolist()\n },\n 'reduceOpArch': {\n 'randomNum': self.reduceOpArch['randomNum'],\n 'op': self.reduceOpArch['op'].tolist()\n },\n 'normalEdgeArch': {\n 'randomNum': self.normalEdgeArch['randomNum'],\n 'edge': [{\n 'randomType': edgeinfo['randomType'],\n 'edge': edgeinfo['edge'].tolist()\n } for edgeinfo in self.normalEdgeArch['edge']]\n },\n 'reduceEdgeArch': {\n 'randomNum': self.reduceEdgeArch['randomNum'],\n 'edge': [{\n 'randomType': edgeinfo['randomType'],\n 'edge': edgeinfo['edge'].tolist()\n } for edgeinfo in self.reduceEdgeArch['edge']]\n },\n 'performance': self.performance,\n }\n jsonStr = json.dumps(jsonInfo)\n return jsonInfo, jsonStr\n\n # copy\n def copy(self):\n newArchInfo = ArchInfo()\n newArchInfo.normalOpArch['randomNum'] = self.normalOpArch['randomNum']\n newArchInfo.normalOpArch['op'] = self.normalOpArch['op'].copy()\n\n newArchInfo.reduceOpArch['randomNum'] = self.reduceOpArch['randomNum']\n newArchInfo.reduceOpArch['op'] = self.reduceOpArch['op'].copy()\n\n newArchInfo.normalEdgeArch['randomNum'] = self.normalEdgeArch['randomNum']\n for edgeindex in range(len(newArchInfo.normalEdgeArch['edge'])):\n newArchInfo.normalEdgeArch['edge'][edgeindex]['randomType'] = self.normalEdgeArch['edge'][edgeindex]['randomType']\n newArchInfo.normalEdgeArch['edge'][edgeindex]['edge'] = self.normalEdgeArch['edge'][edgeindex]['edge'].copy()\n\n newArchInfo.reduceEdgeArch['randomNum'] = self.reduceEdgeArch['randomNum']\n for edgeindex in range(len(newArchInfo.reduceEdgeArch['edge'])):\n newArchInfo.reduceEdgeArch['edge'][edgeindex]['randomType'] = self.reduceEdgeArch['edge'][edgeindex]['randomType']\n newArchInfo.reduceEdgeArch['edge'][edgeindex]['edge'] = self.reduceEdgeArch['edge'][edgeindex]['edge'].copy()\n\n if self.reduceEdgeArch['edge'][1]['edge'][0] != -1 :\n pass\n\n return newArchInfo\n\n\n # archi mutation, in order \"normalOpArch, reduceOpArch, normalEdgeArch, reduceEdgeArch\"\n def mutation(self):\n newArchList = []\n if self.normalOpArch['randomNum'] > 0:\n for opIndex in range(self.OP_SIZE):\n newArchInfo = self.copy()\n newArchInfo.normalOpArch['randomNum'] = newArchInfo.normalOpArch['randomNum'] - 1\n 
newArchInfo.normalOpArch['op'][newArchInfo.normalOpArch['randomNum']] = opIndex\n newArchList.append(newArchInfo)\n elif self.reduceOpArch['randomNum'] > 0:\n for opIndex in range(self.OP_SIZE):\n newArchInfo = self.copy()\n newArchInfo.reduceOpArch['randomNum'] = newArchInfo.reduceOpArch['randomNum'] - 1\n newArchInfo.reduceOpArch['op'][newArchInfo.reduceOpArch['randomNum']] = opIndex\n newArchList.append(newArchInfo)\n elif self.normalEdgeArch['randomNum'] > 1:\n edgeLen = self.normalEdgeArch['randomNum'] + 1\n combinList = utils.combination(range(edgeLen), 2)\n for combineEdge in combinList:\n newArchInfo = self.copy()\n newArchInfo.normalEdgeArch['randomNum'] = newArchInfo.normalEdgeArch['randomNum'] - 1\n newArchInfo.normalEdgeArch['edge'][newArchInfo.normalEdgeArch['randomNum']]['randomType'] = False\n newArchInfo.normalEdgeArch['edge'][newArchInfo.normalEdgeArch['randomNum']]['edge'] = np.array([1 if edgeIndex in combineEdge else 0 for edgeIndex in range(edgeLen)], dtype=np.int8)\n newArchList.append(newArchInfo)\n\n # for comparison to darts. darts have 'none' operation\n combinList = utils.combination(range(edgeLen), 1)\n for combineEdge in combinList:\n newArchInfo = self.copy()\n newArchInfo.normalEdgeArch['randomNum'] = newArchInfo.normalEdgeArch['randomNum'] - 1\n newArchInfo.normalEdgeArch['edge'][newArchInfo.normalEdgeArch['randomNum']]['randomType'] = False\n newArchInfo.normalEdgeArch['edge'][newArchInfo.normalEdgeArch['randomNum']]['edge'] = np.array([1 if edgeIndex in combineEdge else 0 for edgeIndex in range(edgeLen)], dtype=np.int8)\n newArchList.append(newArchInfo)\n\n elif self.reduceEdgeArch['randomNum'] > 1:\n edgeLen = self.reduceEdgeArch['randomNum']+1\n\n combinList = utils.combination(range(edgeLen), 2)\n for combineEdge in combinList:\n newArchInfo = self.copy()\n newArchInfo.reduceEdgeArch['randomNum'] = newArchInfo.reduceEdgeArch['randomNum'] - 1\n newArchInfo.reduceEdgeArch['edge'][newArchInfo.reduceEdgeArch['randomNum']]['randomType'] = False\n newArchInfo.reduceEdgeArch['edge'][newArchInfo.reduceEdgeArch['randomNum']]['edge'] = np.array([1 if edgeIndex in combineEdge else 0 for edgeIndex in range(edgeLen)], dtype=np.int8)\n newArchList.append(newArchInfo)\n\n combinList = utils.combination(range(edgeLen), 1)\n for combineEdge in combinList:\n newArchInfo = self.copy()\n newArchInfo.reduceEdgeArch['randomNum'] = newArchInfo.reduceEdgeArch['randomNum'] - 1\n newArchInfo.reduceEdgeArch['edge'][newArchInfo.reduceEdgeArch['randomNum']]['randomType'] = False\n newArchInfo.reduceEdgeArch['edge'][newArchInfo.reduceEdgeArch['randomNum']]['edge'] = np.array([1 if edgeIndex in combineEdge else 0 for edgeIndex in range(edgeLen)], dtype=np.int8)\n newArchList.append(newArchInfo)\n else:\n return []\n\n return newArchList" ]
[ [ "numpy.ones" ] ]
jeromekelleher/tszip
[ "735b4459b4043b03d43222ca6a1ef0aec9a9589a" ]
[ "tscompress/compression.py" ]
[ "# MIT License\n#\n# Copyright (c) 2019 Tskit Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nCompression utilities for tskit tree sequences.\n\"\"\"\nimport logging\nimport os\n\nimport numcodecs\nimport zarr\nimport numpy as np\nimport tskit\n\nlogger = logging.getLogger(__name__)\n\n\ndef compress(ts, path):\n \"\"\"\n Compresses the specified tree sequence and writes it to the specified\n path.\n \"\"\"\n logging.info(\"Compressing to {}\".format(path))\n try:\n store = zarr.ZipStore(path, mode='w')\n root = zarr.group(store=store)\n compress_zarr(ts, root)\n store.close()\n except Exception as e:\n os.unlink(path)\n raise e\n\n\ndef compress_zarr(ts, root):\n # TODO this current version is the most extreme option where we throw away\n # all the non-site information.\n\n # First reduce to site topology\n tables = ts.dump_tables()\n tables.simplify(reduce_to_site_topology=True)\n\n nodes = root.create_group(\"nodes\")\n flags = nodes.empty(\"flags\", shape=len(tables.nodes), dtype=np.uint8)\n flags[:] = tables.nodes.flags\n logger.debug(flags.info)\n\n # Get the indexes into the position array.\n pos_map = np.hstack([tables.sites.position, [tables.sequence_length]])\n pos_map[0] = 0\n left_mapped = np.searchsorted(pos_map, tables.edges.left)\n if np.any(pos_map[left_mapped] != tables.edges.left):\n raise ValueError(\"Invalid left coordinates\")\n right_mapped = np.searchsorted(pos_map, tables.edges.right)\n if np.any(pos_map[right_mapped] != tables.edges.right):\n raise ValueError(\"Invalid right coordinates\")\n\n filters = [numcodecs.Delta(dtype=np.int32, astype=np.int32)]\n compressor = numcodecs.Blosc(cname='zstd', clevel=9, shuffle=numcodecs.Blosc.SHUFFLE)\n edges = root.create_group(\"edges\")\n parent = edges.empty(\n \"parent\", shape=len(tables.edges), dtype=np.int32, filters=filters,\n compressor=compressor)\n child = edges.empty(\n \"child\", shape=len(tables.edges), dtype=np.int32, filters=filters,\n compressor=compressor)\n left = edges.empty(\n \"left\", shape=len(tables.edges), dtype=np.uint32, filters=filters,\n compressor=compressor)\n right = edges.empty(\n \"right\", shape=len(tables.edges), dtype=np.uint32, filters=filters,\n compressor=compressor)\n parent[:] = tables.edges.parent\n child[:] = tables.edges.child\n left[:] = left_mapped\n right[:] = right_mapped\n\n mutations = root.create_group(\"mutations\")\n site = mutations.empty(\n \"site\", shape=len(tables.mutations), dtype=np.int32, compressor=compressor)\n node = 
mutations.empty(\n \"node\", shape=len(tables.mutations), dtype=np.int32, compressor=compressor)\n site[:] = tables.mutations.site\n node[:] = tables.mutations.node\n\n\ndef decompress(path):\n \"\"\"\n Returns a decompressed tskit tree sequence read from the specified path.\n \"\"\"\n store = zarr.ZipStore(path, mode='r')\n root = zarr.group(store=store)\n return decompress_zarr(root)\n\n\ndef decompress_zarr(root):\n site = root[\"mutations/site\"][:]\n num_sites = site[-1] + 1\n n = site.shape[0]\n tables = tskit.TableCollection(num_sites)\n tables.mutations.set_columns(\n node=root[\"mutations/node\"],\n site=site,\n derived_state=np.zeros(n, dtype=np.int8) + ord(\"1\"),\n derived_state_offset=np.arange(n + 1, dtype=np.uint32))\n tables.sites.set_columns(\n position=np.arange(num_sites),\n ancestral_state=np.zeros(num_sites, dtype=np.int8) + ord(\"0\"),\n ancestral_state_offset=np.arange(num_sites + 1, dtype=np.uint32))\n flags = root[\"nodes/flags\"][:]\n n = flags.shape[0]\n tables.nodes.set_columns(\n flags=flags.astype(np.uint32),\n time=np.arange(n))\n tables.edges.set_columns(\n left=root[\"edges/left\"],\n right=root[\"edges/right\"],\n parent=root[\"edges/parent\"],\n child=root[\"edges/child\"])\n return tables.tree_sequence()\n" ]
[ [ "numpy.zeros", "numpy.hstack", "numpy.any", "numpy.arange", "numpy.searchsorted" ] ]
surfmachine/ai
[ "c79b348ce7782c0d3c0c4421c75007be54e55693" ]
[ "src/17_dqn_branching/enjoy_lunar_lander.py" ]
[ "from tqdm import tqdm\nimport torch \nfrom branching_dqn_lunar_lander import BranchingQNetwork, TensorEnv, ExperienceReplayMemory, AgentConfig, BranchingTensorEnv\n\nll = 'LunarLander-v2'\n\nbins = 4\nenv = BranchingTensorEnv(ll, bins)\n \nagent = BranchingQNetwork(env.observation_space.shape[0], env.action_space.n, bins)\nagent.load_state_dict(torch.load('./runs/{}/model_state_dict'.format(ll)))\n\nprint(agent)\nfor ep in tqdm(range(10)):\n\n s = env.reset()\n done = False\n ep_reward = 0\n while not done: \n\n with torch.no_grad(): \n out = agent(s).squeeze(0)\n action = torch.argmax(out, dim = 1).numpy().reshape(-1)\n print(action)\n s, r, done, _ = env.step(action)\n\n env.render()\n ep_reward += r \n\n print('Ep reward: {:.3f}'.format(ep_reward))\n\nenv.close()" ]
[ [ "torch.no_grad", "torch.argmax" ] ]
Javesun99/HyperNets
[ "6ebe64c440c7bc1e7e2ea39c898fe90cf850d61e" ]
[ "sound-event-detection/SEDNet.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport os\nimport numpy as np\nimport utility_functions as uf\nfrom hypercomplex_layers import QuaternionConv, QuaternionLinear, PHMConv, PHMLinear\n\n'''\nPytorch implementation of the original SELDNet: https://arxiv.org/pdf/1807.00129.pdf\nAnd an augmented version of it, adapted for the output of the L#DAS21 challenge.\n'''\n\nclass Seldnet_vanilla(nn.Module):\n def __init__(self, time_dim, freq_dim=256, input_channels=8, output_classes=14,\n pool_size=[[8,2],[8,2],[2,2]], pool_time=False, n_cnn_filters=64,\n rnn_size=128, n_rnn=2,fc_size=128, dropout_perc=0., class_overlaps=3.,\n verbose=False):\n super(Seldnet_vanilla, self).__init__()\n self.verbose = verbose\n self.time_dim = time_dim\n self.freq_dim = freq_dim\n sed_output_size = output_classes * class_overlaps #here 3 is the max number of simultaneus sounds from the same class\n doa_output_size = sed_output_size * 3 #here 3 is the number of spatial dimensions xyz\n if pool_time:\n self.time_pooled_size = int(time_dim / np.prod(np.array(pool_size), axis=0)[-1])\n else:\n self.time_pooled_size = time_dim\n #building CNN feature extractor\n conv_layers = []\n in_chans = input_channels\n for p in pool_size:\n curr_chans = n_cnn_filters\n if pool_time:\n pool = [p[0],p[1]]\n else:\n pool = [p[0],1]\n conv_layers.append(\n nn.Sequential(\n nn.Conv2d(in_chans, out_channels=curr_chans,\n kernel_size=3, stride=1, padding=1), #padding 1 = same with kernel = 3\n nn.BatchNorm2d(n_cnn_filters),\n nn.ReLU(),\n nn.MaxPool2d(pool),\n nn.Dropout(dropout_perc)))\n in_chans = curr_chans\n\n self.cnn = nn.Sequential(*conv_layers)\n\n self.rnn = nn.GRU(128, rnn_size, num_layers=n_rnn, batch_first=True,\n bidirectional=True, dropout=dropout_perc)\n\n self.sed = nn.Sequential(\n nn.Linear(256, fc_size),\n nn.Dropout(dropout_perc),\n nn.Linear(fc_size, sed_output_size),\n nn.Sigmoid())\n\n # self.doa = nn.Sequential(\n # nn.Linear(256, fc_size),\n # nn.Dropout(dropout_perc),\n # nn.Linear(fc_size, doa_output_size),\n # nn.Tanh())\n\n def forward(self, x):\n x = self.cnn(x)\n if self.verbose:\n print ('cnn out ', x.shape) #target dim: [batch, n_cnn_filters, 2, time_frames]\n x = x.permute(0,3,1,2) #[batch, time, channels, freq]\n if self.verbose:\n print ('permuted: ', x.shape) #target dim: [batch, time_frames, n_cnn_filters, 2]\n x = x.reshape(x.shape[0], self.time_pooled_size, -1)\n if self.verbose:\n print ('reshaped: ', x.shape) #target dim: [batch, 2*n_cnn_filters]\n x, h = self.rnn(x)\n if self.verbose:\n print ('rnn out: ', x.shape) #target dim: [batch, 2*n_cnn_filters]\n sed = self.sed(x)\n # doa = self.doa(x)\n if self.verbose:\n print ('sed prediction: ', sed.shape) #target dim: [batch, time, sed_output_size]\n # print ('doa prediction: ', doa.shape) #target dim: [batch, time, doa_output_size]\n\n return sed\n\nclass Seldnet_augmented(nn.Module):\n def __init__(self, time_dim, freq_dim=256, input_channels=4, output_classes=14,\n pool_size=[[8,2],[8,2],[2,2],[1,1]], cnn_filters=[64,128,256,512], pool_time=True,\n rnn_size=256, n_rnn=3, fc_size=1024, dropout_perc=0.3, class_overlaps=3.,\n verbose=False):\n super(Seldnet_augmented, self).__init__()\n self.verbose = verbose\n self.time_dim = time_dim\n self.freq_dim = freq_dim\n sed_output_size = output_classes * class_overlaps #here 3 is the max number of simultaneus sounds from the same class\n doa_output_size = sed_output_size * 3 #here 3 is the number of spatial dimensions xyz\n if pool_time:\n self.time_pooled_size = 
int(time_dim / np.prod(np.array(pool_size), axis=0)[-1])\n else:\n self.time_pooled_size = time_dim\n #building CNN feature extractor\n conv_layers = []\n in_chans = input_channels\n for i, (p,c) in enumerate(zip(pool_size, cnn_filters)):\n curr_chans = c\n\n if pool_time:\n pool = [p[0],p[1]]\n else:\n pool = [p[0],1]\n conv_layers.append(\n nn.Sequential(\n nn.Conv2d(in_chans, out_channels=curr_chans,\n kernel_size=3, stride=1, padding=1), #padding 1 = same with kernel = 3\n nn.BatchNorm2d(c),\n nn.ReLU(),\n nn.MaxPool2d(pool),\n nn.Dropout(dropout_perc)))\n in_chans = curr_chans\n\n self.cnn = nn.Sequential(*conv_layers)\n\n self.rnn = nn.GRU(1024, rnn_size, num_layers=n_rnn, batch_first=True,\n bidirectional=True, dropout=dropout_perc)\n\n self.sed = nn.Sequential(\n nn.Linear(rnn_size*2, fc_size),\n nn.ReLU(),\n nn.Linear(fc_size, fc_size),\n nn.ReLU(),\n nn.Linear(fc_size, fc_size),\n nn.ReLU(),\n nn.Dropout(dropout_perc),\n nn.Linear(fc_size, sed_output_size),\n nn.Sigmoid())\n\n # self.doa = nn.Sequential(\n # nn.Linear(rnn_size*2, fc_size),\n # nn.ReLU(),\n # nn.Linear(fc_size, fc_size),\n # nn.ReLU(),\n # nn.Linear(fc_size, fc_size),\n # nn.ReLU(),\n # nn.Dropout(dropout_perc),\n # nn.Linear(fc_size, doa_output_size),\n # nn.Tanh())\n\n def forward(self, x):\n x = self.cnn(x)\n if self.verbose:\n print ('cnn out ', x.shape) #target dim: [batch, n_cnn_filters, 2, time_frames]\n x = x.permute(0,3,1,2) #[batch, time, channels, freq]\n if self.verbose:\n print ('permuted: ', x.shape) #target dim: [batch, time_frames, n_cnn_filters, 2]\n x = x.reshape(x.shape[0], self.time_pooled_size, -1)\n if self.verbose:\n print ('reshaped: ', x.shape) #target dim: [batch, 2*n_cnn_filters]\n x, h = self.rnn(x)\n if self.verbose:\n print ('rnn out: ', x.shape) #target dim: [batch, 2*n_cnn_filters]\n sed = self.sed(x)\n # doa = self.doa(x)\n if self.verbose:\n print ('sed prediction: ', sed.shape) #target dim: [batch, time, sed_output_size]\n # print ('doa prediction: ', doa.shape) #target dim: [batch, time, doa_output_size]\n\n return sed\n\n\n########################\n## Quaternion SeldNet ##\n########################\n\n\nclass QSeldnet_augmented(nn.Module):\n def __init__(self, time_dim, freq_dim=256, input_channels=4, output_classes=14,\n pool_size=[[8,2],[8,2],[2,2],[1,1]], cnn_filters=[64,128,256,512], pool_time=True,\n rnn_size=256, n_rnn=3, fc_size=1024, dropout_perc=0.3, class_overlaps=3.,\n verbose=False):\n super(QSeldnet_augmented, self).__init__()\n self.verbose = verbose\n self.time_dim = time_dim\n self.freq_dim = freq_dim\n sed_output_size = output_classes * class_overlaps #here 3 is the max number of simultaneus sounds from the same class\n doa_output_size = sed_output_size * 3 #here 3 is the number of spatial dimensions xyz\n if pool_time:\n self.time_pooled_size = int(time_dim / np.prod(np.array(pool_size), axis=0)[-1])\n else:\n self.time_pooled_size = time_dim\n #building CNN feature extractor\n conv_layers = []\n in_chans = input_channels\n for i, (p,c) in enumerate(zip(pool_size, cnn_filters)):\n curr_chans = c\n\n if pool_time:\n pool = [p[0],p[1]]\n else:\n pool = [p[0],1]\n conv_layers.append(\n nn.Sequential(\n QuaternionConv(in_chans, out_channels=curr_chans,\n kernel_size=3, stride=1, padding=1), #padding 1 = same with kernel = 3\n nn.BatchNorm2d(c),\n nn.ReLU(),\n nn.MaxPool2d(pool),\n nn.Dropout(dropout_perc)))\n in_chans = curr_chans\n\n self.cnn = nn.Sequential(*conv_layers)\n\n self.rnn = nn.GRU(1024, rnn_size, num_layers=n_rnn, batch_first=True,\n 
bidirectional=True, dropout=dropout_perc)\n\n self.sed = nn.Sequential(\n# QuaternionLinear(rnn_size*2, fc_size),\n nn.Linear(rnn_size*2, fc_size),\n nn.ReLU(),\n# QuaternionLinear(fc_size, fc_size),\n nn.Linear(fc_size, fc_size),\n nn.ReLU(),\n# QuaternionLinear(fc_size, fc_size),\n nn.Linear(fc_size, fc_size),\n nn.ReLU(),\n nn.Dropout(dropout_perc),\n nn.Linear(fc_size, sed_output_size),\n nn.Sigmoid())\n\n # self.doa = nn.Sequential(\n # nn.Linear(rnn_size*2, fc_size),\n # nn.ReLU(),\n # nn.Linear(fc_size, fc_size),\n # nn.ReLU(),\n # nn.Linear(fc_size, fc_size),\n # nn.ReLU(),\n # nn.Dropout(dropout_perc),\n # nn.Linear(fc_size, doa_output_size),\n # nn.Tanh())\n\n def forward(self, x):\n x = self.cnn(x)\n if self.verbose:\n print ('cnn out ', x.shape) #target dim: [batch, n_cnn_filters, 2, time_frames]\n x = x.permute(0,3,1,2) #[batch, time, channels, freq]\n if self.verbose:\n print ('permuted: ', x.shape) #target dim: [batch, time_frames, n_cnn_filters, 2]\n x = x.reshape(x.shape[0], self.time_pooled_size, -1)\n if self.verbose:\n print ('reshaped: ', x.shape) #target dim: [batch, 2*n_cnn_filters]\n x, h = self.rnn(x)\n if self.verbose:\n print ('rnn out: ', x.shape) #target dim: [batch, 2*n_cnn_filters]\n sed = self.sed(x)\n # doa = self.doa(x)\n if self.verbose:\n print ('sed prediction: ', sed.shape) #target dim: [batch, time, sed_output_size]\n # print ('doa prediction: ', doa.shape) #target dim: [batch, time, doa_output_size]\n\n return sed\n\n\n#################\n## PHM SeldNet ##\n#################\n\n\nclass PHMSeldnet_augmented(nn.Module):\n def __init__(self, time_dim, freq_dim=256, input_channels=4, output_classes=14,\n pool_size=[[8,2],[8,2],[2,2],[1,1]], cnn_filters=[64,128,256,512], pool_time=True,\n rnn_size=256, n_rnn=3, fc_size=1024, dropout_perc=0.3, class_overlaps=3.,\n verbose=False, n=2):\n super(PHMSeldnet_augmented, self).__init__()\n self.verbose = verbose\n self.time_dim = time_dim\n self.freq_dim = freq_dim\n sed_output_size = output_classes * class_overlaps #here 3 is the max number of simultaneus sounds from the same class\n doa_output_size = sed_output_size * 3 #here 3 is the number of spatial dimensions xyz\n if pool_time:\n self.time_pooled_size = int(time_dim / np.prod(np.array(pool_size), axis=0)[-1])\n else:\n self.time_pooled_size = time_dim\n #building CNN feature extractor\n conv_layers = []\n in_chans = input_channels\n for i, (p,c) in enumerate(zip(pool_size, cnn_filters)):\n curr_chans = c\n\n if pool_time:\n pool = [p[0],p[1]]\n else:\n pool = [p[0],1]\n conv_layers.append(\n nn.Sequential(\n PHMConv(n, in_chans, out_features=curr_chans,\n kernel_size=3, stride=1, padding=1, cuda=False), #padding 1 = same with kernel = 3\n nn.BatchNorm2d(c),\n nn.ReLU(),\n nn.MaxPool2d(pool),\n nn.Dropout(dropout_perc)))\n in_chans = curr_chans\n\n self.cnn = nn.Sequential(*conv_layers)\n\n self.rnn = nn.GRU(1024, rnn_size, num_layers=n_rnn, batch_first=True,\n bidirectional=True, dropout=dropout_perc)\n\n self.sed = nn.Sequential(\n# PHMLinear(n, rnn_size*2, fc_size),\n nn.Linear(rnn_size*2, fc_size),\n nn.ReLU(),\n# PHMLinear(n, fc_size, fc_size),\n nn.Linear(fc_size, fc_size),\n nn.ReLU(),\n# PHMLinear(n, fc_size, fc_size),\n nn.Linear(fc_size, fc_size),\n nn.ReLU(),\n nn.Dropout(dropout_perc),\n nn.Linear(fc_size, sed_output_size),\n nn.Sigmoid())\n\n # self.doa = nn.Sequential(\n # nn.Linear(rnn_size*2, fc_size),\n # nn.ReLU(),\n # nn.Linear(fc_size, fc_size),\n # nn.ReLU(),\n # nn.Linear(fc_size, fc_size),\n # nn.ReLU(),\n # nn.Dropout(dropout_perc),\n 
# nn.Linear(fc_size, doa_output_size),\n # nn.Tanh())\n\n def forward(self, x):\n x = self.cnn(x)\n if self.verbose:\n print ('cnn out ', x.shape) #target dim: [batch, n_cnn_filters, 2, time_frames]\n x = x.permute(0,3,1,2) #[batch, time, channels, freq]\n if self.verbose:\n print ('permuted: ', x.shape) #target dim: [batch, time_frames, n_cnn_filters, 2]\n x = x.reshape(x.shape[0], self.time_pooled_size, -1)\n if self.verbose:\n print ('reshaped: ', x.shape) #target dim: [batch, 2*n_cnn_filters]\n x, h = self.rnn(x)\n if self.verbose:\n print ('rnn out: ', x.shape) #target dim: [batch, 2*n_cnn_filters]\n sed = self.sed(x)\n # doa = self.doa(x)\n if self.verbose:\n print ('sed prediction: ', sed.shape) #target dim: [batch, time, sed_output_size]\n # print ('doa prediction: ', doa.shape) #target dim: [batch, time, doa_output_size]\n\n return sed\n\n \n#################\n## PHM SeldNet ##\n#################\n\n\nclass Full_PHMSeldnet_augmented(nn.Module):\n def __init__(self, time_dim, freq_dim=256, input_channels=4, output_classes=14,\n pool_size=[[8,2],[8,2],[2,2],[1,1]], cnn_filters=[64,128,256,512], pool_time=True,\n rnn_size=256, n_rnn=3, fc_size=1024, dropout_perc=0.3, class_overlaps=3.,\n verbose=False, n=2):\n super(Full_PHMSeldnet_augmented, self).__init__()\n self.verbose = verbose\n self.time_dim = time_dim\n self.freq_dim = freq_dim\n sed_output_size = output_classes * class_overlaps #here 3 is the max number of simultaneus sounds from the same class\n doa_output_size = sed_output_size * 3 #here 3 is the number of spatial dimensions xyz\n if pool_time:\n self.time_pooled_size = int(time_dim / np.prod(np.array(pool_size), axis=0)[-1])\n else:\n self.time_pooled_size = time_dim\n #building CNN feature extractor\n conv_layers = []\n in_chans = input_channels\n for i, (p,c) in enumerate(zip(pool_size, cnn_filters)):\n curr_chans = c\n\n if pool_time:\n pool = [p[0],p[1]]\n else:\n pool = [p[0],1]\n conv_layers.append(\n nn.Sequential(\n PHMConv(n, in_chans, out_features=curr_chans,\n kernel_size=3, stride=1, padding=1, cuda=False), #padding 1 = same with kernel = 3\n nn.BatchNorm2d(c),\n nn.ReLU(),\n nn.MaxPool2d(pool),\n nn.Dropout(dropout_perc)))\n in_chans = curr_chans\n\n self.cnn = nn.Sequential(*conv_layers)\n\n self.rnn = nn.GRU(1024, rnn_size, num_layers=n_rnn, batch_first=True,\n bidirectional=True, dropout=dropout_perc)\n\n self.sed = nn.Sequential(\n PHMLinear(n, rnn_size*2, fc_size),\n# nn.Linear(rnn_size*2, fc_size),\n nn.ReLU(),\n PHMLinear(n, fc_size, fc_size),\n# nn.Linear(fc_size, fc_size),\n nn.ReLU(),\n PHMLinear(n, fc_size, fc_size),\n# nn.Linear(fc_size, fc_size),\n nn.ReLU(),\n nn.Dropout(dropout_perc),\n nn.Linear(fc_size, sed_output_size),\n nn.Sigmoid())\n\n # self.doa = nn.Sequential(\n # nn.Linear(rnn_size*2, fc_size),\n # nn.ReLU(),\n # nn.Linear(fc_size, fc_size),\n # nn.ReLU(),\n # nn.Linear(fc_size, fc_size),\n # nn.ReLU(),\n # nn.Dropout(dropout_perc),\n # nn.Linear(fc_size, doa_output_size),\n # nn.Tanh())\n\n def forward(self, x):\n x = self.cnn(x)\n if self.verbose:\n print ('cnn out ', x.shape) #target dim: [batch, n_cnn_filters, 2, time_frames]\n x = x.permute(0,3,1,2) #[batch, time, channels, freq]\n if self.verbose:\n print ('permuted: ', x.shape) #target dim: [batch, time_frames, n_cnn_filters, 2]\n x = x.reshape(x.shape[0], self.time_pooled_size, -1)\n if self.verbose:\n print ('reshaped: ', x.shape) #target dim: [batch, 2*n_cnn_filters]\n x, h = self.rnn(x)\n if self.verbose:\n print ('rnn out: ', x.shape) #target dim: [batch, 
2*n_cnn_filters]\n sed = self.sed(x)\n # doa = self.doa(x)\n if self.verbose:\n print ('sed prediction: ', sed.shape) #target dim: [batch, time, sed_output_size]\n # print ('doa prediction: ', doa.shape) #target dim: [batch, time, doa_output_size]\n\n return sed\n\n \n\ndef count_parameters(model):\n return sum(p.numel() for p in model.cnn.parameters() if p.requires_grad)\n\n\ndef test_model():\n '''\n Test model's i/o shapes with the default prepocessing parameters\n '''\n #create dummy input spectrogram\n in_chans = 8\n sample = np.ones((in_chans,32000*60))\n nperseg = 512\n noverlap = 112\n sp = uf.spectrum_fast(sample, nperseg=nperseg, noverlap=noverlap, output_phase=False)\n sp = torch.tensor(sp.reshape(1,sp.shape[0],sp.shape[1],sp.shape[2])).float()\n\n #create model\n #the dimension of the input spectrogram and the pooling/processing dimension of the model\n #create 1 prediction (sed and doa) for each 100-milliseconds label frame\n model_vanilla = Seldnet_vanilla(sp.shape[-1],pool_time=True, class_overlaps=3,input_channels=in_chans,verbose=True)\n model_augmented = Seldnet_augmented(sp.shape[-1],pool_time=True, class_overlaps=3,input_channels=in_chans,verbose=True)\n\n qseld = QSeldnet_augmented(sp.shape[-1],pool_time=True, class_overlaps=3,input_channels=in_chans,verbose=True)\n phmseld_n2 = PHMSeldnet_augmented(sp.shape[-1],pool_time=True, class_overlaps=3,input_channels=in_chans,verbose=True, n=2)\n phmseld_n4 = PHMSeldnet_augmented(sp.shape[-1],pool_time=True, class_overlaps=3,input_channels=in_chans,verbose=True, n=4)\n phmseld_n8 = PHMSeldnet_augmented(sp.shape[-1],pool_time=True, class_overlaps=3,input_channels=in_chans,verbose=True, n=8)\n\n print ('Input shape: ', sp.shape)\n\n print ('\\nTesting Seldnet')\n sed = model_augmented(sp)\n print ('SED shape: ', sed.shape) #target shape sed=[batch,600(label frames),42] doa=[batch, 600(label frames),126\n params = count_parameters(model_augmented)\n print(\"Number of parameters\", params)\n\n # Test QSeldNet #\n sed = qseld(sp)\n print ('\\nTesting QSeldnet') \n print ('SED shape: ', sed.shape) #target shape sed=[batch,600(label frames),42] doa=[batch, 600(label frames),126\n params = count_parameters(qseld)\n print(\"Number of parameters\", params)\n\n # Test PHMSeldNet n=2 #\n sed = phmseld_n2(sp)\n print ('\\nTesting PHMSeldnet with n=2') \n print ('SED shape: ', sed.shape) #target shape sed=[batch,600(label frames),42] doa=[batch, 600(label frames),126\n params = count_parameters(phmseld_n2)\n print(\"Number of parameters\", params)\n\n # Test PHMSeldNet n=4 #\n sed = phmseld_n4(sp)\n print ('\\nTesting PHMSeldnet with n=4') \n print ('SED shape: ', sed.shape) #target shape sed=[batch,600(label frames),42] doa=[batch, 600(label frames),126\n params = count_parameters(phmseld_n4)\n print(\"Number of parameters\", params)\n\n # Test PHMSeldNet n=4 #\n sed = phmseld_n8(sp)\n print ('\\nTesting PHMSeldnet with n=8') \n print ('SED shape: ', sed.shape) #target shape sed=[batch,600(label frames),42] doa=[batch, 600(label frames),126\n params = count_parameters(phmseld_n8)\n print(\"Number of parameters\", params)\n\n\n # print ('\\nTesting Seldnet augmented')\n # print ('Input shape: ', sp.shape)\n # sed, doa = model_vanilla(sp)\n # print ('SED shape: ', sed.shape, \"| DOA shape: \", doa.shape) #target shape sed=[batch,600(label frames),42] doa=[batch, 600(label frames),126\n\nif __name__ == '__main__':\n test_model()\n" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "numpy.array", "torch.nn.GRU", "torch.nn.Sigmoid", "torch.nn.Sequential", "torch.nn.MaxPool2d", "torch.nn.BatchNorm2d", "numpy.ones", "torch.nn.ReLU", "torch.nn.Conv2d" ] ]
fabianrost84/pandas
[ "a5d251de3af3cf07dfec39baa343633a9989c1d5" ]
[ "pandas/core/internals/blocks.py" ]
[ "# -*- coding: utf-8 -*-\nfrom datetime import date, datetime, timedelta\nimport functools\nimport inspect\nimport re\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import internals as libinternals, lib, tslib, tslibs\nfrom pandas._libs.tslibs import Timedelta, conversion, is_null_datetimelike\nimport pandas.compat as compat\nfrom pandas.compat import range, zip\nfrom pandas.util._validators import validate_bool_kwarg\n\nfrom pandas.core.dtypes.cast import (\n astype_nansafe, find_common_type, infer_dtype_from,\n infer_dtype_from_scalar, maybe_convert_objects, maybe_downcast_to_dtype,\n maybe_infer_dtype_type, maybe_promote, maybe_upcast, soft_convert_objects)\nfrom pandas.core.dtypes.common import (\n _NS_DTYPE, _TD_DTYPE, ensure_platform_int, is_bool_dtype, is_categorical,\n is_categorical_dtype, is_datetime64_dtype, is_datetime64tz_dtype,\n is_dtype_equal, is_extension_array_dtype, is_extension_type,\n is_float_dtype, is_integer, is_integer_dtype, is_interval_dtype,\n is_list_like, is_numeric_v_string_like, is_object_dtype, is_period_dtype,\n is_re, is_re_compilable, is_sparse, is_timedelta64_dtype, pandas_dtype)\nimport pandas.core.dtypes.concat as _concat\nfrom pandas.core.dtypes.dtypes import (\n CategoricalDtype, ExtensionDtype, PandasExtensionDtype)\nfrom pandas.core.dtypes.generic import (\n ABCDataFrame, ABCDatetimeIndex, ABCExtensionArray, ABCIndexClass,\n ABCSeries)\nfrom pandas.core.dtypes.missing import (\n _isna_compat, array_equivalent, isna, notna)\n\nimport pandas.core.algorithms as algos\nfrom pandas.core.arrays import (\n Categorical, DatetimeArray, ExtensionArray, TimedeltaArray)\nfrom pandas.core.base import PandasObject\nimport pandas.core.common as com\nfrom pandas.core.indexes.datetimes import DatetimeIndex\nfrom pandas.core.indexing import check_setitem_lengths\nfrom pandas.core.internals.arrays import extract_array\nimport pandas.core.missing as missing\nfrom pandas.core.nanops import nanpercentile\n\nfrom pandas.io.formats.printing import pprint_thing\n\n\nclass Block(PandasObject):\n \"\"\"\n Canonical n-dimensional unit of homogeneous dtype contained in a pandas\n data structure\n\n Index-ignorant; let the container take care of that\n \"\"\"\n __slots__ = ['_mgr_locs', 'values', 'ndim']\n is_numeric = False\n is_float = False\n is_integer = False\n is_complex = False\n is_datetime = False\n is_datetimetz = False\n is_timedelta = False\n is_bool = False\n is_object = False\n is_categorical = False\n is_sparse = False\n is_extension = False\n _box_to_block_values = True\n _can_hold_na = False\n _can_consolidate = True\n _verify_integrity = True\n _validate_ndim = True\n _ftype = 'dense'\n _concatenator = staticmethod(np.concatenate)\n\n def __init__(self, values, placement, ndim=None):\n self.ndim = self._check_ndim(values, ndim)\n self.mgr_locs = placement\n self.values = values\n\n if (self._validate_ndim and self.ndim and\n len(self.mgr_locs) != len(self.values)):\n raise ValueError(\n 'Wrong number of items passed {val}, placement implies '\n '{mgr}'.format(val=len(self.values), mgr=len(self.mgr_locs)))\n\n def _check_ndim(self, values, ndim):\n \"\"\"\n ndim inference and validation.\n\n Infers ndim from 'values' if not provided to __init__.\n Validates that values.ndim and ndim are consistent if and only if\n the class variable '_validate_ndim' is True.\n\n Parameters\n ----------\n values : array-like\n ndim : int or None\n\n Returns\n -------\n ndim : int\n\n Raises\n ------\n ValueError : the number of dimensions do not match\n \"\"\"\n 
if ndim is None:\n ndim = values.ndim\n\n if self._validate_ndim and values.ndim != ndim:\n msg = (\"Wrong number of dimensions. values.ndim != ndim \"\n \"[{} != {}]\")\n raise ValueError(msg.format(values.ndim, ndim))\n\n return ndim\n\n @property\n def _holder(self):\n \"\"\"The array-like that can hold the underlying values.\n\n None for 'Block', overridden by subclasses that don't\n use an ndarray.\n \"\"\"\n return None\n\n @property\n def _consolidate_key(self):\n return (self._can_consolidate, self.dtype.name)\n\n @property\n def _is_single_block(self):\n return self.ndim == 1\n\n @property\n def is_view(self):\n \"\"\" return a boolean if I am possibly a view \"\"\"\n return self.values.base is not None\n\n @property\n def is_datelike(self):\n \"\"\" return True if I am a non-datelike \"\"\"\n return self.is_datetime or self.is_timedelta\n\n def is_categorical_astype(self, dtype):\n \"\"\"\n validate that we have a astypeable to categorical,\n returns a boolean if we are a categorical\n \"\"\"\n if dtype is Categorical or dtype is CategoricalDtype:\n # this is a pd.Categorical, but is not\n # a valid type for astypeing\n raise TypeError(\"invalid type {0} for astype\".format(dtype))\n\n elif is_categorical_dtype(dtype):\n return True\n\n return False\n\n def external_values(self, dtype=None):\n \"\"\" return an outside world format, currently just the ndarray \"\"\"\n return self.values\n\n def internal_values(self, dtype=None):\n \"\"\" return an internal format, currently just the ndarray\n this should be the pure internal API format\n \"\"\"\n return self.values\n\n def formatting_values(self):\n \"\"\"Return the internal values used by the DataFrame/SeriesFormatter\"\"\"\n return self.internal_values()\n\n def get_values(self, dtype=None):\n \"\"\"\n return an internal format, currently just the ndarray\n this is often overridden to handle to_dense like operations\n \"\"\"\n if is_object_dtype(dtype):\n return self.values.astype(object)\n return self.values\n\n def to_dense(self):\n return self.values.view()\n\n @property\n def _na_value(self):\n return np.nan\n\n @property\n def fill_value(self):\n return np.nan\n\n @property\n def mgr_locs(self):\n return self._mgr_locs\n\n @mgr_locs.setter\n def mgr_locs(self, new_mgr_locs):\n if not isinstance(new_mgr_locs, libinternals.BlockPlacement):\n new_mgr_locs = libinternals.BlockPlacement(new_mgr_locs)\n\n self._mgr_locs = new_mgr_locs\n\n @property\n def array_dtype(self):\n \"\"\" the dtype to return if I want to construct this block as an\n array\n \"\"\"\n return self.dtype\n\n def make_block(self, values, placement=None, ndim=None):\n \"\"\"\n Create a new block, with type inference propagate any values that are\n not specified\n \"\"\"\n if placement is None:\n placement = self.mgr_locs\n if ndim is None:\n ndim = self.ndim\n\n return make_block(values, placement=placement, ndim=ndim)\n\n def make_block_same_class(self, values, placement=None, ndim=None,\n dtype=None):\n \"\"\" Wrap given values in a block of same type as self. 
\"\"\"\n if dtype is not None:\n # issue 19431 fastparquet is passing this\n warnings.warn(\"dtype argument is deprecated, will be removed \"\n \"in a future release.\", DeprecationWarning)\n if placement is None:\n placement = self.mgr_locs\n return make_block(values, placement=placement, ndim=ndim,\n klass=self.__class__, dtype=dtype)\n\n def __unicode__(self):\n\n # don't want to print out all of the items here\n name = pprint_thing(self.__class__.__name__)\n if self._is_single_block:\n\n result = '{name}: {len} dtype: {dtype}'.format(\n name=name, len=len(self), dtype=self.dtype)\n\n else:\n\n shape = ' x '.join(pprint_thing(s) for s in self.shape)\n result = '{name}: {index}, {shape}, dtype: {dtype}'.format(\n name=name, index=pprint_thing(self.mgr_locs.indexer),\n shape=shape, dtype=self.dtype)\n\n return result\n\n def __len__(self):\n return len(self.values)\n\n def __getstate__(self):\n return self.mgr_locs.indexer, self.values\n\n def __setstate__(self, state):\n self.mgr_locs = libinternals.BlockPlacement(state[0])\n self.values = state[1]\n self.ndim = self.values.ndim\n\n def _slice(self, slicer):\n \"\"\" return a slice of my values \"\"\"\n return self.values[slicer]\n\n def getitem_block(self, slicer, new_mgr_locs=None):\n \"\"\"\n Perform __getitem__-like, return result as block.\n\n As of now, only supports slices that preserve dimensionality.\n \"\"\"\n if new_mgr_locs is None:\n if isinstance(slicer, tuple):\n axis0_slicer = slicer[0]\n else:\n axis0_slicer = slicer\n new_mgr_locs = self.mgr_locs[axis0_slicer]\n\n new_values = self._slice(slicer)\n\n if self._validate_ndim and new_values.ndim != self.ndim:\n raise ValueError(\"Only same dim slicing is allowed\")\n\n return self.make_block_same_class(new_values, new_mgr_locs)\n\n @property\n def shape(self):\n return self.values.shape\n\n @property\n def dtype(self):\n return self.values.dtype\n\n @property\n def ftype(self):\n if getattr(self.values, '_pandas_ftype', False):\n dtype = self.dtype.subtype\n else:\n dtype = self.dtype\n return \"{dtype}:{ftype}\".format(dtype=dtype, ftype=self._ftype)\n\n def merge(self, other):\n return _merge_blocks([self, other])\n\n def concat_same_type(self, to_concat, placement=None):\n \"\"\"\n Concatenate list of single blocks of the same type.\n \"\"\"\n values = self._concatenator([blk.values for blk in to_concat],\n axis=self.ndim - 1)\n return self.make_block_same_class(\n values, placement=placement or slice(0, len(values), 1))\n\n def iget(self, i):\n return self.values[i]\n\n def set(self, locs, values):\n \"\"\"\n Modify Block in-place with new item value\n\n Returns\n -------\n None\n \"\"\"\n self.values[locs] = values\n\n def delete(self, loc):\n \"\"\"\n Delete given loc(-s) from block in-place.\n \"\"\"\n self.values = np.delete(self.values, loc, 0)\n self.mgr_locs = self.mgr_locs.delete(loc)\n\n def apply(self, func, **kwargs):\n \"\"\" apply the function to my values; return a block if we are not\n one\n \"\"\"\n with np.errstate(all='ignore'):\n result = func(self.values, **kwargs)\n if not isinstance(result, Block):\n result = self.make_block(values=_block_shape(result,\n ndim=self.ndim))\n\n return result\n\n def fillna(self, value, limit=None, inplace=False, downcast=None):\n \"\"\" fillna on the block with the value. 
If we fail, then convert to\n ObjectBlock and try again\n \"\"\"\n inplace = validate_bool_kwarg(inplace, 'inplace')\n\n if not self._can_hold_na:\n if inplace:\n return self\n else:\n return self.copy()\n\n mask = isna(self.values)\n if limit is not None:\n if not is_integer(limit):\n raise ValueError('Limit must be an integer')\n if limit < 1:\n raise ValueError('Limit must be greater than 0')\n if self.ndim > 2:\n raise NotImplementedError(\"number of dimensions for 'fillna' \"\n \"is currently limited to 2\")\n mask[mask.cumsum(self.ndim - 1) > limit] = False\n\n # fillna, but if we cannot coerce, then try again as an ObjectBlock\n try:\n values, _ = self._try_coerce_args(self.values, value)\n blocks = self.putmask(mask, value, inplace=inplace)\n blocks = [b.make_block(values=self._try_coerce_result(b.values))\n for b in blocks]\n return self._maybe_downcast(blocks, downcast)\n except (TypeError, ValueError):\n\n # we can't process the value, but nothing to do\n if not mask.any():\n return self if inplace else self.copy()\n\n # operate column-by-column\n def f(m, v, i):\n block = self.coerce_to_target_dtype(value)\n\n # slice out our block\n if i is not None:\n block = block.getitem_block(slice(i, i + 1))\n return block.fillna(value,\n limit=limit,\n inplace=inplace,\n downcast=None)\n\n return self.split_and_operate(mask, f, inplace)\n\n def split_and_operate(self, mask, f, inplace):\n \"\"\"\n split the block per-column, and apply the callable f\n per-column, return a new block for each. Handle\n masking which will not change a block unless needed.\n\n Parameters\n ----------\n mask : 2-d boolean mask\n f : callable accepting (1d-mask, 1d values, indexer)\n inplace : boolean\n\n Returns\n -------\n list of blocks\n \"\"\"\n\n if mask is None:\n mask = np.ones(self.shape, dtype=bool)\n new_values = self.values\n\n def make_a_block(nv, ref_loc):\n if isinstance(nv, Block):\n block = nv\n elif isinstance(nv, list):\n block = nv[0]\n else:\n # Put back the dimension that was taken from it and make\n # a block out of the result.\n try:\n nv = _block_shape(nv, ndim=self.ndim)\n except (AttributeError, NotImplementedError):\n pass\n block = self.make_block(values=nv,\n placement=ref_loc)\n return block\n\n # ndim == 1\n if self.ndim == 1:\n if mask.any():\n nv = f(mask, new_values, None)\n else:\n nv = new_values if inplace else new_values.copy()\n block = make_a_block(nv, self.mgr_locs)\n return [block]\n\n # ndim > 1\n new_blocks = []\n for i, ref_loc in enumerate(self.mgr_locs):\n m = mask[i]\n v = new_values[i]\n\n # need a new block\n if m.any():\n nv = f(m, v, i)\n else:\n nv = v if inplace else v.copy()\n\n block = make_a_block(nv, [ref_loc])\n new_blocks.append(block)\n\n return new_blocks\n\n def _maybe_downcast(self, blocks, downcast=None):\n\n # no need to downcast our float\n # unless indicated\n if downcast is None and self.is_float:\n return blocks\n elif downcast is None and (self.is_timedelta or self.is_datetime):\n return blocks\n\n if not isinstance(blocks, list):\n blocks = [blocks]\n return _extend_blocks([b.downcast(downcast) for b in blocks])\n\n def downcast(self, dtypes=None):\n \"\"\" try to downcast each item to the dict of dtypes if present \"\"\"\n\n # turn it off completely\n if dtypes is False:\n return self\n\n values = self.values\n\n # single block handling\n if self._is_single_block:\n\n # try to cast all non-floats here\n if dtypes is None:\n dtypes = 'infer'\n\n nv = maybe_downcast_to_dtype(values, dtypes)\n return self.make_block(nv)\n\n # ndim > 1\n if 
dtypes is None:\n return self\n\n if not (dtypes == 'infer' or isinstance(dtypes, dict)):\n raise ValueError(\"downcast must have a dictionary or 'infer' as \"\n \"its argument\")\n\n # operate column-by-column\n # this is expensive as it splits the blocks items-by-item\n def f(m, v, i):\n\n if dtypes == 'infer':\n dtype = 'infer'\n else:\n raise AssertionError(\"dtypes as dict is not supported yet\")\n\n if dtype is not None:\n v = maybe_downcast_to_dtype(v, dtype)\n return v\n\n return self.split_and_operate(None, f, False)\n\n def astype(self, dtype, copy=False, errors='raise', values=None, **kwargs):\n return self._astype(dtype, copy=copy, errors=errors, values=values,\n **kwargs)\n\n def _astype(self, dtype, copy=False, errors='raise', values=None,\n **kwargs):\n \"\"\"Coerce to the new type\n\n Parameters\n ----------\n dtype : str, dtype convertible\n copy : boolean, default False\n copy if indicated\n errors : str, {'raise', 'ignore'}, default 'ignore'\n - ``raise`` : allow exceptions to be raised\n - ``ignore`` : suppress exceptions. On error return original object\n\n Returns\n -------\n Block\n \"\"\"\n errors_legal_values = ('raise', 'ignore')\n\n if errors not in errors_legal_values:\n invalid_arg = (\"Expected value of kwarg 'errors' to be one of {}. \"\n \"Supplied value is '{}'\".format(\n list(errors_legal_values), errors))\n raise ValueError(invalid_arg)\n\n if (inspect.isclass(dtype) and\n issubclass(dtype, (PandasExtensionDtype, ExtensionDtype))):\n msg = (\"Expected an instance of {}, but got the class instead. \"\n \"Try instantiating 'dtype'.\".format(dtype.__name__))\n raise TypeError(msg)\n\n # may need to convert to categorical\n if self.is_categorical_astype(dtype):\n\n # deprecated 17636\n if ('categories' in kwargs or 'ordered' in kwargs):\n if isinstance(dtype, CategoricalDtype):\n raise TypeError(\n \"Cannot specify a CategoricalDtype and also \"\n \"`categories` or `ordered`. 
Use \"\n \"`dtype=CategoricalDtype(categories, ordered)`\"\n \" instead.\")\n warnings.warn(\"specifying 'categories' or 'ordered' in \"\n \".astype() is deprecated; pass a \"\n \"CategoricalDtype instead\",\n FutureWarning, stacklevel=7)\n\n categories = kwargs.get('categories', None)\n ordered = kwargs.get('ordered', None)\n if com._any_not_none(categories, ordered):\n dtype = CategoricalDtype(categories, ordered)\n\n if is_categorical_dtype(self.values):\n # GH 10696/18593: update an existing categorical efficiently\n return self.make_block(self.values.astype(dtype, copy=copy))\n\n return self.make_block(Categorical(self.values, dtype=dtype))\n\n # convert dtypes if needed\n dtype = pandas_dtype(dtype)\n # astype processing\n if is_dtype_equal(self.dtype, dtype):\n if copy:\n return self.copy()\n return self\n\n klass = None\n if is_sparse(self.values):\n # special case sparse, Series[Sparse].astype(object) is sparse\n klass = ExtensionBlock\n elif is_object_dtype(dtype):\n klass = ObjectBlock\n elif is_extension_array_dtype(dtype):\n klass = ExtensionBlock\n\n try:\n # force the copy here\n if values is None:\n\n if self.is_extension:\n values = self.values.astype(dtype)\n else:\n if issubclass(dtype.type,\n (compat.text_type, compat.string_types)):\n\n # use native type formatting for datetime/tz/timedelta\n if self.is_datelike:\n values = self.to_native_types()\n\n # astype formatting\n else:\n values = self.get_values()\n\n else:\n values = self.get_values(dtype=dtype)\n\n # _astype_nansafe works fine with 1-d only\n values = astype_nansafe(values.ravel(), dtype, copy=True)\n\n # TODO(extension)\n # should we make this attribute?\n try:\n values = values.reshape(self.shape)\n except AttributeError:\n pass\n\n newb = make_block(values, placement=self.mgr_locs,\n klass=klass, ndim=self.ndim)\n except Exception: # noqa: E722\n if errors == 'raise':\n raise\n newb = self.copy() if copy else self\n\n if newb.is_numeric and self.is_numeric:\n if newb.shape != self.shape:\n raise TypeError(\n \"cannot set astype for copy = [{copy}] for dtype \"\n \"({dtype} [{shape}]) to different shape \"\n \"({newb_dtype} [{newb_shape}])\".format(\n copy=copy, dtype=self.dtype.name,\n shape=self.shape, newb_dtype=newb.dtype.name,\n newb_shape=newb.shape))\n return newb\n\n def convert(self, copy=True, **kwargs):\n \"\"\" attempt to coerce any object types to better types return a copy\n of the block (if copy = True) by definition we are not an ObjectBlock\n here!\n \"\"\"\n\n return self.copy() if copy else self\n\n def _can_hold_element(self, element):\n \"\"\" require the same dtype as ourselves \"\"\"\n dtype = self.values.dtype.type\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return issubclass(tipo.type, dtype)\n return isinstance(element, dtype)\n\n def _try_cast_result(self, result, dtype=None):\n \"\"\" try to cast the result to our original type, we may have\n roundtripped thru object in the mean-time\n \"\"\"\n if dtype is None:\n dtype = self.dtype\n\n if self.is_integer or self.is_bool or self.is_datetime:\n pass\n elif self.is_float and result.dtype == self.dtype:\n\n # protect against a bool/object showing up here\n if isinstance(dtype, compat.string_types) and dtype == 'infer':\n return result\n if not isinstance(dtype, type):\n dtype = dtype.type\n if issubclass(dtype, (np.bool_, np.object_)):\n if issubclass(dtype, np.bool_):\n if isna(result).all():\n return result.astype(np.bool_)\n else:\n result = result.astype(np.object_)\n result[result == 1] = True\n 
result[result == 0] = False\n return result\n else:\n return result.astype(np.object_)\n\n return result\n\n # may need to change the dtype here\n return maybe_downcast_to_dtype(result, dtype)\n\n def _try_coerce_args(self, values, other):\n \"\"\" provide coercion to our input arguments \"\"\"\n\n if np.any(notna(other)) and not self._can_hold_element(other):\n # coercion issues\n # let higher levels handle\n raise TypeError(\"cannot convert {} to an {}\".format(\n type(other).__name__,\n type(self).__name__.lower().replace('Block', '')))\n\n return values, other\n\n def _try_coerce_result(self, result):\n \"\"\" reverse of try_coerce_args \"\"\"\n return result\n\n def _try_coerce_and_cast_result(self, result, dtype=None):\n result = self._try_coerce_result(result)\n result = self._try_cast_result(result, dtype=dtype)\n return result\n\n def to_native_types(self, slicer=None, na_rep='nan', quoting=None,\n **kwargs):\n \"\"\" convert to our native types format, slicing if desired \"\"\"\n\n values = self.get_values()\n\n if slicer is not None:\n values = values[:, slicer]\n mask = isna(values)\n\n if not self.is_object and not quoting:\n values = values.astype(str)\n else:\n values = np.array(values, dtype='object')\n\n values[mask] = na_rep\n return values\n\n # block actions ####\n def copy(self, deep=True):\n \"\"\" copy constructor \"\"\"\n values = self.values\n if deep:\n values = values.copy()\n return self.make_block_same_class(values)\n\n def replace(self, to_replace, value, inplace=False, filter=None,\n regex=False, convert=True):\n \"\"\"replace the to_replace value with value, possible to create new\n blocks here this is just a call to putmask. regex is not used here.\n It is used in ObjectBlocks. It is here for API compatibility.\n \"\"\"\n\n inplace = validate_bool_kwarg(inplace, 'inplace')\n original_to_replace = to_replace\n\n # try to replace, if we raise an error, convert to ObjectBlock and\n # retry\n try:\n values, to_replace = self._try_coerce_args(self.values,\n to_replace)\n mask = missing.mask_missing(values, to_replace)\n if filter is not None:\n filtered_out = ~self.mgr_locs.isin(filter)\n mask[filtered_out.nonzero()[0]] = False\n\n blocks = self.putmask(mask, value, inplace=inplace)\n if convert:\n blocks = [b.convert(by_item=True, numeric=False,\n copy=not inplace) for b in blocks]\n return blocks\n except (TypeError, ValueError):\n # GH 22083, TypeError or ValueError occurred within error handling\n # causes infinite loop. Cast and retry only if not objectblock.\n if is_object_dtype(self):\n raise\n\n # try again with a compatible block\n block = self.astype(object)\n return block.replace(to_replace=original_to_replace,\n value=value,\n inplace=inplace,\n filter=filter,\n regex=regex,\n convert=convert)\n\n def _replace_single(self, *args, **kwargs):\n \"\"\" no-op on a non-ObjectBlock \"\"\"\n return self if kwargs['inplace'] else self.copy()\n\n def setitem(self, indexer, value):\n \"\"\"Set the value inplace, returning a a maybe different typed block.\n\n Parameters\n ----------\n indexer : tuple, list-like, array-like, slice\n The subset of self.values to set\n value : object\n The value being set\n\n Returns\n -------\n Block\n\n Notes\n -----\n `indexer` is a direct slice/positional indexer. 
`value` must\n be a compatible shape.\n \"\"\"\n # coerce None values, if appropriate\n if value is None:\n if self.is_numeric:\n value = np.nan\n\n # coerce if block dtype can store value\n values = self.values\n try:\n values, value = self._try_coerce_args(values, value)\n # can keep its own dtype\n if hasattr(value, 'dtype') and is_dtype_equal(values.dtype,\n value.dtype):\n dtype = self.dtype\n else:\n dtype = 'infer'\n\n except (TypeError, ValueError):\n # current dtype cannot store value, coerce to common dtype\n find_dtype = False\n\n if hasattr(value, 'dtype'):\n dtype = value.dtype\n find_dtype = True\n\n elif lib.is_scalar(value):\n if isna(value):\n # NaN promotion is handled in latter path\n dtype = False\n else:\n dtype, _ = infer_dtype_from_scalar(value,\n pandas_dtype=True)\n find_dtype = True\n else:\n dtype = 'infer'\n\n if find_dtype:\n dtype = find_common_type([values.dtype, dtype])\n if not is_dtype_equal(self.dtype, dtype):\n b = self.astype(dtype)\n return b.setitem(indexer, value)\n\n # value must be storeable at this moment\n arr_value = np.array(value)\n\n # cast the values to a type that can hold nan (if necessary)\n if not self._can_hold_element(value):\n dtype, _ = maybe_promote(arr_value.dtype)\n values = values.astype(dtype)\n\n transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x)\n values = transf(values)\n\n # length checking\n check_setitem_lengths(indexer, value, values)\n\n def _is_scalar_indexer(indexer):\n # return True if we are all scalar indexers\n\n if arr_value.ndim == 1:\n if not isinstance(indexer, tuple):\n indexer = tuple([indexer])\n return any(isinstance(idx, np.ndarray) and len(idx) == 0\n for idx in indexer)\n return False\n\n def _is_empty_indexer(indexer):\n # return a boolean if we have an empty indexer\n\n if is_list_like(indexer) and not len(indexer):\n return True\n if arr_value.ndim == 1:\n if not isinstance(indexer, tuple):\n indexer = tuple([indexer])\n return any(isinstance(idx, np.ndarray) and len(idx) == 0\n for idx in indexer)\n return False\n\n # empty indexers\n # 8669 (empty)\n if _is_empty_indexer(indexer):\n pass\n\n # setting a single element for each dim and with a rhs that could\n # be say a list\n # GH 6043\n elif _is_scalar_indexer(indexer):\n values[indexer] = value\n\n # if we are an exact match (ex-broadcasting),\n # then use the resultant dtype\n elif (len(arr_value.shape) and\n arr_value.shape[0] == values.shape[0] and\n np.prod(arr_value.shape) == np.prod(values.shape)):\n values[indexer] = value\n try:\n values = values.astype(arr_value.dtype)\n except ValueError:\n pass\n\n # set\n else:\n values[indexer] = value\n\n # coerce and try to infer the dtypes of the result\n values = self._try_coerce_and_cast_result(values, dtype)\n block = self.make_block(transf(values))\n return block\n\n def putmask(self, mask, new, align=True, inplace=False, axis=0,\n transpose=False):\n \"\"\" putmask the data to the block; it is possible that we may create a\n new dtype of block\n\n return the resulting block(s)\n\n Parameters\n ----------\n mask : the condition to respect\n new : a ndarray/object\n align : boolean, perform alignment on other/cond, default is True\n inplace : perform inplace modification, default is False\n axis : int\n transpose : boolean\n Set to True if self is stored with axes reversed\n\n Returns\n -------\n a list of new blocks, the result of the putmask\n \"\"\"\n\n new_values = self.values if inplace else self.values.copy()\n\n new = getattr(new, 'values', new)\n mask = getattr(mask, 'values', 
mask)\n\n # if we are passed a scalar None, convert it here\n if not is_list_like(new) and isna(new) and not self.is_object:\n new = self.fill_value\n\n if self._can_hold_element(new):\n _, new = self._try_coerce_args(new_values, new)\n\n if transpose:\n new_values = new_values.T\n\n # If the default repeat behavior in np.putmask would go in the\n # wrong direction, then explicitly repeat and reshape new instead\n if getattr(new, 'ndim', 0) >= 1:\n if self.ndim - 1 == new.ndim and axis == 1:\n new = np.repeat(\n new, new_values.shape[-1]).reshape(self.shape)\n new = new.astype(new_values.dtype)\n\n # we require exact matches between the len of the\n # values we are setting (or is compat). np.putmask\n # doesn't check this and will simply truncate / pad\n # the output, but we want sane error messages\n #\n # TODO: this prob needs some better checking\n # for 2D cases\n if ((is_list_like(new) and\n np.any(mask[mask]) and\n getattr(new, 'ndim', 1) == 1)):\n\n if not (mask.shape[-1] == len(new) or\n mask[mask].shape[-1] == len(new) or\n len(new) == 1):\n raise ValueError(\"cannot assign mismatch \"\n \"length to masked array\")\n\n np.putmask(new_values, mask, new)\n\n # maybe upcast me\n elif mask.any():\n if transpose:\n mask = mask.T\n if isinstance(new, np.ndarray):\n new = new.T\n axis = new_values.ndim - axis - 1\n\n # Pseudo-broadcast\n if getattr(new, 'ndim', 0) >= 1:\n if self.ndim - 1 == new.ndim:\n new_shape = list(new.shape)\n new_shape.insert(axis, 1)\n new = new.reshape(tuple(new_shape))\n\n # operate column-by-column\n def f(m, v, i):\n\n if i is None:\n # ndim==1 case.\n n = new\n else:\n\n if isinstance(new, np.ndarray):\n n = np.squeeze(new[i % new.shape[0]])\n else:\n n = np.array(new)\n\n # type of the new block\n dtype, _ = maybe_promote(n.dtype)\n\n # we need to explicitly astype here to make a copy\n n = n.astype(dtype)\n\n nv = _putmask_smart(v, m, n)\n return nv\n\n new_blocks = self.split_and_operate(mask, f, inplace)\n return new_blocks\n\n if inplace:\n return [self]\n\n if transpose:\n new_values = new_values.T\n\n return [self.make_block(new_values)]\n\n def coerce_to_target_dtype(self, other):\n \"\"\"\n coerce the current block to a dtype compat for other\n we will return a block, possibly object, and not raise\n\n we can also safely try to coerce to the same dtype\n and will receive the same block\n \"\"\"\n\n # if we cannot then coerce to object\n dtype, _ = infer_dtype_from(other, pandas_dtype=True)\n\n if is_dtype_equal(self.dtype, dtype):\n return self\n\n if self.is_bool or is_object_dtype(dtype) or is_bool_dtype(dtype):\n # we don't upcast to bool\n return self.astype(object)\n\n elif ((self.is_float or self.is_complex) and\n (is_integer_dtype(dtype) or is_float_dtype(dtype))):\n # don't coerce float/complex to int\n return self\n\n elif (self.is_datetime or\n is_datetime64_dtype(dtype) or\n is_datetime64tz_dtype(dtype)):\n\n # not a datetime\n if not ((is_datetime64_dtype(dtype) or\n is_datetime64tz_dtype(dtype)) and self.is_datetime):\n return self.astype(object)\n\n # don't upcast timezone with different timezone or no timezone\n mytz = getattr(self.dtype, 'tz', None)\n othertz = getattr(dtype, 'tz', None)\n\n if str(mytz) != str(othertz):\n return self.astype(object)\n\n raise AssertionError(\"possible recursion in \"\n \"coerce_to_target_dtype: {} {}\".format(\n self, other))\n\n elif (self.is_timedelta or is_timedelta64_dtype(dtype)):\n\n # not a timedelta\n if not (is_timedelta64_dtype(dtype) and self.is_timedelta):\n return self.astype(object)\n\n 
raise AssertionError(\"possible recursion in \"\n \"coerce_to_target_dtype: {} {}\".format(\n self, other))\n\n try:\n return self.astype(dtype)\n except (ValueError, TypeError, OverflowError):\n pass\n\n return self.astype(object)\n\n def interpolate(self, method='pad', axis=0, index=None, values=None,\n inplace=False, limit=None, limit_direction='forward',\n limit_area=None, fill_value=None, coerce=False,\n downcast=None, **kwargs):\n\n inplace = validate_bool_kwarg(inplace, 'inplace')\n\n def check_int_bool(self, inplace):\n # Only FloatBlocks will contain NaNs.\n # timedelta subclasses IntBlock\n if (self.is_bool or self.is_integer) and not self.is_timedelta:\n if inplace:\n return self\n else:\n return self.copy()\n\n # a fill na type method\n try:\n m = missing.clean_fill_method(method)\n except ValueError:\n m = None\n\n if m is not None:\n r = check_int_bool(self, inplace)\n if r is not None:\n return r\n return self._interpolate_with_fill(method=m, axis=axis,\n inplace=inplace, limit=limit,\n fill_value=fill_value,\n coerce=coerce,\n downcast=downcast)\n # validate the interp method\n m = missing.clean_interp_method(method, **kwargs)\n\n r = check_int_bool(self, inplace)\n if r is not None:\n return r\n return self._interpolate(method=m, index=index, values=values,\n axis=axis, limit=limit,\n limit_direction=limit_direction,\n limit_area=limit_area,\n fill_value=fill_value, inplace=inplace,\n downcast=downcast, **kwargs)\n\n def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,\n limit=None, fill_value=None, coerce=False,\n downcast=None):\n \"\"\" fillna but using the interpolate machinery \"\"\"\n\n inplace = validate_bool_kwarg(inplace, 'inplace')\n\n # if we are coercing, then don't force the conversion\n # if the block can't hold the type\n if coerce:\n if not self._can_hold_na:\n if inplace:\n return [self]\n else:\n return [self.copy()]\n\n values = self.values if inplace else self.values.copy()\n values, fill_value = self._try_coerce_args(values, fill_value)\n values = missing.interpolate_2d(values, method=method, axis=axis,\n limit=limit, fill_value=fill_value,\n dtype=self.dtype)\n values = self._try_coerce_result(values)\n\n blocks = [self.make_block_same_class(values, ndim=self.ndim)]\n return self._maybe_downcast(blocks, downcast)\n\n def _interpolate(self, method=None, index=None, values=None,\n fill_value=None, axis=0, limit=None,\n limit_direction='forward', limit_area=None,\n inplace=False, downcast=None, **kwargs):\n \"\"\" interpolate using scipy wrappers \"\"\"\n\n inplace = validate_bool_kwarg(inplace, 'inplace')\n data = self.values if inplace else self.values.copy()\n\n # only deal with floats\n if not self.is_float:\n if not self.is_integer:\n return self\n data = data.astype(np.float64)\n\n if fill_value is None:\n fill_value = self.fill_value\n\n if method in ('krogh', 'piecewise_polynomial', 'pchip'):\n if not index.is_monotonic:\n raise ValueError(\"{0} interpolation requires that the \"\n \"index be monotonic.\".format(method))\n # process 1-d slices in the axis direction\n\n def func(x):\n\n # process a 1-d slice, returning it\n # should the axis argument be handled below in apply_along_axis?\n # i.e. 
not an arg to missing.interpolate_1d\n return missing.interpolate_1d(index, x, method=method, limit=limit,\n limit_direction=limit_direction,\n limit_area=limit_area,\n fill_value=fill_value,\n bounds_error=False, **kwargs)\n\n # interp each column independently\n interp_values = np.apply_along_axis(func, axis, data)\n\n blocks = [self.make_block_same_class(interp_values)]\n return self._maybe_downcast(blocks, downcast)\n\n def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):\n \"\"\"\n Take values according to indexer and return them as a block.bb\n\n \"\"\"\n\n # algos.take_nd dispatches for DatetimeTZBlock, CategoricalBlock\n # so need to preserve types\n # sparse is treated like an ndarray, but needs .get_values() shaping\n\n values = self.values\n if self.is_sparse:\n values = self.get_values()\n\n if fill_tuple is None:\n fill_value = self.fill_value\n new_values = algos.take_nd(values, indexer, axis=axis,\n allow_fill=False, fill_value=fill_value)\n else:\n fill_value = fill_tuple[0]\n new_values = algos.take_nd(values, indexer, axis=axis,\n allow_fill=True, fill_value=fill_value)\n\n if new_mgr_locs is None:\n if axis == 0:\n slc = libinternals.indexer_as_slice(indexer)\n if slc is not None:\n new_mgr_locs = self.mgr_locs[slc]\n else:\n new_mgr_locs = self.mgr_locs[indexer]\n else:\n new_mgr_locs = self.mgr_locs\n\n if not is_dtype_equal(new_values.dtype, self.dtype):\n return self.make_block(new_values, new_mgr_locs)\n else:\n return self.make_block_same_class(new_values, new_mgr_locs)\n\n def diff(self, n, axis=1):\n \"\"\" return block for the diff of the values \"\"\"\n new_values = algos.diff(self.values, n, axis=axis)\n return [self.make_block(values=new_values)]\n\n def shift(self, periods, axis=0, fill_value=None):\n \"\"\" shift the block by periods, possibly upcast \"\"\"\n\n # convert integer to float if necessary. need to do a lot more than\n # that, handle boolean etc also\n new_values, fill_value = maybe_upcast(self.values, fill_value)\n\n # make sure array sent to np.roll is c_contiguous\n f_ordered = new_values.flags.f_contiguous\n if f_ordered:\n new_values = new_values.T\n axis = new_values.ndim - axis - 1\n\n if np.prod(new_values.shape):\n new_values = np.roll(new_values, ensure_platform_int(periods),\n axis=axis)\n\n axis_indexer = [slice(None)] * self.ndim\n if periods > 0:\n axis_indexer[axis] = slice(None, periods)\n else:\n axis_indexer[axis] = slice(periods, None)\n new_values[tuple(axis_indexer)] = fill_value\n\n # restore original order\n if f_ordered:\n new_values = new_values.T\n\n return [self.make_block(new_values)]\n\n def where(self, other, cond, align=True, errors='raise',\n try_cast=False, axis=0, transpose=False):\n \"\"\"\n evaluate the block; return result block(s) from the result\n\n Parameters\n ----------\n other : a ndarray/object\n cond : the condition to respect\n align : boolean, perform alignment on other/cond\n errors : str, {'raise', 'ignore'}, default 'raise'\n - ``raise`` : allow exceptions to be raised\n - ``ignore`` : suppress exceptions. 
On error return original object\n\n axis : int\n transpose : boolean\n Set to True if self is stored with axes reversed\n\n Returns\n -------\n a new block(s), the result of the func\n \"\"\"\n import pandas.core.computation.expressions as expressions\n assert errors in ['raise', 'ignore']\n\n values = self.values\n orig_other = other\n if transpose:\n values = values.T\n\n other = getattr(other, '_values', getattr(other, 'values', other))\n cond = getattr(cond, 'values', cond)\n\n # If the default broadcasting would go in the wrong direction, then\n # explicitly reshape other instead\n if getattr(other, 'ndim', 0) >= 1:\n if values.ndim - 1 == other.ndim and axis == 1:\n other = other.reshape(tuple(other.shape + (1, )))\n elif transpose and values.ndim == self.ndim - 1:\n cond = cond.T\n\n if not hasattr(cond, 'shape'):\n raise ValueError(\"where must have a condition that is ndarray \"\n \"like\")\n\n # our where function\n def func(cond, values, other):\n if cond.ravel().all():\n return values\n\n values, other = self._try_coerce_args(values, other)\n\n try:\n return self._try_coerce_result(expressions.where(\n cond, values, other))\n except Exception as detail:\n if errors == 'raise':\n raise TypeError(\n 'Could not operate [{other!r}] with block values '\n '[{detail!s}]'.format(other=other, detail=detail))\n else:\n # return the values\n result = np.empty(values.shape, dtype='float64')\n result.fill(np.nan)\n return result\n\n # see if we can operate on the entire block, or need item-by-item\n # or if we are a single block (ndim == 1)\n try:\n result = func(cond, values, other)\n except TypeError:\n\n # we cannot coerce, return a compat dtype\n # we are explicitly ignoring errors\n block = self.coerce_to_target_dtype(other)\n blocks = block.where(orig_other, cond, align=align,\n errors=errors,\n try_cast=try_cast, axis=axis,\n transpose=transpose)\n return self._maybe_downcast(blocks, 'infer')\n\n if self._can_hold_na or self.ndim == 1:\n\n if transpose:\n result = result.T\n\n # try to cast if requested\n if try_cast:\n result = self._try_cast_result(result)\n\n return self.make_block(result)\n\n # might need to separate out blocks\n axis = cond.ndim - 1\n cond = cond.swapaxes(axis, 0)\n mask = np.array([cond[i].all() for i in range(cond.shape[0])],\n dtype=bool)\n\n result_blocks = []\n for m in [mask, ~mask]:\n if m.any():\n r = self._try_cast_result(result.take(m.nonzero()[0],\n axis=axis))\n result_blocks.append(\n self.make_block(r.T, placement=self.mgr_locs[m]))\n\n return result_blocks\n\n def equals(self, other):\n if self.dtype != other.dtype or self.shape != other.shape:\n return False\n return array_equivalent(self.values, other.values)\n\n def _unstack(self, unstacker_func, new_columns, n_rows, fill_value):\n \"\"\"Return a list of unstacked blocks of self\n\n Parameters\n ----------\n unstacker_func : callable\n Partially applied unstacker.\n new_columns : Index\n All columns of the unstacked BlockManager.\n n_rows : int\n Only used in ExtensionBlock.unstack\n fill_value : int\n Only used in ExtensionBlock.unstack\n\n Returns\n -------\n blocks : list of Block\n New blocks of unstacked values.\n mask : array_like of bool\n The mask of columns of `blocks` we should keep.\n \"\"\"\n unstacker = unstacker_func(self.values.T)\n new_items = unstacker.get_new_columns()\n new_placement = new_columns.get_indexer(new_items)\n new_values, mask = unstacker.get_new_values()\n\n mask = mask.any(0)\n new_values = new_values.T[mask]\n new_placement = new_placement[mask]\n\n blocks = 
[make_block(new_values, placement=new_placement)]\n return blocks, mask\n\n def quantile(self, qs, interpolation='linear', axis=0):\n \"\"\"\n compute the quantiles of the\n\n Parameters\n ----------\n qs: a scalar or list of the quantiles to be computed\n interpolation: type of interpolation, default 'linear'\n axis: axis to compute, default 0\n\n Returns\n -------\n Block\n \"\"\"\n if self.is_datetimetz:\n # TODO: cleanup this special case.\n # We need to operate on i8 values for datetimetz\n # but `Block.get_values()` returns an ndarray of objects\n # right now. We need an API for \"values to do numeric-like ops on\"\n values = self.values.asi8\n\n # TODO: NonConsolidatableMixin shape\n # Usual shape inconsistencies for ExtensionBlocks\n if self.ndim > 1:\n values = values[None, :]\n else:\n values = self.get_values()\n values, _ = self._try_coerce_args(values, values)\n\n is_empty = values.shape[axis] == 0\n orig_scalar = not is_list_like(qs)\n if orig_scalar:\n # make list-like, unpack later\n qs = [qs]\n\n if is_empty:\n if self.ndim == 1:\n result = self._na_value\n else:\n # create the array of na_values\n # 2d len(values) * len(qs)\n result = np.repeat(np.array([self.fill_value] * len(qs)),\n len(values)).reshape(len(values),\n len(qs))\n else:\n # asarray needed for Sparse, see GH#24600\n # TODO: Why self.values and not values?\n mask = np.asarray(isna(self.values))\n result = nanpercentile(values, np.array(qs) * 100,\n axis=axis, na_value=self.fill_value,\n mask=mask, ndim=self.ndim,\n interpolation=interpolation)\n\n result = np.array(result, copy=False)\n if self.ndim > 1:\n result = result.T\n\n if orig_scalar and not lib.is_scalar(result):\n # result could be scalar in case with is_empty and self.ndim == 1\n assert result.shape[-1] == 1, result.shape\n result = result[..., 0]\n result = lib.item_from_zerodim(result)\n\n ndim = getattr(result, 'ndim', None) or 0\n result = self._try_coerce_result(result)\n return make_block(result,\n placement=np.arange(len(result)),\n ndim=ndim)\n\n def _replace_coerce(self, to_replace, value, inplace=True, regex=False,\n convert=False, mask=None):\n \"\"\"\n Replace value corresponding to the given boolean array with another\n value.\n\n Parameters\n ----------\n to_replace : object or pattern\n Scalar to replace or regular expression to match.\n value : object\n Replacement object.\n inplace : bool, default False\n Perform inplace modification.\n regex : bool, default False\n If true, perform regular expression substitution.\n convert : bool, default True\n If true, try to coerce any object types to better types.\n mask : array-like of bool, optional\n True indicate corresponding element is ignored.\n\n Returns\n -------\n A new block if there is anything to replace or the original block.\n \"\"\"\n\n if mask.any():\n if not regex:\n self = self.coerce_to_target_dtype(value)\n return self.putmask(mask, value, inplace=inplace)\n else:\n return self._replace_single(to_replace, value, inplace=inplace,\n regex=regex,\n convert=convert,\n mask=mask)\n return self\n\n\nclass NonConsolidatableMixIn(object):\n \"\"\" hold methods for the nonconsolidatable blocks \"\"\"\n _can_consolidate = False\n _verify_integrity = False\n _validate_ndim = False\n\n def __init__(self, values, placement, ndim=None):\n \"\"\"Initialize a non-consolidatable block.\n\n 'ndim' may be inferred from 'placement'.\n\n This will call continue to call __init__ for the other base\n classes mixed in with this Mixin.\n \"\"\"\n # Placement must be converted to BlockPlacement 
so that we can check\n # its length\n if not isinstance(placement, libinternals.BlockPlacement):\n placement = libinternals.BlockPlacement(placement)\n\n # Maybe infer ndim from placement\n if ndim is None:\n if len(placement) != 1:\n ndim = 1\n else:\n ndim = 2\n super(NonConsolidatableMixIn, self).__init__(values, placement,\n ndim=ndim)\n\n @property\n def shape(self):\n if self.ndim == 1:\n return (len(self.values)),\n return (len(self.mgr_locs), len(self.values))\n\n def iget(self, col):\n\n if self.ndim == 2 and isinstance(col, tuple):\n col, loc = col\n if not com.is_null_slice(col) and col != 0:\n raise IndexError(\"{0} only contains one item\".format(self))\n return self.values[loc]\n else:\n if col != 0:\n raise IndexError(\"{0} only contains one item\".format(self))\n return self.values\n\n def should_store(self, value):\n return isinstance(value, self._holder)\n\n def set(self, locs, values, check=False):\n assert locs.tolist() == [0]\n self.values = values\n\n def putmask(self, mask, new, align=True, inplace=False, axis=0,\n transpose=False):\n \"\"\"\n putmask the data to the block; we must be a single block and not\n generate other blocks\n\n return the resulting block\n\n Parameters\n ----------\n mask : the condition to respect\n new : a ndarray/object\n align : boolean, perform alignment on other/cond, default is True\n inplace : perform inplace modification, default is False\n\n Returns\n -------\n a new block, the result of the putmask\n \"\"\"\n inplace = validate_bool_kwarg(inplace, 'inplace')\n\n # use block's copy logic.\n # .values may be an Index which does shallow copy by default\n new_values = self.values if inplace else self.copy().values\n new_values, new = self._try_coerce_args(new_values, new)\n\n if isinstance(new, np.ndarray) and len(new) == len(mask):\n new = new[mask]\n\n mask = _safe_reshape(mask, new_values.shape)\n\n new_values[mask] = new\n new_values = self._try_coerce_result(new_values)\n return [self.make_block(values=new_values)]\n\n def _try_cast_result(self, result, dtype=None):\n return result\n\n def _get_unstack_items(self, unstacker, new_columns):\n \"\"\"\n Get the placement, values, and mask for a Block unstack.\n\n This is shared between ObjectBlock and ExtensionBlock. They\n differ in that ObjectBlock passes the values, while ExtensionBlock\n passes the dummy ndarray of positions to be used by a take\n later.\n\n Parameters\n ----------\n unstacker : pandas.core.reshape.reshape._Unstacker\n new_columns : Index\n All columns of the unstacked BlockManager.\n\n Returns\n -------\n new_placement : ndarray[int]\n The placement of the new columns in `new_columns`.\n new_values : Union[ndarray, ExtensionArray]\n The first return value from _Unstacker.get_new_values.\n mask : ndarray[bool]\n The second return value from _Unstacker.get_new_values.\n \"\"\"\n # shared with ExtensionBlock\n new_items = unstacker.get_new_columns()\n new_placement = new_columns.get_indexer(new_items)\n new_values, mask = unstacker.get_new_values()\n\n mask = mask.any(0)\n return new_placement, new_values, mask\n\n\nclass ExtensionBlock(NonConsolidatableMixIn, Block):\n \"\"\"Block for holding extension types.\n\n Notes\n -----\n This holds all 3rd-party extension array types. 
It's also the immediate\n parent class for our internal extension types' blocks, CategoricalBlock.\n\n ExtensionArrays are limited to 1-D.\n \"\"\"\n is_extension = True\n\n def __init__(self, values, placement, ndim=None):\n values = self._maybe_coerce_values(values)\n super(ExtensionBlock, self).__init__(values, placement, ndim)\n\n def _maybe_coerce_values(self, values):\n \"\"\"Unbox to an extension array.\n\n This will unbox an ExtensionArray stored in an Index or Series.\n ExtensionArrays pass through. No dtype coercion is done.\n\n Parameters\n ----------\n values : Index, Series, ExtensionArray\n\n Returns\n -------\n ExtensionArray\n \"\"\"\n if isinstance(values, (ABCIndexClass, ABCSeries)):\n values = values._values\n return values\n\n @property\n def _holder(self):\n # For extension blocks, the holder is values-dependent.\n return type(self.values)\n\n @property\n def fill_value(self):\n # Used in reindex_indexer\n return self.values.dtype.na_value\n\n @property\n def _can_hold_na(self):\n # The default ExtensionArray._can_hold_na is True\n return self._holder._can_hold_na\n\n @property\n def is_view(self):\n \"\"\"Extension arrays are never treated as views.\"\"\"\n return False\n\n @property\n def is_numeric(self):\n return self.values.dtype._is_numeric\n\n def setitem(self, indexer, value):\n \"\"\"Set the value inplace, returning a same-typed block.\n\n This differs from Block.setitem by not allowing setitem to change\n the dtype of the Block.\n\n Parameters\n ----------\n indexer : tuple, list-like, array-like, slice\n The subset of self.values to set\n value : object\n The value being set\n\n Returns\n -------\n Block\n\n Notes\n -----\n `indexer` is a direct slice/positional indexer. `value` must\n be a compatible shape.\n \"\"\"\n if isinstance(indexer, tuple):\n # we are always 1-D\n indexer = indexer[0]\n\n check_setitem_lengths(indexer, value, self.values)\n self.values[indexer] = value\n return self\n\n def get_values(self, dtype=None):\n # ExtensionArrays must be iterable, so this works.\n values = np.asarray(self.values)\n if values.ndim == self.ndim - 1:\n values = values.reshape((1,) + values.shape)\n return values\n\n def to_dense(self):\n return np.asarray(self.values)\n\n def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):\n \"\"\"\n Take values according to indexer and return them as a block.\n \"\"\"\n if fill_tuple is None:\n fill_value = None\n else:\n fill_value = fill_tuple[0]\n\n # axis doesn't matter; we are really a single-dim object\n # but are passed the axis depending on the calling routing\n # if its REALLY axis 0, then this will be a reindex and not a take\n new_values = self.values.take(indexer, fill_value=fill_value,\n allow_fill=True)\n\n if self.ndim == 1 and new_mgr_locs is None:\n new_mgr_locs = [0]\n else:\n if new_mgr_locs is None:\n new_mgr_locs = self.mgr_locs\n\n return self.make_block_same_class(new_values, new_mgr_locs)\n\n def _can_hold_element(self, element):\n # XXX: We may need to think about pushing this onto the array.\n # We're doing the same as CategoricalBlock here.\n return True\n\n def _slice(self, slicer):\n \"\"\" return a slice of my values \"\"\"\n\n # slice the category\n # return same dims as we currently have\n\n if isinstance(slicer, tuple) and len(slicer) == 2:\n if not com.is_null_slice(slicer[0]):\n raise AssertionError(\"invalid slicing for a 1-ndim \"\n \"categorical\")\n slicer = slicer[1]\n\n return self.values[slicer]\n\n def formatting_values(self):\n # Deprecating the ability to 
override _formatting_values.\n # Do the warning here, it's only user in pandas, since we\n # have to check if the subclass overrode it.\n fv = getattr(type(self.values), '_formatting_values', None)\n if fv and fv != ExtensionArray._formatting_values:\n msg = (\n \"'ExtensionArray._formatting_values' is deprecated. \"\n \"Specify 'ExtensionArray._formatter' instead.\"\n )\n warnings.warn(msg, DeprecationWarning, stacklevel=10)\n return self.values._formatting_values()\n\n return self.values\n\n def concat_same_type(self, to_concat, placement=None):\n \"\"\"\n Concatenate list of single blocks of the same type.\n \"\"\"\n values = self._holder._concat_same_type(\n [blk.values for blk in to_concat])\n placement = placement or slice(0, len(values), 1)\n return self.make_block_same_class(values, ndim=self.ndim,\n placement=placement)\n\n def fillna(self, value, limit=None, inplace=False, downcast=None):\n values = self.values if inplace else self.values.copy()\n values = values.fillna(value=value, limit=limit)\n return [self.make_block_same_class(values=values,\n placement=self.mgr_locs,\n ndim=self.ndim)]\n\n def interpolate(self, method='pad', axis=0, inplace=False, limit=None,\n fill_value=None, **kwargs):\n\n values = self.values if inplace else self.values.copy()\n return self.make_block_same_class(\n values=values.fillna(value=fill_value, method=method,\n limit=limit),\n placement=self.mgr_locs)\n\n def shift(self, periods, axis=0, fill_value=None):\n # type: (int, Optional[BlockPlacement], Any) -> List[ExtensionBlock]\n \"\"\"\n Shift the block by `periods`.\n\n Dispatches to underlying ExtensionArray and re-boxes in an\n ExtensionBlock.\n \"\"\"\n return [\n self.make_block_same_class(\n self.values.shift(periods=periods, fill_value=fill_value),\n placement=self.mgr_locs, ndim=self.ndim)\n ]\n\n def where(self, other, cond, align=True, errors='raise',\n try_cast=False, axis=0, transpose=False):\n if isinstance(other, ABCDataFrame):\n # ExtensionArrays are 1-D, so if we get here then\n # `other` should be a DataFrame with a single column.\n assert other.shape[1] == 1\n other = other.iloc[:, 0]\n\n other = extract_array(other, extract_numpy=True)\n\n if isinstance(cond, ABCDataFrame):\n assert cond.shape[1] == 1\n cond = cond.iloc[:, 0]\n\n cond = extract_array(cond, extract_numpy=True)\n\n if lib.is_scalar(other) and isna(other):\n # The default `other` for Series / Frame is np.nan\n # we want to replace that with the correct NA value\n # for the type\n other = self.dtype.na_value\n\n if is_sparse(self.values):\n # TODO(SparseArray.__setitem__): remove this if condition\n # We need to re-infer the type of the data after doing the\n # where, for cases where the subtypes don't match\n dtype = None\n else:\n dtype = self.dtype\n\n try:\n result = self.values.copy()\n icond = ~cond\n if lib.is_scalar(other):\n result[icond] = other\n else:\n result[icond] = other[icond]\n except (NotImplementedError, TypeError):\n # NotImplementedError for class not implementing `__setitem__`\n # TypeError for SparseArray, which implements just to raise\n # a TypeError\n result = self._holder._from_sequence(\n np.where(cond, self.values, other),\n dtype=dtype,\n )\n\n return self.make_block_same_class(result, placement=self.mgr_locs)\n\n @property\n def _ftype(self):\n return getattr(self.values, '_pandas_ftype', Block._ftype)\n\n def _unstack(self, unstacker_func, new_columns, n_rows, fill_value):\n # ExtensionArray-safe unstack.\n # We override ObjectBlock._unstack, which unstacks directly on the\n # 
values of the array. For EA-backed blocks, this would require\n # converting to a 2-D ndarray of objects.\n # Instead, we unstack an ndarray of integer positions, followed by\n # a `take` on the actual values.\n dummy_arr = np.arange(n_rows)\n dummy_unstacker = functools.partial(unstacker_func, fill_value=-1)\n unstacker = dummy_unstacker(dummy_arr)\n\n new_placement, new_values, mask = self._get_unstack_items(\n unstacker, new_columns\n )\n\n blocks = [\n self.make_block_same_class(\n self.values.take(indices, allow_fill=True,\n fill_value=fill_value),\n [place])\n for indices, place in zip(new_values.T, new_placement)\n ]\n return blocks, mask\n\n\nclass ObjectValuesExtensionBlock(ExtensionBlock):\n \"\"\"\n Block providing backwards-compatibility for `.values`.\n\n Used by PeriodArray and IntervalArray to ensure that\n Series[T].values is an ndarray of objects.\n \"\"\"\n\n def external_values(self, dtype=None):\n return self.values.astype(object)\n\n\nclass NumericBlock(Block):\n __slots__ = ()\n is_numeric = True\n _can_hold_na = True\n\n\nclass FloatOrComplexBlock(NumericBlock):\n __slots__ = ()\n\n def equals(self, other):\n if self.dtype != other.dtype or self.shape != other.shape:\n return False\n left, right = self.values, other.values\n return ((left == right) | (np.isnan(left) & np.isnan(right))).all()\n\n\nclass FloatBlock(FloatOrComplexBlock):\n __slots__ = ()\n is_float = True\n\n def _can_hold_element(self, element):\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return (issubclass(tipo.type, (np.floating, np.integer)) and\n not issubclass(tipo.type, (np.datetime64, np.timedelta64)))\n return (\n isinstance(\n element, (float, int, np.floating, np.int_, compat.long))\n and not isinstance(element, (bool, np.bool_, datetime, timedelta,\n np.datetime64, np.timedelta64)))\n\n def to_native_types(self, slicer=None, na_rep='', float_format=None,\n decimal='.', quoting=None, **kwargs):\n \"\"\" convert to our native types format, slicing if desired \"\"\"\n\n values = self.values\n if slicer is not None:\n values = values[:, slicer]\n\n # see gh-13418: no special formatting is desired at the\n # output (important for appropriate 'quoting' behaviour),\n # so do not pass it through the FloatArrayFormatter\n if float_format is None and decimal == '.':\n mask = isna(values)\n\n if not quoting:\n values = values.astype(str)\n else:\n values = np.array(values, dtype='object')\n\n values[mask] = na_rep\n return values\n\n from pandas.io.formats.format import FloatArrayFormatter\n formatter = FloatArrayFormatter(values, na_rep=na_rep,\n float_format=float_format,\n decimal=decimal, quoting=quoting,\n fixed_width=False)\n return formatter.get_result_as_array()\n\n def should_store(self, value):\n # when inserting a column should not coerce integers to floats\n # unnecessarily\n return (issubclass(value.dtype.type, np.floating) and\n value.dtype == self.dtype)\n\n\nclass ComplexBlock(FloatOrComplexBlock):\n __slots__ = ()\n is_complex = True\n\n def _can_hold_element(self, element):\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return issubclass(tipo.type,\n (np.floating, np.integer, np.complexfloating))\n return (\n isinstance(\n element,\n (float, int, complex, np.float_, np.int_, compat.long))\n and not isinstance(element, (bool, np.bool_)))\n\n def should_store(self, value):\n return issubclass(value.dtype.type, np.complexfloating)\n\n\nclass IntBlock(NumericBlock):\n __slots__ = ()\n is_integer = True\n _can_hold_na = False\n\n def 
_can_hold_element(self, element):\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return (issubclass(tipo.type, np.integer) and\n not issubclass(tipo.type, (np.datetime64,\n np.timedelta64)) and\n self.dtype.itemsize >= tipo.itemsize)\n return is_integer(element)\n\n def should_store(self, value):\n return is_integer_dtype(value) and value.dtype == self.dtype\n\n\nclass DatetimeLikeBlockMixin(object):\n \"\"\"Mixin class for DatetimeBlock, DatetimeTZBlock, and TimedeltaBlock.\"\"\"\n\n @property\n def _holder(self):\n return DatetimeArray\n\n @property\n def _na_value(self):\n return tslibs.NaT\n\n @property\n def fill_value(self):\n return tslibs.iNaT\n\n def get_values(self, dtype=None):\n \"\"\"\n return object dtype as boxed values, such as Timestamps/Timedelta\n \"\"\"\n if is_object_dtype(dtype):\n values = self.values.ravel()\n result = self._holder(values).astype(object)\n return result.reshape(self.values.shape)\n return self.values\n\n\nclass DatetimeBlock(DatetimeLikeBlockMixin, Block):\n __slots__ = ()\n is_datetime = True\n _can_hold_na = True\n\n def __init__(self, values, placement, ndim=None):\n values = self._maybe_coerce_values(values)\n super(DatetimeBlock, self).__init__(values,\n placement=placement, ndim=ndim)\n\n def _maybe_coerce_values(self, values):\n \"\"\"Input validation for values passed to __init__. Ensure that\n we have datetime64ns, coercing if necessary.\n\n Parameters\n ----------\n values : array-like\n Must be convertible to datetime64\n\n Returns\n -------\n values : ndarray[datetime64ns]\n\n Overridden by DatetimeTZBlock.\n \"\"\"\n if values.dtype != _NS_DTYPE:\n values = conversion.ensure_datetime64ns(values)\n\n if isinstance(values, DatetimeArray):\n values = values._data\n\n assert isinstance(values, np.ndarray), type(values)\n return values\n\n def _astype(self, dtype, **kwargs):\n \"\"\"\n these automatically copy, so copy=True has no effect\n raise on an except if raise == True\n \"\"\"\n dtype = pandas_dtype(dtype)\n\n # if we are passed a datetime64[ns, tz]\n if is_datetime64tz_dtype(dtype):\n values = self.values\n if getattr(values, 'tz', None) is None:\n values = DatetimeIndex(values).tz_localize('UTC')\n values = values.tz_convert(dtype.tz)\n return self.make_block(values)\n\n # delegate\n return super(DatetimeBlock, self)._astype(dtype=dtype, **kwargs)\n\n def _can_hold_element(self, element):\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return tipo == _NS_DTYPE or tipo == np.int64\n return (is_integer(element) or isinstance(element, datetime) or\n isna(element))\n\n def _try_coerce_args(self, values, other):\n \"\"\"\n Coerce values and other to dtype 'i8'. NaN and NaT convert to\n the smallest i8, and will correctly round-trip to NaT if converted\n back in _try_coerce_result. 
values is always ndarray-like, other\n may not be\n\n Parameters\n ----------\n values : ndarray-like\n other : ndarray-like or scalar\n\n Returns\n -------\n base-type values, base-type other\n \"\"\"\n\n values = values.view('i8')\n\n if isinstance(other, bool):\n raise TypeError\n elif is_null_datetimelike(other):\n other = tslibs.iNaT\n elif isinstance(other, (datetime, np.datetime64, date)):\n other = self._box_func(other)\n if getattr(other, 'tz') is not None:\n raise TypeError(\"cannot coerce a Timestamp with a tz on a \"\n \"naive Block\")\n other = other.asm8.view('i8')\n elif hasattr(other, 'dtype') and is_datetime64_dtype(other):\n other = other.astype('i8', copy=False).view('i8')\n else:\n # coercion issues\n # let higher levels handle\n raise TypeError(other)\n\n return values, other\n\n def _try_coerce_result(self, result):\n \"\"\" reverse of try_coerce_args \"\"\"\n if isinstance(result, np.ndarray):\n if result.dtype.kind in ['i', 'f']:\n result = result.astype('M8[ns]')\n\n elif isinstance(result, (np.integer, np.float, np.datetime64)):\n result = self._box_func(result)\n return result\n\n @property\n def _box_func(self):\n return tslibs.Timestamp\n\n def to_native_types(self, slicer=None, na_rep=None, date_format=None,\n quoting=None, **kwargs):\n \"\"\" convert to our native types format, slicing if desired \"\"\"\n\n values = self.values\n i8values = self.values.view('i8')\n\n if slicer is not None:\n i8values = i8values[..., slicer]\n\n from pandas.io.formats.format import _get_format_datetime64_from_values\n fmt = _get_format_datetime64_from_values(values, date_format)\n\n result = tslib.format_array_from_datetime(\n i8values.ravel(), tz=getattr(self.values, 'tz', None),\n format=fmt, na_rep=na_rep).reshape(i8values.shape)\n return np.atleast_2d(result)\n\n def should_store(self, value):\n return (issubclass(value.dtype.type, np.datetime64) and\n not is_datetime64tz_dtype(value) and\n not is_extension_array_dtype(value))\n\n def set(self, locs, values):\n \"\"\"\n Modify Block in-place with new item value\n\n Returns\n -------\n None\n \"\"\"\n values = conversion.ensure_datetime64ns(values, copy=False)\n\n self.values[locs] = values\n\n def external_values(self):\n return np.asarray(self.values.astype('datetime64[ns]', copy=False))\n\n\nclass DatetimeTZBlock(ExtensionBlock, DatetimeBlock):\n \"\"\" implement a datetime64 block with a tz attribute \"\"\"\n __slots__ = ()\n is_datetimetz = True\n is_extension = True\n\n @property\n def _holder(self):\n return DatetimeArray\n\n def _maybe_coerce_values(self, values):\n \"\"\"Input validation for values passed to __init__. 
Ensure that\n we have datetime64TZ, coercing if necessary.\n\n Parametetrs\n -----------\n values : array-like\n Must be convertible to datetime64\n\n Returns\n -------\n values : DatetimeArray\n \"\"\"\n if not isinstance(values, self._holder):\n values = self._holder(values)\n\n if values.tz is None:\n raise ValueError(\"cannot create a DatetimeTZBlock without a tz\")\n\n return values\n\n @property\n def is_view(self):\n \"\"\" return a boolean if I am possibly a view \"\"\"\n # check the ndarray values of the DatetimeIndex values\n return self.values._data.base is not None\n\n def copy(self, deep=True):\n \"\"\" copy constructor \"\"\"\n values = self.values\n if deep:\n values = values.copy(deep=True)\n return self.make_block_same_class(values)\n\n def get_values(self, dtype=None):\n \"\"\"\n Returns an ndarray of values.\n\n Parameters\n ----------\n dtype : np.dtype\n Only `object`-like dtypes are respected here (not sure\n why).\n\n Returns\n -------\n values : ndarray\n When ``dtype=object``, then and object-dtype ndarray of\n boxed values is returned. Otherwise, an M8[ns] ndarray\n is returned.\n\n DatetimeArray is always 1-d. ``get_values`` will reshape\n the return value to be the same dimensionality as the\n block.\n \"\"\"\n values = self.values\n if is_object_dtype(dtype):\n values = values._box_values(values._data)\n\n values = np.asarray(values)\n\n if self.ndim == 2:\n # Ensure that our shape is correct for DataFrame.\n # ExtensionArrays are always 1-D, even in a DataFrame when\n # the analogous NumPy-backed column would be a 2-D ndarray.\n values = values.reshape(1, -1)\n return values\n\n def to_dense(self):\n # we request M8[ns] dtype here, even though it discards tzinfo,\n # as lots of code (e.g. anything using values_from_object)\n # expects that behavior.\n return np.asarray(self.values, dtype=_NS_DTYPE)\n\n def _slice(self, slicer):\n \"\"\" return a slice of my values \"\"\"\n if isinstance(slicer, tuple):\n col, loc = slicer\n if not com.is_null_slice(col) and col != 0:\n raise IndexError(\"{0} only contains one item\".format(self))\n return self.values[loc]\n return self.values[slicer]\n\n def _try_coerce_args(self, values, other):\n \"\"\"\n localize and return i8 for the values\n\n Parameters\n ----------\n values : ndarray-like\n other : ndarray-like or scalar\n\n Returns\n -------\n base-type values, base-type other\n \"\"\"\n # asi8 is a view, needs copy\n values = _block_shape(values.view(\"i8\"), ndim=self.ndim)\n\n if isinstance(other, ABCSeries):\n other = self._holder(other)\n\n if isinstance(other, bool):\n raise TypeError\n elif is_datetime64_dtype(other):\n # add the tz back\n other = self._holder(other, dtype=self.dtype)\n\n elif is_null_datetimelike(other):\n other = tslibs.iNaT\n elif isinstance(other, self._holder):\n if other.tz != self.values.tz:\n raise ValueError(\"incompatible or non tz-aware value\")\n other = _block_shape(other.asi8, ndim=self.ndim)\n elif isinstance(other, (np.datetime64, datetime, date)):\n other = tslibs.Timestamp(other)\n tz = getattr(other, 'tz', None)\n\n # test we can have an equal time zone\n if tz is None or str(tz) != str(self.values.tz):\n raise ValueError(\"incompatible or non tz-aware value\")\n other = other.value\n else:\n raise TypeError(other)\n\n return values, other\n\n def _try_coerce_result(self, result):\n \"\"\" reverse of try_coerce_args \"\"\"\n if isinstance(result, np.ndarray):\n if result.dtype.kind in ['i', 'f']:\n result = result.astype('M8[ns]')\n\n elif isinstance(result, (np.integer, 
np.float, np.datetime64)):\n result = self._box_func(result)\n\n if isinstance(result, np.ndarray):\n # allow passing of > 1dim if its trivial\n\n if result.ndim > 1:\n result = result.reshape(np.prod(result.shape))\n # GH#24096 new values invalidates a frequency\n result = self._holder._simple_new(result, freq=None,\n dtype=self.values.dtype)\n\n return result\n\n @property\n def _box_func(self):\n return lambda x: tslibs.Timestamp(x, tz=self.dtype.tz)\n\n def diff(self, n, axis=0):\n \"\"\"1st discrete difference\n\n Parameters\n ----------\n n : int, number of periods to diff\n axis : int, axis to diff upon. default 0\n\n Return\n ------\n A list with a new TimeDeltaBlock.\n\n Note\n ----\n The arguments here are mimicking shift so they are called correctly\n by apply.\n \"\"\"\n if axis == 0:\n # Cannot currently calculate diff across multiple blocks since this\n # function is invoked via apply\n raise NotImplementedError\n new_values = (self.values - self.shift(n, axis=axis)[0].values).asi8\n\n # Reshape the new_values like how algos.diff does for timedelta data\n new_values = new_values.reshape(1, len(new_values))\n new_values = new_values.astype('timedelta64[ns]')\n return [TimeDeltaBlock(new_values, placement=self.mgr_locs.indexer)]\n\n def concat_same_type(self, to_concat, placement=None):\n # need to handle concat([tz1, tz2]) here, since DatetimeArray\n # only handles cases where all the tzs are the same.\n # Instead of placing the condition here, it could also go into the\n # is_uniform_join_units check, but I'm not sure what is better.\n if len({x.dtype for x in to_concat}) > 1:\n values = _concat._concat_datetime([x.values for x in to_concat])\n placement = placement or slice(0, len(values), 1)\n\n if self.ndim > 1:\n values = np.atleast_2d(values)\n return ObjectBlock(values, ndim=self.ndim, placement=placement)\n return super(DatetimeTZBlock, self).concat_same_type(to_concat,\n placement)\n\n def fillna(self, value, limit=None, inplace=False, downcast=None):\n # We support filling a DatetimeTZ with a `value` whose timezone\n # is different by coercing to object.\n try:\n return super(DatetimeTZBlock, self).fillna(\n value, limit, inplace, downcast\n )\n except (ValueError, TypeError):\n # different timezones, or a non-tz\n return self.astype(object).fillna(\n value, limit=limit, inplace=inplace, downcast=downcast\n )\n\n def setitem(self, indexer, value):\n # https://github.com/pandas-dev/pandas/issues/24020\n # Need a dedicated setitem until #24020 (type promotion in setitem\n # for extension arrays) is designed and implemented.\n try:\n return super(DatetimeTZBlock, self).setitem(indexer, value)\n except (ValueError, TypeError):\n newb = make_block(self.values.astype(object),\n placement=self.mgr_locs,\n klass=ObjectBlock,)\n return newb.setitem(indexer, value)\n\n def equals(self, other):\n # override for significant performance improvement\n if self.dtype != other.dtype or self.shape != other.shape:\n return False\n return (self.values.view('i8') == other.values.view('i8')).all()\n\n\nclass TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock):\n __slots__ = ()\n is_timedelta = True\n _can_hold_na = True\n is_numeric = False\n\n def __init__(self, values, placement, ndim=None):\n if values.dtype != _TD_DTYPE:\n values = conversion.ensure_timedelta64ns(values)\n if isinstance(values, TimedeltaArray):\n values = values._data\n assert isinstance(values, np.ndarray), type(values)\n super(TimeDeltaBlock, self).__init__(values,\n placement=placement, ndim=ndim)\n\n @property\n def 
_holder(self):\n return TimedeltaArray\n\n @property\n def _box_func(self):\n return lambda x: Timedelta(x, unit='ns')\n\n def _can_hold_element(self, element):\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return issubclass(tipo.type, (np.timedelta64, np.int64))\n return is_integer(element) or isinstance(\n element, (timedelta, np.timedelta64, np.int64))\n\n def fillna(self, value, **kwargs):\n\n # allow filling with integers to be\n # interpreted as nanoseconds\n if is_integer(value) and not isinstance(value, np.timedelta64):\n # Deprecation GH#24694, GH#19233\n warnings.warn(\"Passing integers to fillna is deprecated, will \"\n \"raise a TypeError in a future version. To retain \"\n \"the old behavior, pass pd.Timedelta(seconds=n) \"\n \"instead.\",\n FutureWarning, stacklevel=6)\n value = Timedelta(value, unit='s')\n return super(TimeDeltaBlock, self).fillna(value, **kwargs)\n\n def _try_coerce_args(self, values, other):\n \"\"\"\n Coerce values and other to int64, with null values converted to\n iNaT. values is always ndarray-like, other may not be\n\n Parameters\n ----------\n values : ndarray-like\n other : ndarray-like or scalar\n\n Returns\n -------\n base-type values, base-type other\n \"\"\"\n values = values.view('i8')\n\n if isinstance(other, bool):\n raise TypeError\n elif is_null_datetimelike(other):\n other = tslibs.iNaT\n elif isinstance(other, (timedelta, np.timedelta64)):\n other = Timedelta(other).value\n elif hasattr(other, 'dtype') and is_timedelta64_dtype(other):\n other = other.astype('i8', copy=False).view('i8')\n else:\n # coercion issues\n # let higher levels handle\n raise TypeError(other)\n\n return values, other\n\n def _try_coerce_result(self, result):\n \"\"\" reverse of try_coerce_args / try_operate \"\"\"\n if isinstance(result, np.ndarray):\n mask = isna(result)\n if result.dtype.kind in ['i', 'f']:\n result = result.astype('m8[ns]')\n result[mask] = tslibs.iNaT\n\n elif isinstance(result, (np.integer, np.float)):\n result = self._box_func(result)\n\n return result\n\n def should_store(self, value):\n return (issubclass(value.dtype.type, np.timedelta64) and\n not is_extension_array_dtype(value))\n\n def to_native_types(self, slicer=None, na_rep=None, quoting=None,\n **kwargs):\n \"\"\" convert to our native types format, slicing if desired \"\"\"\n\n values = self.values\n if slicer is not None:\n values = values[:, slicer]\n mask = isna(values)\n\n rvalues = np.empty(values.shape, dtype=object)\n if na_rep is None:\n na_rep = 'NaT'\n rvalues[mask] = na_rep\n imask = (~mask).ravel()\n\n # FIXME:\n # should use the formats.format.Timedelta64Formatter here\n # to figure what format to pass to the Timedelta\n # e.g. 
to not show the decimals say\n rvalues.flat[imask] = np.array([Timedelta(val)._repr_base(format='all')\n for val in values.ravel()[imask]],\n dtype=object)\n return rvalues\n\n def external_values(self, dtype=None):\n return np.asarray(self.values.astype(\"timedelta64[ns]\", copy=False))\n\n\nclass BoolBlock(NumericBlock):\n __slots__ = ()\n is_bool = True\n _can_hold_na = False\n\n def _can_hold_element(self, element):\n tipo = maybe_infer_dtype_type(element)\n if tipo is not None:\n return issubclass(tipo.type, np.bool_)\n return isinstance(element, (bool, np.bool_))\n\n def should_store(self, value):\n return (issubclass(value.dtype.type, np.bool_) and not\n is_extension_array_dtype(value))\n\n def replace(self, to_replace, value, inplace=False, filter=None,\n regex=False, convert=True):\n inplace = validate_bool_kwarg(inplace, 'inplace')\n to_replace_values = np.atleast_1d(to_replace)\n if not np.can_cast(to_replace_values, bool):\n return self\n return super(BoolBlock, self).replace(to_replace, value,\n inplace=inplace, filter=filter,\n regex=regex, convert=convert)\n\n\nclass ObjectBlock(Block):\n __slots__ = ()\n is_object = True\n _can_hold_na = True\n\n def __init__(self, values, placement=None, ndim=2):\n if issubclass(values.dtype.type, compat.string_types):\n values = np.array(values, dtype=object)\n\n super(ObjectBlock, self).__init__(values, ndim=ndim,\n placement=placement)\n\n @property\n def is_bool(self):\n \"\"\" we can be a bool if we have only bool values but are of type\n object\n \"\"\"\n return lib.is_bool_array(self.values.ravel())\n\n # TODO: Refactor when convert_objects is removed since there will be 1 path\n def convert(self, *args, **kwargs):\n \"\"\" attempt to coerce any object types to better types return a copy of\n the block (if copy = True) by definition we ARE an ObjectBlock!!!!!\n\n can return multiple blocks!\n \"\"\"\n\n if args:\n raise NotImplementedError\n by_item = kwargs.get('by_item', True)\n\n new_inputs = ['coerce', 'datetime', 'numeric', 'timedelta']\n new_style = False\n for kw in new_inputs:\n new_style |= kw in kwargs\n\n if new_style:\n fn = soft_convert_objects\n fn_inputs = new_inputs\n else:\n fn = maybe_convert_objects\n fn_inputs = ['convert_dates', 'convert_numeric',\n 'convert_timedeltas']\n fn_inputs += ['copy']\n\n fn_kwargs = {key: kwargs[key] for key in fn_inputs if key in kwargs}\n\n # operate column-by-column\n def f(m, v, i):\n shape = v.shape\n values = fn(v.ravel(), **fn_kwargs)\n try:\n values = values.reshape(shape)\n values = _block_shape(values, ndim=self.ndim)\n except (AttributeError, NotImplementedError):\n pass\n\n return values\n\n if by_item and not self._is_single_block:\n blocks = self.split_and_operate(None, f, False)\n else:\n values = f(None, self.values.ravel(), None)\n blocks = [make_block(values, ndim=self.ndim,\n placement=self.mgr_locs)]\n\n return blocks\n\n def set(self, locs, values):\n \"\"\"\n Modify Block in-place with new item value\n\n Returns\n -------\n None\n \"\"\"\n try:\n self.values[locs] = values\n except (ValueError):\n\n # broadcasting error\n # see GH6171\n new_shape = list(values.shape)\n new_shape[0] = len(self.items)\n self.values = np.empty(tuple(new_shape), dtype=self.dtype)\n self.values.fill(np.nan)\n self.values[locs] = values\n\n def _maybe_downcast(self, blocks, downcast=None):\n\n if downcast is not None:\n return blocks\n\n # split and convert the blocks\n return _extend_blocks([b.convert(datetime=True, numeric=False)\n for b in blocks])\n\n def _can_hold_element(self, 
element):\n return True\n\n def _try_coerce_args(self, values, other):\n \"\"\" provide coercion to our input arguments \"\"\"\n\n if isinstance(other, ABCDatetimeIndex):\n # May get a DatetimeIndex here. Unbox it.\n other = other.array\n\n if isinstance(other, DatetimeArray):\n # hit in pandas/tests/indexing/test_coercion.py\n # ::TestWhereCoercion::test_where_series_datetime64[datetime64tz]\n # when falling back to ObjectBlock.where\n other = other.astype(object)\n\n return values, other\n\n def should_store(self, value):\n return not (issubclass(value.dtype.type,\n (np.integer, np.floating, np.complexfloating,\n np.datetime64, np.bool_)) or\n # TODO(ExtensionArray): remove is_extension_type\n # when all extension arrays have been ported.\n is_extension_type(value) or\n is_extension_array_dtype(value))\n\n def replace(self, to_replace, value, inplace=False, filter=None,\n regex=False, convert=True):\n to_rep_is_list = is_list_like(to_replace)\n value_is_list = is_list_like(value)\n both_lists = to_rep_is_list and value_is_list\n either_list = to_rep_is_list or value_is_list\n\n result_blocks = []\n blocks = [self]\n\n if not either_list and is_re(to_replace):\n return self._replace_single(to_replace, value, inplace=inplace,\n filter=filter, regex=True,\n convert=convert)\n elif not (either_list or regex):\n return super(ObjectBlock, self).replace(to_replace, value,\n inplace=inplace,\n filter=filter, regex=regex,\n convert=convert)\n elif both_lists:\n for to_rep, v in zip(to_replace, value):\n result_blocks = []\n for b in blocks:\n result = b._replace_single(to_rep, v, inplace=inplace,\n filter=filter, regex=regex,\n convert=convert)\n result_blocks = _extend_blocks(result, result_blocks)\n blocks = result_blocks\n return result_blocks\n\n elif to_rep_is_list and regex:\n for to_rep in to_replace:\n result_blocks = []\n for b in blocks:\n result = b._replace_single(to_rep, value, inplace=inplace,\n filter=filter, regex=regex,\n convert=convert)\n result_blocks = _extend_blocks(result, result_blocks)\n blocks = result_blocks\n return result_blocks\n\n return self._replace_single(to_replace, value, inplace=inplace,\n filter=filter, convert=convert,\n regex=regex)\n\n def _replace_single(self, to_replace, value, inplace=False, filter=None,\n regex=False, convert=True, mask=None):\n \"\"\"\n Replace elements by the given value.\n\n Parameters\n ----------\n to_replace : object or pattern\n Scalar to replace or regular expression to match.\n value : object\n Replacement object.\n inplace : bool, default False\n Perform inplace modification.\n filter : list, optional\n regex : bool, default False\n If true, perform regular expression substitution.\n convert : bool, default True\n If true, try to coerce any object types to better types.\n mask : array-like of bool, optional\n True indicate corresponding element is ignored.\n\n Returns\n -------\n a new block, the result after replacing\n \"\"\"\n inplace = validate_bool_kwarg(inplace, 'inplace')\n\n # to_replace is regex compilable\n to_rep_re = regex and is_re_compilable(to_replace)\n\n # regex is regex compilable\n regex_re = is_re_compilable(regex)\n\n # only one will survive\n if to_rep_re and regex_re:\n raise AssertionError('only one of to_replace and regex can be '\n 'regex compilable')\n\n # if regex was passed as something that can be a regex (rather than a\n # boolean)\n if regex_re:\n to_replace = regex\n\n regex = regex_re or to_rep_re\n\n # try to get the pattern attribute (compiled re) or it's a string\n try:\n pattern = 
to_replace.pattern\n except AttributeError:\n pattern = to_replace\n\n # if the pattern is not empty and to_replace is either a string or a\n # regex\n if regex and pattern:\n rx = re.compile(to_replace)\n else:\n # if the thing to replace is not a string or compiled regex call\n # the superclass method -> to_replace is some kind of object\n return super(ObjectBlock, self).replace(to_replace, value,\n inplace=inplace,\n filter=filter, regex=regex)\n\n new_values = self.values if inplace else self.values.copy()\n\n # deal with replacing values with objects (strings) that match but\n # whose replacement is not a string (numeric, nan, object)\n if isna(value) or not isinstance(value, compat.string_types):\n\n def re_replacer(s):\n try:\n return value if rx.search(s) is not None else s\n except TypeError:\n return s\n else:\n # value is guaranteed to be a string here, s can be either a string\n # or null if it's null it gets returned\n def re_replacer(s):\n try:\n return rx.sub(value, s)\n except TypeError:\n return s\n\n f = np.vectorize(re_replacer, otypes=[self.dtype])\n\n if filter is None:\n filt = slice(None)\n else:\n filt = self.mgr_locs.isin(filter).nonzero()[0]\n\n if mask is None:\n new_values[filt] = f(new_values[filt])\n else:\n new_values[filt][mask] = f(new_values[filt][mask])\n\n # convert\n block = self.make_block(new_values)\n if convert:\n block = block.convert(by_item=True, numeric=False)\n return block\n\n def _replace_coerce(self, to_replace, value, inplace=True, regex=False,\n convert=False, mask=None):\n \"\"\"\n Replace value corresponding to the given boolean array with another\n value.\n\n Parameters\n ----------\n to_replace : object or pattern\n Scalar to replace or regular expression to match.\n value : object\n Replacement object.\n inplace : bool, default False\n Perform inplace modification.\n regex : bool, default False\n If true, perform regular expression substitution.\n convert : bool, default True\n If true, try to coerce any object types to better types.\n mask : array-like of bool, optional\n True indicate corresponding element is ignored.\n\n Returns\n -------\n A new block if there is anything to replace or the original block.\n \"\"\"\n if mask.any():\n block = super(ObjectBlock, self)._replace_coerce(\n to_replace=to_replace, value=value, inplace=inplace,\n regex=regex, convert=convert, mask=mask)\n if convert:\n block = [b.convert(by_item=True, numeric=False, copy=True)\n for b in block]\n return block\n return self\n\n\nclass CategoricalBlock(ExtensionBlock):\n __slots__ = ()\n is_categorical = True\n _verify_integrity = True\n _can_hold_na = True\n _concatenator = staticmethod(_concat._concat_categorical)\n\n def __init__(self, values, placement, ndim=None):\n from pandas.core.arrays.categorical import _maybe_to_categorical\n\n # coerce to categorical if we can\n super(CategoricalBlock, self).__init__(_maybe_to_categorical(values),\n placement=placement,\n ndim=ndim)\n\n @property\n def _holder(self):\n return Categorical\n\n @property\n def array_dtype(self):\n \"\"\" the dtype to return if I want to construct this block as an\n array\n \"\"\"\n return np.object_\n\n def _try_coerce_result(self, result):\n \"\"\" reverse of try_coerce_args \"\"\"\n\n # GH12564: CategoricalBlock is 1-dim only\n # while returned results could be any dim\n if ((not is_categorical_dtype(result)) and\n isinstance(result, np.ndarray)):\n result = _block_shape(result, ndim=self.ndim)\n\n return result\n\n def to_dense(self):\n # Categorical.get_values returns a 
DatetimeIndex for datetime\n # categories, so we can't simply use `np.asarray(self.values)` like\n # other types.\n return self.values.get_values()\n\n def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs):\n \"\"\" convert to our native types format, slicing if desired \"\"\"\n\n values = self.values\n if slicer is not None:\n # Categorical is always one dimension\n values = values[slicer]\n mask = isna(values)\n values = np.array(values, dtype='object')\n values[mask] = na_rep\n\n # we are expected to return a 2-d ndarray\n return values.reshape(1, len(values))\n\n def concat_same_type(self, to_concat, placement=None):\n \"\"\"\n Concatenate list of single blocks of the same type.\n\n Note that this CategoricalBlock._concat_same_type *may* not\n return a CategoricalBlock. When the categories in `to_concat`\n differ, this will return an object ndarray.\n\n If / when we decide we don't like that behavior:\n\n 1. Change Categorical._concat_same_type to use union_categoricals\n 2. Delete this method.\n \"\"\"\n values = self._concatenator([blk.values for blk in to_concat],\n axis=self.ndim - 1)\n # not using self.make_block_same_class as values can be object dtype\n return make_block(\n values, placement=placement or slice(0, len(values), 1),\n ndim=self.ndim)\n\n def where(self, other, cond, align=True, errors='raise',\n try_cast=False, axis=0, transpose=False):\n # TODO(CategoricalBlock.where):\n # This can all be deleted in favor of ExtensionBlock.where once\n # we enforce the deprecation.\n object_msg = (\n \"Implicitly converting categorical to object-dtype ndarray. \"\n \"One or more of the values in 'other' are not present in this \"\n \"categorical's categories. A future version of pandas will raise \"\n \"a ValueError when 'other' contains different categories.\\n\\n\"\n \"To preserve the current behavior, add the new categories to \"\n \"the categorical before calling 'where', or convert the \"\n \"categorical to a different dtype.\"\n )\n try:\n # Attempt to do preserve categorical dtype.\n result = super(CategoricalBlock, self).where(\n other, cond, align, errors, try_cast, axis, transpose\n )\n except (TypeError, ValueError):\n warnings.warn(object_msg, FutureWarning, stacklevel=6)\n result = self.astype(object).where(other, cond, align=align,\n errors=errors,\n try_cast=try_cast,\n axis=axis, transpose=transpose)\n return result\n\n\n# -----------------------------------------------------------------\n# Constructor Helpers\n\ndef get_block_type(values, dtype=None):\n \"\"\"\n Find the appropriate Block subclass to use for the given values and dtype.\n\n Parameters\n ----------\n values : ndarray-like\n dtype : numpy or pandas dtype\n\n Returns\n -------\n cls : class, subclass of Block\n \"\"\"\n dtype = dtype or values.dtype\n vtype = dtype.type\n\n if is_sparse(dtype):\n # Need this first(ish) so that Sparse[datetime] is sparse\n cls = ExtensionBlock\n elif is_categorical(values):\n cls = CategoricalBlock\n elif issubclass(vtype, np.datetime64):\n assert not is_datetime64tz_dtype(values)\n cls = DatetimeBlock\n elif is_datetime64tz_dtype(values):\n cls = DatetimeTZBlock\n elif is_interval_dtype(dtype) or is_period_dtype(dtype):\n cls = ObjectValuesExtensionBlock\n elif is_extension_array_dtype(values):\n cls = ExtensionBlock\n elif issubclass(vtype, np.floating):\n cls = FloatBlock\n elif issubclass(vtype, np.timedelta64):\n assert issubclass(vtype, np.integer)\n cls = TimeDeltaBlock\n elif issubclass(vtype, np.complexfloating):\n cls = ComplexBlock\n elif 
issubclass(vtype, np.integer):\n cls = IntBlock\n elif dtype == np.bool_:\n cls = BoolBlock\n else:\n cls = ObjectBlock\n return cls\n\n\ndef make_block(values, placement, klass=None, ndim=None, dtype=None,\n fastpath=None):\n if fastpath is not None:\n # GH#19265 pyarrow is passing this\n warnings.warn(\"fastpath argument is deprecated, will be removed \"\n \"in a future release.\", DeprecationWarning)\n if klass is None:\n dtype = dtype or values.dtype\n klass = get_block_type(values, dtype)\n\n elif klass is DatetimeTZBlock and not is_datetime64tz_dtype(values):\n # TODO: This is no longer hit internally; does it need to be retained\n # for e.g. pyarrow?\n values = DatetimeArray._simple_new(values, dtype=dtype)\n\n return klass(values, ndim=ndim, placement=placement)\n\n\n# -----------------------------------------------------------------\n\ndef _extend_blocks(result, blocks=None):\n \"\"\" return a new extended blocks, givin the result \"\"\"\n from pandas.core.internals import BlockManager\n if blocks is None:\n blocks = []\n if isinstance(result, list):\n for r in result:\n if isinstance(r, list):\n blocks.extend(r)\n else:\n blocks.append(r)\n elif isinstance(result, BlockManager):\n blocks.extend(result.blocks)\n else:\n blocks.append(result)\n return blocks\n\n\ndef _block_shape(values, ndim=1, shape=None):\n \"\"\" guarantee the shape of the values to be at least 1 d \"\"\"\n if values.ndim < ndim:\n if shape is None:\n shape = values.shape\n if not is_extension_array_dtype(values):\n # TODO: https://github.com/pandas-dev/pandas/issues/23023\n # block.shape is incorrect for \"2D\" ExtensionArrays\n # We can't, and don't need to, reshape.\n values = values.reshape(tuple((1, ) + shape))\n return values\n\n\ndef _merge_blocks(blocks, dtype=None, _can_consolidate=True):\n\n if len(blocks) == 1:\n return blocks[0]\n\n if _can_consolidate:\n\n if dtype is None:\n if len({b.dtype for b in blocks}) != 1:\n raise AssertionError(\"_merge_blocks are invalid!\")\n dtype = blocks[0].dtype\n\n # FIXME: optimization potential in case all mgrs contain slices and\n # combination of those slices is a slice, too.\n new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks])\n new_values = np.vstack([b.values for b in blocks])\n\n argsort = np.argsort(new_mgr_locs)\n new_values = new_values[argsort]\n new_mgr_locs = new_mgr_locs[argsort]\n\n return make_block(new_values, placement=new_mgr_locs)\n\n # no merge\n return blocks\n\n\ndef _safe_reshape(arr, new_shape):\n \"\"\"\n If possible, reshape `arr` to have shape `new_shape`,\n with a couple of exceptions (see gh-13012):\n\n 1) If `arr` is a ExtensionArray or Index, `arr` will be\n returned as is.\n 2) If `arr` is a Series, the `_values` attribute will\n be reshaped and returned.\n\n Parameters\n ----------\n arr : array-like, object to be reshaped\n new_shape : int or tuple of ints, the new shape\n \"\"\"\n if isinstance(arr, ABCSeries):\n arr = arr._values\n if not isinstance(arr, ABCExtensionArray):\n arr = arr.reshape(new_shape)\n return arr\n\n\ndef _putmask_smart(v, m, n):\n \"\"\"\n Return a new ndarray, try to preserve dtype if possible.\n\n Parameters\n ----------\n v : `values`, updated in-place (array like)\n m : `mask`, applies to both sides (array like)\n n : `new values` either scalar or an array like aligned with `values`\n\n Returns\n -------\n values : ndarray with updated values\n this *may* be a copy of the original\n\n See Also\n --------\n ndarray.putmask\n \"\"\"\n\n # we cannot use np.asarray() here as we cannot 
have conversions\n # that numpy does when numeric are mixed with strings\n\n # n should be the length of the mask or a scalar here\n if not is_list_like(n):\n n = np.repeat(n, len(m))\n elif isinstance(n, np.ndarray) and n.ndim == 0: # numpy scalar\n n = np.repeat(np.array(n, ndmin=1), len(m))\n\n # see if we are only masking values that if putted\n # will work in the current dtype\n try:\n nn = n[m]\n\n # make sure that we have a nullable type\n # if we have nulls\n if not _isna_compat(v, nn[0]):\n raise ValueError\n\n # we ignore ComplexWarning here\n with warnings.catch_warnings(record=True):\n warnings.simplefilter(\"ignore\", np.ComplexWarning)\n nn_at = nn.astype(v.dtype)\n\n # avoid invalid dtype comparisons\n # between numbers & strings\n\n # only compare integers/floats\n # don't compare integers to datetimelikes\n if (not is_numeric_v_string_like(nn, nn_at) and\n (is_float_dtype(nn.dtype) or\n is_integer_dtype(nn.dtype) and\n is_float_dtype(nn_at.dtype) or\n is_integer_dtype(nn_at.dtype))):\n\n comp = (nn == nn_at)\n if is_list_like(comp) and comp.all():\n nv = v.copy()\n nv[m] = nn_at\n return nv\n except (ValueError, IndexError, TypeError, OverflowError):\n pass\n\n n = np.asarray(n)\n\n def _putmask_preserve(nv, n):\n try:\n nv[m] = n[m]\n except (IndexError, ValueError):\n nv[m] = n\n return nv\n\n # preserves dtype if possible\n if v.dtype.kind == n.dtype.kind:\n return _putmask_preserve(v, n)\n\n # change the dtype if needed\n dtype, _ = maybe_promote(n.dtype)\n\n if is_extension_type(v.dtype) and is_object_dtype(dtype):\n v = v.get_values(dtype)\n else:\n v = v.astype(dtype)\n\n return _putmask_preserve(v, n)\n" ]
[ [ "pandas.core.dtypes.cast.infer_dtype_from", "pandas.core.dtypes.common.is_float_dtype", "pandas.core.dtypes.common.is_datetime64_dtype", "numpy.empty", "pandas.core.computation.expressions.where", "pandas.core.dtypes.common.pandas_dtype", "numpy.prod", "pandas.core.arrays.Categorical", "pandas.core.dtypes.missing._isna_compat", "pandas.core.dtypes.common.is_dtype_equal", "pandas.core.dtypes.common.is_period_dtype", "pandas._libs.tslibs.conversion.ensure_timedelta64ns", "pandas.io.formats.format.FloatArrayFormatter", "numpy.errstate", "pandas.core.dtypes.common.is_numeric_v_string_like", "pandas.core.internals.arrays.extract_array", "numpy.any", "pandas.core.algorithms.take_nd", "pandas.compat.range", "pandas._libs.lib.is_scalar", "pandas._libs.internals.BlockPlacement", "pandas.core.dtypes.missing.isna", "pandas.core.dtypes.common.is_re_compilable", "pandas.core.dtypes.common.is_datetime64tz_dtype", "numpy.arange", "pandas.core.missing.clean_fill_method", "numpy.array", "pandas.core.indexing.check_setitem_lengths", "numpy.argsort", "pandas.core.arrays.categorical._maybe_to_categorical", "pandas.core.indexes.datetimes.DatetimeIndex", "numpy.ones", "pandas.core.dtypes.common.is_sparse", "pandas._libs.tslibs.is_null_datetimelike", "pandas.core.dtypes.common.is_timedelta64_dtype", "numpy.repeat", "pandas.core.dtypes.common.is_bool_dtype", "pandas._libs.tslibs.Timestamp", "pandas.core.dtypes.cast.maybe_downcast_to_dtype", "pandas._libs.tslibs.Timedelta", "pandas.io.formats.printing.pprint_thing", "pandas.core.dtypes.cast.find_common_type", "numpy.where", "pandas.core.dtypes.common.ensure_platform_int", "pandas.core.arrays.DatetimeArray._simple_new", "pandas.core.dtypes.common.is_re", "pandas.core.dtypes.common.is_object_dtype", "pandas._libs.lib.item_from_zerodim", "numpy.atleast_2d", "numpy.delete", "pandas.core.dtypes.cast.maybe_infer_dtype_type", "pandas.core.dtypes.cast.maybe_promote", "pandas.core.dtypes.missing.array_equivalent", "pandas.core.dtypes.common.is_integer", "pandas.core.dtypes.common.is_list_like", "pandas._libs.tslibs.conversion.ensure_datetime64ns", "numpy.squeeze", "pandas.core.dtypes.cast.infer_dtype_from_scalar", "numpy.asarray", "pandas.core.missing.mask_missing", "pandas.core.dtypes.concat._concat_datetime", "pandas.core.dtypes.dtypes.CategoricalDtype", "numpy.apply_along_axis", "pandas.core.common._any_not_none", "numpy.concatenate", "pandas.core.dtypes.missing.notna", "numpy.vectorize", "numpy.putmask", "pandas.core.dtypes.common.is_interval_dtype", "pandas.core.dtypes.common.is_extension_array_dtype", "pandas.util._validators.validate_bool_kwarg", "numpy.can_cast", "pandas.core.dtypes.common.is_integer_dtype", "pandas.core.dtypes.common.is_categorical", "numpy.vstack", "pandas.core.dtypes.common.is_extension_type", "pandas.core.missing.interpolate_2d", "pandas.core.missing.interpolate_1d", "numpy.isnan", "pandas.core.algorithms.diff", "pandas.compat.zip", "pandas.io.formats.format._get_format_datetime64_from_values", "pandas._libs.internals.indexer_as_slice", "pandas.core.dtypes.cast.maybe_upcast", "numpy.atleast_1d", "pandas.core.dtypes.common.is_categorical_dtype", "pandas.core.common.is_null_slice", "pandas.core.missing.clean_interp_method" ] ]
RobinMaas95/GTSRB_Visualization
[ "fa837ff94e089a936ef4f4418970d262b35f70b6" ]
[ "code/nn_interpretability/nn_interpretability/interpretation/cam/grad_cam.py" ]
[ "from torch.nn import Module\nfrom torch.nn import functional as F\nfrom torchvision import transforms\nimport torch\n\nfrom nn_interpretability.interpretation.cam.cam_base import CAMBase\n\n\nclass GradCAMInterpreter(CAMBase):\n \"\"\"\n Implements the decision-based interpretability method \"Grad-CAM\"\n as outlined in the paper \"Grad-CAM: Visual Explanations from Deep Networks via\n Gradient-based Localization\" by Selvaraju et al.\n\n https://arxiv.org/abs/1610.02391\n \"\"\"\n def __init__(self, model: Module, classes: [str], preprocess: transforms.Compose, input_size,\n conv_layer_name, upsampling_mode=\"bilinear\"):\n \"\"\"\n :param model: The model the decisions of which needs to be interpreted.\n :param classes: A collection of all classes that the given model can classify\n :param preprocess: The preprocessing functions that need to be invoked for the model input.\n :param input_size: The size of the expected model input\n :param conv_layer_name: The name of the last conv layer\n :param upsampling_mode: The mode for the upsampling (e.g. linear, bicubic, bilinear)\n \"\"\"\n CAMBase.__init__(self, model, classes, preprocess, input_size, conv_layer_name, upsampling_mode)\n\n self.conv_layer.register_backward_hook(self._backward_hook)\n\n def _backward_hook(self, module, input, output):\n self.gradient = output[0].detach().to(self.device)\n\n def interpret(self, x):\n x = self._execute_preprocess(x)\n print(x)\n model_output = self.model(x).to(self.device)\n print(model_output)\n self._last_prediction = int(torch.argmax(model_output.squeeze()).data)\n print(self._last_prediction)\n self.model.zero_grad()\n one_hot = self._one_hot(model_output, self._last_prediction)\n model_output.backward(gradient=one_hot.to(self.device), retain_graph=True)\n\n generated_cam = self._execute_grad_cam().to(self.device)\n print(generated_cam)\n\n return generated_cam.detach().cpu()\n\n def _execute_grad_cam(self):\n weights = F.adaptive_avg_pool2d(self.gradient, (1, 1)).to(self.device)\n\n gcam = torch.mul(self.activation.to(self.device), weights).sum(dim=1, keepdim=True).to(self.device)\n gcam = F.relu(gcam).to(self.device)\n gcam = self._upsample(gcam).to(self.device)\n gcam = (gcam - gcam.min()) / (gcam.max() - gcam.min())\n\n return gcam\n" ]
[ [ "torch.nn.functional.relu", "torch.nn.functional.adaptive_avg_pool2d" ] ]
WeiweiJin/PulseWaveSignalProcessor
[ "83d7dcc896f39b4438692f2bce207aa0132e5aea" ]
[ "PulseWaveSignalProcessor.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 16 16:37:24 2020\n\n@author: weiweijin\n\"\"\"\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt \nfrom scipy.signal import kaiserord, firwin, filtfilt\nfrom scipy import signal\n\n# %% import data\n# import waveform data \nPW = pd.read_csv('sample_ppg.csv')\n\n# Plot raw signal\nfig = plt.figure(figsize = (18,10))\nax = fig.add_subplot(1,1,1) \nplt.plot(PW.time,PW.ppg, 'k-', linewidth=2)\nplt.rc('xtick',labelsize=16)\nplt.rc('ytick',labelsize=16)\nax.set_xlabel('time [s]', fontsize = 20)\nax.set_ylabel('PPG [-]', fontsize = 20)\nplt.xticks(np.arange(0, 502, 250)) \nplt.yticks(np.arange(-10, 12, 10)) \nfilename = 'Raw_signal.png'\nfig.savefig(filename)\n\nfig = plt.figure(figsize = (18,10))\nax = fig.add_subplot(1,1,1) \nplt.plot(PW.time[0:250],PW.ppg[0:250], 'k-', linewidth=2)\nplt.rc('xtick',labelsize=16)\nplt.rc('ytick',labelsize=16)\nax.set_xlabel('time [s]', fontsize = 20)\nax.set_ylabel('PPG [-]', fontsize = 20)\nplt.xticks(np.arange(0, 2, 1)) \nplt.yticks(np.arange(-10, 12, 10)) \nfilename = 'Raw_signal_sigal.png'\nfig.savefig(filename)\n\n# %% Filter out very low and very high frequency noise \n# get ppg data\nppg = PW.loc[:, 'ppg'].values\n\n# Create low pass filter\nsample_rate = 300 #sample rat of the signal\n\nnyq_rate = sample_rate / 2.0 # The Nyquist rate of the signal\nwidth = 5.0/nyq_rate # 5 Hz transition width\nripple_db = 8.0 # Attenuation in the stop band\nN, beta = kaiserord(ripple_db, width) # Compute the order and Kaiser parameter for the FIR filter.\nif N % 2 ==0:\n N = N +1\n\ncutoff_hz_l = 2.0 # The cutoff frequency of the filter\ntaps = firwin(N, cutoff_hz_l/nyq_rate, window=('kaiser', beta)) # Use firwin with a Kaiser window to create a lowpass FIR filter\n# filter out low frequncy siginals\nppg_flt = filtfilt(taps, 1.0, ppg)\n# plot filtered signal\nfig = plt.figure(figsize = (18,10))\nax = fig.add_subplot(1,1,1) \nplt.plot(PW.time[0:250],ppg_flt[0:250], 'k-', linewidth=2)\nplt.rc('xtick',labelsize=16)\nplt.rc('ytick',labelsize=16)\nax.set_xlabel('time [s]', fontsize = 20)\nax.set_ylabel('PPG [-]', fontsize = 20)\nplt.xticks(np.arange(0, 2, 1)) \nplt.yticks(np.arange(-10, 12, 10)) \nfilename = 'LowPass.png'\nfig.savefig(filename)\n\n# Create high pass filter\n\nsos = signal.butter(1, 1, 'hp', fs=sample_rate, output='sos')\nppg_flt = signal.sosfilt(sos, ppg_flt)\n\n\n# plot filtered signal\nfig = plt.figure(figsize = (18,10))\nax = fig.add_subplot(1,1,1) \nplt.plot(PW.time,ppg_flt, 'k-', linewidth=2)\nplt.rc('xtick',labelsize=16)\nplt.rc('ytick',labelsize=16)\nax.set_xlabel('time [s]', fontsize = 20)\nax.set_ylabel('PPG [-]', fontsize = 20)\nplt.xticks(np.arange(0, 502, 250)) \nplt.yticks(np.arange(-10, 12, 10)) \nfilename = 'HighPass.png'\nfig.savefig(filename)\n\n# filter out high frequency again\nppg_flt = filtfilt(taps, 1.0, ppg_flt)\n# plot filtered signal\nfig = plt.figure(figsize = (18,10))\nax = fig.add_subplot(1,1,1) \nplt.plot(PW.time[0:250],ppg_flt[0:250], 'k-', linewidth=2)\nplt.rc('xtick',labelsize=16)\nplt.rc('ytick',labelsize=16)\nax.set_xlabel('time [s]', fontsize = 20)\nax.set_ylabel('PPG [-]', fontsize = 20)\nplt.xticks(np.arange(0, 2, 1)) \nplt.yticks(np.arange(-10, 12, 10)) \nfilename = 'LowPass_final.png'\nfig.savefig(filename)\n\n\n# %% Detect beats\n# detect peaks\nppg_avg = np.mean(ppg_flt) #find the mean value of the dataset\n\ngrad_ppg = np.gradient(ppg_flt)\n\ngrad_ppg_b = grad_ppg[0:-1]\ngrad_ppg_a = grad_ppg[1:]\n\ngrad_sig = 
np.multiply(grad_ppg_b,grad_ppg_a)\n\npos_grad = np.argwhere(grad_sig<0) # find peaks and troughs in the waveforms\n\ntmp_peak_id = [] # identify temp peak\n\nfor ii in pos_grad:\n if ppg_flt[ii] > ppg_avg:\n tmp_peak_id.extend(ii)\n\nid_dif = np.array(tmp_peak_id[1:]) - np.array(tmp_peak_id[0:-1]) # identify the peaks that are very colse to each other\nid_dif = id_dif.reshape(-1)\n\nsmall_dif = np.argwhere(id_dif < 50)\nsmall_dif = small_dif.reshape(-1)\n\nsmall_dif_list = small_dif.tolist()\n\ndef ranges(nums):\n nums = sorted(set(nums))\n gaps = [[s, e] for s, e in zip(nums, nums[1:]) if s+1 < e]\n edges = iter(nums[:1] + sum(gaps, []) + nums[-1:])\n return list(zip(edges, edges))\n\ncons_range = ranges(small_dif_list)\n\nid_keep = [] # identify the true peak in those close range peaks\n\nfor jj in range(len(cons_range)):\n tmp = np.argmax(ppg_flt[(tmp_peak_id[cons_range[jj][0]]):(tmp_peak_id[cons_range[jj][1]+1]+1)])\n tmp = tmp_peak_id[cons_range[jj][0]] + tmp\n id_keep.append(tmp)\n\ndef split_list(n):\n \"\"\"will return the list index\"\"\"\n return [(x+1) for x,y in zip(n, n[1:]) if y-x != 1]\n\ndef get_sub_list(my_list):\n \"\"\"will split the list base on the index\"\"\"\n my_index = split_list(my_list)\n output = list()\n prev = 0\n for index in my_index:\n new_list = [ x for x in my_list[prev:] if x < index]\n output.append(new_list)\n prev += len(new_list)\n output.append([ x for x in my_list[prev:]])\n return output\n\ncons_list = get_sub_list(small_dif_list)\n\nfor nn in range(len(cons_list)):\n cons_list[nn].append(cons_list[nn][-1]+1)\n\npeak_id = tmp_peak_id.copy() # delete all close range peaks\n\nfor xx in cons_list:\n for ind in xx:\n peak_id.remove(tmp_peak_id[ind])\n\npeak_id.extend(id_keep) # add back the true peaks\n\npeak_id.sort()\n\n# detect onset\nbeats = len(peak_id)\n\nonset = [0]\n\nfor bt in range(beats-1):\n tmp = np.argmin(ppg_flt[peak_id[bt]:peak_id[bt+1]])\n tmp = peak_id[bt] + tmp\n onset.append(tmp)\n\n# seprate beats\nwave = []\nfor bt in range(beats):\n if bt == beats-1:\n tmp = ppg_flt[onset[bt]:]\n else:\n tmp = ppg_flt[onset[bt]:onset[bt+1]]\n wave.append(tmp)\n\n# plot filtered signal\ndt = 1/sample_rate\nT = dt * len(wave[122])\ntime = np.linspace(0,T,num = len(wave[122]))\nfig = plt.figure(figsize = (18,10))\nax = fig.add_subplot(1,1,1) \nplt.plot(time,wave[122], 'k-', linewidth=2)\nplt.rc('xtick',labelsize=16)\nplt.rc('ytick',labelsize=16)\nax.set_xlabel('time [s]', fontsize = 20)\nax.set_ylabel('PPG [-]', fontsize = 20)\nplt.xticks(np.arange(0, 2, 1)) \nplt.yticks(np.arange(-10, 12, 10)) \nfilename = 'singal_waveform_example.png'\nfig.savefig(filename)\n\n" ]
[ [ "scipy.signal.firwin", "numpy.array", "scipy.signal.sosfilt", "numpy.argmin", "matplotlib.pyplot.plot", "scipy.signal.butter", "numpy.mean", "matplotlib.pyplot.rc", "matplotlib.pyplot.figure", "scipy.signal.filtfilt", "numpy.multiply", "numpy.arange", "numpy.argmax", "numpy.argwhere", "scipy.signal.kaiserord", "pandas.read_csv", "numpy.gradient" ] ]
wwqgtxx/Paddle
[ "109ca9d250e37e723e364366e8402c307b110169" ]
[ "python/paddle/tests/test_model.py" ]
[ "# copyright (c) 2020 paddlepaddle authors. all rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport unittest\n\nimport os\nimport numpy as np\nimport shutil\nimport tempfile\n\nimport paddle\nfrom paddle import fluid\nfrom paddle import to_tensor\nfrom paddle.nn import Conv2D, Linear, ReLU, Sequential, Softmax\n\nfrom paddle import Model\nfrom paddle.static import InputSpec\nfrom paddle.nn.layer.loss import CrossEntropyLoss\nfrom paddle.metric import Accuracy\nfrom paddle.vision.datasets import MNIST\nfrom paddle.vision.models import LeNet\nimport paddle.vision.models as models\nimport paddle.fluid.dygraph.jit as jit\nfrom paddle.io import DistributedBatchSampler, Dataset\nfrom paddle.hapi.model import prepare_distributed_context\nfrom paddle.fluid.dygraph.jit import declarative\nfrom paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator\n\n\nclass LeNetDygraph(paddle.nn.Layer):\n def __init__(self, num_classes=10):\n super(LeNetDygraph, self).__init__()\n self.num_classes = num_classes\n self.features = Sequential(\n Conv2D(\n 1, 6, 3, stride=1, padding=1),\n ReLU(),\n paddle.fluid.dygraph.Pool2D(2, 'max', 2),\n Conv2D(\n 6, 16, 5, stride=1, padding=0),\n ReLU(),\n paddle.fluid.dygraph.Pool2D(2, 'max', 2))\n\n if num_classes > 0:\n self.fc = Sequential(\n Linear(400, 120), Linear(120, 84), Linear(84, 10))\n\n def forward(self, inputs):\n x = self.features(inputs)\n\n if self.num_classes > 0:\n x = fluid.layers.flatten(x, 1)\n x = self.fc(x)\n return x\n\n\nclass ModelInner(paddle.nn.Layer):\n def __init__(self):\n super(ModelInner, self).__init__()\n self.fc = paddle.nn.Linear(3, 4)\n\n def forward(self, x):\n y = self.fc(x)\n return y, 0\n\n\nclass ModelOutter(paddle.nn.Layer):\n def __init__(self):\n super(ModelOutter, self).__init__()\n self.module1 = ModelInner()\n self.module2 = paddle.nn.Linear(4, 5)\n\n def forward(self, x):\n y, dummpy = self.module1(x)\n y = self.module2(y)\n return y, 3\n\n\nclass LeNetListInput(LeNetDygraph):\n def forward(self, inputs):\n x = inputs[0]\n x = self.features(x)\n\n if self.num_classes > 0:\n x = paddle.flatten(x, 1)\n x = self.fc(x + inputs[1])\n return x\n\n\nclass LeNetDictInput(LeNetDygraph):\n def forward(self, inputs):\n x = self.features(inputs['x1'])\n\n if self.num_classes > 0:\n x = paddle.flatten(x, 1)\n x = self.fc(x + inputs['x2'])\n return x\n\n\nclass MnistDataset(MNIST):\n def __init__(self, mode, return_label=True, sample_num=None):\n super(MnistDataset, self).__init__(mode=mode)\n self.return_label = return_label\n if sample_num:\n self.images = self.images[:sample_num]\n self.labels = self.labels[:sample_num]\n\n def __getitem__(self, idx):\n img, label = self.images[idx], self.labels[idx]\n img = np.reshape(img, [1, 28, 28])\n if self.return_label:\n return img, np.array(self.labels[idx]).astype('int64')\n return img,\n\n def __len__(self):\n return len(self.images)\n\n\ndef compute_acc(pred, label):\n 
pred = np.argmax(pred, -1)\n label = np.array(label)\n correct = pred[:, np.newaxis] == label\n return np.sum(correct) / correct.shape[0]\n\n\ndef dynamic_train(model, dataloader):\n optim = fluid.optimizer.Adam(\n learning_rate=0.001, parameter_list=model.parameters())\n model.train()\n for inputs, labels in dataloader:\n outputs = model(inputs)\n loss = CrossEntropyLoss(reduction=\"sum\")(outputs, labels)\n avg_loss = fluid.layers.reduce_sum(loss)\n avg_loss.backward()\n optim.minimize(avg_loss)\n model.clear_gradients()\n\n\ndef dynamic_evaluate(model, dataloader):\n with fluid.dygraph.no_grad():\n model.eval()\n cnt = 0\n for inputs, labels in dataloader:\n outputs = model(inputs)\n\n cnt += (np.argmax(outputs.numpy(), -1)[:, np.newaxis] ==\n labels.numpy()).astype('int').sum()\n\n return cnt / len(dataloader.dataset)\n\n\[email protected](not fluid.is_compiled_with_cuda(),\n 'CPU testing is not supported')\nclass TestModel(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n if not fluid.is_compiled_with_cuda():\n cls().skipTest('module not tested when ONLY_CPU compling')\n cls.device = paddle.set_device('gpu')\n fluid.enable_dygraph(cls.device)\n\n sp_num = 1280\n cls.train_dataset = MnistDataset(mode='train', sample_num=sp_num)\n cls.val_dataset = MnistDataset(mode='test', sample_num=sp_num)\n cls.test_dataset = MnistDataset(\n mode='test', return_label=False, sample_num=sp_num)\n\n cls.train_loader = fluid.io.DataLoader(\n cls.train_dataset, places=cls.device, batch_size=64)\n cls.val_loader = fluid.io.DataLoader(\n cls.val_dataset, places=cls.device, batch_size=64)\n cls.test_loader = fluid.io.DataLoader(\n cls.test_dataset, places=cls.device, batch_size=64)\n\n seed = 333\n paddle.seed(seed)\n paddle.framework.random._manual_program_seed(seed)\n\n dy_lenet = LeNetDygraph()\n cls.init_param = dy_lenet.state_dict()\n dynamic_train(dy_lenet, cls.train_loader)\n\n cls.acc1 = dynamic_evaluate(dy_lenet, cls.val_loader)\n\n cls.inputs = [InputSpec([-1, 1, 28, 28], 'float32', 'image')]\n cls.labels = [InputSpec([None, 1], 'int64', 'label')]\n\n cls.save_dir = tempfile.mkdtemp()\n cls.weight_path = os.path.join(cls.save_dir, 'lenet')\n fluid.dygraph.save_dygraph(dy_lenet.state_dict(), cls.weight_path)\n\n fluid.disable_dygraph()\n\n @classmethod\n def tearDownClass(cls):\n shutil.rmtree(cls.save_dir)\n\n def test_fit_dygraph(self):\n self.fit(True)\n\n def test_fit_static(self):\n self.fit(False)\n\n def test_fit_dynamic_with_tuple_input(self):\n self.fit_with_tuple_input(True)\n\n def test_fit_static_with_tuple_input(self):\n self.fit_with_tuple_input(False)\n\n def test_fit_dynamic_with_rank(self):\n self.fit(True, 2, 0)\n\n def test_fit_static_with_rank(self):\n self.fit(False, 2, 0)\n\n def test_fit_dynamic_with_num_iters(self):\n self.fit(True, num_iters=1)\n\n def test_fit_static_with_num_iters(self):\n self.fit(False, num_iters=1)\n\n def test_evaluate_dygraph(self):\n self.evaluate(True)\n\n def test_evaluate_static(self):\n self.evaluate(False)\n\n def test_predict_dygraph(self):\n self.predict(True)\n\n def test_predict_static(self):\n self.predict(False)\n\n def test_prepare_context(self):\n prepare_distributed_context()\n\n def fit(self, dynamic, num_replicas=None, rank=None, num_iters=None):\n fluid.enable_dygraph(self.device) if dynamic else None\n seed = 333\n paddle.seed(seed)\n paddle.framework.random._manual_program_seed(seed)\n\n net = LeNet()\n optim_new = fluid.optimizer.Adam(\n learning_rate=0.001, parameter_list=net.parameters())\n model = Model(net, 
inputs=self.inputs, labels=self.labels)\n model.prepare(\n optim_new,\n loss=CrossEntropyLoss(reduction=\"sum\"),\n metrics=Accuracy())\n model.fit(self.train_dataset, batch_size=64, shuffle=False)\n\n result = model.evaluate(self.val_dataset, batch_size=64)\n np.testing.assert_allclose(result['acc'], self.acc1)\n\n model.fit(self.train_dataset,\n batch_size=64,\n shuffle=False,\n num_iters=num_iters)\n\n result = model.evaluate(\n self.val_dataset, batch_size=64, num_iters=num_iters)\n\n train_sampler = DistributedBatchSampler(\n self.train_dataset,\n batch_size=64,\n shuffle=False,\n num_replicas=num_replicas,\n rank=rank)\n val_sampler = DistributedBatchSampler(\n self.val_dataset,\n batch_size=64,\n shuffle=False,\n num_replicas=num_replicas,\n rank=rank)\n\n train_loader = fluid.io.DataLoader(\n self.train_dataset,\n batch_sampler=train_sampler,\n places=self.device,\n return_list=True)\n\n val_loader = fluid.io.DataLoader(\n self.val_dataset,\n batch_sampler=val_sampler,\n places=self.device,\n return_list=True)\n\n model.fit(train_loader, val_loader)\n fluid.disable_dygraph() if dynamic else None\n\n def fit_with_tuple_input(self, dynamic, num_replicas=None, rank=None):\n fluid.enable_dygraph(self.device) if dynamic else None\n seed = 333\n paddle.seed(seed)\n paddle.framework.random._manual_program_seed(seed)\n\n net = LeNet()\n optim_new = fluid.optimizer.Adam(\n learning_rate=0.001, parameter_list=net.parameters())\n model = Model(net, inputs=tuple(self.inputs), labels=tuple(self.labels))\n model.prepare(\n optim_new,\n loss=CrossEntropyLoss(reduction=\"sum\"),\n metrics=Accuracy())\n model.fit(self.train_dataset, batch_size=64, shuffle=False)\n\n result = model.evaluate(self.val_dataset, batch_size=64)\n np.testing.assert_allclose(result['acc'], self.acc1)\n\n train_sampler = DistributedBatchSampler(\n self.train_dataset,\n batch_size=64,\n shuffle=False,\n num_replicas=num_replicas,\n rank=rank)\n val_sampler = DistributedBatchSampler(\n self.val_dataset,\n batch_size=64,\n shuffle=False,\n num_replicas=num_replicas,\n rank=rank)\n\n train_loader = fluid.io.DataLoader(\n self.train_dataset,\n batch_sampler=train_sampler,\n places=self.device,\n return_list=True)\n\n val_loader = fluid.io.DataLoader(\n self.val_dataset,\n batch_sampler=val_sampler,\n places=self.device,\n return_list=True)\n\n model.fit(train_loader, val_loader)\n fluid.disable_dygraph() if dynamic else None\n\n def evaluate(self, dynamic):\n fluid.enable_dygraph(self.device) if dynamic else None\n model = Model(LeNet(), self.inputs, self.labels)\n model.prepare(metrics=Accuracy())\n model.load(self.weight_path)\n result = model.evaluate(self.val_dataset, batch_size=64)\n np.testing.assert_allclose(result['acc'], self.acc1)\n\n sampler = DistributedBatchSampler(\n self.val_dataset, batch_size=64, shuffle=False)\n\n val_loader = fluid.io.DataLoader(\n self.val_dataset,\n batch_sampler=sampler,\n places=self.device,\n return_list=True)\n\n model.evaluate(val_loader)\n\n fluid.disable_dygraph() if dynamic else None\n\n def predict(self, dynamic):\n fluid.enable_dygraph(self.device) if dynamic else None\n model = Model(LeNet(), self.inputs)\n model.prepare()\n model.load(self.weight_path)\n output = model.predict(\n self.test_dataset, batch_size=64, stack_outputs=True)\n np.testing.assert_equal(output[0].shape[0], len(self.test_dataset))\n\n acc = compute_acc(output[0], self.val_dataset.labels)\n np.testing.assert_allclose(acc, self.acc1)\n\n sampler = DistributedBatchSampler(\n self.test_dataset, batch_size=64, 
shuffle=False)\n\n test_loader = fluid.io.DataLoader(\n self.test_dataset,\n batch_sampler=sampler,\n places=self.device,\n return_list=True)\n\n model.evaluate(test_loader)\n\n fluid.disable_dygraph() if dynamic else None\n\n def test_predict_without_inputs(self):\n fluid.enable_dygraph(self.device)\n model = Model(LeNet())\n model.prepare()\n model.load(self.weight_path)\n model._inputs = None\n output = model.predict(\n self.test_dataset, batch_size=64, stack_outputs=True)\n np.testing.assert_equal(output[0].shape[0], len(self.test_dataset))\n fluid.disable_dygraph()\n\n def test_summary_gpu(self):\n paddle.disable_static(self.device)\n rnn = paddle.nn.LSTM(16, 32, 2)\n params_info = paddle.summary(\n rnn, [(-1, 23, 16), ((2, None, 32), (2, -1, 32))])\n\n\nclass MyModel(paddle.nn.Layer):\n def __init__(self):\n super(MyModel, self).__init__()\n self._fc = Linear(20, 10)\n\n def forward(self, x):\n y = self._fc(x)\n return y\n\n\nclass MyDataset(Dataset):\n def __getitem__(self, idx):\n return np.random.random(size=(20,)).astype(np.float32), \\\n np.random.randint(0, 10, size=(1,)).astype(np.int64)\n\n def __len__(self):\n return 40\n\n\nclass TestModelFunction(unittest.TestCase):\n def set_seed(self, seed=1024):\n paddle.seed(seed)\n paddle.framework.random._manual_program_seed(seed)\n\n def test_train_batch(self, dynamic=True):\n dim = 20\n data = np.random.random(size=(4, dim)).astype(np.float32)\n label = np.random.randint(0, 10, size=(4, 1)).astype(np.int64)\n\n def get_expect():\n fluid.enable_dygraph(fluid.CPUPlace())\n self.set_seed()\n m = MyModel()\n optim = fluid.optimizer.SGD(learning_rate=0.001,\n parameter_list=m.parameters())\n m.train()\n output = m(to_tensor(data))\n loss = CrossEntropyLoss(reduction='sum')(output, to_tensor(label))\n avg_loss = fluid.layers.reduce_sum(loss)\n avg_loss.backward()\n optim.minimize(avg_loss)\n m.clear_gradients()\n fluid.disable_dygraph()\n return avg_loss.numpy()\n\n ref = get_expect()\n for dynamic in [True, False]:\n device = paddle.set_device('cpu')\n fluid.enable_dygraph(device) if dynamic else None\n self.set_seed()\n\n net = MyModel()\n optim2 = fluid.optimizer.SGD(learning_rate=0.001,\n parameter_list=net.parameters())\n\n inputs = [InputSpec([None, dim], 'float32', 'x')]\n labels = [InputSpec([None, 1], 'int64', 'label')]\n model = Model(net, inputs, labels)\n model.prepare(optim2, loss=CrossEntropyLoss(reduction=\"sum\"))\n loss, = model.train_batch([data], [label])\n np.testing.assert_allclose(loss.flatten(), ref.flatten())\n fluid.disable_dygraph() if dynamic else None\n\n def test_test_batch(self):\n dim = 20\n data = np.random.random(size=(4, dim)).astype(np.float32)\n\n def get_expect():\n fluid.enable_dygraph(fluid.CPUPlace())\n self.set_seed()\n m = MyModel()\n m.eval()\n output = m(to_tensor(data))\n fluid.disable_dygraph()\n return output.numpy()\n\n ref = get_expect()\n for dynamic in [True, False]:\n device = paddle.set_device('cpu')\n fluid.enable_dygraph(device) if dynamic else None\n self.set_seed()\n net = MyModel()\n inputs = [InputSpec([None, dim], 'float32', 'x')]\n model = Model(net, inputs)\n model.prepare()\n out, = model.predict_batch([data])\n\n np.testing.assert_allclose(out, ref, rtol=1e-6)\n fluid.disable_dygraph() if dynamic else None\n\n def test_save_load(self):\n path = tempfile.mkdtemp()\n for dynamic in [True, False]:\n device = paddle.set_device('cpu')\n fluid.enable_dygraph(device) if dynamic else None\n net = MyModel()\n inputs = [InputSpec([None, 20], 'float32', 'x')]\n labels = 
[InputSpec([None, 1], 'int64', 'label')]\n optim = fluid.optimizer.SGD(learning_rate=0.001,\n parameter_list=net.parameters())\n model = Model(net, inputs, labels)\n model.prepare(\n optimizer=optim, loss=CrossEntropyLoss(reduction=\"sum\"))\n model.save(path + '/test')\n model.load(path + '/test')\n shutil.rmtree(path)\n fluid.disable_dygraph() if dynamic else None\n\n def test_dynamic_load(self):\n mnist_data = MnistDataset(mode='train')\n for new_optimizer in [True, False]:\n path = tempfile.mkdtemp()\n paddle.disable_static()\n net = LeNet()\n inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')]\n labels = [InputSpec([None, 1], 'int64', 'label')]\n if new_optimizer:\n optim = paddle.optimizer.Adam(\n learning_rate=0.001, parameters=net.parameters())\n else:\n optim = fluid.optimizer.Adam(\n learning_rate=0.001, parameter_list=net.parameters())\n model = Model(net, inputs, labels)\n model.prepare(\n optimizer=optim, loss=CrossEntropyLoss(reduction=\"sum\"))\n model.fit(mnist_data, batch_size=64, verbose=0)\n model.save(path + '/test')\n model.load(path + '/test')\n shutil.rmtree(path)\n paddle.enable_static()\n\n def test_dynamic_save_static_load(self):\n path = tempfile.mkdtemp()\n # dynamic saving\n device = paddle.set_device('cpu')\n fluid.enable_dygraph(device)\n model = Model(MyModel())\n optim = fluid.optimizer.SGD(learning_rate=0.001,\n parameter_list=model.parameters())\n model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction=\"sum\"))\n model.save(path + '/test')\n fluid.disable_dygraph()\n\n inputs = [InputSpec([None, 20], 'float32', 'x')]\n labels = [InputSpec([None, 1], 'int64', 'label')]\n model = Model(MyModel(), inputs, labels)\n optim = fluid.optimizer.SGD(learning_rate=0.001,\n parameter_list=model.parameters())\n model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction=\"sum\"))\n model.load(path + '/test')\n shutil.rmtree(path)\n\n def test_static_save_dynamic_load(self):\n path = tempfile.mkdtemp()\n\n net = MyModel()\n inputs = [InputSpec([None, 20], 'float32', 'x')]\n labels = [InputSpec([None, 1], 'int64', 'label')]\n optim = fluid.optimizer.SGD(learning_rate=0.001,\n parameter_list=net.parameters())\n model = Model(net, inputs, labels)\n model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction=\"sum\"))\n model.save(path + '/test')\n\n device = paddle.set_device('cpu')\n fluid.enable_dygraph(device) #if dynamic else None\n\n net = MyModel()\n inputs = [InputSpec([None, 20], 'float32', 'x')]\n labels = [InputSpec([None, 1], 'int64', 'label')]\n optim = fluid.optimizer.SGD(learning_rate=0.001,\n parameter_list=net.parameters())\n model = Model(net, inputs, labels)\n model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction=\"sum\"))\n model.load(path + '/test')\n shutil.rmtree(path)\n fluid.disable_dygraph()\n\n def test_parameters(self):\n for dynamic in [True, False]:\n device = paddle.set_device('cpu')\n fluid.enable_dygraph(device) if dynamic else None\n net = MyModel()\n inputs = [InputSpec([None, 20], 'float32', 'x')]\n model = Model(net, inputs)\n model.prepare()\n params = model.parameters()\n self.assertTrue(params[0].shape[0] == 20)\n self.assertTrue(params[0].shape[1] == 10)\n fluid.disable_dygraph() if dynamic else None\n\n def test_summary(self):\n def _get_param_from_state_dict(state_dict):\n params = 0\n for k, v in state_dict.items():\n params += np.prod(v.numpy().shape)\n return params\n\n for dynamic in [True, False]:\n device = paddle.set_device('cpu')\n fluid.enable_dygraph(device) if dynamic else None\n net = 
MyModel()\n inputs = [InputSpec([None, 20], 'float32', 'x')]\n model = Model(net, inputs)\n model.prepare()\n params_info = model.summary()\n gt_params = _get_param_from_state_dict(net.state_dict())\n\n np.testing.assert_allclose(params_info['total_params'], gt_params)\n print(params_info)\n\n model.summary(input_size=(20))\n model.summary(input_size=[(20)])\n model.summary(input_size=(20), dtype='float32')\n\n def test_summary_non_tensor(self):\n paddle.summary(ModelOutter(), input_size=(-1, 3))\n\n def test_summary_nlp(self):\n def _get_param_from_state_dict(state_dict):\n params = 0\n for k, v in state_dict.items():\n params += np.prod(v.numpy().shape)\n return params\n\n nlp_net = paddle.nn.GRU(input_size=2,\n hidden_size=3,\n num_layers=3,\n direction=\"bidirectional\")\n paddle.summary(nlp_net, (1, 1, 2))\n\n rnn = paddle.nn.LSTM(16, 32, 2)\n params_info = paddle.summary(\n rnn, [(-1, 23, 16), ((2, None, 32), (2, -1, 32))])\n gt_params = _get_param_from_state_dict(rnn.state_dict())\n np.testing.assert_allclose(params_info['total_params'], gt_params / 2.0)\n\n rnn = paddle.nn.GRU(16, 32, 2, direction='bidirectional')\n params_info = paddle.summary(rnn, (4, 23, 16))\n gt_params = _get_param_from_state_dict(rnn.state_dict())\n np.testing.assert_allclose(params_info['total_params'], gt_params / 2.0)\n\n rnn = paddle.nn.SimpleRNN(16, 32, 2, direction='bidirectional')\n params_info = paddle.summary(rnn, (4, 23, 16))\n gt_params = _get_param_from_state_dict(rnn.state_dict())\n np.testing.assert_allclose(params_info['total_params'], gt_params / 2.0)\n\n def test_summary_input(self):\n paddle.enable_static()\n mymodel = MyModel()\n input_data = paddle.rand([1, 20])\n paddle.summary(mymodel, input=input_data)\n paddle.disable_static()\n\n rnn = paddle.nn.SimpleRNN(16, 32, 2, direction='bidirectional')\n input_data = paddle.rand([4, 23, 16])\n paddle.summary(rnn, input=input_data)\n\n lenet_List_input = LeNetListInput()\n input_data = [paddle.rand([1, 1, 28, 28]), paddle.rand([1, 400])]\n paddle.summary(lenet_List_input, input=input_data)\n\n lenet_dict_input = LeNetDictInput()\n input_data = {\n 'x1': paddle.rand([1, 1, 28, 28]),\n 'x2': paddle.rand([1, 400])\n }\n paddle.summary(lenet_dict_input, input=input_data)\n\n def test_summary_dtype(self):\n input_shape = (3, 1)\n net = paddle.nn.Embedding(10, 3, sparse=True)\n paddle.summary(net, input_shape, dtypes='int64')\n\n def test_summary_error(self):\n with self.assertRaises(TypeError):\n nlp_net = paddle.nn.GRU(input_size=2, hidden_size=3, num_layers=3)\n paddle.summary(nlp_net, (1, 1, '2'))\n\n with self.assertRaises(ValueError):\n nlp_net = paddle.nn.GRU(input_size=2, hidden_size=3, num_layers=3)\n paddle.summary(nlp_net, (-1, -1))\n\n paddle.disable_static()\n nlp_net = paddle.nn.GRU(input_size=2, hidden_size=3, num_layers=3)\n paddle.summary(nlp_net, (1, 1, 2))\n\n def test_static_flops(self):\n paddle.disable_static()\n net = models.__dict__['mobilenet_v2'](pretrained=False)\n inputs = paddle.randn([1, 3, 224, 224])\n static_program = jit._trace(net, inputs=[inputs])[1]\n paddle.flops(static_program, [1, 3, 224, 224], print_detail=True)\n\n def test_dynamic_flops(self):\n net = models.__dict__['mobilenet_v2'](pretrained=False)\n\n def customize_dropout(m, x, y):\n m.total_ops += 0\n\n paddle.flops(\n net, [1, 3, 224, 224],\n custom_ops={paddle.nn.Dropout: customize_dropout},\n print_detail=True)\n\n def test_export_deploy_model(self):\n self.set_seed()\n np.random.seed(201)\n for dynamic in [True, False]:\n paddle.disable_static() if 
dynamic else None\n prog_translator = ProgramTranslator()\n prog_translator.enable(False) if not dynamic else None\n net = LeNet()\n inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')]\n model = Model(net, inputs)\n model.prepare()\n save_dir = tempfile.mkdtemp()\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n tensor_img = np.array(\n np.random.random((1, 1, 28, 28)), dtype=np.float32)\n\n model.save(save_dir, training=False)\n ori_results = model.predict_batch(tensor_img)\n fluid.disable_dygraph() if dynamic else None\n\n place = fluid.CPUPlace() if not fluid.is_compiled_with_cuda(\n ) else fluid.CUDAPlace(0)\n new_scope = fluid.Scope()\n with fluid.scope_guard(new_scope):\n exe = fluid.Executor(place)\n [inference_program, feed_target_names, fetch_targets] = (\n paddle.static.io.load_inference_model(\n path_prefix=save_dir, executor=exe))\n results = exe.run(inference_program,\n feed={feed_target_names[0]: tensor_img},\n fetch_list=fetch_targets)\n np.testing.assert_allclose(\n results, ori_results, rtol=1e-5, atol=1e-7)\n shutil.rmtree(save_dir)\n paddle.enable_static()\n\n def test_dygraph_export_deploy_model_about_inputs(self):\n self.set_seed()\n np.random.seed(201)\n mnist_data = MnistDataset(mode='train')\n paddle.disable_static()\n # without inputs\n for initial in [\"fit\", \"train_batch\", \"eval_batch\", \"predict_batch\"]:\n save_dir = tempfile.mkdtemp()\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n net = LeNet()\n model = Model(net)\n optim = fluid.optimizer.Adam(\n learning_rate=0.001, parameter_list=model.parameters())\n model.prepare(\n optimizer=optim, loss=CrossEntropyLoss(reduction=\"sum\"))\n if initial == \"fit\":\n model.fit(mnist_data, batch_size=64, verbose=0)\n else:\n img = np.array(\n np.random.random((1, 1, 28, 28)), dtype=np.float32)\n label = np.array(np.random.rand(1, 1), dtype=np.int64)\n if initial == \"train_batch\":\n model.train_batch([img], [label])\n elif initial == \"eval_batch\":\n model.eval_batch([img], [label])\n else:\n model.predict_batch([img])\n\n model.save(save_dir, training=False)\n shutil.rmtree(save_dir)\n # with inputs, and the type of inputs is InputSpec\n save_dir = tempfile.mkdtemp()\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n net = LeNet()\n inputs = InputSpec([None, 1, 28, 28], 'float32', 'x')\n model = Model(net, inputs)\n optim = fluid.optimizer.Adam(\n learning_rate=0.001, parameter_list=model.parameters())\n model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction=\"sum\"))\n model.save(save_dir, training=False)\n shutil.rmtree(save_dir)\n\n def test_accumulate(self, ):\n dim = 20\n data = np.random.random(size=(4, dim)).astype(np.float32)\n label = np.random.randint(0, 10, size=(4, 1)).astype(np.int64)\n net = MyModel()\n optim = fluid.optimizer.SGD(learning_rate=0.001,\n parameter_list=net.parameters())\n inputs = [InputSpec([None, dim], 'float32', 'x')]\n labels = [InputSpec([None, 1], 'int64', 'label')]\n\n for amp_cfg in [None, 'O1']:\n model = Model(net, inputs, labels)\n model.prepare(\n optim,\n loss=CrossEntropyLoss(reduction=\"sum\"),\n amp_configs=amp_cfg)\n losses, grads = [], []\n for stat in [False, False, True]:\n loss, = model.train_batch([data], [label], update=stat)\n losses.append(loss)\n grads.append([p.grad.numpy() for p in net.parameters()])\n\n for grad1, grad2, grad3 in zip(*grads):\n np.testing.assert_almost_equal(grad1 * 2, grad2, decimal=4)\n np.testing.assert_almost_equal(\n grad3, np.zeros_like(grad3), decimal=4)\n\n 
np.testing.assert_almost_equal(losses[0], losses[1], decimal=4)\n np.testing.assert_almost_equal(losses[0], losses[2], decimal=4)\n\n\nclass TestModelWithLRScheduler(unittest.TestCase):\n def test_fit_by_step(self):\n base_lr = 1e-3\n boundaries = [5, 8]\n\n def make_optimizer(parameters=None):\n momentum = 0.9\n weight_decay = 5e-4\n values = [base_lr * (0.1**i) for i in range(len(boundaries) + 1)]\n learning_rate = paddle.optimizer.lr.PiecewiseDecay(\n boundaries=boundaries, values=values)\n learning_rate = paddle.optimizer.lr.LinearWarmup(\n learning_rate=learning_rate,\n warmup_steps=4,\n start_lr=base_lr / 5.,\n end_lr=base_lr,\n verbose=True)\n optimizer = paddle.optimizer.Momentum(\n learning_rate=learning_rate,\n weight_decay=weight_decay,\n momentum=momentum,\n parameters=parameters)\n return optimizer\n\n # dynamic test\n device = paddle.set_device('cpu')\n fluid.enable_dygraph(device)\n net = MyModel()\n inputs = [InputSpec([None, 20], 'float32', 'x')]\n labels = [InputSpec([None, 1], 'int64', 'label')]\n optim = make_optimizer(net.parameters())\n model = Model(net, inputs, labels)\n model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction=\"sum\"))\n\n dataset = MyDataset()\n model.fit(dataset, dataset, batch_size=4, epochs=10, num_workers=0)\n\n np.testing.assert_allclose(model._optimizer._learning_rate.last_lr,\n base_lr * (0.1**len(boundaries)))\n # static test\n paddle.enable_static()\n\n net = MyModel()\n inputs = [InputSpec([None, 20], 'float32', 'x')]\n labels = [InputSpec([None, 1], 'int64', 'label')]\n optim = make_optimizer(net.parameters())\n model = Model(net, inputs, labels)\n model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction=\"sum\"))\n\n dataset = MyDataset()\n model.fit(dataset, dataset, batch_size=4, epochs=10, num_workers=0)\n\n np.testing.assert_allclose(model._optimizer._learning_rate.last_lr,\n base_lr * (0.1**len(boundaries)))\n\n def test_fit_by_epoch(self):\n base_lr = 1e-3\n boundaries = [5, 8]\n epochs = 10\n wamup_epochs = 4\n\n def make_optimizer(parameters=None):\n momentum = 0.9\n weight_decay = 5e-4\n values = [base_lr * (0.1**i) for i in range(len(boundaries) + 1)]\n learning_rate = paddle.optimizer.lr.PiecewiseDecay(\n boundaries=boundaries, values=values)\n learning_rate = paddle.optimizer.lr.LinearWarmup(\n learning_rate=learning_rate,\n warmup_steps=wamup_epochs,\n start_lr=base_lr / 5.,\n end_lr=base_lr,\n verbose=True)\n optimizer = paddle.optimizer.Momentum(\n learning_rate=learning_rate,\n weight_decay=weight_decay,\n momentum=momentum,\n parameters=parameters)\n return optimizer\n\n # dynamic test\n device = paddle.set_device('cpu')\n fluid.enable_dygraph(device)\n net = MyModel()\n inputs = [InputSpec([None, 20], 'float32', 'x')]\n labels = [InputSpec([None, 1], 'int64', 'label')]\n optim = make_optimizer(net.parameters())\n model = Model(net, inputs, labels)\n model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction=\"sum\"))\n\n dataset = MyDataset()\n\n lr_scheduler_callback = paddle.callbacks.LRScheduler(\n by_step=False, by_epoch=True)\n\n model.fit(dataset,\n dataset,\n batch_size=4,\n epochs=epochs,\n num_workers=0,\n callbacks=lr_scheduler_callback)\n\n cnt = 0\n for b in boundaries:\n if b + wamup_epochs <= epochs:\n cnt += 1\n\n np.testing.assert_allclose(model._optimizer._learning_rate.last_lr,\n base_lr * (0.1**cnt))\n # static test\n paddle.enable_static()\n\n net = MyModel()\n inputs = [InputSpec([None, 20], 'float32', 'x')]\n labels = [InputSpec([None, 1], 'int64', 'label')]\n optim = 
make_optimizer(net.parameters())\n model = Model(net, inputs, labels)\n model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction=\"sum\"))\n\n dataset = MyDataset()\n\n lr_scheduler_callback = paddle.callbacks.LRScheduler(\n by_step=False, by_epoch=True)\n\n model.fit(dataset,\n dataset,\n batch_size=4,\n epochs=epochs,\n num_workers=0,\n callbacks=lr_scheduler_callback)\n\n cnt = 0\n for b in boundaries:\n if b + wamup_epochs <= epochs:\n cnt += 1\n\n np.testing.assert_allclose(model._optimizer._learning_rate.last_lr,\n base_lr * (0.1**cnt))\n\n\nclass TestRaiseError(unittest.TestCase):\n def test_input_without_name(self):\n net = MyModel()\n inputs = [InputSpec([None, 10], 'float32')]\n labels = [InputSpec([None, 1], 'int64', 'label')]\n with self.assertRaises(ValueError):\n model = Model(net, inputs, labels)\n\n def test_static_without_inputs(self):\n paddle.enable_static()\n net = MyModel()\n with self.assertRaises(TypeError):\n model = Model(net)\n\n def test_save_infer_model_without_inputs_and_run_in_dygraph(self):\n paddle.disable_static()\n net = MyModel()\n save_dir = tempfile.mkdtemp()\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n with self.assertRaises(RuntimeError):\n model = Model(net)\n model.save(save_dir, training=False)\n paddle.enable_static()\n\n def test_save_infer_model_without_file_prefix(self):\n paddle.enable_static()\n net = LeNet()\n inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')]\n model = Model(net, inputs)\n model.prepare()\n path = \"\"\n tensor_img = np.array(\n np.random.random((1, 1, 28, 28)), dtype=np.float32)\n with self.assertRaises(ValueError):\n model.save(path, training=False)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.testing.assert_allclose", "numpy.array", "numpy.zeros_like", "numpy.random.rand", "numpy.reshape", "numpy.random.seed", "numpy.sum", "numpy.testing.assert_almost_equal", "numpy.argmax", "numpy.random.randint", "numpy.random.random" ] ]
EmileDvs/pyleecan
[ "1faedde4b24acc6361fa1fdd4e980eaec4ca3a62" ]
[ "Tests/Plot/test_PostPlot.py" ]
[ "from os.path import join\n\nimport pytest\n\nfrom numpy import exp, sqrt, pi\n\nimport matplotlib.pyplot as plt\n\nfrom pyleecan.Classes.Simu1 import Simu1\n\nfrom pyleecan.Classes.InputCurrent import InputCurrent\n\nfrom pyleecan.Classes.MagFEMM import MagFEMM\nfrom pyleecan.Classes.ForceMT import ForceMT\n\nfrom pyleecan.Classes.PostPlot import PostPlot\n\nfrom pyleecan.Functions.load import load\nfrom pyleecan.Functions.Plot import dict_2D\nfrom pyleecan.definitions import DATA_DIR\n\n\[email protected]_5s\[email protected]\[email protected]\[email protected]\[email protected]\[email protected]\ndef test_PostPlot():\n \"\"\"Validation of the PostPlot class to plot airgap flux automatically as postprocessing at the end of the simulation\"\"\"\n\n Toyota_Prius = load(join(DATA_DIR, \"Machine\", \"Toyota_Prius.json\"))\n\n simu = Simu1(name=\"test_PostPlot\", machine=Toyota_Prius)\n\n # Definition of the enforced output of the electrical module\n I0_rms = 250 / sqrt(2)\n Phi0 = 140 * pi / 180 # Maximum Torque Per Amp\n\n Id_ref = (I0_rms * exp(1j * Phi0)).real\n Iq_ref = (I0_rms * exp(1j * Phi0)).imag\n\n simu.input = InputCurrent(\n Id_ref=Id_ref,\n Iq_ref=Iq_ref,\n Na_tot=252 * 8,\n Nt_tot=20 * 8,\n N0=1000,\n )\n\n # Definition of the magnetic simulation: with periodicity\n simu.mag = MagFEMM(is_periodicity_a=True, is_periodicity_t=True, nb_worker=4)\n simu.force = ForceMT(is_periodicity_a=True, is_periodicity_t=True)\n\n # Plot radial and tangential flux densities over angle as an automated PostProc\n # and save the picture\n fig1, axes1 = plt.subplots(2, 1)\n fig2, axes2 = plt.subplots(1, 2)\n\n plot_B_rad_tan_space1 = PostPlot(\n method=\"plot_2D_Data\",\n quantity=\"mag.B\",\n param_list=[\"angle\"],\n param_dict=dict(\n {\n \"component_list\": [\"radial\"],\n \"is_show_fig\": False,\n \"save_path\": None,\n \"fig\": fig1,\n \"ax\": axes1[0],\n },\n **dict_2D\n ),\n )\n\n plot_B_rad_tan_space2 = PostPlot(\n method=\"plot_2D_Data\",\n quantity=\"mag.B\",\n param_list=[\"angle\"],\n param_dict=dict(\n {\n \"component_list\": [\"tangential\"],\n \"is_show_fig\": False,\n \"fig\": fig1,\n \"ax\": axes1[1],\n },\n **dict_2D\n ),\n name=\"plot_B_rad_tan_space\",\n save_format=\"png\",\n )\n\n plot_machine_Tem_time1 = PostPlot(\n method=\"plot\",\n quantity=\"simu.machine\",\n param_dict={\n \"is_show_fig\": False,\n \"save_path\": None,\n \"fig\": fig2,\n \"ax\": axes2[0],\n },\n )\n\n plot_machine_Tem_time2 = PostPlot(\n method=\"plot_2D_Data\",\n quantity=\"mag.Tem\",\n param_list=[\"time\"],\n param_dict=dict(\n {\n \"is_show_fig\": False,\n \"fig\": fig2,\n \"ax\": axes2[1],\n },\n **dict_2D\n ),\n name=\"plot_machine_Tem_time\",\n save_format=\"png\",\n )\n\n plot_P_radial_space_svg = PostPlot(\n method=\"plot_2D_Data\",\n quantity=\"force.AGSF\",\n param_list=[\"angle\"],\n param_dict=dict(\n {\n \"component_list\": [\"radial\"],\n \"is_show_fig\": False,\n },\n **dict_2D\n ),\n name=\"plot_P_radial_space\",\n save_format=\"svg\",\n )\n\n plot_Is = PostPlot(\n method=\"plot_2D_Data\",\n quantity=\"elec.get_Is\",\n param_list=[\"time\", \"phase\"],\n param_dict=dict(\n {\n \"is_show_fig\": False,\n },\n **dict_2D\n ),\n name=\"plot_Is\",\n save_format=\"png\",\n )\n\n simu.postproc_list = [\n plot_B_rad_tan_space1,\n plot_B_rad_tan_space2,\n plot_machine_Tem_time1,\n plot_machine_Tem_time2,\n plot_P_radial_space_svg,\n plot_Is,\n ]\n\n # Run simulations\n out = simu.run()\n\n return out\n\n\n# To run it without pytest\nif __name__ == \"__main__\":\n\n out = test_PostPlot()\n" ]
[ [ "numpy.sqrt", "numpy.exp", "matplotlib.pyplot.subplots" ] ]
loveisacat/pymarl
[ "383af616cbf5b1c878b62089baf15231a29f9a82" ]
[ "src/run.py" ]
[ "import datetime\nimport os\nimport pprint\nimport time\nimport threading\nimport torch as th\nfrom types import SimpleNamespace as SN\nfrom utils.logging import Logger\nfrom utils.timehelper import time_left, time_str\nfrom os.path import dirname, abspath\n\nfrom learners import REGISTRY as le_REGISTRY\nfrom runners import REGISTRY as r_REGISTRY\nfrom controllers import REGISTRY as mac_REGISTRY\nfrom components.episode_buffer import ReplayBuffer\nfrom components.transforms import OneHot\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\n\ndef run(_run, _config, _log):\n\n # check args sanity\n _config = args_sanity_check(_config, _log)\n\n args = SN(**_config)\n args.device = \"cuda\" if args.use_cuda else \"cpu\"\n\n # setup loggers\n logger = Logger(_log)\n\n _log.info(\"Experiment Parameters:\")\n experiment_params = pprint.pformat(_config,\n indent=4,\n width=1)\n _log.info(\"\\n\\n\" + experiment_params + \"\\n\")\n\n # configure tensorboard logger\n unique_token = \"{}__{}\".format(args.name, datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\"))\n args.unique_token = unique_token\n if args.use_tensorboard:\n tb_logs_direc = os.path.join(dirname(dirname(abspath(__file__))), \"results\", \"tb_logs\")\n tb_exp_direc = os.path.join(tb_logs_direc, \"{}\").format(unique_token)\n logger.setup_tb(tb_exp_direc)\n\n # sacred is on by default\n logger.setup_sacred(_run)\n\n # Run and train\n run_sequential(args=args, logger=logger)\n\n # Clean up after finishing\n print(\"Exiting Main\")\n\n print(\"Stopping all threads\")\n for t in threading.enumerate():\n if t.name != \"MainThread\":\n print(\"Thread {} is alive! Is daemon: {}\".format(t.name, t.daemon))\n t.join(timeout=1)\n print(\"Thread joined\")\n\n print(\"Exiting script\")\n\n # Making sure framework really exits\n os._exit(os.EX_OK)\n\n\ndef evaluate_sequential(args, runner):\n\n for _ in range(args.test_nepisode):\n runner.run(test_mode=True)\n\n if args.save_replay:\n runner.save_replay()\n\n runner.close_env()\n\ndef run_sequential(args, logger):\n\n # Init runner so we can get env info\n runner = r_REGISTRY[args.runner](args=args, logger=logger)\n\n # Set up schemes and groups here\n env_info = runner.get_env_info()\n args.n_agents = env_info[\"n_agents\"]\n #args.n_actions = env_info[\"n_actions\"]\n #we only need direction actions: east,west,north,south,op,stop\n args.n_actions = 6\n args.state_shape = env_info[\"state_shape\"]\n\n # Default/Base scheme\n scheme = {\n \"state\": {\"vshape\": env_info[\"state_shape\"]},\n \"obs\": {\"vshape\": env_info[\"obs_shape\"], \"group\": \"agents\"},\n \"actions\": {\"vshape\": (1,), \"group\": \"agents\", \"dtype\": th.long},\n \"avail_actions\": {\"vshape\": (args.n_actions,), \"group\": \"agents\", \"dtype\": th.int},\n #\"avail_actions\": {\"vshape\": (env_info[\"n_actions\"],), \"group\": \"agents\", \"dtype\": th.int},\n \"reward\": {\"vshape\": (1,)},\n \"terminated\": {\"vshape\": (1,), \"dtype\": th.uint8},\n }\n groups = {\n \"agents\": args.n_agents\n }\n preprocess = {\n \"actions\": (\"actions_onehot\", [OneHot(out_dim=args.n_actions)])\n }\n\n buffer = ReplayBuffer(scheme, groups, args.buffer_size, env_info[\"episode_limit\"] + 1,\n preprocess=preprocess,\n device=\"cpu\" if args.buffer_cpu_only else args.device)\n\n # Setup multiagent controller here\n mac = mac_REGISTRY[args.mac](buffer.scheme, groups, args)\n\n # Give runner the scheme\n runner.setup(scheme=scheme, groups=groups, preprocess=preprocess, mac=mac)\n\n # Learner\n learner = le_REGISTRY[args.learner](mac, 
buffer.scheme, logger, args)\n\n if args.use_cuda:\n learner.cuda()\n\n if args.checkpoint_path != \"\":\n\n timesteps = []\n timestep_to_load = 0\n\n if not os.path.isdir(args.checkpoint_path):\n logger.console_logger.info(\"Checkpoint directiory {} doesn't exist\".format(args.checkpoint_path))\n return\n\n # Go through all files in args.checkpoint_path\n for name in os.listdir(args.checkpoint_path):\n full_name = os.path.join(args.checkpoint_path, name)\n # Check if they are dirs the names of which are numbers\n if os.path.isdir(full_name) and name.isdigit():\n timesteps.append(int(name))\n\n if args.load_step == 0:\n # choose the max timestep\n timestep_to_load = max(timesteps)\n else:\n # choose the timestep closest to load_step\n timestep_to_load = min(timesteps, key=lambda x: abs(x - args.load_step))\n\n model_path = os.path.join(args.checkpoint_path, str(timestep_to_load))\n\n logger.console_logger.info(\"Loading model from {}\".format(model_path))\n learner.load_models(model_path)\n #runner.t_env = timestep_to_load\n runner.t_env = 0\n\n if args.evaluate or args.save_replay:\n evaluate_sequential(args, runner)\n return\n\n # start training\n episode = 0\n last_test_T = -args.test_interval - 1\n last_log_T = 0\n model_save_time = 0\n\n start_time = time.time()\n last_time = start_time\n\n logger.console_logger.info(\"Beginning training for {} timesteps\".format(args.t_max))\n\n while runner.t_env <= args.t_max:\n\n # Run for a whole episode at a time\n episode_batch = runner.run(test_mode=False)\n buffer.insert_episode_batch(episode_batch)\n\n if buffer.can_sample(args.batch_size):\n episode_sample = buffer.sample(args.batch_size)\n\n # Truncate batch to only filled timesteps\n max_ep_t = episode_sample.max_t_filled()\n episode_sample = episode_sample[:, :max_ep_t]\n\n if episode_sample.device != args.device:\n episode_sample.to(args.device)\n\n learner.train(episode_sample, runner.t_env, episode)\n\n # Execute test runs once in a while\n n_test_runs = max(1, args.test_nepisode // runner.batch_size)\n if (runner.t_env - last_test_T) / args.test_interval >= 1.0:\n\n logger.console_logger.info(\"t_env: {} / {}\".format(runner.t_env, args.t_max))\n logger.console_logger.info(\"Estimated time left: {}. 
Time passed: {}\".format(\n time_left(last_time, last_test_T, runner.t_env, args.t_max), time_str(time.time() - start_time)))\n last_time = time.time()\n\n last_test_T = runner.t_env\n for _ in range(n_test_runs):\n runner.run(test_mode=True)\n\n if args.save_model and (runner.t_env - model_save_time >= args.save_model_interval or model_save_time == 0):\n model_save_time = runner.t_env\n save_path = os.path.join(args.local_results_path, \"models\", args.unique_token, str(runner.t_env))\n #\"results/models/{}\".format(unique_token)\n os.makedirs(save_path, exist_ok=True)\n logger.console_logger.info(\"Saving models to {}\".format(save_path))\n\n # learner should handle saving/loading -- delegate actor save/load to mac,\n # use appropriate filenames to do critics, optimizer states\n learner.save_models(save_path)\n\n episode += args.batch_size_run\n\n if (runner.t_env - last_log_T) >= args.log_interval:\n logger.log_stat(\"episode\", episode, runner.t_env)\n logger.print_recent_stats()\n last_log_T = runner.t_env\n\n runner.close_env()\n logger.console_logger.info(\"Finished Training\")\n\n\ndef args_sanity_check(config, _log):\n\n # set CUDA flags\n # config[\"use_cuda\"] = True # Use cuda whenever possible!\n if config[\"use_cuda\"] and not th.cuda.is_available():\n config[\"use_cuda\"] = False\n _log.warning(\"CUDA flag use_cuda was switched OFF automatically because no CUDA devices are available!\")\n\n if config[\"test_nepisode\"] < config[\"batch_size_run\"]:\n config[\"test_nepisode\"] = config[\"batch_size_run\"]\n else:\n config[\"test_nepisode\"] = (config[\"test_nepisode\"]//config[\"batch_size_run\"]) * config[\"batch_size_run\"]\n\n return config\n" ]
[ [ "torch.cuda.is_available" ] ]
CSUBioGroup/SDLDA-
[ "ceb8fc1713b7eb90b858cec6b9418ff76524842d" ]
[ "LOOCV/data_input_cv.py" ]
[ "# encoding=utf-8\r\nimport random\r\nimport pickle\r\nimport numpy as np\r\nfrom hyperparams import Hyperparams as params\r\n\r\nrandom.seed(params.static_random_seed)\r\nneg_pos_ratio = params.neg_pos_ratio\r\ntrain_val_ratio = params.train_val_ratio\r\n\r\n\r\nclass DataLoader:\r\n def __init__(self, use_side_info=False):\r\n with open('../data_processing/data.pkl', 'rb') as file:\r\n pos_set, neg_set = pickle.load(file)\r\n with open('../data_processing/matrix.npy', 'rb') as file:\r\n matrix = np.load(file)\r\n\r\n if use_side_info:\r\n self.u_feature = np.load('../data_processing/u_feature.npy')\r\n self.v_feature = np.load('../data_processing/v_feature.npy')\r\n\r\n self.matrix = matrix\r\n self.pos_set = pos_set\r\n self.neg_set = neg_set\r\n self.pos_size = len(pos_set)\r\n self.train_set = self.pos_set + self.neg_set # initializer\r\n\r\n def coor_to_sample(self, batch, use_sise_info=False):\r\n XL_batch = []\r\n XR_batch = []\r\n Y_batch = []\r\n for i, j, l in batch:\r\n temp = self.matrix[i][j]\r\n self.matrix[i][j] = 0\r\n XL_batch.append(self.matrix[i])\r\n XR_batch.append(self.matrix[:, j])\r\n self.matrix[i][j] = temp\r\n Y_batch.append(l)\r\n XL_batch = np.array(XL_batch)\r\n XR_batch = np.array(XR_batch)\r\n Y_batch = np.array(Y_batch).reshape((-1, 1))\r\n\r\n if use_sise_info is not True:\r\n return XL_batch, XR_batch, Y_batch\r\n else:\r\n u_feature_batch = []\r\n v_feature_batch = []\r\n for i, j, l in batch:\r\n u_feature_batch.append(self.u_feature[i])\r\n v_feature_batch.append(self.v_feature[j])\r\n U_batch = np.stack(u_feature_batch, axis=0)\r\n V_batch = np.stack(v_feature_batch, axis=0)\r\n return XL_batch, U_batch, XR_batch, V_batch, Y_batch\r\n\r\n def shuffle(self):\r\n random.shuffle(self.train_set)\r\n\r\n def leave_one_out(self, id):\r\n assert id >= 0 and id <= len(self.pos_set)\r\n\r\n neg_size = (self.pos_size - 1) * neg_pos_ratio\r\n neg_set = self.neg_set\r\n random.shuffle(neg_set)\r\n neg_set = neg_set[:neg_size]\r\n\r\n train_set = neg_set + self.pos_set[:id] + self.pos_set[id:]\r\n self.train_set = train_set\r\n self.train_size = len(train_set)\r\n self.val_set = [self.pos_set[id]]\r\n self.val_size = 1\r\n\r\n def sample_a_col(self, col_id):\r\n cols = []\r\n for i, x in enumerate(self.matrix[:, col_id]):\r\n cols.append((i, col_id, x))\r\n return cols\r\n\r\n\r\nif __name__ == '__main__':\r\n dl = DataLoader()\r\n # print(dl.train_set)\r\n dl.shuffle()\r\n" ]
[ [ "numpy.array", "numpy.stack", "numpy.load" ] ]
VolkerH/LLSpy
[ "d14b2387058f679981ff08af546570527bc723d9" ]
[ "llspy/otf.py" ]
[ "from .exceptions import OTFError\nfrom .util import load_lib\nfrom datetime import datetime, timedelta\n\nimport numpy as np\nimport re\nimport ctypes\nimport os\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ntry:\n import pathlib as plib\n\n plib.Path()\nexcept (ImportError, AttributeError):\n import pathlib2 as plib\nexcept (ImportError, AttributeError):\n raise ImportError(\"no pathlib detected. For python2: pip install pathlib2\")\n\n\notflib = load_lib(\"libradialft\")\n\nif not otflib:\n logger.error(\"Could not load libradialft!\")\nelse:\n try:\n shared_makeotf = otflib.makeOTF\n shared_makeotf.restype = ctypes.c_int\n shared_makeotf.argtypes = [\n ctypes.c_char_p,\n ctypes.c_char_p,\n ctypes.c_int,\n ctypes.c_float,\n ctypes.c_int,\n ctypes.c_bool,\n ctypes.c_float,\n ctypes.c_float,\n ctypes.c_float,\n ctypes.c_float,\n ctypes.c_int,\n ctypes.c_bool,\n ]\n except AttributeError as e:\n logger.warn(\"Failed to properly import libradialft\")\n logger.error(e)\n\n\ndef requireOTFlib(func, *args, **kwargs):\n def dec(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except Exception as e:\n if not otflib:\n raise Exception(\n \"Could not find libradialft library! OTF generation \"\n \"will not be available:\"\n )\n else:\n raise e\n\n return dec\n\n\n@requireOTFlib\ndef makeotf(\n psf,\n otf=None,\n lambdanm=520,\n dz=0.102,\n fixorigin=10,\n bUserBackground=False,\n background=90,\n NA=1.25,\n NIMM=1.3,\n dr=0.102,\n krmax=0,\n bDoCleanup=False,\n):\n # krmax => \"pixels outside this limit will be zeroed (overwriting estimated value from NA and NIMM)\")\n if otf is None:\n otf = psf.replace(\".tif\", \"_otf.tif\")\n shared_makeotf(\n str.encode(psf),\n str.encode(otf),\n lambdanm,\n dz,\n fixorigin,\n bUserBackground,\n background,\n NA,\n NIMM,\n dr,\n krmax,\n bDoCleanup,\n )\n return otf\n\n\n# example: 20160825_488_totPSF_mb_0p5-0p42.tif\n\npsffile_pattern = re.compile(\n r\"\"\"\n ^(?P<date>\\d{6}|\\d{8}) # 6 or 8 digit date\n _(?P<wave>\\d+) # wavelength ... only digits following _ are used\n _(?P<slmpattern>[a-zA-Z_]+) # slm pattern\n _(?P<outerNA>[0-9p.]+) # outer NA, digits with . or p for decimal\n [-_](?P<innerNA>[0-9p.]+) # inter NA, digits with . 
or p for decimal\n (?P<isotf>_otf)?.tif$\"\"\", # optional _otf to specify that it is already an otf\n re.VERBOSE,\n)\n\n\ndefault_otf_pattern = re.compile(\n r\"\"\"\n ^(?P<wave>\\d{3})\n (?P<isotf>_otf)?\n (?P<ispsf>_psf)?.tif$\"\"\",\n re.VERBOSE,\n)\n\n\ndef dir_has_otfs(dirname):\n if os.path.isdir(str(dirname)):\n if any(\n [\n (psffile_pattern.search(t) or default_otf_pattern.search(t))\n for t in os.listdir(dirname)\n ]\n ):\n return True\n return False\n\n\ndef get_otf_dict(otfdir):\n \"\"\" The otf_dict is a dict with\n \"\"\"\n otf_dict = {}\n otfdir = plib.Path(otfdir)\n\n for t in otfdir.glob(\"*tif\"):\n M = psffile_pattern.search(str(t.name))\n if M:\n M = M.groupdict()\n wave = int(M[\"wave\"])\n if wave not in otf_dict:\n otf_dict[wave] = {\"default\": None}\n mask = (\n float(M[\"innerNA\"].replace(\"p\", \".\")),\n float(M[\"outerNA\"].replace(\"p\", \".\")),\n )\n if mask not in otf_dict[wave]:\n otf_dict[wave][mask] = []\n if not M[\"isotf\"]:\n matching_otf = otfdir.joinpath(t.name.replace(\".tif\", \"_otf.tif\"))\n if not matching_otf.is_file():\n matching_otf = None\n else:\n matching_otf = matching_otf\n else:\n matching_otf = None\n otf_dict[wave][mask].append(\n {\n \"date\": datetime.strptime(M[\"date\"], \"%Y%m%d\"),\n \"path\": str(t),\n \"form\": \"otf\" if M[\"isotf\"] else \"psf\",\n \"slm\": M[\"slmpattern\"],\n \"otf\": str(matching_otf),\n }\n )\n else:\n M = default_otf_pattern.search(str(t.name))\n if M:\n M = M.groupdict()\n wave = int(M[\"wave\"])\n if wave not in otf_dict:\n otf_dict[wave] = {}\n if not M[\"isotf\"]:\n newname = str(t).replace(\".tif\", \"_otf.tif\")\n if M[\"ispsf\"]:\n newname.replace('_psf', '')\n if not os.path.exists(newname):\n otf = makeotf(str(t), newname, lambdanm=int(wave), bDoCleanup=False)\n t = str(otf)\n otf_dict[wave][\"default\"] = str(t)\n for wave in otf_dict.keys():\n logger.debug(\"OTFdict wave: {}, masks: {}\".format(wave, otf_dict[wave].keys()))\n return otf_dict\n\n\ndef get_default_otf(wave, otfpath, approximate=True):\n origwave = wave\n otf_dict = get_otf_dict(otfpath)\n waves_with_defaults = [k for k, v in otf_dict.items() if v[\"default\"] is not None]\n if wave not in waves_with_defaults:\n if approximate:\n for newwave in range(wave - 8, wave + 9):\n if newwave in waves_with_defaults:\n wave = newwave\n if wave in otf_dict:\n return otf_dict[wave][\"default\"]\n else:\n raise OTFError(\"No default OTF found for wavelength {}\".format(origwave))\n\n\ndef choose_otf(\n wave, otfpath, date=None, mask=None, direction=\"nearest\", approximate=True\n):\n \"\"\"return otf with date closest to requested date.\n if OTF doesn't exist, but PSF does, generate OTF and return the path.i\n direction can be {'nearest', 'before', 'after'}, where 'before' returns an\n OTF that was collected before 'date' and 'after' returns one that was\n collected after 'date.'\n \"\"\"\n if not dir_has_otfs(otfpath):\n raise OTFError(\"Not a valid OTF path: {}\".format(otfpath))\n if not date:\n date = datetime.now()\n\n otf_dict = get_otf_dict(otfpath)\n otflist = []\n\n # if the exact wavelenght is not matched, look for similar wavelengths...\n if wave not in otf_dict:\n if approximate:\n for newwave in range(wave - 8, wave + 9):\n if newwave in otf_dict:\n wave = newwave\n break\n else:\n return None\n if wave not in otf_dict:\n return None\n\n # if the mask has been provided, use the OTFs from that mask\n if mask is not None and mask in otf_dict[wave]:\n otflist = otf_dict[wave][mask]\n\n # if still empty, just return the default\n 
if not len(otflist):\n return get_default_otf(wave, otfpath, approximate)\n\n if direction == \"nearest\":\n minIdx = np.argmin([np.abs(i[\"date\"] - date) for i in otflist])\n elif direction == \"before\":\n deltas = [date - i[\"date\"] for i in otflist]\n test = [d > timedelta(minutes=0) for d in deltas]\n minIdx = next((obj for obj in test if obj), None)\n elif direction == \"after\":\n deltas = [i[\"date\"] - date for i in otflist]\n test = [d > timedelta(minutes=0) for d in deltas]\n minIdx = next((obj for obj in test if obj), None)\n else:\n raise ValueError(\"Unkown direction argument: {}\".format(direction))\n\n if minIdx is None:\n return get_default_otf(wave, otfpath, approximate)\n\n matching_otfs = [\n i\n for i in otflist\n if i[\"date\"] == otflist[minIdx][\"date\"] and i[\"form\"] == \"otf\"\n ]\n if len(matching_otfs):\n return matching_otfs[0][\"path\"]\n else:\n matching_psfs = [\n i\n for i in otflist\n if i[\"date\"] == otflist[minIdx][\"date\"] and i[\"form\"] == \"psf\"\n ]\n if matching_psfs:\n # generate new OTF from PSF\n return makeotf(\n matching_psfs[0][\"path\"], lambdanm=int(wave), bDoCleanup=False\n )\n\n return get_default_otf(wave, otfpath, approximate)\n" ]
[ [ "numpy.abs" ] ]
qqaatw/pytorch
[ "44764f131b040a41a6dcf1304bb635c574bf5a3b", "44764f131b040a41a6dcf1304bb635c574bf5a3b" ]
[ "test/test_cuda.py", "test/quantization/core/test_quantized_tensor.py" ]
[ "# Owner(s): [\"module: cuda\"]\n\nfrom itertools import repeat, chain, product\nfrom typing import NamedTuple\nimport collections\nimport contextlib\nimport ctypes\nimport gc\nimport io\nimport os\nimport pickle\nimport queue\nimport sys\nimport tempfile\nimport threading\nimport unittest\n\nimport torch\nimport torch.cuda\nimport torch.cuda.comm as comm\nfrom torch.nn.parallel import scatter_gather\nfrom torch.utils.checkpoint import checkpoint_sequential\nfrom torch._six import inf, nan\nfrom torch.testing._internal.common_methods_invocations import tri_tests_args, tri_large_tests_args, \\\n _compare_trilu_indices, _compare_large_trilu_indices\nfrom torch.testing._internal.common_utils import TestCase, freeze_rng_state, run_tests, \\\n NO_MULTIPROCESSING_SPAWN, skipIfRocm, load_tests, IS_REMOTE_GPU, IS_SANDCASTLE, IS_WINDOWS, \\\n slowTest, skipCUDANonDefaultStreamIf, skipCUDAMemoryLeakCheckIf, TEST_WITH_ROCM, TEST_NUMPY, \\\n get_cycles_per_ms\nfrom torch.testing._internal.autocast_test_lists import AutocastTestLists\n\n# load_tests from common_utils is used to automatically filter tests for\n# sharding on sandcastle. This line silences flake warnings\nload_tests = load_tests\n\n# We cannot import TEST_CUDA and TEST_MULTIGPU from torch.testing._internal.common_cuda here,\n# because if we do that, the TEST_CUDNN line from torch.testing._internal.common_cuda will be executed\n# multiple times as well during the execution of this test suite, and it will\n# cause CUDA OOM error on Windows.\nTEST_CUDA = torch.cuda.is_available()\nTEST_MULTIGPU = TEST_CUDA and torch.cuda.device_count() >= 2\n\nif not TEST_CUDA:\n print('CUDA not available, skipping tests', file=sys.stderr)\n TestCase = object # noqa: F811\n\nTEST_LARGE_TENSOR = TEST_CUDA\nTEST_MEDIUM_TENSOR = TEST_CUDA\nTEST_CUDNN = TEST_CUDA\nTEST_BF16 = False\nif TEST_CUDA:\n torch.ones(1).cuda() # initialize cuda context\n TEST_CUDNN = TEST_CUDA and (TEST_WITH_ROCM or\n torch.backends.cudnn.is_acceptable(torch.tensor(1., device=torch.device('cuda:0'))))\n TEST_LARGE_TENSOR = torch.cuda.get_device_properties(0).total_memory >= 12e9\n TEST_MEDIUM_TENSOR = torch.cuda.get_device_properties(0).total_memory >= 6e9\n TEST_BF16 = torch.cuda.is_bf16_supported()\n\n\ndef make_sparse_tensor(t, n, *sizes):\n assert t.is_sparse\n tensor = t()\n i = tensor._indices()\n i = i.new(len(sizes), n).copy_(\n torch.cat([torch.LongTensor(1, n).random_(s) for s in sizes], 0))\n v = tensor._values()\n v = v.new(n).copy_(torch.randn(n))\n return t(i, v, torch.Size(sizes)).coalesce()\n\n_cycles_per_ms = None\n\n\nclass TestCuda(TestCase):\n _do_cuda_memory_leak_check = True\n _do_cuda_non_default_stream = True\n FIFTY_MIL_CYCLES = 50000000\n\n def setUp(self):\n super(TestCuda, self).setUp()\n self.autocast_lists = AutocastTestLists(torch.device('cuda:0'))\n\n def tearDown(self):\n del self.autocast_lists\n super(TestCuda, self).tearDown()\n\n def _check_memory_stat_consistency(self):\n snapshot = torch.cuda.memory_snapshot()\n\n expected_each_device = collections.defaultdict(lambda: collections.defaultdict(int))\n\n for segment in snapshot:\n expected = expected_each_device[segment[\"device\"]]\n pool_str = segment[\"segment_type\"] + \"_pool\"\n\n expected[\"segment.all.current\"] += 1\n expected[\"segment.\" + pool_str + \".current\"] += 1\n\n expected[\"allocated_bytes.all.current\"] += segment[\"allocated_size\"]\n expected[\"allocated_bytes.\" + pool_str + \".current\"] += segment[\"allocated_size\"]\n\n expected[\"reserved_bytes.all.current\"] += 
segment[\"total_size\"]\n expected[\"reserved_bytes.\" + pool_str + \".current\"] += segment[\"total_size\"]\n\n expected[\"active_bytes.all.current\"] += segment[\"active_size\"]\n expected[\"active_bytes.\" + pool_str + \".current\"] += segment[\"active_size\"]\n\n is_split = len(segment[\"blocks\"]) > 1\n for block in segment[\"blocks\"]:\n if block[\"state\"] == \"active_allocated\":\n expected[\"allocation.all.current\"] += 1\n expected[\"allocation.\" + pool_str + \".current\"] += 1\n\n if block[\"state\"].startswith(\"active_\"):\n expected[\"active.all.current\"] += 1\n expected[\"active.\" + pool_str + \".current\"] += 1\n\n if block[\"state\"] == \"inactive\" and is_split:\n expected[\"inactive_split.all.current\"] += 1\n expected[\"inactive_split.\" + pool_str + \".current\"] += 1\n expected[\"inactive_split_bytes.all.current\"] += block[\"size\"]\n expected[\"inactive_split_bytes.\" + pool_str + \".current\"] += block[\"size\"]\n\n for device, expected in expected_each_device.items():\n stats = torch.cuda.memory_stats(device)\n for k, v in expected.items():\n self.assertEqual(v, stats[k])\n\n @staticmethod\n def _test_memory_stats_generator(self, device=None, N=35):\n if device is None:\n device = torch.cuda.current_device()\n\n m0 = torch.cuda.memory_allocated(device)\n last_m_arr = [torch.cuda.memory_allocated(device)]\n max_m_arr = [torch.cuda.max_memory_allocated(device)]\n last_r_arr = [torch.cuda.memory_reserved(device)]\n max_r_arr = [torch.cuda.max_memory_reserved(device)]\n\n def alloc(*size):\n with torch.cuda.device(device):\n # NOTE: do **not** use methods that can have additional\n # memory overhead, e.g., inplace random sampling methods.\n # they can leave some memory occupied even after being\n # deallocated, e.g., initialized RNG state, causing some\n # memory checks below to fail.\n return torch.cuda.FloatTensor(*size)\n\n def assert_change(comp=1, empty_cache=False, reset_peak=False):\n # comp > 0: increased\n # comp = 0: equal\n # comp < 0: decreased\n new_m = torch.cuda.memory_allocated(device)\n new_max_m = torch.cuda.max_memory_allocated(device)\n if comp > 0:\n self.assertGreater(new_m, last_m_arr[0])\n elif comp < 0:\n self.assertLess(new_m, last_m_arr[0])\n else:\n self.assertEqual(new_m, last_m_arr[0])\n self.assertLessEqual(new_m, new_max_m)\n self.assertGreaterEqual(new_max_m, max_m_arr[0])\n last_m_arr[0] = new_m\n max_m_arr[0] = new_max_m\n\n new_r = torch.cuda.memory_reserved(device)\n new_max_r = torch.cuda.max_memory_reserved(device)\n # emptying cache may happen (due to allocation or empty_cache), so\n # we can't assert new_c >= last_c\n self.assertLessEqual(new_r, new_max_r)\n self.assertGreaterEqual(new_max_r, max_r_arr[0])\n last_r_arr[0] = new_r\n max_r_arr[0] = new_max_r\n\n if empty_cache:\n torch.cuda.empty_cache()\n new_r = torch.cuda.memory_reserved(device)\n new_max_r = torch.cuda.max_memory_reserved(device)\n self.assertLessEqual(new_r, last_r_arr[0])\n self.assertLessEqual(new_r, new_max_r)\n self.assertEqual(new_max_r, max_r_arr[0])\n last_r_arr[0] = new_r\n\n if reset_peak:\n torch.cuda.reset_peak_memory_stats(device)\n self.assertEqual(torch.cuda.memory_allocated(device), last_m_arr[0])\n self.assertEqual(torch.cuda.max_memory_allocated(device), last_m_arr[0])\n max_m_arr[0] = last_m_arr[0]\n self.assertEqual(torch.cuda.memory_reserved(device), last_r_arr[0])\n self.assertEqual(torch.cuda.max_memory_reserved(device), last_r_arr[0])\n max_r_arr[0] = last_r_arr[0]\n\n assert_change(0)\n assert_change(0, reset_peak=True)\n 
assert_change(0, empty_cache=True)\n assert_change(0, reset_peak=True)\n assert_change(0)\n yield\n\n tensors1 = [alloc(1), alloc(10, 20), alloc(200, 300, 2000)]\n m1 = torch.cuda.memory_allocated(device)\n assert_change(1)\n yield\n\n tensors2 = []\n\n for i in range(1, int(N / 2) + 1):\n # small ones\n tensors2.append(alloc(i, i * 4))\n assert_change(1)\n yield\n\n for i in range(5, int(N / 2) + 5):\n # large ones\n tensors2.append(alloc(i, i * 7, i * 9, i * 11))\n assert_change(1, reset_peak=(i % 2 == 0))\n yield\n\n tensors2.append(alloc(0, 0, 0))\n assert_change(0)\n yield\n\n permute = []\n for i in torch.randperm(len(tensors2)):\n permute.append(tensors2[i])\n assert_change(0)\n yield\n\n del tensors2\n assert_change(0)\n yield\n tensors2 = permute\n assert_change(0)\n yield\n del permute\n assert_change(0, reset_peak=True)\n yield\n\n for i in range(int(N / 2)):\n x = tensors2[i].numel()\n del tensors2[i]\n assert_change(-x) # in case that tensors2[i] is empty\n yield\n\n for i in range(2, int(2 * N / 3) + 2):\n tensors2.append(alloc(i, i * 3, i * 8))\n assert_change(1)\n yield\n\n del tensors2\n assert_change(-1, reset_peak=True)\n assert_change(0)\n self.assertEqual(torch.cuda.memory_allocated(device), m1)\n yield True\n\n del tensors1\n assert_change(-1, reset_peak=True)\n self.assertEqual(torch.cuda.memory_allocated(device), m0)\n\n # test empty_cache and reset_peak\n assert_change(0, empty_cache=True)\n assert_change(0, reset_peak=True)\n\n def test_cudart_register(self):\n t = torch.ones(20)\n self.assertFalse(t.is_pinned())\n cudart = torch.cuda.cudart()\n r = cudart.cudaHostRegister(t.data_ptr(), t.numel() * t.element_size(), 0)\n self.assertEqual(r, 0)\n self.assertTrue(t.is_pinned())\n r = cudart.cudaHostUnregister(t.data_ptr())\n self.assertEqual(r, 0)\n self.assertFalse(t.is_pinned())\n\n def test_memory_stats(self):\n gc.collect()\n torch.cuda.empty_cache()\n for _ in self._test_memory_stats_generator(self):\n self._check_memory_stat_consistency()\n\n def test_memory_allocation(self):\n gc.collect()\n torch.cuda.empty_cache()\n mem = None\n size = 1\n prev = 0\n try:\n prev = torch.cuda.memory_allocated()\n mem = torch.cuda.caching_allocator_alloc(size)\n self.assertGreater(torch.cuda.memory_allocated(), prev)\n finally:\n if mem is not None:\n torch.cuda.caching_allocator_delete(mem)\n self.assertEqual(torch.cuda.memory_allocated(), prev)\n\n def test_check_error(self):\n # Assert this call doesn't raise.\n torch.cuda.check_error(0)\n\n with self.assertRaisesRegex(torch.cuda.CudaError,\n \"out of memory|hipErrorOutOfMemory\"):\n torch.cuda.check_error(2)\n\n def test_cuda_get_device_name(self):\n # Testing the behaviour with None as an argument\n current_device = torch.cuda.current_device()\n current_device_name = torch.cuda.get_device_name(current_device)\n device_name_None = torch.cuda.get_device_name(None)\n self.assertEqual(current_device_name, device_name_None)\n\n # Testing the behaviour for No argument\n device_name_no_argument = torch.cuda.get_device_name()\n self.assertEqual(current_device_name, device_name_no_argument)\n\n def test_cuda_get_device_capability(self):\n # Testing the behaviour with None as an argument\n current_device = torch.cuda.current_device()\n current_device_capability = torch.cuda.get_device_capability(current_device)\n device_capability_None = torch.cuda.get_device_capability(None)\n self.assertEqual(current_device_capability, device_capability_None)\n\n # Testing the behaviour for No argument\n device_capability_no_argument = 
torch.cuda.get_device_capability()\n self.assertEqual(current_device_capability, device_capability_no_argument)\n\n @unittest.skipIf(not TEST_MULTIGPU, \"only one GPU detected\")\n def test_memory_stats_multigpu(self):\n # advance a generator with a end flag\n def advance(gen, end):\n if not end:\n try:\n next(gen)\n except StopIteration:\n end = True\n return end\n\n # interlace\n torch.cuda.empty_cache()\n gen0 = self._test_memory_stats_generator(self, device='cuda:0', N=35)\n gen1 = self._test_memory_stats_generator(self, device=torch.device('cuda:1'), N=35)\n end0 = end1 = False\n while not (end0 and end1):\n end0 = advance(gen0, end0)\n end1 = advance(gen1, end1)\n\n # semi-random order\n torch.cuda.empty_cache()\n gen0 = self._test_memory_stats_generator(self, device=0, N=35)\n gen1 = self._test_memory_stats_generator(self, device=torch.device('cuda:1'), N=35)\n end0 = end1 = False\n\n while not (end0 and end1):\n end0 = advance(gen0, end0)\n if not end0:\n gen1_max_times = torch.LongTensor(1).random_(0, 3)[0]\n else:\n gen1_max_times = inf\n t = 0\n while t < gen1_max_times and not end1:\n end1 = advance(gen1, end1)\n t += 1\n\n def test_out_of_memory(self):\n tensor = torch.zeros(1024, device='cuda')\n\n with self.assertRaisesRegex(RuntimeError, \"Tried to allocate 800000000.00 GiB\"):\n torch.empty(1024 * 1024 * 1024 * 800000000, dtype=torch.int8, device='cuda')\n\n with self.assertRaisesRegex(RuntimeError, \"Tried to allocate more than 1EB memory\"):\n torch.empty(1024 * 1024 * 1024 * 8000000000, dtype=torch.int8, device='cuda')\n\n # ensure out of memory error doesn't disturb subsequent kernel\n tensor.fill_(1)\n self.assertTrue((tensor == 1).all())\n\n def test_set_per_process_memory_fraction(self):\n # test invalid fraction value.\n with self.assertRaisesRegex(TypeError, \"Invalid type\"):\n torch.cuda.set_per_process_memory_fraction(int(1))\n with self.assertRaisesRegex(ValueError, \"Invalid fraction value\"):\n torch.cuda.set_per_process_memory_fraction(-0.1)\n with self.assertRaisesRegex(ValueError, \"Invalid fraction value\"):\n torch.cuda.set_per_process_memory_fraction(2.0)\n\n tensor = torch.zeros(1024, device='cuda')\n torch.cuda.empty_cache()\n total_memory = torch.cuda.get_device_properties(0).total_memory\n torch.cuda.set_per_process_memory_fraction(0.5, 0)\n\n # test 0.499 allocation is ok.\n application = int(total_memory * 0.499) - torch.cuda.max_memory_reserved()\n tmp_tensor = torch.empty(application, dtype=torch.int8, device='cuda')\n del tmp_tensor\n torch.cuda.empty_cache()\n\n application = int(total_memory * 0.5)\n # it will get OOM when try to allocate more than half memory.\n with self.assertRaisesRegex(RuntimeError, \"out of memory\"):\n torch.empty(application, dtype=torch.int8, device='cuda')\n\n # ensure out of memory error doesn't disturb subsequent kernel\n tensor.fill_(1)\n self.assertTrue((tensor == 1).all())\n\n @unittest.skipIf(not TEST_MULTIGPU, \"only one GPU detected\")\n def test_autogpu(self):\n x = torch.randn(5, 5).cuda()\n y = torch.randn(5, 5).cuda()\n self.assertEqual(x.get_device(), 0)\n self.assertEqual(x.get_device(), 0)\n with torch.cuda.device(1):\n z = torch.randn(5, 5).cuda()\n self.assertEqual(z.get_device(), 1)\n q = x.add(y)\n self.assertEqual(q.get_device(), 0)\n w = torch.randn(5, 5).cuda()\n self.assertEqual(w.get_device(), 1)\n self.assertEqual(y.cuda().get_device(), 1)\n z = z.cuda()\n self.assertEqual(z.get_device(), 0)\n\n @unittest.skipIf(not TEST_MULTIGPU, \"only one GPU detected\")\n def test_new(self):\n x = 
torch.randn(3, 3).cuda()\n self.assertEqual(x.new([0, 1, 2]).get_device(), 0)\n self.assertEqual(x.new([0, 1, 2], device=1).get_device(), 1)\n\n with torch.cuda.device(1):\n self.assertEqual(x.new([0, 1, 2]).get_device(), 0)\n self.assertEqual(x.new([0, 1, 2], device=1).get_device(), 1)\n\n @unittest.skipIf(not TEST_MULTIGPU, \"only one GPU detected\")\n def test_copy_device(self):\n x = torch.randn(5, 5).cuda()\n with torch.cuda.device(1):\n y = x.cuda()\n self.assertEqual(y.get_device(), 1)\n self.assertIs(y.cuda(), y)\n z = y.cuda(0)\n self.assertEqual(z.get_device(), 0)\n self.assertIs(z.cuda(0), z)\n\n x = torch.randn(5, 5)\n with torch.cuda.device(1):\n y = x.cuda()\n self.assertEqual(y.get_device(), 1)\n self.assertIs(y.cuda(), y)\n z = y.cuda(0)\n self.assertEqual(z.get_device(), 0)\n self.assertIs(z.cuda(0), z)\n\n def _test_copy_sync_current_stream(self, x, y):\n x_plus_one = x + 1\n s0 = torch.cuda.Stream(device=x.device)\n s1 = torch.cuda.Stream(device=y.device)\n s2 = torch.cuda.Stream(device=x.device)\n s3 = torch.cuda.Stream(device=y.device)\n\n # same dst stream different src streams\n with torch.cuda.stream(s0):\n torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)\n with torch.cuda.stream(s1):\n y.copy_(x_plus_one)\n\n with torch.cuda.stream(s2), torch.cuda.stream(s1):\n y.copy_(x)\n\n s1.synchronize()\n # The copy() is synchronized on the current streams of both src and dst.\n # In the above test, the _sleep() op on s0 will not block the copy() on\n # s2, but both copies are synchronized on s1 in the dst device. Hence,\n # x is copied to y after x_plus_one is copied to y. If x and y are on\n # the same device, both copy() ops are synchronized on s1.\n self.assertEqual(y, x)\n\n # same src stream different dst streams\n with torch.cuda.stream(s1):\n torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)\n with torch.cuda.stream(s0):\n y.copy_(x_plus_one)\n\n with torch.cuda.stream(s3), torch.cuda.stream(s0):\n y.copy_(x)\n\n s0.synchronize()\n # Similarly, both copy() ops are synchronized on s0.\n self.assertEqual(y, x)\n\n @unittest.skipIf(not TEST_MULTIGPU, \"only one GPU detected\")\n def test_copy_streams(self):\n d0 = torch.device('cuda:0')\n x0 = torch.zeros(5, 5, device=d0)\n\n d1 = torch.device('cuda:1')\n x1 = torch.zeros(5, 5, device=d1)\n self._test_copy_sync_current_stream(x0, x1)\n\n x2 = torch.zeros(5, 5, device=d0)\n self._test_copy_sync_current_stream(x0, x2)\n\n def test_copy_non_blocking(self):\n def _test_copy_non_blocking(a, b):\n event = torch.cuda.Event()\n a.copy_(b, non_blocking=True)\n event.record()\n event.synchronize()\n self.assertEqual(a, b)\n\n # 10MB copies\n x = torch.ones(10000000, dtype=torch.uint8).cuda()\n y = torch.zeros(10000000, dtype=torch.uint8).pin_memory()\n _test_copy_non_blocking(x, y)\n\n x = torch.zeros(10000000, dtype=torch.uint8).pin_memory()\n y = torch.ones(10000000, dtype=torch.uint8).cuda()\n _test_copy_non_blocking(x, y)\n\n # Test the case where the pinned data_ptr is not equal to the storage data_ptr.\n x_base = torch.zeros(10000000, dtype=torch.uint8).pin_memory()\n x = x_base[1:]\n self.assertTrue(x.is_pinned())\n self.assertTrue(x_base.is_pinned())\n self.assertNotEqual(x_base.data_ptr(), x.data_ptr())\n self.assertEqual(x_base.storage().data_ptr(), x.storage().data_ptr())\n y = torch.ones(10000000 - 1, dtype=torch.uint8).cuda()\n _test_copy_non_blocking(x, y)\n\n\n def test_to_non_blocking(self):\n stream = torch.cuda.current_stream()\n\n def _test_to_non_blocking(a, non_blocking, dst):\n torch.cuda.synchronize()\n # Pushes an 
0.1 second spin to stream so if the copy is non blocking,\n # stream will almost surely be active when we query().\n torch.cuda._sleep(int(100 * get_cycles_per_ms()))\n b = a.to(device=dst, non_blocking=non_blocking)\n self.assertEqual(stream.query(), not non_blocking)\n stream.synchronize()\n self.assertEqual(a, b)\n self.assertTrue(b.is_pinned() == (non_blocking and dst == \"cpu\"))\n\n for dst, try_non_blocking in product((\"cuda\", \"cpu\"), (True, False)):\n # Creates source on the opposite device from destination.\n src = torch.randn(1000000,\n device=\"cuda\" if dst == \"cpu\" else \"cpu\",\n pin_memory=True if dst == \"cuda\" else False)\n _test_to_non_blocking(src, try_non_blocking, dst)\n\n def test_to_cpu_blocking_by_default(self):\n src = torch.randn(1000000, device=\"cuda\")\n torch.cuda.synchronize()\n torch.cuda._sleep(int(100 * get_cycles_per_ms()))\n dst = src.to(device=\"cpu\")\n self.assertEqual(torch.cuda.current_stream().query(), True)\n self.assertEqual(src, dst)\n self.assertFalse(dst.is_pinned())\n\n def test_serialization_array_with_storage(self):\n x = torch.randn(5, 5).cuda()\n y = torch.IntTensor(2, 5).fill_(0).cuda()\n q = [x, y, x, y.storage()]\n with tempfile.NamedTemporaryFile() as f:\n torch.save(q, f)\n f.seek(0)\n q_copy = torch.load(f)\n self.assertEqual(q_copy, q, atol=0, rtol=0)\n q_copy[0].fill_(5)\n self.assertEqual(q_copy[0], q_copy[2], atol=0, rtol=0)\n self.assertTrue(isinstance(q_copy[0], torch.cuda.FloatTensor))\n self.assertTrue(isinstance(q_copy[1], torch.cuda.IntTensor))\n self.assertTrue(isinstance(q_copy[2], torch.cuda.FloatTensor))\n self.assertTrue(isinstance(q_copy[3], torch.storage._TypedStorage))\n self.assertTrue(isinstance(q_copy[3]._storage, torch._UntypedStorage))\n q_copy[1].fill_(10)\n self.assertEqual(q_copy[3], torch.cuda.IntStorage(10).fill_(10))\n\n def test_cublas_allow_tf32_get_set(self):\n skip_tf32_cublas = 'TORCH_ALLOW_TF32_CUBLAS_OVERRIDE' in os.environ and\\\n int(os.environ['TORCH_ALLOW_TF32_CUBLAS_OVERRIDE'])\n if skip_tf32_cublas:\n self.assertTrue(torch.backends.cuda.matmul.allow_tf32)\n return\n\n orig = torch.backends.cuda.matmul.allow_tf32\n self.assertEqual(torch._C._get_cublas_allow_tf32(), orig)\n torch.backends.cuda.matmul.allow_tf32 = not orig\n self.assertEqual(torch._C._get_cublas_allow_tf32(), not orig)\n torch.backends.cuda.matmul.allow_tf32 = orig\n\n def test_float32_matmul_precision_get_set(self):\n self.assertEqual(torch.get_float32_matmul_precision(), 'highest')\n skip_tf32_cublas = 'TORCH_ALLOW_TF32_CUBLAS_OVERRIDE' in os.environ and\\\n int(os.environ['TORCH_ALLOW_TF32_CUBLAS_OVERRIDE'])\n if not skip_tf32_cublas:\n self.assertFalse(torch.backends.cuda.matmul.allow_tf32)\n for p in ('medium', 'high'):\n torch.set_float32_matmul_precision(p)\n self.assertEqual(torch.get_float32_matmul_precision(), p)\n if not skip_tf32_cublas:\n self.assertTrue(torch.backends.cuda.matmul.allow_tf32)\n torch.set_float32_matmul_precision('highest')\n self.assertEqual(torch.get_float32_matmul_precision(), 'highest')\n if not skip_tf32_cublas:\n self.assertFalse(torch.backends.cuda.matmul.allow_tf32)\n\n def test_cublas_allow_fp16_reduced_precision_reduction_get_set(self):\n orig = torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction\n self.assertEqual(torch._C._get_cublas_allow_fp16_reduced_precision_reduction(), orig)\n torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = not orig\n self.assertEqual(torch._C._get_cublas_allow_fp16_reduced_precision_reduction(), not orig)\n 
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = orig\n\n def test_cudnn_allow_tf32_get_set(self):\n with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=False):\n self.assertFalse(torch.backends.cudnn.allow_tf32)\n with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=True):\n self.assertTrue(torch.backends.cudnn.allow_tf32)\n\n def test_type_conversions(self):\n x = torch.randn(5, 5)\n self.assertIsInstance(x.float(), torch.FloatTensor)\n self.assertIsInstance(x.cuda().double(), torch.cuda.DoubleTensor)\n self.assertIsInstance(x.cuda().float(), torch.cuda.FloatTensor)\n self.assertIsInstance(x.cuda().float().cpu(), torch.FloatTensor)\n self.assertIsInstance(x.cuda().float().cpu().int(), torch.IntTensor)\n\n y = x.storage()\n self.assertIsInstance(y.float(), torch.FloatStorage)\n self.assertIsInstance(y.cuda().double(), torch.cuda.DoubleStorage)\n self.assertIsInstance(y.cuda().float(), torch.cuda.FloatStorage)\n self.assertIsInstance(y.cuda().float().cpu(), torch.FloatStorage)\n self.assertIsInstance(y.cuda().float().cpu().int(), torch.IntStorage)\n\n @unittest.skip(\"was disabled due to not enough memory, but actually it always fail\")\n def test_arithmetic_large_tensor(self):\n x = torch.empty(2**30, device='cuda')\n\n x.fill_(1)\n self.assertEqual(x.sum(), 2**30)\n\n x += 1\n self.assertEqual(x.sum(), 2**31)\n\n x.fill_(1)\n x -= 0.5\n self.assertEqual(x.sum(), 2**29)\n\n x.fill_(1)\n x *= 2\n self.assertEqual(x.sum(), 2**31)\n\n x.fill_(1)\n x /= 2\n self.assertEqual(x.sum(), 2**29)\n\n def test_gather_bool(self):\n t = torch.tensor([[False, True], [True, True]], device='cuda')\n self.assertEqual(torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]], device='cuda')),\n torch.tensor([[False, False], [True, True]], device='cuda'))\n\n def test_torch_manual_seed_seeds_cuda_devices(self):\n with freeze_rng_state():\n x = torch.zeros(4, 4).float().cuda()\n torch.manual_seed(2)\n self.assertEqual(torch.cuda.initial_seed(), 2)\n x.uniform_()\n torch.manual_seed(2)\n y = x.clone().uniform_()\n self.assertEqual(x, y)\n self.assertEqual(torch.cuda.initial_seed(), 2)\n\n def test_manual_seed(self):\n with freeze_rng_state():\n x = torch.zeros(4, 4).float().cuda()\n torch.cuda.manual_seed(2)\n self.assertEqual(torch.cuda.initial_seed(), 2)\n x.uniform_()\n a = torch.bernoulli(torch.full_like(x, 0.5))\n torch.cuda.manual_seed(2)\n y = x.clone().uniform_()\n b = torch.bernoulli(torch.full_like(x, 0.5))\n self.assertEqual(x, y)\n self.assertEqual(a, b)\n self.assertEqual(torch.cuda.initial_seed(), 2)\n\n @unittest.skipIf(not TEST_MULTIGPU, \"only one GPU detected\")\n def test_cat_autogpu(self):\n x = torch.randn(4, 4).cuda(1)\n y = torch.randn(4, 4).cuda(1)\n z = torch.cat([x, y], 0)\n self.assertEqual(z.get_device(), x.get_device())\n\n @unittest.skipIf(torch.cuda.device_count() >= 10, \"Loading a cuda:9 tensor\")\n def test_load_nonexistent_device(self):\n # Setup: create a serialized file object with a 'cuda:9' restore location\n tensor = torch.randn(2, device='cuda')\n buf = io.BytesIO()\n torch.save(tensor, buf)\n # NB: this might not work in the future if serialization changes\n buf = io.BytesIO(buf.getvalue().replace(b'cuda:0', b'cuda:9'))\n\n msg = r'Attempting to deserialize object on CUDA device 9'\n with self.assertRaisesRegex(RuntimeError, msg):\n _ = torch.load(buf)\n\n def test_specify_improper_device_name(self):\n import os\n fname = \"tempfile.pt\"\n try:\n with 
self.assertRaisesRegex(RuntimeError, \"Invalid device string\"):\n torch.save([torch.nn.Parameter(torch.randn(10, 10))], fname,\n _use_new_zipfile_serialization=True)\n torch.load(fname, 'cuda0')\n finally:\n if os.path.exists(fname):\n os.remove(fname)\n\n def test_get_device_index(self):\n from torch.cuda._utils import _get_device_index\n with self.assertRaisesRegex(RuntimeError, \"Invalid device string\"):\n _get_device_index('cuda0', optional=True)\n\n with self.assertRaisesRegex(ValueError, \"Expected a cuda device\"):\n cpu_device = torch.device('cpu')\n _get_device_index(cpu_device, optional=True)\n\n def test_serialization_array_with_empty(self):\n x = [torch.randn(4, 4).cuda(), torch.cuda.FloatTensor()]\n with tempfile.NamedTemporaryFile() as f:\n torch.save(x, f)\n f.seek(0)\n x_copy = torch.load(f)\n for original, copy in zip(x, x_copy):\n self.assertEqual(copy, original)\n self.assertIs(type(copy), type(original))\n self.assertEqual(copy.get_device(), original.get_device())\n\n @unittest.skipIf(not TEST_MULTIGPU, \"detected only one GPU\")\n def test_multigpu_serialization_remap(self):\n x = [torch.randn(4, 4).cuda(0), torch.randn(4, 4).cuda(1)]\n\n def gpu_remap(storage, location):\n if location == 'cuda:1':\n return storage.cuda(0)\n\n with tempfile.NamedTemporaryFile() as f:\n torch.save(x, f)\n f.seek(0)\n x_copy = torch.load(f, map_location=gpu_remap)\n\n for original, copy in zip(x, x_copy):\n self.assertEqual(copy, original)\n self.assertIs(type(copy), type(original))\n self.assertEqual(copy.get_device(), 0)\n\n @unittest.skipIf(not TEST_MULTIGPU, \"detected only one GPU\")\n def test_multigpu_serialization_remap_dict(self):\n x = [torch.randn(4, 4).cuda(0), torch.randn(4, 4).cuda(1)]\n with tempfile.NamedTemporaryFile() as f:\n torch.save(x, f)\n f.seek(0)\n x_copy = torch.load(f, map_location={'cuda:1': 'cuda:0'})\n for original, copy in zip(x, x_copy):\n self.assertEqual(copy, original)\n self.assertIs(type(copy), type(original))\n self.assertEqual(copy.get_device(), 0)\n\n @unittest.skipIf(not TEST_MULTIGPU, \"detected only one GPU\")\n def test_multigpu_storage_clone(self):\n x = torch.randn(4, 4, device='cuda:1').storage()\n y = x.clone()\n self.assertEqual(x.get_device(), y.get_device())\n for t in ['byte', 'char', 'short', 'int', 'long', 'half', 'double']:\n self.assertEqual(getattr(x, t)().get_device(), x.get_device())\n\n @unittest.skipIf(not TEST_MULTIGPU, \"detected only one GPU\")\n def test_cuda_set_device(self):\n x = torch.randn(5, 5)\n with torch.cuda.device(1):\n self.assertEqual(x.cuda().get_device(), 1)\n torch.cuda.set_device(0)\n self.assertEqual(x.cuda().get_device(), 0)\n with torch.cuda.device(1):\n self.assertEqual(x.cuda().get_device(), 1)\n self.assertEqual(x.cuda().get_device(), 0)\n torch.cuda.set_device(1)\n self.assertEqual(x.cuda().get_device(), 0)\n\n def test_cuda_synchronize(self):\n torch.cuda.synchronize()\n torch.cuda.synchronize('cuda')\n torch.cuda.synchronize('cuda:0')\n torch.cuda.synchronize(0)\n torch.cuda.synchronize(torch.device('cuda:0'))\n\n if TEST_MULTIGPU:\n torch.cuda.synchronize('cuda:1')\n torch.cuda.synchronize(1)\n torch.cuda.synchronize(torch.device('cuda:1'))\n\n with self.assertRaisesRegex(ValueError, \"Expected a cuda device, but\"):\n torch.cuda.synchronize(torch.device(\"cpu\"))\n\n with self.assertRaisesRegex(ValueError, \"Expected a cuda device, but\"):\n torch.cuda.synchronize(\"cpu\")\n\n @unittest.skipIf(not TEST_MULTIGPU, \"detected only one GPU\")\n def test_current_stream(self):\n d0 = 
torch.device('cuda:0')\n d1 = torch.device('cuda:1')\n\n s0 = torch.cuda.current_stream()\n s1 = torch.cuda.current_stream(device=1)\n s2 = torch.cuda.current_stream(device=0)\n\n self.assertEqual(d0, s0.device)\n self.assertEqual(d1, s1.device)\n self.assertEqual(d0, s2.device)\n self.assertEqual(s0, s2)\n\n with torch.cuda.device(d1):\n s0 = torch.cuda.current_stream()\n s1 = torch.cuda.current_stream(1)\n s2 = torch.cuda.current_stream(d0)\n\n self.assertEqual(d1, s0.device)\n self.assertEqual(d1, s1.device)\n self.assertEqual(d0, s2.device)\n self.assertEqual(s0, s1)\n\n with self.assertRaisesRegex(ValueError,\n \"Expected a cuda device, but got: cpu\"):\n torch.cuda.current_stream(torch.device('cpu'))\n\n @unittest.skipIf(not TEST_MULTIGPU, \"detected only one GPU\")\n @skipCUDANonDefaultStreamIf(True)\n def test_default_stream(self):\n d0 = torch.device('cuda:0')\n d1 = torch.device('cuda:1')\n\n with torch.cuda.device(d0):\n s0 = torch.cuda.default_stream()\n\n with torch.cuda.device(d1):\n s1 = torch.cuda.default_stream()\n\n s2 = torch.cuda.default_stream(device=0)\n s3 = torch.cuda.default_stream(d1)\n\n self.assertEqual(d0, s0.device)\n self.assertEqual(d1, s1.device)\n self.assertEqual(d0, s2.device)\n self.assertEqual(d1, s3.device)\n self.assertEqual(s0, s2)\n self.assertEqual(s1, s3)\n\n with torch.cuda.device(d0):\n self.assertEqual(torch.cuda.current_stream(), s0)\n\n with torch.cuda.device(d1):\n self.assertEqual(torch.cuda.current_stream(), s1)\n\n with self.assertRaisesRegex(ValueError,\n \"Expected a cuda device, but got: cpu\"):\n torch.cuda.default_stream(torch.device('cpu'))\n\n @skipCUDANonDefaultStreamIf(True)\n def test_streams(self):\n default_stream = torch.cuda.current_stream()\n user_stream = torch.cuda.Stream()\n self.assertEqual(torch.cuda.current_stream(), default_stream)\n self.assertNotEqual(default_stream, user_stream)\n self.assertEqual(default_stream.cuda_stream, 0)\n self.assertNotEqual(user_stream.cuda_stream, 0)\n with torch.cuda.stream(user_stream):\n self.assertEqual(torch.cuda.current_stream(), user_stream)\n self.assertTrue(user_stream.query())\n tensor1 = torch.ByteTensor(5).pin_memory()\n tensor2 = tensor1.cuda(non_blocking=True) + 1\n default_stream.synchronize()\n self.assertTrue(default_stream.query())\n\n @unittest.skipIf(not TEST_MULTIGPU, \"detected only one GPU\")\n def test_stream_event_device(self):\n d0 = torch.device('cuda:0')\n d1 = torch.device('cuda:1')\n e0 = torch.cuda.Event()\n\n self.assertEqual(None, e0.device)\n\n with torch.cuda.device(d0):\n s0 = torch.cuda.current_stream()\n s0.record_event(e0)\n\n with torch.cuda.device(d1):\n s1 = torch.cuda.Stream()\n e1 = s1.record_event()\n\n self.assertEqual(s0.device, torch.device('cuda:0'))\n self.assertEqual(e0.device, torch.device('cuda:0'))\n self.assertEqual(s1.device, torch.device('cuda:1'))\n self.assertEqual(e1.device, torch.device('cuda:1'))\n\n def test_stream_event_repr(self):\n s = torch.cuda.current_stream()\n self.assertTrue(\"torch.cuda.Stream\" in s.__repr__())\n e = torch.cuda.Event()\n self.assertTrue(\"torch.cuda.Event\" in e.__repr__())\n s.record_event(e)\n self.assertTrue(\"torch.cuda.Event\" in e.__repr__())\n\n @unittest.skipIf(not TEST_MULTIGPU, \"detected only one GPU\")\n def test_stream_context(self):\n s0 = torch.cuda.current_stream()\n s1 = torch.cuda.Stream(device=1)\n s2 = torch.cuda.Stream(device=0)\n\n with torch.cuda.device(s1.device):\n prev_stream_on_cuda1 = torch.cuda.current_stream()\n\n self.assertEqual(torch.cuda.current_stream(), s0)\n 
self.assertEqual(0, torch.cuda.current_device())\n with torch.cuda.stream(s1):\n self.assertEqual(torch.cuda.current_stream(), s1)\n self.assertEqual(1, torch.cuda.current_device())\n with torch.cuda.stream(s2):\n self.assertEqual(torch.cuda.current_stream(), s2)\n self.assertEqual(0, torch.cuda.current_device())\n with torch.cuda.stream(s0):\n self.assertEqual(torch.cuda.current_stream(), s0)\n self.assertEqual(0, torch.cuda.current_device())\n self.assertEqual(torch.cuda.current_stream(), s2)\n self.assertEqual(0, torch.cuda.current_device())\n self.assertEqual(torch.cuda.current_stream(), s1)\n self.assertEqual(1, torch.cuda.current_device())\n\n with torch.cuda.device(s1.device):\n self.assertEqual(prev_stream_on_cuda1, torch.cuda.current_stream())\n\n self.assertEqual(torch.cuda.current_stream(), s0)\n self.assertEqual(0, torch.cuda.current_device())\n\n @unittest.skipIf(not TEST_MULTIGPU, \"detected only one GPU\")\n def test_streams_multi_gpu(self):\n default_stream = torch.cuda.current_stream()\n self.assertEqual(default_stream.device, torch.device('cuda:0'))\n stream = torch.cuda.Stream(device=1)\n self.assertEqual(stream.device, torch.device('cuda:1'))\n with torch.cuda.device(1):\n self.assertEqual(\n torch.cuda.current_stream().device, torch.device('cuda:1'))\n self.assertNotEqual(torch.cuda.current_stream(), default_stream)\n\n @unittest.skipIf(not TEST_MULTIGPU, \"detected only one GPU\")\n def test_streams_multi_gpu_query(self):\n d0 = torch.device('cuda:0')\n d1 = torch.device('cuda:1')\n torch.cuda.synchronize(d0)\n torch.cuda.synchronize(d1)\n\n with torch.cuda.device(d0):\n s0 = torch.cuda.current_stream()\n\n with torch.cuda.device(d1):\n s1 = torch.cuda.current_stream()\n torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)\n\n self.assertTrue(s0.query())\n self.assertFalse(s1.query())\n\n with torch.cuda.device(d0):\n self.assertTrue(s0.query())\n self.assertFalse(s1.query())\n\n with torch.cuda.device(d1):\n self.assertTrue(s0.query())\n self.assertFalse(s1.query())\n\n # deliberately using a different device\n with torch.cuda.device(d0):\n s1.synchronize()\n\n self.assertTrue(s0.query())\n self.assertTrue(s1.query())\n\n with torch.cuda.device(d0):\n self.assertTrue(s0.query())\n self.assertTrue(s1.query())\n\n with torch.cuda.device(d1):\n self.assertTrue(s0.query())\n self.assertTrue(s1.query())\n\n @unittest.skipIf(not TEST_MULTIGPU, \"detected only one GPU\")\n def test_streams_multi_gpu_eq(self):\n d0 = torch.device('cuda:0')\n d1 = torch.device('cuda:1')\n\n with torch.cuda.device(d0):\n s0 = torch.cuda.current_stream()\n s1 = torch.cuda.current_stream()\n\n with torch.cuda.device(d1):\n s2 = torch.cuda.current_stream()\n s3 = torch.cuda.current_stream()\n\n self.assertTrue(s0 == s0)\n self.assertTrue(s0 == s1)\n self.assertTrue(s2 == s2)\n self.assertTrue(s2 == s3)\n self.assertFalse(s0 == s2)\n self.assertFalse(s1 == s3)\n\n self.assertEqual(s0.device, s1.device)\n self.assertEqual(s0.cuda_stream, s1.cuda_stream)\n self.assertEqual(s2.device, s3.device)\n self.assertEqual(s2.cuda_stream, s3.cuda_stream)\n self.assertNotEqual(s0.device, s3.device)\n\n self.assertEqual(hash(s0), hash(s1))\n self.assertEqual(hash(s2), hash(s3))\n self.assertNotEqual(hash(s0), hash(s3))\n\n @unittest.skipIf(not TEST_MULTIGPU, \"multi-GPU not supported\")\n def test_streams_priority(self):\n low, high = torch.cuda.Stream.priority_range()\n s0 = torch.cuda.Stream(device=0, priority=low)\n\n self.assertEqual(low, s0.priority)\n self.assertEqual(torch.device('cuda:0'), s0.device)\n\n s1 = 
torch.cuda.Stream(device=1, priority=high)\n\n self.assertEqual(high, s1.priority)\n self.assertEqual(torch.device('cuda:1'), s1.device)\n\n @unittest.skipIf(not TEST_MULTIGPU, \"multi-GPU not supported\")\n def test_tensor_device(self):\n self.assertEqual(torch.cuda.FloatTensor(1).get_device(), 0)\n self.assertEqual(torch.cuda.FloatTensor(1, device=1).get_device(), 1)\n with torch.cuda.device(1):\n self.assertEqual(torch.cuda.FloatTensor(1).get_device(), 1)\n self.assertEqual(torch.cuda.FloatTensor(1, device=0).get_device(), 0)\n self.assertEqual(torch.cuda.FloatTensor(1, device=None).get_device(), 1)\n\n def test_events(self):\n stream = torch.cuda.current_stream()\n event = torch.cuda.Event(enable_timing=True)\n self.assertTrue(event.query())\n start_event = torch.cuda.Event(enable_timing=True)\n stream.record_event(start_event)\n torch.cuda._sleep(int(50 * get_cycles_per_ms()))\n stream.record_event(event)\n self.assertFalse(event.query())\n event.synchronize()\n self.assertTrue(event.query())\n self.assertGreater(start_event.elapsed_time(event), 0)\n\n @staticmethod\n def _stream_synchronize(self, spin_time_cycles):\n s = torch.cuda.current_stream()\n e_tik = torch.cuda.Event(enable_timing=True)\n e_tok = torch.cuda.Event(enable_timing=True)\n\n e_tik.record(s)\n torch.cuda._sleep(spin_time_cycles)\n e_tok.record(s)\n s.synchronize()\n\n self.assertTrue(s.query())\n\n # not necessary to check e_tik and e_tok, as elapsed_time would throw\n # exception if otherwise.\n return e_tik.elapsed_time(e_tok)\n\n @staticmethod\n def _event_synchronize(self, spin_time_cycles):\n s = torch.cuda.current_stream()\n e_tik = torch.cuda.Event(enable_timing=True)\n e_tok = torch.cuda.Event(enable_timing=True)\n\n e_tik.record(s)\n torch.cuda._sleep(spin_time_cycles)\n s.record_event(e_tok)\n e_tok.synchronize()\n\n self.assertTrue(s.query())\n\n # not necessary to check e_tik and e_tok, as elapsed_time would throw\n # exception if otherwise.\n return e_tik.elapsed_time(e_tok)\n\n @staticmethod\n def _event_wait(self, spin_time_cycles):\n s0 = torch.cuda.current_stream()\n s1 = torch.cuda.Stream()\n e_tik = torch.cuda.Event(blocking=True, enable_timing=True)\n e_tok = torch.cuda.Event(blocking=True, enable_timing=True)\n\n e_tik.record(s0)\n torch.cuda._sleep(spin_time_cycles - 10)\n e_sync = torch.cuda.Event(blocking=True)\n e_sync.record()\n e_sync.wait(s1)\n with torch.cuda.stream(s1):\n torch.cuda._sleep(10)\n s1.synchronize()\n e_tok.record()\n e_tok.synchronize()\n\n self.assertTrue(s0.query())\n self.assertTrue(s1.query())\n self.assertTrue(e_sync.query())\n\n # not necessary to check e_tik and e_tok, as elapsed_time would throw\n # exception if otherwise.\n return e_tik.elapsed_time(e_tok)\n\n @staticmethod\n def _test_stream_event_nogil(self, sync_func, p2c, c2p):\n with torch.cuda.device('cuda:1'):\n c2p.put(0)\n p2c.get()\n c2p.put(sync_func(self, TestCuda.FIFTY_MIL_CYCLES))\n\n # Skip the test for ROCm as per https://github.com/pytorch/pytorch/issues/53190\n @skipIfRocm\n @unittest.skipIf(not TEST_MULTIGPU, \"detected only one GPU\")\n def test_stream_event_nogil(self):\n for sync_func in [TestCuda._stream_synchronize,\n TestCuda._event_synchronize,\n TestCuda._event_wait]:\n p2c = queue.Queue()\n c2p = queue.Queue()\n e_tik = torch.cuda.Event(enable_timing=True)\n e_tok = torch.cuda.Event(enable_timing=True)\n\n t = threading.Thread(\n target=TestCuda._test_stream_event_nogil,\n args=(self, sync_func, p2c, c2p))\n t.daemon = True\n t.start()\n\n c2p.get()\n with 
torch.cuda.device('cuda:0'):\n e_tik.record()\n p2c.put(0)\n parent_time = sync_func(self, TestCuda.FIFTY_MIL_CYCLES)\n child_time = c2p.get()\n e_tok.record()\n e_tok.synchronize()\n total_time = e_tik.elapsed_time(e_tok)\n\n # Without GIL, synchronizations in parent and child threads can\n # overlap. The total execution time should be a little bit longer\n # than spinning fifty million cycles and much shorter than twice of\n # that. However, testing absolute execution time is not reliable as\n # it may vary on different hardware in different environments.\n # Therefore, this test uses relative comparisons, checking if the\n # sum of parent and child threads execution time is greater than the\n # real execution time by least 40%.\n self.assertGreater(parent_time + child_time, total_time * 1.4)\n\n # This test is flaky for ROCm, see issue #62602\n @skipIfRocm\n @unittest.skipIf(not TEST_MULTIGPU, \"detected only one GPU\")\n def test_events_wait(self):\n d0 = torch.device('cuda:0')\n d1 = torch.device('cuda:1')\n torch.cuda.synchronize(d0)\n torch.cuda.synchronize(d1)\n\n with torch.cuda.device(d0):\n s0 = torch.cuda.current_stream()\n torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)\n e0 = torch.cuda.Event()\n s0.record_event(e0)\n\n with torch.cuda.device(d1):\n s1 = torch.cuda.current_stream()\n\n self.assertFalse(s0.query())\n self.assertTrue(s1.query())\n\n s1.wait_event(e0)\n s1.synchronize()\n\n self.assertTrue(e0.query())\n self.assertTrue(s0.query())\n self.assertTrue(s1.query())\n\n @unittest.skipIf(not TEST_MULTIGPU, \"detected only one GPU\")\n def test_events_multi_gpu_query(self):\n d0 = torch.device('cuda:0')\n d1 = torch.device('cuda:1')\n\n with torch.cuda.device(d0):\n s0 = torch.cuda.current_stream()\n e0 = s0.record_event()\n s0.synchronize()\n\n with torch.cuda.device(d1):\n s1 = torch.cuda.current_stream()\n torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)\n e1 = s1.record_event()\n\n self.assertTrue(e0.query())\n self.assertFalse(e1.query())\n\n with torch.cuda.device(d0):\n self.assertTrue(e0.query())\n self.assertFalse(e1.query())\n\n with torch.cuda.device(d1):\n self.assertTrue(e0.query())\n self.assertFalse(e1.query())\n\n # deliberately using a different device\n with torch.cuda.device(d0):\n e1.synchronize()\n\n self.assertTrue(e0.query())\n self.assertTrue(e1.query())\n\n with torch.cuda.device(d0):\n self.assertTrue(e0.query())\n self.assertTrue(e1.query())\n\n with torch.cuda.device(d1):\n self.assertTrue(e0.query())\n self.assertTrue(e1.query())\n\n @unittest.skipIf(not TEST_MULTIGPU, \"detected only one GPU\")\n @skipIfRocm\n def test_events_multi_gpu_elapsed_time(self):\n d0 = torch.device('cuda:0')\n d1 = torch.device('cuda:1')\n\n with torch.cuda.device(d0):\n s0 = torch.cuda.current_stream()\n e0 = torch.cuda.Event(enable_timing=True)\n torch.cuda._sleep(10)\n s0.record_event(e0)\n\n with torch.cuda.device(d1):\n s1 = torch.cuda.current_stream()\n e1 = torch.cuda.Event(enable_timing=True)\n torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)\n s1.record_event(e1)\n\n e0.synchronize()\n e1.synchronize()\n with torch.cuda.device(d0):\n with self.assertRaises(RuntimeError):\n self.assertGreater(e0.elapsed_time(e1), 0)\n\n with torch.cuda.device(d1):\n with self.assertRaises(RuntimeError):\n self.assertGreater(e0.elapsed_time(e1), 0)\n\n with torch.cuda.device(d0):\n s0 = torch.cuda.current_stream()\n e2 = torch.cuda.Event(enable_timing=True)\n torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)\n s0.record_event(e2)\n s0.synchronize()\n\n 
self.assertGreater(e0.elapsed_time(e2), 0)\n\n # deliberately calling from a different device\n with torch.cuda.device(d1):\n self.assertGreater(e0.elapsed_time(e2), 0)\n\n def test_record_stream(self):\n cycles_per_ms = get_cycles_per_ms()\n\n t = torch.FloatTensor([1, 2, 3, 4]).pin_memory()\n result = torch.cuda.FloatTensor(t.size())\n stream = torch.cuda.Stream()\n ptr = [None]\n\n # Performs the CPU->GPU copy in a background stream\n def perform_copy():\n with torch.cuda.stream(stream):\n tmp = t.cuda(non_blocking=True)\n ptr[0] = tmp.data_ptr()\n torch.cuda.current_stream().wait_stream(stream)\n tmp.record_stream(torch.cuda.current_stream())\n torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy\n result.copy_(tmp)\n\n perform_copy()\n with torch.cuda.stream(stream):\n tmp2 = torch.cuda.FloatTensor(t.size())\n tmp2.zero_()\n self.assertNotEqual(tmp2.data_ptr(), ptr[0], msg='allocation re-used to soon')\n\n self.assertEqual(result.tolist(), [1, 2, 3, 4])\n\n # Check that the block will be re-used after the main stream finishes\n torch.cuda.current_stream().synchronize()\n with torch.cuda.stream(stream):\n tmp3 = torch.cuda.FloatTensor(t.size())\n self.assertEqual(tmp3.data_ptr(), ptr[0], msg='allocation not re-used')\n\n def test_record_stream_on_shifted_view(self):\n # See issue #27366\n\n # This test detects unexpected block reallocation. For reliable test,\n # the stream to allocate tensors is isolated. The allocator will not\n # reuse free blocks which were allocated from another stream.\n stream_alloc = torch.cuda.Stream()\n with torch.cuda.stream(stream_alloc):\n base = torch.cuda.FloatTensor([10, 10])\n\n # Record another stream on a shifted view tensor.\n view = base[5:]\n assert view.storage_offset() > 0\n\n stream_record = torch.cuda.Stream()\n with torch.cuda.stream(stream_record):\n torch.cuda._sleep(int(50 * get_cycles_per_ms()))\n\n view.record_stream(stream_record)\n\n # Delete those tensors to make the block free soon.\n data_ptr = base.data_ptr()\n del base, view\n\n # A new tensor should not be allocated to the block above.\n stream_alloc.synchronize()\n\n with torch.cuda.stream(stream_alloc):\n try_realloc = torch.cuda.FloatTensor([10, 10])\n\n self.assertNotEqual(try_realloc.data_ptr(), data_ptr)\n\n @contextlib.contextmanager\n def _get_external_stream(self, device):\n cudart = torch.cuda.cudart()\n stream = ctypes.c_ulonglong(0)\n stream_p = ctypes.POINTER(ctypes.c_void_p)(stream)\n stream_p_int = ctypes.cast(stream_p, ctypes.c_void_p).value\n with device:\n try:\n out = cudart.cudaStreamCreate(stream_p_int)\n self.assertEqual(out, 0)\n self.assertNotEqual(stream.value, 0)\n yield stream.value\n finally:\n out = cudart.cudaStreamDestroy(stream.value)\n self.assertEqual(out, 0)\n\n @skipIfRocm\n def test_external_streams(self):\n device = torch.cuda.device(0)\n with self._get_external_stream(device) as stream_v:\n ext_stream = torch.cuda.ExternalStream(stream_v)\n self.assertEqual(stream_v, ext_stream.cuda_stream)\n self.assertEqual(ext_stream.device.index, device.idx)\n\n @skipIfRocm\n @unittest.skipIf(not TEST_MULTIGPU, \"detected only one GPU\")\n def test_external_streams_multi_device(self):\n device = torch.cuda.device(1)\n with self._get_external_stream(device) as stream_v:\n ext_stream = torch.cuda.ExternalStream(\n stream_v, device=device)\n self.assertEqual(stream_v, ext_stream.cuda_stream)\n self.assertEqual(ext_stream.device.index, device.idx)\n\n def test_noncontiguous_pinned_memory(self):\n # See issue #3266\n x = torch.arange(0, 10).view((2, 
5))\n self.assertEqual(x.t(), x.t().pin_memory())\n\n def test_caching_pinned_memory(self):\n cycles_per_ms = get_cycles_per_ms()\n\n # check that allocations are re-used after deletion\n t = torch.FloatTensor([1]).pin_memory()\n ptr = t.data_ptr()\n del t\n t = torch.FloatTensor([1]).pin_memory()\n self.assertEqual(t.data_ptr(), ptr, msg='allocation not reused')\n\n # check that the allocation is not re-used if it's in-use by a copy\n gpu_tensor = torch.cuda.FloatTensor([0])\n torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy\n gpu_tensor.copy_(t, non_blocking=True)\n del t\n t = torch.FloatTensor([1]).pin_memory()\n self.assertNotEqual(t.data_ptr(), ptr, msg='allocation re-used too soon')\n self.assertEqual(list(gpu_tensor), [1])\n\n @unittest.skipIf(not TEST_MULTIGPU, \"only one GPU detected\")\n def test_caching_pinned_memory_multi_gpu(self):\n # checks that the events preventing pinned memory from being re-used\n # too early are recorded on the correct GPU\n cycles_per_ms = get_cycles_per_ms()\n\n t = torch.FloatTensor([1]).pin_memory()\n ptr = t.data_ptr()\n gpu_tensor0 = torch.cuda.FloatTensor([0], device=0)\n gpu_tensor1 = torch.cuda.FloatTensor([0], device=1)\n\n with torch.cuda.device(1):\n torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy\n gpu_tensor1.copy_(t, non_blocking=True)\n\n del t\n t = torch.FloatTensor([2]).pin_memory()\n self.assertNotEqual(t.data_ptr(), ptr, msg='allocation re-used too soon')\n\n with torch.cuda.device(0):\n gpu_tensor0.copy_(t, non_blocking=True)\n\n self.assertEqual(gpu_tensor1[0], 1)\n self.assertEqual(gpu_tensor0[0], 2)\n\n def test_caching_allocator_record_stream_oom(self):\n \"\"\"allocations delayed by a record_stream call should still be freed on\n an out-of-memory in cuda_malloc_retry. see issue #19219\"\"\"\n stream = torch.cuda.Stream()\n\n with torch.cuda.stream(stream):\n y = torch.zeros(40 * 1024 * 1024, device='cuda')\n\n for _ in range(100):\n x = torch.empty(40 * 1024 * 1024, device='cuda')\n with torch.cuda.stream(stream):\n y += x\n # delays re-use of `x` until after all operations in `stream`\n x.record_stream(stream)\n del x\n\n # we've made a mess by allocating up to the device capacity. 
free any\n # cached blocks in case it affects future tests.\n torch.cuda.empty_cache()\n\n # Tests for historic illegal memory access, see #17040.\n def test_reduction_gpu_memory_accessing(self):\n x = torch.ones(512, 8, dtype=torch.float32, device='cuda')\n torch.sum(x, 0)\n\n def test_sum_fp16(self):\n x = torch.zeros(10, device='cuda', dtype=torch.float16)\n self.assertEqual(x.sum(), 0)\n\n x = torch.ones(65504, device='cuda', dtype=torch.float16)\n self.assertEqual(x.sum(), 65504)\n self.assertEqual(x.sum(dtype=torch.float32), 65504)\n\n x = torch.ones(65536, device='cuda', dtype=torch.float16)\n self.assertEqual(x.sum(dtype=torch.float32), 65536)\n\n a = torch.zeros(1203611).bernoulli_(0.0005)\n x = a.to(device='cuda', dtype=torch.float16)\n self.assertEqual(x.sum().item(), a.sum().item())\n\n a = torch.zeros(100, 121, 80).bernoulli_(0.0005)\n x = a.to(device='cuda', dtype=torch.float16)\n self.assertEqual(x.sum((0, 2)).float().cpu(), a.sum((0, 2)))\n\n def test_mean_fp16(self):\n x = torch.ones(65536, device='cuda', dtype=torch.float16)\n self.assertEqual(x.mean(), 1)\n\n x = torch.ones(65536, device='cuda', dtype=torch.float16)\n self.assertEqual(x.mean(dtype=torch.float32), 1)\n\n def test_prod_large(self):\n # tests global reduction (should_global_reduce = true) in case of non-zero identity element\n x = torch.ones(240000, device='cuda', dtype=torch.float32)\n self.assertEqual(x.prod(), 1)\n\n # test for complex types. Note 240k is divisible by 4\n for dtype in [torch.cfloat, torch.cdouble]:\n x = torch.ones(240000, device='cuda', dtype=dtype) * (0 + 1j)\n self.assertEqual(x.prod(), 1)\n\n def test_multinomial_ext(self):\n # Test two corner cases from older PyTorch (Issue #4858)\n freqs = torch.cuda.FloatTensor([\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.03178183361887932, 0.027680952101945877, 0.033176131546497345,\n 0.046052902936935425, 0.07742464542388916, 0.11543981730937958,\n 0.14148041605949402, 0.15784293413162231, 0.13180233538150787,\n 0.08271478116512299, 0.049702685326337814, 0.027557924389839172,\n 0.018125897273421288, 0.011851548217236996, 0.010252203792333603,\n 0.007422595750540495, 0.005372154992073774, 0.0045109698548913,\n 0.0036087757907807827, 0.0035267581697553396, 0.0018864056328311563,\n 0.0024605290964245796, 0.0022964938543736935, 0.0018453967059031129,\n 0.0010662291897460818, 0.0009842115687206388, 0.00045109697384759784,\n 0.0007791675161570311, 0.00020504408166743815, 0.00020504408166743815,\n 0.00020504408166743815, 0.00012302644609007984, 0.0,\n 0.00012302644609007984, 4.100881778867915e-05, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0])\n\n torch.cuda.manual_seed(11042)\n sample = torch.multinomial(freqs, 1000, True)\n self.assertNotEqual(freqs[sample].min(), 0)\n\n p = torch.zeros(3421, 2, device=\"cuda\", dtype=torch.float)\n p[:, 1] = 1\n torch.cuda.manual_seed(5214)\n r = torch.multinomial(p, 1)\n self.assertNotEqual(r.min().item(), 0)\n\n # test corner case from Issue #13867\n torch.cuda.manual_seed(33)\n probs = torch.randn(1000000, device='cuda').clamp(min=0) * 3e-5\n samples = probs.multinomial(1000000, replacement=True)\n self.assertGreater(probs[samples].min().item(), 0)\n\n def _spawn_test_multinomial_invalid_probs_cuda(self, probs):\n import subprocess\n try:\n p = subprocess.Popen([sys.executable, '-c', f\"\"\"\\\nimport sys\nimport torch\nfrom torch._six import inf, nan\ntry:\n with torch.random.fork_rng(devices=[0]):\n torch.multinomial(torch.tensor({probs}).to('cuda'), 2, replacement=True)\n torch.cuda.synchronize()\n sys.exit(-1) # 
Should not be reached\nexcept RuntimeError as e:\n sys.exit(-2)\n\"\"\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)\n out, err = p.communicate(timeout=10)\n p.wait(timeout=10)\n except subprocess.TimeoutExpired as e:\n p.kill()\n out, err = p.communicate()\n expected_messages = [\n 'device-side assert triggered', # CUDA\n 'Assertion', # CUDA\n 'HSA_STATUS_ERROR_EXCEPTION', # ROCm\n 'Device-side assertion' # ROCm\n ]\n self.assertTrue(any([msg in out or msg in err for msg in expected_messages]))\n\n @slowTest\n @unittest.skipIf(TEST_WITH_ROCM, \"ROCm doesn't support device side asserts\")\n @unittest.skipIf(NO_MULTIPROCESSING_SPAWN, \"Disabled for environments that \\\n don't support multiprocessing with spawn start method\")\n def test_multinomial_invalid_probs_cuda(self):\n self._spawn_test_multinomial_invalid_probs_cuda([1., -1., 1.])\n self._spawn_test_multinomial_invalid_probs_cuda([1., inf, 1.])\n self._spawn_test_multinomial_invalid_probs_cuda([1., -inf, 1.])\n self._spawn_test_multinomial_invalid_probs_cuda([1., 1., nan])\n\n @slowTest\n @unittest.skipIf(not TEST_LARGE_TENSOR, \"not enough memory\")\n def test_huge_index(self):\n src = torch.empty(15000000, 45, device='cuda', dtype=torch.long).random_(0, 2**22)\n idx = torch.randperm(src.shape[0], device='cuda')\n res = src[idx]\n res_cpu = src.cpu()[idx.cpu()]\n self.assertEqual(res.cpu(), res_cpu)\n\n def test_min_max_inits(self):\n # Testing if THC_reduceAll received the correct index initialization.\n # This affects the result of THC_reduceAll operations at extreme values\n x = torch.cuda.ByteTensor([0])\n y = torch.cuda.ByteTensor([255])\n expected = torch.cuda.LongTensor([0])[0]\n\n _, v = x.max(dim=0)\n self.assertEqual(v, expected)\n\n _, v = y.min(dim=0)\n self.assertEqual(v, expected)\n\n @unittest.skipIf(not TEST_MULTIGPU, \"only one GPU detected\")\n def test_get_set_rng_state_all(self):\n states = torch.cuda.get_rng_state_all()\n before0 = torch.cuda.FloatTensor(100, device=0).normal_()\n before1 = torch.cuda.FloatTensor(100, device=1).normal_()\n torch.cuda.set_rng_state_all(states)\n after0 = torch.cuda.FloatTensor(100, device=0).normal_()\n after1 = torch.cuda.FloatTensor(100, device=1).normal_()\n self.assertEqual(before0, after0, atol=0, rtol=0)\n self.assertEqual(before1, after1, atol=0, rtol=0)\n\n def test_nvtx(self):\n # Just making sure we can see the symbols\n torch.cuda.nvtx.range_push(\"foo\")\n torch.cuda.nvtx.mark(\"bar\")\n torch.cuda.nvtx.range_pop()\n range_handle = torch.cuda.nvtx.range_start(\"range_start\")\n torch.cuda.nvtx.range_end(range_handle)\n\n def test_bincount_ext(self):\n # ensure CUDA code coverage\n input_size = (5000,)\n w = torch.randn(input_size, dtype=torch.double, device='cuda')\n w_cpu = w.cpu()\n # test shared memory impl\n t = torch.randint(50, input_size, dtype=torch.int8, device='cuda')\n self.assertEqual(t.cpu().bincount(), t.bincount())\n self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))\n # test multi block memory impl\n # see `THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM` in SummaryOps.cu\n t = torch.randint(500, input_size, dtype=torch.int64, device='cuda')\n self.assertEqual(t.cpu().bincount(), t.bincount())\n self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))\n # test global memory impl\n # see `THRESH_NUMBER_BINS_FOR_GLOBAL_MEM` in SummaryOps.cu\n t = torch.randint(2000, input_size, dtype=torch.int64, device='cuda')\n self.assertEqual(t.cpu().bincount(), t.bincount())\n self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))\n\n t 
= torch.zeros([10], dtype=torch.int32, device='cuda')\n # 35488 * 65536 as int32 would cause overflow to negative value\n # giving negative bin offset\n t[0] = 35488\n counted = t.bincount(minlength=65536)\n self.assertEqual(torch.sum(counted), 10)\n\n def test_tiny_half_norm_(self):\n a = torch.arange(25).cuda().float()\n a /= 100000000\n b = a.half()\n self.assertGreater(b.norm().item(), 0)\n\n def test_norm_type_conversion(self):\n a = torch.ones(65536).cuda().half()\n self.assertEqual(a.norm(p=0, dtype=torch.float32), 65536)\n\n # Verifies that mem_get_info works, including when called for a different device\n def test_mem_get_info(self):\n def _test(idx):\n before_free_bytes, before_available_bytes = torch.cuda.mem_get_info(idx)\n # increasing to 8MB to force acquiring a new block and overcome blocksize differences across platforms\n t = torch.randn(1024 * 1024 * 8, device='cuda:' + str(idx))\n after_free_bytes, after_available_bytes = torch.cuda.mem_get_info(idx)\n\n self.assertTrue(after_free_bytes < before_free_bytes)\n self.assertEqual(before_available_bytes, after_available_bytes)\n\n _test(0)\n if TEST_MULTIGPU:\n _test(1)\n\n # Test that wrap_with_cuda_memory_check successfully detects leak\n # skip for ROCM. Look into #62533.\n @skipIfRocm\n def test_cuda_memory_leak_detection(self):\n l = []\n\n @self.wrap_with_cuda_memory_check\n def no_leak():\n pass\n\n @self.wrap_with_cuda_memory_check\n def leak_gpu0():\n # increasing to 8MB to force acquiring a new block and overcome blocksize differences across platforms\n l.append(torch.randn(1024 * 1024 * 8, device=torch.device(\"cuda:0\")))\n\n no_leak()\n\n with self.assertRaisesRegex(RuntimeError, r\"CUDA driver API confirmed .+ on device 0.+\"):\n leak_gpu0()\n\n if TEST_MULTIGPU:\n @self.wrap_with_cuda_memory_check\n def leak_gpu1():\n # increasing to 8MB to force acquiring a new block and overcome blocksize differences across platforms\n l.append(torch.randn(1024 * 1024 * 8, device=torch.device(\"cuda:1\")))\n\n with self.assertRaisesRegex(RuntimeError, r\"CUDA driver API confirmed .+ on device 1.+\"):\n leak_gpu1()\n\n def test_cuda_memory_leak_detection_propagates_errors(self):\n with self.assertRaisesRegex(RuntimeError, r\"The size of tensor a \\(3\\) must match\"):\n with self.assertLeaksNoCudaTensors():\n x = torch.randn(3, 1, device='cuda')\n y = torch.randn(2, 1, device='cuda')\n z = x + y\n\n def test_trilu_indices(self):\n for test_args in tri_tests_args:\n _compare_trilu_indices(self, *test_args, device='cuda')\n\n # test default options\n x = torch.ones(\n 3, 3, dtype=torch.long, device='cuda', layout=torch.strided)\n self.assertEqual(\n x.tril(0).nonzero().transpose(0, 1),\n torch.tril_indices(3, 3, device='cuda'))\n self.assertEqual(\n x.triu(0).nonzero().transpose(0, 1),\n torch.triu_indices(3, 3, device='cuda'))\n\n def test_large_trilu_indices(self):\n for test_args in tri_large_tests_args:\n _compare_large_trilu_indices(self, *test_args, device='cuda')\n\n @unittest.skipIf(not TEST_MEDIUM_TENSOR, \"not enough memory\")\n def test_cuda_kernel_loop_overflow(self):\n # Issue #24309: In extreme cases, the loop variable could overflow and continue\n # the kernel loop with a negative index, causing a RuntimeError (invalid write):\n x = torch.randn(1, 1, 1, 2**30 + 1, dtype=torch.float16, device=\"cuda\")\n expected = x[0, 0, 0, 2**30]\n y = torch.nn.functional.avg_pool2d(x, kernel_size=1)\n torch.cuda.synchronize()\n self.assertEqual(y[0, 0, 0, 2**30], expected)\n\n @unittest.skipIf(not TEST_LARGE_TENSOR, \"not 
enough memory\")\n def test_cuda_kernel_loop_overflow_large(self):\n # Make sure input.numel() > INT_MAX is handled:\n x = torch.randn(1, 1, 1, 2**31, dtype=torch.float16, device=\"cuda\")\n with self.assertRaisesRegex(RuntimeError, \"integer out of range\"):\n y = torch.nn.functional.avg_pool2d(x, kernel_size=1)\n\n # Issue #24309: In extreme cases, the loop variable could overflow and continue\n # the kernel loop with a negative index, causing a RuntimeError (invalid write):\n x = torch.randn(1, 1, 1, 2**31 - 1, dtype=torch.float16, device=\"cuda\")\n expected = x[0, 0, 0, 2**31 - 2]\n y = torch.nn.functional.avg_pool2d(x, kernel_size=1)\n torch.cuda.synchronize()\n self.assertEqual(y[0, 0, 0, 2**31 - 2], expected)\n\n # this might create a reference cycle on self...\n def _make_multiply_in_stream(self):\n class MultiplyInStream(torch.autograd.Function):\n @staticmethod\n def forward(ctx, x, val):\n ctx.val = val\n ctx.stream = torch.cuda.current_stream()\n return x * val\n\n @staticmethod\n def backward(ctx, grad):\n self.assertEqual(torch.cuda.current_stream(), ctx.stream)\n # delays the operation in the the background stream\n torch.cuda._sleep(1000 * 5000)\n return grad * ctx.val, None\n\n return MultiplyInStream\n\n @skipCUDANonDefaultStreamIf(True)\n def test_streaming_backwards_sync(self):\n default_stream = torch.cuda.current_stream()\n stream = torch.cuda.Stream()\n\n MultiplyInStream = self._make_multiply_in_stream()\n\n # Tests using grads outside the backward() stream context\n # See \"Stream semantics of backward passes\" on https://pytorch.org/docs/stable/notes/cuda.html\n x = torch.randn(5, 5, device='cuda', requires_grad=True)\n with torch.cuda.stream(stream):\n stream.wait_stream(default_stream)\n output = MultiplyInStream.apply(x, 2)\n output.sum().backward()\n # sync needed\n default_stream.wait_stream(stream)\n self.assertEqual(x.grad, torch.ones_like(x) * 2)\n self.assertEqual(torch.cuda.current_stream(), default_stream)\n\n # Tests that using grads in the same stream context as backward()\n # is safe regardless what streams bwd ops ran on\n bwd_ambient_stream = torch.cuda.Stream()\n x = torch.randn(5, 5, device='cuda', requires_grad=True)\n with torch.cuda.stream(stream):\n stream.wait_stream(default_stream)\n output = MultiplyInStream.apply(x, 3)\n with torch.cuda.stream(bwd_ambient_stream):\n bwd_ambient_stream.wait_stream(stream)\n output.sum().backward()\n # x was first used on \"stream\" so its AccumulateGrad leaf should run on \"stream\".\n # The end of backward() should have synced \"bwd_ambient_stream\" with \"stream\"\n # so it should be safe to use x.grad here without any syncs.\n self.assertEqual(x.grad, torch.ones_like(x) * 3)\n self.assertEqual(torch.cuda.current_stream(), bwd_ambient_stream)\n\n # Skip the test for ROCm as per https://github.com/pytorch/pytorch/issues/53190\n @skipIfRocm\n def test_streaming_backwards_multiple_streams(self):\n MultiplyInStream = self._make_multiply_in_stream()\n\n class StreamModel(torch.nn.Module):\n def __init__(self):\n super(StreamModel, self).__init__()\n self.event = torch.cuda.Event()\n self.stream0 = torch.cuda.Stream()\n self.stream1 = torch.cuda.Stream()\n\n def forward(self, x, x_first_use_on_ambient):\n if x_first_use_on_ambient:\n x0 = x.clone()\n self.stream0.wait_stream(torch.cuda.current_stream())\n self.stream1.wait_stream(torch.cuda.current_stream())\n with torch.cuda.stream(self.stream0):\n if not x_first_use_on_ambient:\n x0 = x.clone()\n y0 = MultiplyInStream.apply(x0, 2)\n 
self.event.record(stream=torch.cuda.current_stream())\n\n with torch.cuda.stream(self.stream1):\n y1 = MultiplyInStream.apply(x, 3)\n self.stream1.wait_event(self.event)\n return y0 + y1\n\n stream = torch.cuda.Stream()\n\n for x_first_use_on_ambient in (True, False):\n # the out_of_place=False, iters=1 case stresses if proper syncs are inserted\n # when grads are initially None and stolen by backward ops.\n for out_of_place, iters in ((True, 1),\n (False, 1),\n (False, 5)):\n with torch.cuda.stream(stream):\n x = torch.randn(5, 5, device='cuda', requires_grad=True)\n model = StreamModel().cuda()\n x.register_hook(lambda grad: self.assertEqual(torch.cuda.current_stream(),\n stream if x_first_use_on_ambient else model.stream0))\n for p in model.parameters():\n self.assertTrue(p.grad is None)\n for i in range(iters):\n loss = model(x, x_first_use_on_ambient).sum()\n if out_of_place:\n x_grad = torch.autograd.grad((loss,), (x,))[0]\n else:\n loss.backward()\n # See \"Stream semantics of backward passes\" on https://pytorch.org/docs/stable/notes/cuda.html\n torch.cuda.current_stream().wait_stream(stream)\n\n if out_of_place:\n self.assertEqual(x_grad, torch.ones_like(x) * 5 * iters)\n else:\n self.assertEqual(x.grad, torch.ones_like(x) * 5 * iters)\n\n @unittest.skipIf(not TEST_MULTIGPU, \"only one GPU detected\")\n def test_streaming_backwards_device_transfer(self):\n # This function must run with non-default current streams on all devices, otherwise it's meaningless.\n # The intention is to test that to()'s backward (CopyBackward) interacts properly with the\n # synchronization logic in torch/csrc/autograd/input_buffer.cpp.\n dev0 = torch.device(\"cuda:0\")\n dev1 = torch.device(\"cuda:1\")\n\n # Unfortunately I need to make the tensors largeish.\n # Bigger tensors = longer D2D transfers = more likely to expose races.\n size = 2**26\n\n a = torch.full((size,), 1, device=dev1, dtype=torch.float64, requires_grad=True)\n b = torch.full((size,), 1, device=dev1, dtype=torch.float64, requires_grad=True)\n\n # Here to_backward_recipient = a*b is used only once, so MulBackward's InputBuffer slot only expects 1 input.\n # This tests the situation where we don't call InputBuffer::accumulate for MulBackward's InputBuffer.\n to_backward_recipient = a * b\n s = to_backward_recipient.to(device=\"cuda:0\").sum()\n torch.cuda.synchronize(device=dev0)\n torch.cuda.synchronize(device=dev1)\n s.backward()\n self.assertTrue(a.grad.sum().item() == size)\n self.assertTrue(b.grad.sum().item() == size)\n\n # Here to_backward_recipient = a*b is used twice, so MulBackward's InputBuffer slot expects 2 inputs.\n # This tests the situation where we do call InputBuffer::accumulate for MulBackward's InputBuffer.\n a.grad = None\n b.grad = None\n to_backward_recipient = a * b\n # Multiply by 2 here so to's backward creates gradient values that are different from the case above,\n # to mitigate weirdness if the caching allocator happens to reuse memory regions that were populated\n # with 1s by the case above\n s0 = to_backward_recipient.to(device=\"cuda:0\").sum() * 2.\n s1 = to_backward_recipient.to(device=\"cuda:0\").sum() * 2.\n torch.cuda.synchronize(device=dev0)\n torch.cuda.synchronize(device=dev1)\n s0.backward(retain_graph=True)\n s1.backward()\n self.assertTrue(a.grad.sum().item() == 4 * size)\n self.assertTrue(b.grad.sum().item() == 4 * size)\n\n def test_streaming_backwards_sync_graph_root(self):\n # This function tests if bwd ops running on a side stream properly sync with the GraphRoot.\n # The potential bug 
it targets is a race condition. The test uses multiple trials and\n # torch.cuda._sleep such that if the race condition exists, the test will almost certainly fail,\n # but there's a chance it may spuriously pass. Passing does not guarantee the backend is bug-free,\n # but failure does guarantee there is a bug.\n fwd_bwd_op_stream = torch.cuda.Stream()\n bwd_ambient_stream = torch.cuda.Stream()\n # We need these streams to be different otherwise the test is meaningless.\n self.assertTrue(fwd_bwd_op_stream != bwd_ambient_stream)\n\n size = int(1e3)\n\n a = torch.full((size,), 2.0, device=\"cuda\", requires_grad=True)\n b = torch.full((size,), 3.0, device=\"cuda\", requires_grad=True)\n\n # I don't think we need any manual record_streams below.\n # a and b remain in scope for the entire test.\n # c and grad remain in scope for each iteration, and there's a full sync between iterations.\n for trial in range(5):\n torch.cuda.synchronize()\n a.grad = b.grad = None\n with torch.cuda.stream(fwd_bwd_op_stream):\n c = a * b\n\n with torch.cuda.stream(bwd_ambient_stream):\n torch.cuda.synchronize()\n # Long-running dummy kernel on bwd_ambient_stream delays filling of grad\n torch.cuda._sleep(int(50 * get_cycles_per_ms()))\n # Fills grad on bwd_ambient_stream\n grad = torch.full((size,), float(trial + 1), device=\"cuda\")\n\n # Bwd ops still run on fwd_bwd_ops_stream, so the following will likely fail if\n # bwd ops don't sync with bwd_ambient_stream before consuming grad.\n torch.autograd.backward(tensors=c, grad_tensors=grad)\n\n # See https://github.com/pytorch/pytorch/issues/47028\n # assertEquals below run on bwd_ambient_stream, so this test may also fail\n # if backward() fails to sync with bwd_ambient_stream at the end.\n # Synchronizing here works around the issue until a proper fix can be made.\n torch.cuda.synchronize()\n with torch.no_grad():\n self.assertEqual(a.grad, grad * b)\n self.assertEqual(b.grad, grad * a)\n\n def test_streaming_backwards_callback(self):\n # Tests if autograd callbacks sync properly with respect to leaf streams and\n # the user-facing stream surrounding backward(). 
If it fails, first suspect is\n # sync logic where \"final_callbacks_\" are called in torch/csrc/autograd/engine.cpp\n MultiplyInStream = self._make_multiply_in_stream()\n\n size = int(1e3)\n a = torch.full((size,), 1, device=\"cuda\", dtype=torch.float, requires_grad=True)\n b = torch.full((size,), 1, device=\"cuda\", dtype=torch.float, requires_grad=True)\n\n s0 = torch.cuda.Stream()\n s1 = torch.cuda.Stream()\n s2 = torch.cuda.Stream()\n\n stash = []\n\n # sets up a nontrivial structure of leaf streams\n s0.wait_stream(torch.cuda.current_stream())\n with torch.cuda.stream(s0):\n c = MultiplyInStream.apply(a, 2)\n\n s1.wait_stream(torch.cuda.current_stream())\n with torch.cuda.stream(s1):\n d = MultiplyInStream.apply(b, 3)\n s1.wait_stream(s0)\n e = c * d\n\n def clone_leaf_grads():\n stash.append(a.grad.clone())\n stash.append(b.grad.clone())\n\n # Use a hook on e to install the callback\n e.register_hook(lambda grad: torch.autograd.Variable._execution_engine.queue_callback(clone_leaf_grads))\n\n s2.wait_stream(s1)\n with torch.cuda.stream(s2):\n e.sum().backward()\n # The autograd engine should sync s2 with all leaf streams then run the callback clone_leaf_grads on s2.\n # If those things happened properly, checking the values of the cloned grads on s2 should be safe:\n self.assertEqual(stash[0], torch.full_like(a, 6))\n self.assertEqual(stash[1], torch.full_like(a, 6))\n\n @unittest.skipIf(not TEST_MULTIGPU, \"only one GPU detected\")\n @unittest.skipIf(IS_SANDCASTLE or IS_REMOTE_GPU, \"Does not work on Sandcastle\")\n def test_cuda_init_race(self):\n # See https://github.com/pytorch/pytorch/issues/16559\n import subprocess\n subprocess.check_call([sys.executable, '-c', \"\"\"\\\nimport torch\nimport threading\n\ndef worker(rank):\n torch.tensor([1.]).cuda(rank)\n\nt1 = threading.Thread(target=worker, args=(0,))\nt2 = threading.Thread(target=worker, args=(1,))\nt1.start()\nt2.start()\n\"\"\"])\n\n @unittest.skipIf(TEST_WITH_ROCM, \"ROCm doesn't support device side asserts\")\n def test_fixed_cuda_assert_async(self):\n with self.assertRaisesRegex(RuntimeError, \"Boolean value of Tensor with no values is ambiguous\"):\n torch._assert_async(torch.tensor([], device=\"cuda\"))\n with self.assertRaisesRegex(RuntimeError, \"Boolean value of Tensor with more than one value is ambiguous\"):\n torch._assert_async(torch.tensor([0, 0], device=\"cuda\"))\n\n torch._assert_async(torch.tensor(1, device=\"cuda\"))\n torch._assert_async(torch.tensor(0.1, device=\"cuda\"))\n torch._assert_async(torch.tensor(-0.1, device=\"cuda\"))\n torch._assert_async(torch.tensor(True, device=\"cuda\"))\n torch._assert_async(torch.tensor(0 + 0.1j, device=\"cuda\"))\n\n fail_stmts = [\n \"torch._assert_async(torch.tensor(0, device='cuda'))\",\n \"torch._assert_async(torch.tensor(0.0, device='cuda'))\",\n \"torch._assert_async(torch.tensor(False, device='cuda'))\",\n \"torch._assert_async(torch.tensor(0 + 0j, device='cuda'))\",\n ]\n\n import subprocess\n for stmt in fail_stmts:\n with self.subTest(stmt=stmt):\n r = subprocess.call([sys.executable, '-c', f\"\"\"\\\nimport torch\n\n{stmt}\ntorch.cuda.synchronize()\n\"\"\"])\n self.assertTrue(r != 0)\n\n\n def test_grad_scaling_unscale(self, dtype=torch.float):\n inv_scale = torch.full((1,), 0.25, dtype=torch.float, device=\"cuda:0\")\n found_inf = torch.full((1,), 0.0, dtype=torch.float, device=\"cuda:0\")\n\n size = 10\n g = torch.full((size, size), 4.0, dtype=dtype, device=\"cuda:0\")\n ginf = g.clone()\n ginf[2, 2] = float('inf')\n gnan = g.clone()\n gnan[2, 2] = 
float('nan')\n\n # Tries selected combinations of\n # - contiguous grads\n # - g.clone().t() which is not contiguous but still non overlapping and dense\n # - variants of g.clone()[:, :5] which are not non overlapping and dense\n # Non overlapping and dense grads route into a multi tensor apply kernel,\n # others use a fallback per-tensor kernel, so we should try both.\n cases = (\n ([g.clone(), g.clone()], False),\n ([g.clone(), g.clone().t()], False),\n ([g.clone(), g.clone()[:, :5]], False),\n ([g.clone()[:, :5], g.clone()[:, :5]], False),\n ([g.clone(), ginf.clone()], True),\n ([g.clone(), gnan.clone()], True),\n ([g.clone(), ginf.clone()[:, :5]], True),\n ([g.clone(), gnan.clone()[:, :5]], True),\n ([ginf.clone(), g.clone()[:, :5]], True),\n ([ginf.clone()[:, :5], g.clone()[:, :5]], True),\n )\n\n for grads, has_inf in cases:\n found_inf.zero_()\n torch._amp_foreach_non_finite_check_and_unscale_(grads, found_inf, inv_scale)\n if has_inf:\n self.assertEqual(found_inf, 1.0)\n else:\n self.assertEqual(found_inf, 0.0)\n for grad in grads:\n self.assertEqual(grad, torch.ones_like(grad), rtol=1e-5, atol=1e-7)\n\n # When passing lists with mismatched dtypes to a raw\n # _amp_foreach_non_finite_check_and_unscale_ call,\n # it's expected to fall back to single-tensor TensorIterator kernel.\n grads = [g.clone(), g.to(dtype=torch.float16)]\n torch._amp_foreach_non_finite_check_and_unscale_(grads, found_inf, inv_scale)\n for grad in grads:\n self.assertEqual(grad, torch.ones_like(grad), rtol=1e-5, atol=1e-7)\n\n # Passing lists with mismatched devices to a raw\n # _amp_foreach_non_finite_check_and_unscale_ call should raise errors.\n if TEST_MULTIGPU:\n with self.assertRaisesRegex(RuntimeError, r\"Expected all tensors to be on the same device\"):\n torch._amp_foreach_non_finite_check_and_unscale_([g.clone(), g.to(device=\"cuda:1\")],\n found_inf,\n inv_scale)\n\n # Creates a list of grads with mismatched dtypes and devices, to ensure\n # scaler._unscale_grads_ organizes grads by dtype and device before calling\n # _amp_foreach_non_finite_check_and_unscale_ on each set.\n # If inject_inf >= 0, writes an inf into one grad for _unscale_grads_ to find.\n def perfect_storm_grads(inject_inf):\n grads = [g.clone(), g.clone()[:, :5], g.to(dtype=torch.float16), g.to(dtype=torch.float16)]\n if TEST_MULTIGPU:\n grads += [g.to(device=\"cuda:1\"),\n g.to(device=\"cuda:1\")[:, :5],\n g.to(device=\"cuda:1\", dtype=torch.float16),\n g.to(device=\"cuda:1\", dtype=torch.float16)]\n if inject_inf >= 0:\n grads[inject_inf][2, 2] = float('inf')\n return grads\n\n scaler = torch.cuda.amp.GradScaler()\n dummy_params = [torch.empty_like(g) for g in perfect_storm_grads(-1)]\n dummy_opt = torch.optim.SGD(dummy_params, lr=1.)\n\n # Ensures the inf/nan checking can find an inf injected onto any grad in the perfect storm.\n for inject_inf in range(-1, len(dummy_params)):\n found_inf = torch.full((1,), 0.0, dtype=torch.float, device=\"cuda:0\")\n grads = perfect_storm_grads(inject_inf)\n for i, p in enumerate(dummy_params):\n p.grad = grads[i]\n found_inf_per_device = scaler._unscale_grads_(dummy_opt, inv_scale, found_inf, True)\n if inject_inf < 0:\n # No inf was injected, ensures unscaling worked normally.\n self.assertTrue(sum(v.item() for v in found_inf_per_device.values()) == 0)\n for grad in grads:\n self.assertEqual(grad, torch.ones_like(grad), rtol=1e-5, atol=1e-7)\n else:\n # inf was injected, ensures inf was found.\n self.assertTrue(sum(v.item() for v in found_inf_per_device.values()) == 1)\n\n def 
test_grad_scaling_update_scale(self, device=\"cuda\", dtype=torch.float):\n growth = 2.0\n backoff = 0.25\n growth_interval = 2\n scale = torch.full((1,), 4.0, dtype=dtype, device=device)\n growth_tracker = torch.full((1,), 0.0, dtype=torch.int32, device=device)\n found_inf = torch.full((1,), 0.0, dtype=torch.float, device=\"cuda:0\")\n\n # Simulates 2 consecutive unskipped iterations\n torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)\n self.assertEqual(growth_tracker, 1)\n self.assertEqual(scale, 4.0)\n torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)\n self.assertEqual(growth_tracker, 0)\n self.assertEqual(scale, 8.0)\n\n # Simulates a skipped iteration\n found_inf.fill_(1.0)\n torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)\n self.assertEqual(growth_tracker, 0)\n self.assertEqual(scale, 2.0)\n\n def test_grad_scaling_unscale_sparse(self, device=\"cuda\", dtype=torch.float):\n scaler = torch.cuda.amp.GradScaler()\n\n inv_scale = torch.full((1,), 0.25, dtype=dtype, device=device)\n found_inf = torch.empty((1,), dtype=dtype, device=device)\n cur = found_inf.device\n\n # As of d0c925f (4/16/20), docs are unclear about best API for sparse cuda tensor construction.\n # https://pytorch.org/docs/master/tensors.html shows torch.sparse_coo_tensor(...), but it has no docstring.\n # The same page shows several tensors with layout=torch.sparse_coo, but no constructors using that layout.\n # Meanwhile, https://pytorch.org/docs/master/sparse.html shows torch.sparse.FloatTensor(...), which looks\n # legacy and does not accept a device=\"cuda\" kwarg. Going with torch.sparse_coo_tensor.\n i = torch.tensor([[0, 1, 1],\n [2, 0, 2]], device=\"cuda\", dtype=torch.int64)\n v = torch.tensor([16., 32., 64.], device=\"cuda\", dtype=torch.float)\n s = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device=\"cuda\", dtype=dtype)\n\n p = s.clone()\n assert p.is_sparse\n opt = torch.optim.SGD([p], lr=1.)\n\n p.grad = s.clone()\n found_inf.zero_()\n found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]\n self.assertEqual(found_inf, 0.0)\n self.assertEqual(p.grad.to_dense(), (s / 4).to_dense())\n\n v = torch.FloatTensor([16., 32., float('inf')])\n p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device=\"cuda\", dtype=dtype)\n found_inf.zero_()\n found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]\n self.assertEqual(found_inf, 1.0)\n\n v = torch.FloatTensor([16., 32., float('nan')])\n p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device=\"cuda\", dtype=dtype)\n found_inf.zero_()\n found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]\n self.assertEqual(found_inf, 1.0)\n\n p = s.clone().half()\n assert p.is_sparse\n opt = torch.optim.SGD([p], lr=1.)\n\n p.grad = s.clone().half()\n found_inf.zero_()\n found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, True)[cur]\n self.assertEqual(found_inf, 0.0)\n self.assertEqual(p.grad.to_dense(), (s.half() / 4).to_dense())\n\n # Creates fp16 sparse tensor with duplicated indices (uncoalesced). 
The uncoalesced representation\n # does not overflow in fp16, but the coalesced representation would, because 64000 + 64000 > fp16 max.\n # _amp_non_finite_check_and_unscale_ should report an overflow here.\n i = torch.LongTensor([[0, 1, 0],\n [2, 0, 2]])\n v = torch.FloatTensor([64000., 32., 64000.])\n p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device=\"cuda\", dtype=torch.float16)\n found_inf.zero_()\n found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, True)[cur]\n self.assertEqual(found_inf, 1.0)\n\n @unittest.skipIf(not TEST_MULTIGPU, \"only one GPU detected\")\n def test_grad_scaling_device_as_key(self):\n # Ensure that different instances of \"device\" objects that point to the same device\n # are treated as identical keys by dicts. GradScaler relies on this behavior, and may\n # error otherwise in a way that's difficult to detect (a silent performance hit).\n d = {}\n t = torch.empty((1,), device=\"cuda:0\")\n dev0a = torch.device(\"cuda:0\")\n dev0b = torch.device(\"cuda:0\")\n dev1a = torch.device(\"cuda:1\")\n dev1b = torch.device(\"cuda:1\")\n\n self.assertTrue(hash(dev0a) == hash(dev0b))\n self.assertTrue(hash(dev1a) == hash(dev1b))\n\n d[dev0a] = \"0a\"\n d[dev0b] = \"0b\"\n self.assertTrue(len(d) == 1)\n self.assertTrue(d[dev0a] == \"0b\")\n d[t.device] = \"t\"\n self.assertTrue(len(d) == 1)\n self.assertTrue(d[dev0a] == \"t\")\n\n d[dev1a] = \"1a\"\n d[dev1b] = \"1b\"\n self.assertTrue(len(d) == 2)\n self.assertTrue(d[dev1a] == \"1b\")\n\n @unittest.skipIf(not TEST_MULTIGPU, \"only one GPU detected\")\n def test_grad_scaling_scale(self):\n scaler = torch.cuda.amp.GradScaler(init_scale=2.)\n t0 = torch.full((1,), 4.0, dtype=torch.float32, device=\"cuda:0\")\n t1 = torch.full((1,), 4.0, dtype=torch.float32, device=\"cuda:1\")\n # Create some nested iterables of tensors on different devices.\n outputs = (t1.clone(), (t0.clone(), t1.clone()), [t0.clone(), (t1.clone(), t0.clone())])\n outputs = scaler.scale(outputs)\n self.assertTrue(outputs[0] == 8.0 and outputs[1][0] == 8.0 and outputs[1][1] == 8.0 and\n outputs[2][0] == 8.0 and outputs[2][1][0] == 8.0 and outputs[2][1][1] == 8.0)\n self.assertTrue(scaler._scale.device == t1.device)\n\n def test_grad_scaling_state_dict(self):\n for lazy_init_scale in True, False:\n s0 = torch.cuda.amp.GradScaler(init_scale=3., growth_factor=4., backoff_factor=.5, growth_interval=2)\n s1 = torch.cuda.amp.GradScaler(init_scale=6., growth_factor=7., backoff_factor=.8, growth_interval=1)\n\n # sets a random value for load_state_dict to overwrite\n s1._init_growth_tracker = 7\n\n if lazy_init_scale:\n # Dummy scale() call to ensure the scale tensor is lazily initialized.\n s1.scale(torch.full((1,), 4.0, dtype=torch.float32, device=\"cuda:0\"))\n self.assertTrue(isinstance(s1._scale, torch.cuda.FloatTensor))\n\n s1.load_state_dict(s0.state_dict())\n\n self.assertEqual(s1.get_scale(), 3.)\n self.assertEqual(s1.get_growth_factor(), 4.)\n self.assertEqual(s1.get_backoff_factor(), .5)\n self.assertEqual(s1.get_growth_interval(), 2)\n self.assertEqual(s1._init_growth_tracker, 0)\n\n def _create_scaling_models_optimizers(self, device=\"cuda\"):\n # Create a module+optimizer that will use scaling, and a control module+optimizer\n # that will not use scaling, against which the scaling-enabled module+optimizer can be compared.\n mod_control = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)\n mod_scaling = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)\n for c, 
s in zip(mod_control.parameters(), mod_scaling.parameters()):\n s.data.copy_(c.data)\n\n opt_control = torch.optim.SGD(mod_control.parameters(), lr=1.0)\n opt_scaling = torch.optim.SGD(mod_scaling.parameters(), lr=1.0)\n\n return mod_control, mod_scaling, opt_control, opt_scaling\n\n def _create_scaling_case(self, device=\"cuda\", dtype=torch.float):\n data = [(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),\n (torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),\n (torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),\n (torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device))]\n\n loss_fn = torch.nn.MSELoss().cuda()\n\n skip_iter = 2\n\n return self._create_scaling_models_optimizers(device=device) + (data, loss_fn, skip_iter)\n\n # _run_scaling_case generalizes some single-optimizer test logic to avoid too much copy-pasting below.\n def _run_scaling_case(self, run, unskipped, skipped, atol=1e-7):\n # Ensure scaling can be disabled without changing user control flow.\n for enabled in True, False:\n mod_control, mod_scaling, opt_control, opt_scaling, data, loss_fn, skip_iter = self._create_scaling_case()\n\n # For functionality, test with a modest initial scale, and an unrealistically-large growth factor\n # so any potential errors with the growth factor handling will be magnified.\n scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)\n\n _ = run(data, mod_control, opt_control, scaler, loss_fn, skip_iter, False)\n ret = run(data, mod_scaling, opt_scaling, scaler, loss_fn, skip_iter, True)\n\n # Allows run() to optionally return a different scaler instance.\n scaler = ret if ret else scaler\n\n # If scaling was enabled, the scale factor should have been multiplied by the growth factor\n # len(data) - skipped times and the backoff factor \"skipped\" times.\n if enabled:\n net_growth = scaler.get_growth_factor()**unskipped if unskipped > 0 else 1.0\n net_backoff = scaler.get_backoff_factor()**skipped if skipped > 0 else 1.0\n self.assertTrue(scaler.get_scale() == (128. 
* net_growth * net_backoff))\n else:\n self.assertTrue(scaler.get_scale() == 1.0)\n\n for c, s in zip(mod_control.parameters(), mod_scaling.parameters()):\n self.assertEqual(c, s, atol=atol, rtol=1e-05)\n\n # Compares no scaling + no autocasting against scaling + autocasting.\n def test_grad_scaling_autocast(self):\n try_pickle = False\n\n def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):\n for i, (input, target) in enumerate(data):\n optimizer.zero_grad()\n with torch.autocast('cuda', enabled=try_scaling_api):\n output = model(input)\n loss = loss_fn(output, target)\n if try_scaling_api:\n scaler.scale(loss).backward()\n if i == skip_iter and scaler.is_enabled():\n model[1].weight.grad.data.fill_(float('inf'))\n scaler.step(optimizer)\n scaler.update()\n if try_pickle:\n scaler = pickle.loads(pickle.dumps(scaler))\n else:\n loss.backward()\n if (not scaler.is_enabled()) or (i != skip_iter):\n optimizer.step()\n return scaler\n\n # sets atol=1e-3 because we're comparing pure fp32 arithmetic vs a mixture of fp16 and fp32\n self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-3)\n # this will be picked up by try_pickle within run():\n try_pickle = True\n self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-3)\n\n def test_grad_scaling_clipping(self):\n def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):\n max_norm = 0.2 # A reasonable value that actually has an effect, based on printouts of grads\n for i, (input, target) in enumerate(data):\n optimizer.zero_grad()\n output = model(input)\n loss = loss_fn(output, target)\n if try_scaling_api:\n scaler.scale(loss).backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm * scaler.get_scale())\n if i == skip_iter and scaler.is_enabled():\n model[1].weight.grad.data.fill_(float('inf'))\n scaler.step(optimizer)\n scaler.update()\n else:\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)\n if (not scaler.is_enabled()) or (i != skip_iter):\n optimizer.step()\n\n self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-5)\n\n def test_grad_scaling_clipping_separate_unscale(self):\n def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):\n max_norm = 0.2 # A reasonable value that actually has an effect, based on printouts of grads\n for i, (input, target) in enumerate(data):\n optimizer.zero_grad()\n output = model(input)\n loss = loss_fn(output, target)\n if try_scaling_api:\n scaler.scale(loss).backward()\n if i == skip_iter and scaler.is_enabled():\n model[1].weight.grad.data.fill_(float('inf'))\n scaler.unscale_(optimizer)\n torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm, error_if_nonfinite=False)\n scaler.step(optimizer)\n scaler.update()\n else:\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)\n if (not scaler.is_enabled()) or (i != skip_iter):\n optimizer.step()\n\n self._run_scaling_case(run, unskipped=3, skipped=1)\n\n @unittest.skipIf(IS_WINDOWS, 'FIXME: fix this test for Windows')\n def test_grad_scaling_penalty(self):\n def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):\n for i, (input, target) in enumerate(data):\n optimizer.zero_grad()\n output = model(input)\n loss = loss_fn(output, target)\n\n if try_scaling_api:\n grad_params = torch.autograd.grad(scaler.scale(loss),\n model.parameters(), create_graph=True)\n inv_scale = 1. 
/ scaler.get_scale()\n grad_params = [p * inv_scale for p in grad_params]\n else:\n grad_params = torch.autograd.grad(loss, model.parameters(), create_graph=True)\n\n grad_norm = 0\n for grad in grad_params:\n grad_norm += grad.pow(2).sum()\n grad_norm = grad_norm.sqrt()\n loss = loss + grad_norm\n\n if try_scaling_api:\n scaler.scale(loss).backward()\n if i == skip_iter and scaler.is_enabled():\n model[1].weight.grad.data.fill_(float('inf'))\n scaler.step(optimizer)\n scaler.update()\n else:\n loss.backward()\n if (not scaler.is_enabled()) or (i != skip_iter):\n optimizer.step()\n\n self._run_scaling_case(run, unskipped=3, skipped=1)\n\n def test_grad_scaling_accumulation(self):\n def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):\n iters_to_accumulate = 2\n for i, (input, target) in enumerate(data):\n output = model(input)\n loss = loss_fn(output, target)\n loss = loss / iters_to_accumulate\n if try_scaling_api:\n scaler.scale(loss).backward()\n else:\n loss.backward()\n if (i + 1) % iters_to_accumulate == 0:\n if try_scaling_api:\n scaler.step(optimizer)\n scaler.update()\n optimizer.zero_grad()\n else:\n optimizer.step()\n optimizer.zero_grad()\n\n self._run_scaling_case(run, unskipped=2, skipped=0)\n\n def test_grad_scaling_multiple(self):\n # Tests gradient scaling with 2 models and 2 optimizers that both receive gradients from 2 losses.\n # Some of the logic here cannot reuse the generic helper functions created for the 1-optimizer cases.\n for enabled in True, False:\n mod_control0, mod_scaling0, opt_control0, opt_scaling0, data, loss_fn, skip_iter = \\\n self._create_scaling_case()\n mod_control1, mod_scaling1, opt_control1, opt_scaling1 = \\\n self._create_scaling_models_optimizers()\n\n scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)\n\n def run(model0, model1, optimizer0, optimizer1, try_scaling_api):\n for i, (input, target) in enumerate(data):\n optimizer0.zero_grad()\n optimizer1.zero_grad()\n output0 = model0(input)\n output1 = model1(input)\n loss0 = loss_fn(0.3 * output0 + 0.7 * output1, target)\n loss1 = loss_fn(0.6 * output0 - 0.4 * output1, target)\n\n if try_scaling_api:\n scaler.scale(loss0).backward(retain_graph=True)\n scaler.scale(loss1).backward()\n if i == skip_iter and scaler.is_enabled():\n model1[1].weight.grad.data.fill_(float('inf'))\n\n # As an additional stress test, separately unscale for one of the optimizers.\n scaler.unscale_(optimizer0)\n\n scaler.step(optimizer0)\n scaler.step(optimizer1)\n scaler.update()\n else:\n loss0.backward(retain_graph=True)\n loss1.backward()\n optimizer0.step()\n if (not scaler.is_enabled()) or (i != skip_iter):\n optimizer1.step()\n\n run(mod_control0, mod_control1, opt_control0, opt_control1, False)\n run(mod_scaling0, mod_scaling1, opt_scaling0, opt_scaling1, True)\n\n # The loss scale should have been multiplied by the growth factor 3 times and the backoff factor once.\n self.assertTrue(scaler.get_scale() == (128. 
* scaler.get_growth_factor()**3 *\n scaler.get_backoff_factor()**1) if enabled else 1.0)\n\n for c, s in zip(chain(mod_control0.parameters(), mod_control1.parameters()),\n chain(mod_scaling0.parameters(), mod_scaling1.parameters())):\n self.assertEqual(c, s, rtol=1e-5, atol=1e-7)\n\n @unittest.skipIf(not TEST_MULTIGPU, \"only one GPU detected\")\n def test_grad_scaling_multigpu(self):\n # Same as above, but runs some of the models on device 1.\n # GradScaler should transparently handle losses and gradients on multiple devices.\n # This test could be combined with the test above, but I think it makes sense to treat\n # multi-GPU operations separately.\n dev0 = torch.device(\"cuda:0\")\n dev1 = torch.device(\"cuda:1\")\n\n for enabled in True, False:\n mod_control0, mod_scaling0, opt_control0, opt_scaling0, data, loss_fn, skip_iter = \\\n self._create_scaling_case()\n mod_control1, mod_scaling1, opt_control1, opt_scaling1 = \\\n self._create_scaling_models_optimizers(device=dev1)\n\n scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)\n\n def run(model0, model1, optimizer0, optimizer1, try_scaling_api):\n for i, (input, target) in enumerate(data):\n optimizer0.zero_grad()\n optimizer1.zero_grad()\n output0 = model0(input)\n output1 = model1(input.to(dev1))\n loss0 = loss_fn(0.3 * output0 + 0.7 * output1.to(dev0), target)\n loss1 = loss_fn(0.6 * output0.to(dev1) - 0.4 * output1, target.to(dev1))\n\n if try_scaling_api:\n scaler.scale(loss0).backward(retain_graph=True)\n scaler.scale(loss1).backward()\n if i == skip_iter and scaler.is_enabled():\n model1[1].weight.grad.data.fill_(float('inf'))\n\n # As an additional stress test, separately unscale for one of the optimizers.\n scaler.unscale_(optimizer0)\n\n scaler.step(optimizer0)\n scaler.step(optimizer1)\n\n # Make sure the found_infs were collected properly across optimizers and devices.\n if scaler.is_enabled():\n self.assertTrue(len(scaler._found_inf_per_device(optimizer0)) == 1)\n self.assertTrue(len(scaler._found_inf_per_device(optimizer1)) == 1)\n self.assertTrue(scaler._found_inf_per_device(optimizer0)[dev0].item() == 0.)\n self.assertTrue(scaler._found_inf_per_device(optimizer1)[dev1].item() ==\n float(i == skip_iter))\n\n scaler.update()\n else:\n loss0.backward(retain_graph=True)\n loss1.backward()\n optimizer0.step()\n if (not scaler.is_enabled()) or (i != skip_iter):\n optimizer1.step()\n\n run(mod_control0, mod_control1, opt_control0, opt_control1, False)\n run(mod_scaling0, mod_scaling1, opt_scaling0, opt_scaling1, True)\n\n # The loss scale should have been multiplied by the growth factor 3 times and the backoff factor once.\n self.assertTrue(scaler.get_scale() == (128. 
* scaler.get_growth_factor()**3 *\n scaler.get_backoff_factor()**1) if enabled else 1.0)\n\n # Copy mod_control1 and mod_scaling1 back the device 0 for comparison\n mod_control1.to(dev0)\n mod_scaling1.to(dev0)\n\n for c, s in zip(chain(mod_control0.parameters(), mod_control1.parameters()),\n chain(mod_scaling0.parameters(), mod_scaling1.parameters())):\n self.assertEqual(c, s, rtol=1e-5, atol=1e-7)\n\n def test_cublas_multiple_threads_same_device(self):\n # Note, these parameters should be very carefully tuned\n # Too small number makes it hard for the racing condition\n # to happen, while too large number sometimes cause hang\n size = 1024\n num_threads = 2\n trials = 3\n test_iters = 100\n\n weight = torch.ones((size, size), device='cuda')\n results = {}\n barrier = threading.Barrier(num_threads)\n\n def _worker(t):\n my_stream = torch.cuda.Stream()\n # Hard sync so we don't need to worry about creating and using tensors\n # across streams or the fact that default streams are thread-local.\n # Those issues are not the target of this test.\n torch.cuda.synchronize()\n # Line up threads to increase likelihood of race conditions.\n barrier.wait()\n with torch.cuda.stream(my_stream):\n for i in range(test_iters):\n # If all threads are sharing the same cublas handle,\n # the following sequence may occur:\n # thread 0 calls cublasSetStream()\n # thread 1 calls cublasSetStream()\n # thread 0 launches its raw gemm, which it thinks is in\n # its own stream, but is actually in thread 1's stream.\n # thread 0 enqueues its div_, which IS is its own stream,\n # but actually now races with its gemm.\n results[t] = torch.mm(results[t], weight)\n results[t].div_(float(size))\n torch.cuda.synchronize()\n\n for _ in range(trials):\n for t in range(num_threads):\n results[t] = torch.ones((size, size), device='cuda')\n\n threads = [threading.Thread(target=_worker,\n args=(t,)) for t in range(num_threads)]\n\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join()\n\n for t in range(num_threads):\n self.assertEqual(results[t].sum().item(), size * size)\n\n # Test is flaky on Windows (https://github.com/pytorch/pytorch/issues/57401)\n @unittest.skipIf(IS_WINDOWS, 'Test is flaky on Windows (see issue 57401)')\n @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')\n @skipIfRocm\n def test_cudnn_multiple_threads_same_device(self):\n # This function is intended to test the lazy creation and reuse of per-thread\n # cudnn handles on each device in aten/src/ATen/cudnn/Handles.cpp.\n # Failure here likely indicates something wrong with that logic.\n weight = torch.ones((1, 1, 2, 2), device='cuda')\n\n results = {}\n\n num_threads = 2\n trials = 3\n test_iters = 1000\n barrier = threading.Barrier(num_threads)\n\n with torch.backends.cudnn.flags(enabled=True):\n def _worker(t):\n my_stream = torch.cuda.Stream()\n # Hard sync so we don't need to worry about creating and using tensors\n # across streams or the fact that default streams are thread-local.\n # Those issues are not the target of this test.\n torch.cuda.synchronize()\n # Line up threads to increase likelihood of race conditions.\n barrier.wait()\n with torch.cuda.stream(my_stream):\n for _ in range(test_iters):\n # If all threads are sharing the same cudnn handle,\n # the following sequence may occur:\n # thread 0 calls setCuDNNStreamToCurrent()\n # thread 1 calls setCuDNNStreamToCurrent()\n # thread 0 launches its raw convolution, which it thinks is in\n # its own stream, but is actually in thread 1's stream.\n # thread 0 
enqueues its div_, which IS is its own stream,\n # but now races with its convolution.\n results[t] = torch.nn.functional.conv2d(results[t], weight, padding=0)\n results[t].div_(4.0)\n torch.cuda.synchronize()\n\n for _ in range(trials):\n for t in range(num_threads):\n results[t] = torch.ones((1, 1, 2048, 2048), device='cuda')\n\n threads = [threading.Thread(target=_worker,\n args=(t,)) for t in range(num_threads)]\n\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join()\n\n for t in range(num_threads):\n self.assertEqual(results[t].sum().item(),\n (2048 - test_iters) * (2048 - test_iters))\n\n def test_cusparse_multiple_threads_same_device(self):\n size = 1024\n num_threads = 2\n trials = 3\n test_iters = 500\n\n def ones_sparse(size):\n a = torch.arange(size, device='cuda')\n indices = torch.cartesian_prod(a, a).t()\n values = torch.ones(size * size, device='cuda')\n return torch.sparse_coo_tensor(indices, values)\n\n weight = ones_sparse(size)\n results = {}\n barrier = threading.Barrier(num_threads)\n\n def _worker(t):\n my_stream = torch.cuda.Stream()\n # Hard sync so we don't need to worry about creating and using tensors\n # across streams or the fact that default streams are thread-local.\n # Those issues are not the target of this test.\n torch.cuda.synchronize()\n # Line up threads to increase likelihood of race conditions.\n barrier.wait()\n with torch.cuda.stream(my_stream):\n for i in range(test_iters):\n # If all threads are sharing the same cublas handle,\n # the following sequence may occur:\n # thread 0 calls cublasSetStream()\n # thread 1 calls cublasSetStream()\n # thread 0 launches its raw gemm, which it thinks is in\n # its own stream, but is actually in thread 1's stream.\n # thread 0 enqueues its div_, which IS is its own stream,\n # but actually now races with its gemm.\n results[t] = weight.mm(results[t])\n results[t].div_(float(size))\n torch.cuda.synchronize()\n\n for _ in range(trials):\n for t in range(num_threads):\n results[t] = torch.ones((size, size), device='cuda')\n\n threads = [threading.Thread(target=_worker,\n args=(t,)) for t in range(num_threads)]\n\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join()\n\n for t in range(num_threads):\n self.assertEqual(results[t].sum().item(), size * size)\n\n def _run_autocast_outofplace(self, op, args, run_as_type, out_type=None, module=torch, add_kwargs=None):\n # helper to cast args\n def cast(val, to_type):\n if isinstance(val, torch.Tensor):\n return val.to(to_type) if val.is_floating_point() else val\n elif isinstance(val, collections.abc.Iterable):\n return type(val)(cast(v, to_type) for v in val)\n else:\n return val\n\n if add_kwargs is None:\n add_kwargs = {}\n fast_dtype = torch.bfloat16 if run_as_type == torch.bfloat16 else torch.float16\n self.assertFalse(torch.is_autocast_enabled())\n with torch.autocast('cuda', dtype=fast_dtype):\n self.assertTrue(torch.is_autocast_enabled())\n\n out_type = out_type if out_type is not None else run_as_type\n output = output_method = None\n\n # Try module.* variant, if requested:\n if module is not None and hasattr(module, op):\n output = getattr(module, op)(*args, **add_kwargs)\n if isinstance(output, torch.Tensor):\n self.assertTrue(out_type == output.dtype,\n \"autocast for torch.{} produced {}, should produce {}\"\n .format(op, output.dtype, out_type))\n\n # Try Tensor.* variant:\n if hasattr(torch.Tensor, op):\n output_method = getattr(args[0], op)(*args[1:], **add_kwargs)\n if isinstance(output_method, 
torch.Tensor):\n self.assertTrue(out_type == output_method.dtype,\n \"autocast for torch.{} produced {}, should produce torch.{}\"\n .format(op, output_method.dtype, out_type))\n\n self.assertTrue((output is not None) or (output_method is not None),\n \"{} not found as an attribute on either Tensor or the requested module {}\".format(\n op, module))\n\n # Accounts for ops that return Tensors, iterables, and other non-Tensors.\n # For example, lstm_cell returns a tuple and equal returns bool.\n def compare(first, second):\n if isinstance(first, torch.Tensor):\n return torch.equal(first, second)\n elif isinstance(first, collections.abc.Iterable):\n return all(compare(f, s) for f, s in zip(first, second))\n else:\n return first == second\n\n # If both torch.* and Tensor.* variants were found, check outputs are identical\n if (output is not None) and (output_method is not None):\n self.assertTrue(type(output) == type(output_method))\n comparison = compare(output, output_method)\n self.assertTrue(comparison, \"torch.{0} result did not match Tensor.{0} result\".format(op))\n\n # Compare numerics to Python-side \"autocasting\" that (we expect) does the same thing\n # as the C++-side autocasting, and should be bitwise accurate.\n output_to_compare = output if output is not None else output_method\n with torch.autocast('cuda', enabled=False):\n self.assertFalse(torch.is_autocast_enabled())\n\n if module is not None and hasattr(module, op):\n control = getattr(module, op)(*cast(args, run_as_type), **add_kwargs)\n else:\n control = getattr(args[0].to(run_as_type), op)(*cast(args[1:], run_as_type), **add_kwargs)\n self.assertTrue(type(output_to_compare) == type(control))\n comparison = compare(output_to_compare, control)\n self.assertTrue(comparison, \"torch.{} result did not match control\".format(op))\n self.assertTrue(torch.is_autocast_enabled())\n self.assertFalse(torch.is_autocast_enabled())\n\n def args_maybe_kwargs(self, op_with_args):\n if len(op_with_args) == 2:\n return op_with_args[0], op_with_args[1], {}\n else:\n return op_with_args[0], op_with_args[1], op_with_args[2]\n\n @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')\n def test_autocast_torch_fp16(self):\n with torch.backends.cudnn.flags(enabled=True, deterministic=True):\n for op_with_args in self.autocast_lists.torch_fp16:\n skip_test = False\n op, args = op_with_args[0], op_with_args[1]\n if len(op_with_args) == 3:\n skip_test = op_with_args[2] # TEST_WITH_ROCM\n if not skip_test:\n self._run_autocast_outofplace(op, args, torch.float16)\n\n @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')\n def test_autocast_torch_bf16(self):\n with torch.backends.cudnn.flags(enabled=True, deterministic=True):\n for op_with_args in self.autocast_lists.torch_fp16:\n skip_test = False\n op, args = op_with_args[0], op_with_args[1]\n if len(op_with_args) == 3:\n skip_test = op_with_args[2] # TEST_WITH_ROCM\n should_error_from_not_implemented = 'cudnn' in op or 'prelu' in op or 'thnn' in op \\\n or 'fused' in op or 'gru' in op or op == '_thnn_fused_lstm_cell' or op == 'lstm_cell'\n if not skip_test:\n if should_error_from_not_implemented:\n with self.assertRaises(RuntimeError, msg=str(op) + ' should not be supported for bfloat16!'):\n self._run_autocast_outofplace(op, args, torch.bfloat16)\n else:\n if torch.cuda.is_bf16_supported():\n self._run_autocast_outofplace(op, args, torch.bfloat16)\n else:\n with self.assertRaisesRegex(RuntimeError, 'Device does not support bfloat16'):\n self._run_autocast_outofplace(op, args, torch.bfloat16)\n\n 
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')\n def test_autocast_torch_fp32(self):\n for op_with_args in self.autocast_lists.torch_fp32:\n op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)\n self._run_autocast_outofplace(op, args, torch.float32, add_kwargs=maybe_kwargs)\n\n @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')\n def test_autocast_torch_need_autocast_promote(self):\n for op, args in self.autocast_lists.torch_need_autocast_promote:\n self._run_autocast_outofplace(op, args, torch.float32)\n\n @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')\n def test_autocast_torch_expect_builtin_promote(self):\n for op, args, out_type in self.autocast_lists.torch_expect_builtin_promote:\n self._run_autocast_outofplace(op, args, torch.float32, out_type=out_type)\n\n @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')\n def test_autocast_nn_fp16(self):\n with torch.backends.cudnn.flags(enabled=True, deterministic=True):\n for op, args in self.autocast_lists.nn_fp16:\n self._run_autocast_outofplace(op, args, torch.float16, module=torch._C._nn)\n\n\n\n @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')\n def test_autocast_nn_bf16(self):\n with torch.backends.cudnn.flags(enabled=True, deterministic=True):\n for op, args in self.autocast_lists.nn_fp16:\n if torch.cuda.is_bf16_supported():\n self._run_autocast_outofplace(op, args, torch.bfloat16, module=torch._C._nn)\n else:\n with self.assertRaisesRegex(RuntimeError, 'Device does not support bfloat16'):\n self._run_autocast_outofplace(op, args, torch.bfloat16, module=torch._C._nn)\n\n @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')\n def test_autocast_nn_fp32(self):\n for op, args in self.autocast_lists.nn_fp32:\n self._run_autocast_outofplace(op, args, torch.float32, module=torch._C._nn)\n\n @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')\n def test_autocast_linalg_fp16(self):\n with torch.backends.cudnn.flags(enabled=True, deterministic=True):\n for op, args in self.autocast_lists.linalg_fp16:\n self._run_autocast_outofplace(op, args, torch.float16, module=torch._C._linalg)\n\n @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')\n def test_autocast_methods_fp16(self):\n with torch.backends.cudnn.flags(enabled=True, deterministic=True):\n for op, args in self.autocast_lists.methods_fp16:\n self._run_autocast_outofplace(op, args, torch.float16, module=None)\n\n @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')\n def test_autocast_methods_fp32(self):\n for op, args in self.autocast_lists.methods_fp32:\n self._run_autocast_outofplace(op, args, torch.float32, module=None)\n\n @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')\n def test_autocast_methods_expect_builtin_promote(self):\n for op, args, out_type in self.autocast_lists.methods_expect_builtin_promote:\n self._run_autocast_outofplace(op, args, torch.float32, module=None, out_type=out_type)\n\n def test_autocast_banned(self):\n with torch.autocast('cuda'):\n for op, args, module in self.autocast_lists.banned:\n with self.assertRaises(RuntimeError):\n getattr(module, op)(*args)\n\n def test_autocast_ignored_types(self):\n with torch.autocast('cuda'):\n for ignore_type in (torch.double, torch.int32):\n a_ignore = torch.ones((8, 8), dtype=ignore_type, device=\"cuda:0\")\n b_ignore = torch.ones((8, 8), dtype=ignore_type, device=\"cuda:0\")\n c_16 = torch.ones((8, 8), dtype=torch.float16, device=\"cuda:0\")\n\n # Tests if CastPolicy::fp16 ops ignore double and int\n # Currently, no ops belonging to this policy support integer 
inputs.\n if ignore_type is torch.double:\n with self.assertRaises(RuntimeError):\n torch.mm(a_ignore, c_16)\n with torch.autocast('cuda', enabled=False):\n type_no_autocast = torch.mm(a_ignore, b_ignore).dtype\n self.assertTrue(torch.mm(a_ignore, b_ignore).dtype is type_no_autocast)\n\n # Tests if CastPolicy::fp32 ops ignore double and int\n with torch.autocast('cuda', enabled=False):\n type_no_autocast = torch.pow(a_ignore, 2.0).dtype\n self.assertTrue(torch.pow(a_ignore, 2.0).dtype is type_no_autocast)\n\n # Tests if CastPolicy::fp32_set_opt_dtype ops ignore double and int\n with torch.autocast('cuda', enabled=False):\n type_no_autocast = torch.sum(a_ignore).dtype\n self.assertTrue(torch.sum(a_ignore).dtype is type_no_autocast)\n\n # Tests if CastPolicy::fp32_append_dtype ops ignore double and int\n # Currently, no ops belonging to this policy support integer inputs.\n if ignore_type is torch.double:\n with torch.autocast('cuda', enabled=False):\n type_no_autocast = torch.norm(a_ignore).dtype\n self.assertTrue(torch.norm(a_ignore).dtype is type_no_autocast)\n\n def test_autocast_custom_enabled(self):\n class MyMM(torch.autograd.Function):\n @staticmethod\n @torch.cuda.amp.custom_fwd\n def forward(ctx, a, b):\n self.assertTrue(a.dtype is torch.float32)\n self.assertTrue(b.dtype is torch.float32)\n self.assertTrue(torch.is_autocast_enabled())\n ctx.save_for_backward(a, b)\n return a.mm(b)\n\n @staticmethod\n @torch.cuda.amp.custom_bwd\n def backward(ctx, grad):\n self.assertTrue(torch.is_autocast_enabled())\n a, b = ctx.saved_tensors\n return grad.mm(b.t()), a.t().mm(grad)\n\n mymm = MyMM.apply\n\n x = torch.randn((8, 8), device=\"cuda\", dtype=torch.float32, requires_grad=True)\n y = torch.randn((8, 8), device=\"cuda\", dtype=torch.float32, requires_grad=True)\n\n with torch.cuda.amp.autocast():\n output = mymm(x, y)\n self.assertTrue(output.dtype is torch.float16)\n loss = output.sum()\n loss.backward()\n\n def test_autocast_custom_cast_inputs(self):\n class MyMM(torch.autograd.Function):\n @staticmethod\n @torch.cuda.amp.custom_fwd(cast_inputs=torch.float32)\n def forward(ctx, a, container, expect_type):\n b = container[1][0]\n self.assertTrue(a.dtype is expect_type)\n self.assertTrue(b.dtype is expect_type)\n self.assertFalse(torch.is_autocast_enabled())\n ctx.save_for_backward(a, b)\n return a.mm(b)\n\n @staticmethod\n @torch.cuda.amp.custom_bwd\n def backward(ctx, grad):\n self.assertFalse(torch.is_autocast_enabled())\n a, b = ctx.saved_tensors\n return grad.mm(b.t()), None, None\n\n mymm = MyMM.apply\n\n x = torch.randn((8, 8), device=\"cuda\", dtype=torch.float16, requires_grad=True)\n # Puts one input tensor in a nested container. 
y's contained Tensor won't receive a gradient,\n # because torch.autograd.Function can't hand gradients back to non-Tensor forward arguments.\n # Sets requires_grad=False explicitly so we don't lie about expecting a gradient.\n y = (0, {0: torch.randn((8, 8), device=\"cuda\", dtype=torch.float16, requires_grad=False)})\n\n with torch.autocast('cuda', ):\n output = mymm(x, y, torch.float32)\n self.assertTrue(output.dtype is torch.float32)\n loss = output.sum()\n loss.backward()\n\n # Tests if custom_fwd becomes a no-op when mymm runs outside an autocast-enabled region.\n output = mymm(x, y, torch.float16)\n self.assertTrue(output.dtype is torch.float16)\n loss = output.sum()\n loss.backward()\n\n def test_autocast_cat_jit(self):\n # Reported at https://github.com/pytorch/pytorch/issues/38958\n\n class Model(torch.nn.Module):\n def forward(self):\n a = torch.randn(1)\n b = torch.randn(1)\n c = torch.cat((a, b), 0)\n d = torch.stack([c, c], 0)\n return d\n\n # The JIT here doesn't really matter, we just need to call\n # cat via the boxed API\n model = Model()\n model_jit_script = torch.jit.script(model)\n\n with torch.autocast('cuda', enabled=True):\n model()\n model_jit_script()\n\n # cudnn RNNs require special backend handling (weights are cast to FP16 and reflattened)\n # so they get a dedicated test.\n # Despite the large number of RNN cases it tries, the test takes < 15 seconds on a Titan V (similar to V100).\n @skipIfRocm\n @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')\n def test_autocast_rnn(self):\n with torch.backends.cudnn.flags(enabled=True, deterministic=True):\n # seq, batch, features, hidden size\n clses = (\"RNN\", \"GRU\", \"LSTM\")\n T, B, F, H = 3, 4, 5, 6\n dtypes = (torch.float16, torch.float32)\n input_layouts = (\"seq_first\", \"batch_first\", \"packed\")\n\n for (cls, num_layers, bias, input_layout, bidirectional, try_nonpreflattened_weights,\n input_dtype, hidden_dtype, weight_dtype) in \\\n product(clses, (1, 2), (True, False), input_layouts, (True, False), (True, False),\n dtypes, dtypes, dtypes):\n if input_layout == \"seq_first\":\n batch_first = False\n x = torch.randn((T, B, F), device=\"cuda\", dtype=input_dtype)\n elif input_layout == \"batch_first\":\n batch_first = True\n x = torch.randn((B, T, F), device=\"cuda\", dtype=input_dtype)\n elif input_layout == \"packed\":\n batch_first = False\n x = torch.nn.utils.rnn.pack_padded_sequence(torch.randn((T, B, F),\n device=\"cuda\", dtype=input_dtype),\n lengths=(3, 2, 1, 3),\n enforce_sorted=False)\n\n rnn = getattr(torch.nn, cls)(F, H, num_layers=num_layers, bidirectional=bidirectional,\n bias=bias, batch_first=batch_first).cuda().to(dtype=weight_dtype)\n\n if try_nonpreflattened_weights:\n for p in rnn.parameters():\n with torch.no_grad():\n p.set_(p.clone())\n\n h = torch.randn((num_layers * (2 if bidirectional else 1), B, H),\n device=\"cuda\", dtype=hidden_dtype)\n if cls == \"LSTM\":\n c = torch.randn((num_layers * (2 if bidirectional else 1), B, H),\n device=\"cuda\", dtype=hidden_dtype)\n h = (h, c)\n\n with torch.autocast('cuda', ):\n out, h_out = rnn(x, h)\n out = out.data if input_layout == \"packed\" else out\n self.assertEqual(out.dtype, torch.float16)\n # Autocast wrapper requires at::_cudnn_rnn is autograd-exposed. 
This check can't guarantee\n # at::_cudnn_rnn is autograd-exposed, but if it fires, it indicates some funny business has\n # occurred and we should double check that at::_cudnn_rnn remains autograd-exposed.\n self.assertEqual(out.grad_fn.name(), \"CudnnRnnBackward0\")\n out.sum().backward()\n grads = [p.grad.clone() for p in rnn.parameters()]\n\n rnn.zero_grad()\n\n if cls == \"LSTM\":\n out_control, h_out_control = rnn.to(dtype=torch.float16)(x.half(), (h[0].half(), h[1].half()))\n else:\n out_control, h_out_control = rnn.to(dtype=torch.float16)(x.half(), h.half())\n out_control = out_control.data if input_layout == \"packed\" else out_control\n out_control.sum().backward()\n grads_control = [p.grad.clone() for p in rnn.parameters()]\n\n # Compares with default tolerances, even for FP16 execution. Barring nondeterminism,\n # autocast and control results should be bitwise identical.\n self.assertEqual(out, out_control)\n\n if cls == \"LSTM\":\n self.assertTrue(h_out[0].dtype is torch.float16 and h_out[1].dtype is torch.float16)\n self.assertEqual(h_out[0], h_out_control[0])\n self.assertEqual(h_out[1], h_out_control[1])\n else:\n self.assertEqual(h_out.dtype, torch.float16)\n self.assertEqual(h_out, h_out_control)\n for grad, grad_control in zip(grads, grads_control):\n self.assertEqual(grad.half(), grad_control)\n\n def test_autocast_cache_leak(self):\n # Reported at https://github.com/pytorch/pytorch/issues/48049\n # Test is used to check, if autocast recaches the same parameters\n # when executed in a `torch.no_grad()` block.\n\n linear = torch.nn.Linear(10, 10).to('cuda')\n data = torch.randn(1, 10, device='cuda')\n\n with torch.autocast('cuda', ):\n with torch.no_grad():\n out = linear(data)\n first_iter_mem = torch.cuda.memory_allocated()\n for _ in range(3):\n out = linear(data)\n self.assertTrue(first_iter_mem == torch.cuda.memory_allocated())\n\n def test_autocast_checkpointing(self):\n model = torch.nn.Sequential(torch.nn.Linear(8, 8),\n torch.nn.Linear(8, 8),\n torch.nn.Linear(8, 8)).cuda()\n input = torch.rand((8, 8), device=\"cuda\", dtype=torch.float16, requires_grad=True)\n with torch.autocast('cuda', ):\n output = checkpoint_sequential(model, 2, input)\n self.assertTrue(output.requires_grad)\n self.assertTrue(output.dtype is torch.float16)\n output.sum().backward()\n\n @slowTest\n @unittest.skipIf(not TEST_LARGE_TENSOR, \"not enough memory\")\n def test_max_large_axis(self):\n x = torch.zeros(2**32, device='cuda', dtype=torch.int8)\n x[-1] = 1\n val, idx = x.max(0)\n self.assertEqual(val, 1)\n self.assertEqual(idx, x.shape[0] - 1)\n\n @unittest.skipIf(not TEST_NUMPY, \"Numpy not found\")\n def test_to_numpy(self):\n self.assertRaises(TypeError, lambda: torch.empty(1, device=\"cuda\").numpy())\n\n def test_graph_is_current_stream_capturing(self):\n self.assertFalse(torch.cuda.is_current_stream_capturing())\n\n if (TEST_CUDA and (not TEST_WITH_ROCM) and int(torch.version.cuda.split(\".\")[0]) >= 11):\n s = torch.cuda.Stream()\n with torch.cuda.stream(s):\n g = torch.cuda.CUDAGraph()\n self.assertFalse(torch.cuda.is_current_stream_capturing())\n g.capture_begin()\n self.assertTrue(torch.cuda.is_current_stream_capturing())\n g.capture_end()\n\n @unittest.skipIf((not TEST_CUDA) or\n TEST_WITH_ROCM or\n int(torch.version.cuda.split(\".\")[0]) < 11, \"CUDA >= 11.0 required for graphs\")\n def test_graph_capture_simple(self):\n s = torch.cuda.Stream()\n\n with torch.cuda.stream(s):\n a = torch.full((1000,), 1, device=\"cuda\")\n g = torch.cuda.CUDAGraph()\n 
torch.cuda.empty_cache()\n g.capture_begin()\n b = a\n for _ in range(10):\n b = b + 1\n g.capture_end()\n torch.cuda.current_stream().wait_stream(s)\n\n g.replay()\n\n self.assertTrue(b.sum().item() == 11000.)\n\n @unittest.skipIf((not TEST_CUDA) or\n TEST_WITH_ROCM or\n int(torch.version.cuda.split(\".\")[0]) < 11, \"CUDA >= 11.0 required for graphs\")\n def test_graph_capture_oom(self):\n with self.assertRaisesRegex(RuntimeError, \"out of memory\"):\n with torch.cuda.graph(torch.cuda.CUDAGraph()):\n torch.zeros(2 ** 40, device=\"cuda\")\n\n @unittest.skipIf((not TEST_CUDA) or\n TEST_WITH_ROCM or\n int(torch.version.cuda.split(\".\")[0]) < 11, \"CUDA >= 11.0 required for graphs\")\n def test_graph_rng_functional(self):\n ops_with_kwargs = ((torch.nn.functional.dropout, {\"p\": 0.1}),\n (torch.nn.functional.rrelu, {\"training\": True}),)\n size = 10000\n\n def run(op, kwargs):\n a = torch.randn((size,), device=\"cuda\", dtype=torch.float)\n\n # Control\n torch.cuda.manual_seed(5)\n eager_out = a\n for _ in range(6):\n eager_out = op(eager_out, **kwargs)\n\n graph_in = a.clone()\n stream = torch.cuda.Stream()\n stream.wait_stream(torch.cuda.current_stream())\n with torch.cuda.stream(stream):\n torch.cuda.manual_seed(5)\n\n g = torch.cuda.CUDAGraph()\n torch.cuda.empty_cache()\n g.capture_begin()\n graph_out = graph_in\n for _ in range(2):\n graph_out = op(graph_out, **kwargs)\n g.capture_end()\n torch.cuda.current_stream().wait_stream(stream)\n\n # Runs a graphed->eager->graphed sequence of RNG ops.\n # replay() plays 2 invocations of the op, so the sequence has 6\n # invocations total, matching Control.\n # replay() reads from graph_in and writes to graph_out.\n g.replay()\n out = op(graph_out, **kwargs)\n out = op(out, **kwargs)\n graph_in.copy_(out)\n g.replay()\n\n # If replay() updated RNG state correctly, graph_out\n # should now hold data equal to eager_out.\n try:\n self.assertEqual(eager_out, graph_out)\n except Exception as e:\n raise RuntimeError(\"Failed on \", op) from e\n\n # We hold references to all tensors used across streams up til this sync,\n # so no need to call record_stream on those tensors.\n torch.cuda.synchronize()\n\n for op, kwargs in ops_with_kwargs:\n run(op, kwargs)\n\n @unittest.skipIf((not TEST_CUDA) or\n TEST_WITH_ROCM or\n int(torch.version.cuda.split(\".\")[0]) < 11, \"CUDA >= 11.0 required for graphs\")\n def test_graph_rng_distributions(self):\n size = 10000\n input = torch.rand((size,), device=\"cuda\", dtype=torch.float)\n alloc = torch.empty((size,), device=\"cuda\", dtype=torch.float)\n\n # Torch ops to test with sample args (tuple) and kwargs (dict)\n torch_with_args = ((\"bernoulli\", (input.clone(),), {}),\n # multinomial uses some uncapturable CUDA calls.\n # TODO: reenable multinomial tests if/when the implementation is capturable.\n # (\"multinomial\", (input.clone(), size, True), {}),\n # (\"multinomial\", (input.clone(), size // 2, False), {}),\n # TODO: reenable normal test, where std is a device\n # tensor, when graph test failures are fixed\n # (\"normal\", (input.clone() + 1, input.clone()), {}),\n (\"normal\", (input.clone() + 1, 1.0), {}),\n (\"poisson\", (input.clone(),), {}),\n (\"rand\", (size,), {\"device\": \"cuda\", \"dtype\": torch.float}),\n (\"randint\", (0, 3, (size,)), {\"device\": \"cuda\", \"dtype\": torch.float}),\n (\"randn\", (size,), {\"device\": \"cuda\", \"dtype\": torch.float}),)\n\n # Tensor methods to test with sample args (tuple)\n tensor_with_args = ((\"bernoulli_\", (input.clone(),)),\n (\"cauchy_\", ()),\n 
(\"exponential_\", ()),\n (\"geometric_\", (0.3,)),\n (\"log_normal_\", ()),\n (\"normal_\", ()),\n (\"random_\", ()),\n (\"uniform_\", ()),)\n\n def run(module, op, args, kwargs):\n torch.cuda.manual_seed(5)\n\n # Each path runs a dummy op to increment the state a bit before creating controls.\n if (module == \"torch\"):\n dummy = getattr(torch, op)(*args, **kwargs)\n control1 = getattr(torch, op)(*args, **kwargs)\n control2 = getattr(torch, op)(*args, **kwargs)\n else:\n dummy = alloc.clone()\n control1 = alloc.clone()\n control2 = alloc.clone()\n getattr(dummy, op)(*args)\n getattr(control1, op)(*args)\n getattr(control2, op)(*args)\n\n stream = torch.cuda.Stream()\n stream.wait_stream(torch.cuda.current_stream())\n with torch.cuda.stream(stream):\n torch.cuda.manual_seed(5)\n\n g = torch.cuda.CUDAGraph()\n torch.cuda.empty_cache()\n if (module == \"torch\"):\n g.capture_begin()\n t1 = getattr(torch, op)(*args, **kwargs)\n t2 = getattr(torch, op)(*args, **kwargs)\n g.capture_end()\n else:\n t1 = alloc.clone()\n t2 = alloc.clone()\n g.capture_begin()\n getattr(t1, op)(*args)\n getattr(t2, op)(*args)\n g.capture_end()\n torch.cuda.current_stream().wait_stream(stream)\n\n try:\n self.assertNotEqual(control1, t1)\n self.assertNotEqual(control2, t2)\n except Exception as e:\n raise RuntimeError(\"Failed on \" + module + \".\" + op) from e\n\n # Runs a dummy op prelude, as for controls, to make sure replay()\n # picks up the dummy op's state increment.\n if module == \"torch\":\n dummy = getattr(torch, op)(*args, **kwargs)\n else:\n dummy = alloc.clone()\n getattr(dummy, op)(*args)\n\n # Runs RNG ops that fill t1 and t2.\n g.replay()\n\n try:\n self.assertEqual(control1, t1)\n self.assertEqual(control2, t2)\n except Exception as e:\n raise RuntimeError(\"Failed on \" + module + \".\" + op) from e\n\n # We hold references to all tensors used across streams up til this sync,\n # so no need to call record_stream on those tensors.\n torch.cuda.synchronize()\n\n for op_with_args in torch_with_args:\n run(\"torch\", *op_with_args)\n\n for meth_with_args in tensor_with_args:\n # Adds an empty dict for kwargs, which none of the Tensor methods use\n run(\"Tensor\", *(meth_with_args + ({},)))\n\n @unittest.skipIf((not TEST_CUDA) or\n TEST_WITH_ROCM or\n int(torch.version.cuda.split(\".\")[0]) < 11, \"CUDA >= 11.0 required for graphs\")\n def test_graph_two_successive(self):\n torch.cuda.empty_cache()\n\n size = 1000\n kSmallBuffer = 2097152\n\n def func_with_temps(t, val):\n x = t.clone() + val\n y = t.clone() + val\n return x + y\n\n s = torch.cuda.Stream()\n\n for share_mem in (\"Don't share\", \"via pool()\", \"via graph_pool_handle()\"):\n g0 = torch.cuda.CUDAGraph()\n g1 = torch.cuda.CUDAGraph()\n\n a = torch.ones((size,), device=\"cuda\")\n\n s.wait_stream(torch.cuda.current_stream())\n with torch.cuda.stream(s):\n g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == \"via graph_pool_handle()\" else ()\n g0.capture_begin(*g0_args)\n b = a.clone()\n for _ in range(5):\n b = func_with_temps(b, 1)\n g0.capture_end()\n\n g1_args = (g0.pool(),) if share_mem == \"via pool()\" else g0_args\n g1.capture_begin(*g1_args)\n for _ in range(5):\n b = func_with_temps(b, 1)\n g1.capture_end()\n torch.cuda.current_stream().wait_stream(s)\n\n # mixes unrelated eager ops with replays\n c = a.clone()\n for _ in range(2):\n c = func_with_temps(c, 3)\n g0.replay()\n for _ in range(2):\n c = func_with_temps(c, 3)\n g1.replay()\n for _ in range(2):\n c = func_with_temps(c, 3)\n\n self.assertEqual(b.sum().item(), 
size * 3070)\n self.assertEqual(c.sum().item(), size * 442)\n\n if share_mem != \"Don't share\":\n self.assertEqual(reserved_no_sharing - torch.cuda.memory_stats()[\"reserved_bytes.all.current\"],\n kSmallBuffer)\n else:\n reserved_no_sharing = torch.cuda.memory_stats()[\"reserved_bytes.all.current\"]\n\n del a, b, c, g0, g1\n # Tensors used across streams (a and b) were held until just now, so no need to call record_stream on them.\n torch.cuda.synchronize()\n torch.cuda.empty_cache()\n\n @unittest.skip(\"Temporarily disabled due to a graphs bug in libcuda.so, \" +\n \"see https://github.com/pytorch/pytorch/pull/57556\")\n @unittest.skipIf((not TEST_CUDA) or\n TEST_WITH_ROCM or\n int(torch.version.cuda.split(\".\")[0]) < 11, \"CUDA >= 11.0 required for graphs\")\n def test_graph_concurrent_replay(self):\n torch.cuda.empty_cache()\n\n size = 1000000 # largeish to help expose race conditions\n\n def func_with_temps(t, val):\n x = t.clone() + val\n y = t.clone() + val\n return x + y\n\n s = torch.cuda.Stream()\n\n for share_mem in (\"Don't share\", \"via pool()\", \"via graph_pool_handle()\"):\n g0 = torch.cuda.CUDAGraph()\n g1 = torch.cuda.CUDAGraph()\n\n s0 = torch.cuda.Stream()\n s1 = torch.cuda.Stream()\n\n a = torch.ones((size,), device=\"cuda\")\n\n s.wait_stream(torch.cuda.current_stream())\n with torch.cuda.stream(s):\n g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == \"via graph_pool_handle()\" else ()\n g0.capture_begin(*g0_args)\n b = a.clone()\n for _ in range(5):\n b = func_with_temps(b, 1)\n g0.capture_end()\n\n g1_args = (g0.pool(),) if share_mem == \"via pool()\" else g0_args\n g1.capture_begin(*g1_args)\n c = a.clone()\n for _ in range(5):\n c = func_with_temps(c, 2)\n g1.capture_end()\n\n # To reproduce data corruption, I need g0 and g1's kernels to run concurrently.\n # But replay() (especially cudaGraphLaunch) can incur significant CPU overhead.\n # The following pattern helps align device-side execution of g0 and g1's kernels.\n torch.cuda.synchronize()\n with torch.cuda.stream(s0):\n torch.cuda._sleep(1000000)\n s1.wait_stream(s0)\n g0.replay()\n with torch.cuda.stream(s1):\n g1.replay()\n torch.cuda.current_stream().wait_stream(s0)\n torch.cuda.current_stream().wait_stream(s1)\n\n if share_mem != \"Don't share\":\n # Confirms concurrent replays using the same mempool corrupted each other.\n self.assertNotEqual(b.sum().item(), size * 94)\n self.assertNotEqual(c.sum().item(), size * 156)\n else:\n # Confirms concurrent replays using different mempools did not corrupt each other.\n self.assertEqual(b.sum().item(), size * 94)\n self.assertEqual(c.sum().item(), size * 156)\n\n del a, b, c, g0, g1\n # Tensors used across streams (a, b, c) were held until just now, so no need to call record_stream on them.\n torch.cuda.synchronize()\n torch.cuda.empty_cache()\n\n @unittest.skipIf((not TEST_CUDA) or\n TEST_WITH_ROCM or\n int(torch.version.cuda.split(\".\")[0]) < 11, \"CUDA >= 11.0 required for graphs\")\n def test_graph_three_successive(self):\n torch.cuda.empty_cache()\n\n size = 1000\n\n s = torch.cuda.Stream()\n\n for share_mem in (\"Don't share\", \"via pool()\", \"via graph_pool_handle()\"):\n a = torch.ones((size,), device=\"cuda\")\n\n g0 = torch.cuda.CUDAGraph()\n g1 = torch.cuda.CUDAGraph()\n g2 = torch.cuda.CUDAGraph()\n\n s.wait_stream(torch.cuda.current_stream())\n with torch.cuda.stream(s):\n g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == \"via graph_pool_handle()\" else ()\n g0.capture_begin(*g0_args)\n b = a.clone()\n c = b + 1\n d = b + 
2\n g0.capture_end()\n\n args = (g0.pool(),) if share_mem == \"via pool()\" else g0_args\n\n g1.capture_begin(*args)\n e = c + 3\n del c\n g1.capture_end()\n\n g2.capture_begin(*args)\n f = d + 4\n g2.capture_end()\n torch.cuda.current_stream().wait_stream(s)\n\n # Tests that replaying in capture order is valid\n g0.replay()\n g1.replay()\n g2.replay()\n\n self.assertEqual(e.sum().item(), size * 5)\n self.assertEqual(f.sum().item(), size * 7)\n\n # Tests that replaying as g0, g2, g1 is only valid if they don't share a pool\n g0.replay()\n g2.replay()\n g1.replay()\n\n # If share_mem is True, g2's capture should have reused c's memory for f. We replayed g2 then g1,\n # so we expect g1's captured \"e = c + 3\" mistakenly filled e with \"f's vals + 3\".\n self.assertEqual(e.sum().item(), size * (7 + 3) if share_mem != \"Don't share\" else size * 5)\n self.assertEqual(f.sum().item(), size * 7)\n\n del a, b, d, e, f, g0, g1, g2\n # Tensors used across streams (a, e, f) were held until just now, so no need to call record_stream on them.\n torch.cuda.synchronize()\n torch.cuda.empty_cache()\n\n @unittest.skipIf((not TEST_CUDA) or\n TEST_WITH_ROCM or\n int(torch.version.cuda.split(\".\")[0]) < 11, \"CUDA >= 11.0 required for graphs\")\n def test_graph_memory_stats_and_use_result_after_destroy_graph(self):\n kSmallSize = 1048576\n kSmallBuffer = 2097152\n kLargeBuffer = 20971520\n kMinLargeAlloc = 10485760\n kRoundLarge = 2097152\n\n elem = 4\n\n # this was annoying to write but stresses the expectations pretty rigorously\n cases = ((512 // elem, 1, kSmallBuffer, kSmallBuffer, \"small_pool\"),\n (kSmallSize // elem, 2, 2 * kSmallBuffer, kSmallBuffer, \"small_pool\"),\n ((kSmallSize + 512) // elem, 1, kLargeBuffer, kLargeBuffer, \"large_pool\"),\n ((kMinLargeAlloc - 512) // elem, 2, 2 * kLargeBuffer, kLargeBuffer, \"large_pool\"),\n ((kMinLargeAlloc + 512) // elem, 3,\n 3 * (kRoundLarge * ((kMinLargeAlloc + 512 + kRoundLarge - 1) // kRoundLarge)),\n kRoundLarge * ((kMinLargeAlloc + 512 + kRoundLarge - 1) // kRoundLarge),\n \"large_pool\"),)\n\n stats_to_check = (\"segment.\",\n \"reserved_bytes.\",\n \"active.\",\n \"active_bytes.\")\n\n gc.collect()\n torch.cuda.empty_cache()\n\n s = torch.cuda.Stream()\n\n for (numel,\n delta_cudaMallocs,\n delta_cudaMalloc_bytes,\n delta_cudaMalloc_bytes_post_del_g,\n pool_string) in cases:\n if pool_string == \"small_pool\":\n delta_active_blocks = 2 # one from \"b\" plus a sneaky one from CUDAGraph's one-element rng offset holder\n delta_active_bytes = numel * elem + 512 # + 512 for CUDAGraph's rng offset holder\n else:\n delta_active_blocks = 1 # We only check the large pool, which isn't affected by rng offset holder\n delta_active_bytes = numel * elem\n\n g = torch.cuda.CUDAGraph()\n s.wait_stream(torch.cuda.current_stream())\n with torch.cuda.stream(s):\n # Allocation stat estimates assume input is created on the same stream as capture_begin()\n # (in other words, the same stream silo as the rng offset holder, which is not allocated from the\n # capture's private pool).\n a = torch.ones((numel,), device=\"cuda\")\n\n precapture_stats = torch.cuda.memory_stats()\n\n g.capture_begin()\n b = a.clone()\n for _ in range(5):\n b = b.clone() + 1\n g.capture_end()\n torch.cuda.current_stream().wait_stream(s)\n\n gc.collect()\n\n postcapture_stats = torch.cuda.memory_stats()\n\n expecteds = (delta_cudaMallocs,\n delta_cudaMalloc_bytes,\n delta_active_blocks,\n delta_active_bytes)\n # Double checks replay and stats before and after a call to empty_cache\n for i in 
range(2):\n for stat, expected in zip(stats_to_check, expecteds):\n stat = stat + pool_string + \".current\"\n current = postcapture_stats[stat] - precapture_stats[stat]\n self.assertEqual(current, expected, \"Pre to post capture delta of \" +\n stat + \" = {}, expected = {}, numel = {}\".format(current, expected, numel))\n\n g.replay()\n self.assertEqual(b.sum().item(), 6 * numel)\n if i == 0:\n torch.cuda.empty_cache()\n\n del g\n gc.collect()\n torch.cuda.empty_cache()\n postdel_stats = torch.cuda.memory_stats()\n\n # Uses graph result b after graph has been deleted\n self.assertEqual(b.sum().item(), 6 * numel)\n\n # b should be the only live reference remaining from the graph's private pool\n expecteds = (1, delta_cudaMalloc_bytes_post_del_g, 1, numel * elem)\n for stat, expected in zip(stats_to_check, expecteds):\n stat = stat + pool_string + \".current\"\n current = postdel_stats[stat] - precapture_stats[stat]\n self.assertEqual(current, expected, \"Pre capture to post graph delete delta of \" +\n stat + \" = {}, expected = {}, numel = {}\".format(current, expected, numel))\n\n # del a, b before the next case is essential, otherwise overwriting a and b in the next case\n # can throw off its allocation/deallocation counts.\n del a, b\n # Tensors used across streams (a and b) were held until just now, so no need to call record_stream on them.\n torch.cuda.synchronize()\n torch.cuda.empty_cache()\n\n @unittest.skipIf((not TEST_CUDA) or\n TEST_WITH_ROCM or\n int(torch.version.cuda.split(\".\")[0]) < 11, \"CUDA >= 11.0 required for graphs\")\n def test_graph_record_stream(self):\n # Makes sure graph capture defers attempting to reclaim allocations used across streams. See\n # \"Q. Why skip process_events if a capture might be underway?\" in c10/cuda/CUDACachingAllocator.cpp\n torch.cuda.empty_cache()\n\n potential_problem = torch.zeros((3,), device=\"cuda\")\n a = torch.zeros((3,), device=\"cuda\")\n s0 = torch.cuda.Stream()\n s1 = torch.cuda.Stream()\n s2 = torch.cuda.Stream()\n g = torch.cuda.CUDAGraph()\n\n torch.cuda.synchronize()\n with torch.cuda.stream(s0):\n potential_problem.record_stream(s0)\n torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)\n potential_problem.fill_(1.)\n del potential_problem\n\n with torch.cuda.stream(s1):\n g.capture_begin()\n # potential_problem's allocation should still be outstanding. if DeviceCachingAllocator::malloc\n # mistakenly calls process_events, it will trigger cudaEventQueries on potential_problem's end-of-life\n # event, which will cause the capture to error.\n b = a.clone()\n\n # Let's also see what happens if we record_stream on a tensor during capture.\n s2.wait_stream(s1)\n with torch.cuda.stream(s2):\n b.fill_(1.)\n b.record_stream(s2) # dummy record_stream\n del b\n s1.wait_stream(s2)\n g.capture_end()\n torch.cuda.synchronize()\n\n # dummy allocation triggers process_events, Hopefully successfully processes b's end-of-life event.\n c = torch.zeros((3,), device=\"cuda\")\n\n @unittest.skipIf((not TEST_CUDA) or\n TEST_WITH_ROCM or\n int(torch.version.cuda.split(\".\")[0]) < 11, \"CUDA >= 11.0 required for graphs\")\n # If this test is the first in the process to try cudnn rnns with dropout, it'll initialize\n # DropoutState's long-lived internal buffer. 
Calling code perceives this (correct) behavior\n # as a memory leak unless we skip the leak check.\n @skipCUDAMemoryLeakCheckIf(True)\n def test_graph_cudnn_dropout(self):\n # Tests the interaction of cuda graph capture with DropoutState's syncs in ATen/native/cudnn/RNN.cpp.\n # In particular, if user runs a sequence of captured and noncaptured cudnn rnns, DropoutState should\n # avoid syncing noncapturing streams with captured events or vice versa.\n torch.cuda.empty_cache()\n\n model = torch.nn.LSTM(512, 512, 2, dropout=0.5).cuda()\n x = torch.ones(100, 192, 512, device=\"cuda\")\n\n y = model(x)\n\n g = torch.cuda.CUDAGraph()\n s = torch.cuda.Stream()\n s.wait_stream(torch.cuda.current_stream())\n with torch.cuda.stream(s):\n g.capture_begin()\n y = model(x)\n g.capture_end()\n torch.cuda.current_stream().wait_stream(s)\n\n y = model(x)\n\n @unittest.skipIf((not TEST_CUDA) or\n TEST_WITH_ROCM or\n int(torch.version.cuda.split(\".\")[0]) < 11, \"CUDA >= 11.0 required for graphs\")\n def test_graph_grad_scaling(self):\n torch.cuda.empty_cache()\n\n scaler = torch.cuda.amp.GradScaler(init_scale=4.)\n g = torch.cuda.CUDAGraph()\n s = torch.cuda.Stream()\n\n weight = torch.ones((100,), device=\"cuda\", requires_grad=True)\n opt = torch.optim.SGD([weight], lr=0.1)\n static_input = torch.ones_like(weight)\n static_grad = torch.ones_like(weight)\n\n # warmup\n s = torch.cuda.Stream()\n s.wait_stream(torch.cuda.current_stream())\n with torch.cuda.stream(s):\n loss = (weight.half() * static_input).sum()\n scaler.scale(loss).backward()\n torch.cuda.current_stream().wait_stream(s)\n\n opt.zero_grad(set_to_none=True)\n\n # capture\n with torch.cuda.graph(g):\n loss = (weight.half() * static_input).sum()\n scaler.scale(loss).backward()\n\n input_vals = [5, 20000, 5, 40000]\n # If the scale gets updated properly, these are the scale, growth tracker,\n # and grad values we expect.\n expected_scales = [4, 2, 2, 1]\n expected_growth_trackers = [1, 0, 1, 0]\n expected_grad_vals = [5 * 4, float(\"inf\"), 5 * 2, float(\"inf\")]\n\n for data, scale, growth_tracker, grad_val in zip(input_vals,\n expected_scales,\n expected_growth_trackers,\n expected_grad_vals):\n static_input.fill_(data)\n g.replay()\n self.assertEqual(weight.grad, torch.full_like(weight.grad, grad_val))\n scaler.step(opt)\n scaler.update()\n self.assertEqual(scaler._scale, scale)\n self.assertEqual(scaler._growth_tracker, growth_tracker)\n\n @unittest.skipIf((not TEST_CUDA) or\n TEST_WITH_ROCM or\n int(torch.version.cuda.split(\".\")[0]) < 11, \"CUDA >= 11.0 required for graphs\")\n def test_graph_make_graphed_callables(self):\n torch.manual_seed(5)\n torch.cuda.manual_seed(5)\n\n N, D_in, H, D_out = 640, 4096, 2048, 1024\n\n models = []\n for _ in range(2):\n model_section1 = torch.nn.Sequential(torch.nn.Linear(D_in, H),\n torch.nn.Dropout(p=0.1)).cuda()\n model_section2 = torch.nn.Sequential(torch.nn.Linear(H, D_out),\n torch.nn.Dropout(p=0.2)).cuda()\n models.append(torch.nn.Sequential(model_section1, model_section2))\n\n model_graphed = models[0]\n model_control = models[1]\n\n model_graphed.load_state_dict(model_control.state_dict())\n\n opt_graphed = torch.optim.SGD(model_graphed.parameters(), lr=0.1)\n opt_control = torch.optim.SGD(model_control.parameters(), lr=0.1)\n\n x = torch.randn(N, D_in, device='cuda')\n h = torch.randn(N, H, device='cuda', requires_grad=True)\n y_pred = torch.randn(N, D_out, device='cuda', requires_grad=True)\n y = torch.randn(N, D_out, device='cuda')\n\n loss_fn_control = torch.nn.functional.mse_loss\n 
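# mse_loss and relu serve as the eager baselines here; torch.cuda.make_graphed_callables below returns\n        # graphed versions captured with the given sample args, so the real inputs fed at replay time must\n        # keep the same shapes (rand_like(x) / rand_like(y) below preserves them).\n        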
relu_control = torch.nn.functional.relu\n\n # This is a good stress test. It graphs four callables: two Modules and two python functions.\n model_graphed[0], model_graphed[1], relu_graphed, loss_fn_graphed = \\\n torch.cuda.make_graphed_callables((model_graphed[0], model_graphed[1], relu_control, loss_fn_control),\n ((x,), (h,), (y_pred,), (y_pred, y)))\n\n real_inputs = [torch.rand_like(x) for _ in range(10)]\n real_targets = [torch.rand_like(y) for _ in range(10)]\n\n for m, opt, relu, loss_fn in zip((model_graphed, model_control),\n (opt_graphed, opt_control),\n (relu_graphed, relu_control),\n (loss_fn_graphed, loss_fn_control)):\n # Resets RNC states before iterations for graphed and ungraphed models,\n # so dropout math should be bitwise identical for both.\n torch.manual_seed(5)\n torch.cuda.manual_seed(5)\n for data, target in zip(real_inputs, real_targets):\n opt.zero_grad(set_to_none=True)\n y_pred = m(data)\n y_pred = relu(y_pred)\n loss = loss_fn(y_pred, target)\n loss.backward()\n opt.step()\n\n for p, pc in zip(model_graphed.parameters(), model_control.parameters()):\n self.assertEqual(p, pc)\n\n # We graphed the models in training mode. Eval should still run ungraphed.\n model_graphed.eval()\n model_control.eval()\n self.assertEqual(model_graphed(real_inputs[0]), model_control(real_inputs[0]))\n\n @unittest.skipIf((not TEST_CUDA) or\n TEST_WITH_ROCM or\n int(torch.version.cuda.split(\".\")[0]) < 11, \"CUDA >= 11.0 required for graphs\")\n def test_graph_adam_adamw(self):\n OptClasses = (torch.optim.Adam, torch.optim.AdamW)\n cases = []\n # Needs generalization if we want to extend this test to non-Adam-like optimizers.\n for Class, foreach, amsgrad in product(OptClasses, (False, True), (False, True)):\n cases.append((Class, {\"lr\": 0.1, \"betas\": (0.8, 0.7), \"foreach\": foreach, \"amsgrad\": amsgrad}))\n\n steps_warmup = 3\n steps_train = 2\n\n for OptClass, kwargs in cases:\n for actually_do_graphs in (True, False):\n params = [torch.randn((i + 5, i + 5), device=\"cuda\") for i in range(2)]\n params_control = [p.clone().requires_grad_() for p in params]\n params_graphed = [p.clone().requires_grad_() for p in params]\n\n grads = [[torch.randn_like(p) for p in params] for _ in range(steps_warmup + steps_train)]\n\n # Control (capturable=False)\n\n opt = OptClass(params_control, capturable=False, **kwargs)\n\n for i in range(steps_warmup + steps_train):\n for j, p in enumerate(params_control):\n p.grad = grads[i][j]\n opt.step()\n\n # capturable=True\n\n opt = OptClass(params_graphed, capturable=True, **kwargs)\n\n for i in range(steps_warmup):\n for j, p in enumerate(params_graphed):\n p.grad = grads[i][j]\n opt.step()\n\n if actually_do_graphs:\n g = torch.cuda.CUDAGraph()\n with torch.cuda.graph(g):\n opt.step()\n\n for i in range(steps_train):\n if actually_do_graphs:\n for j, p in enumerate(params_graphed):\n p.grad.copy_(grads[i + steps_warmup][j])\n g.replay()\n else:\n # Passing capturable=True to the constructor and running without graphs should still be\n # numerically correct, even if it's not ideal for performance.\n for j, p in enumerate(params_graphed):\n p.grad = grads[i + steps_warmup][j]\n opt.step()\n\n for p_control, p_graphed in zip(params_control, params_graphed):\n self.assertEqual(p_control, p_graphed)\n\n def test_batch_norm_gather_stats(self):\n input = torch.randn(1, 3, 3, 3, device='cuda')\n mean, invstd = torch.batch_norm_gather_stats(\n input, mean=torch.ones(2, 3, device='cuda'), invstd=torch.ones(2, 3, device='cuda'),\n running_mean=None, 
running_var=None , momentum=.1, eps=1e-5, count=2\n )\n self.assertEqual(mean, torch.ones(3, device='cuda'))\n self.assertEqual(invstd, torch.ones(3, device='cuda'))\n\n @unittest.skipIf(not TEST_MULTIGPU, \"Test needs multiple GPUs\")\n def test_cuda_device_memory_allocated(self):\n from torch.cuda import memory_allocated\n device_count = torch.cuda.device_count()\n current_alloc = [memory_allocated(idx) for idx in range(device_count)]\n x = torch.ones(10, device=\"cuda:0\")\n self.assertTrue(memory_allocated(0) > current_alloc[0])\n self.assertTrue(all(memory_allocated(torch.cuda.device(idx)) == current_alloc[idx] for idx in range(1, device_count)))\n\n def test_matmul_memory_use(self):\n def get_max_used():\n torch.cuda.synchronize()\n val = torch.cuda.max_memory_allocated()\n torch.cuda.reset_peak_memory_stats()\n return val\n\n a = torch.rand(1, 32, 32, device=\"cuda\")\n b = torch.rand(24, 32, 1, device=\"cuda\")\n\n get_max_used()\n\n torch.matmul(a, b)\n\n matmul_mem = get_max_used()\n\n a = a.expand(24, 32, 32)\n torch.matmul(a, b)\n\n matmul_expand_mem = get_max_used()\n\n torch.bmm(a, b)\n\n bmm_mem = get_max_used()\n\n self.assertEqual(matmul_expand_mem, matmul_mem)\n self.assertEqual(bmm_mem, matmul_mem)\n\n @unittest.skipIf(not TEST_WITH_ROCM, \"ROCm-only test\")\n def test_rocm_backward_pass_guard(self):\n # The test exercises a ROCm-specific feature.\n\n class MyFunction(torch.autograd.Function):\n @staticmethod\n def forward(ctx, tensor, constant):\n self.assertFalse(torch._C._rocm_is_backward_pass())\n ctx.constant = constant\n return tensor * constant\n\n @staticmethod\n def backward(ctx, grad_output):\n self.assertTrue(torch._C._rocm_is_backward_pass())\n return grad_output * ctx.constant, None\n\n class MyModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.a = torch.nn.Parameter(torch.randn(()))\n\n def forward(self, x):\n return MyFunction.apply(x, self.a)\n\n model = MyModule()\n criterion = torch.nn.MSELoss(reduction='sum')\n optimizer = torch.optim.SGD(model.parameters(), lr=1e-6)\n\n x = torch.randn(5, 5)\n result = model(x)\n loss = criterion(result, x)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n\nclass TestCudaComm(TestCase):\n def _test_broadcast(self, input):\n if not TEST_MULTIGPU:\n raise unittest.SkipTest(\"only one GPU detected\")\n # test regular\n results = comm.broadcast(input, (0, 1))\n for i, t in enumerate(results):\n self.assertEqual(t.get_device(), i)\n self.assertEqual(t, input)\n if input.is_cuda and input.get_device() == i: # test not copying on same device\n self.assertEqual(t.data_ptr(), input.data_ptr())\n # test out=\n for inplace in [True, False]:\n if inplace:\n outputs = [torch.empty_like(input, device=0), torch.empty_like(input, device=1)]\n else:\n outputs = [input.cuda(0), torch.empty_like(input, device=1)]\n results = comm.broadcast(input, out=outputs)\n for r, o in zip(results, outputs):\n self.assertIs(r, o)\n for i, t in enumerate(results):\n self.assertEqual(t.get_device(), i)\n self.assertEqual(t, input)\n # test error msg\n with self.assertRaisesRegex(RuntimeError, r\"Exactly one of 'devices' and 'out'\"):\n comm.broadcast(input, (0, 1), out=outputs)\n with self.assertRaisesRegex(RuntimeError,\n r\"Expected all output tensors to be CUDA tensors, but output tensor at index 1\"):\n comm.broadcast(input, out=[input.cuda(0), input.cpu()])\n with self.assertRaisesRegex(RuntimeError,\n r\"Expected all output tensors to have same shape as the source .+ at index 1\"):\n comm.broadcast(input, 
out=[input.cuda(0), input.cuda(1).unsqueeze(0)])\n\n def test_broadcast_cpu(self):\n self._test_broadcast(torch.randn(5, 5))\n\n def test_broadcast_gpu(self):\n self._test_broadcast(torch.randn(5, 5).cuda())\n\n def _test_broadcast_coalesced(self, tensors, buffer_size):\n b_tensors = [comm.broadcast(t, (0, 1)) for t in tensors]\n for (_, bt), t in zip(b_tensors, tensors):\n self.assertEqual(bt.get_device(), 1)\n self.assertEqual(bt, t)\n self.assertIsInstance(bt, type(t))\n\n bc_tensors = comm.broadcast_coalesced(tensors, (0, 1), buffer_size=buffer_size)\n bc_tensors_t = list(zip(*bc_tensors))\n self.assertEqual(b_tensors, bc_tensors_t)\n for (_, bt), (_, bct) in zip(b_tensors, bc_tensors_t):\n self.assertEqual(bt.get_device(), bct.get_device())\n self.assertIsInstance(bct, type(bt))\n\n # check that tensors on device[0] are returned as-is\n for out_tensors in (b_tensors, bc_tensors_t):\n for inp_t, (out_t, _) in zip(tensors, out_tensors):\n self.assertIs(inp_t, out_t)\n\n # check that the tensors not on device[0] have different version counters\n # NOTE [ Version Counter in comm.*_coalesced ]\n versions = [t._version for _, t in bc_tensors_t]\n for old_version, (_, t) in zip(versions, bc_tensors_t):\n self.assertEqual(t._version, old_version)\n t.zero_()\n self.assertEqual(t._version, old_version + 1)\n\n @unittest.skipIf(not TEST_MULTIGPU, \"only one GPU detected\")\n # Note: fails sometimes on the CI, passes on dual gfx906\n def test_broadcast_coalesced(self):\n numel = 5\n num_bytes = numel * 8\n tensors = [\n make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 1, 2, 3),\n torch.randn(numel).long().cuda(),\n torch.randn(numel).cuda(),\n make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 10, 2, 3),\n make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 5, 2, 3),\n make_sparse_tensor(torch.cuda.sparse.LongTensor, 7, 3, 3),\n make_sparse_tensor(torch.cuda.sparse.FloatTensor, 2, 2, 3),\n torch.randn(numel).long().cuda(),\n torch.randn(numel).long().cuda(),\n make_sparse_tensor(torch.cuda.sparse.LongTensor, 3, 2, 7),\n torch.randn(numel * 2).int().cuda(), # int is 2x shorter\n torch.randn(numel).cuda(),\n ]\n self._test_broadcast_coalesced(tensors, num_bytes * 5 // 2)\n\n @unittest.skipIf(not TEST_MULTIGPU, \"only one GPU detected\")\n def test_broadcast_coalesced_dense_only(self):\n numel = 5\n num_bytes = numel * 8\n tensors = [\n torch.randn(numel).long().cuda(),\n torch.randn(numel).cuda(),\n torch.randn(numel).long().cuda(),\n torch.randn(numel).long().cuda(),\n torch.randn(numel * 2).int().cuda(), # int is 2x shorter\n torch.randn(numel).cuda(),\n ]\n self._test_broadcast_coalesced(tensors, num_bytes * 5 // 2)\n\n @unittest.skipIf(not TEST_MULTIGPU, \"only one GPU detected\")\n def test_broadcast_coalesced_empty_tensors(self):\n tensors = [\n torch.tensor([]).byte().cuda(),\n torch.randn(5).cuda(),\n torch.randn(5).double().cuda()\n ]\n self._test_broadcast_coalesced(tensors, 256)\n\n @unittest.skipIf(not TEST_MULTIGPU, \"only one GPU detected\")\n def test_reduce_add(self):\n x = torch.randn(5, 5)\n y = torch.randn(5, 5)\n x_cuda = x.cuda(0)\n y_cuda = y.cuda(1)\n result = comm.reduce_add((x_cuda, y_cuda))\n self.assertEqual(result.get_device(), 0)\n self.assertEqual(result.cpu(), x + y)\n\n def _test_reduce_add_coalesced(self, tensors, buffer_size):\n dup_tensors = [tensors, [t.cuda(1) for t in tensors]]\n\n r_tensors = [comm.reduce_add(t) for t in zip(*dup_tensors)]\n for r, t in zip(r_tensors, tensors):\n self.assertEqualTypeString(r, t)\n self.assertEqual(r.coalesce() if r.is_sparse 
else r, t * 2)\n\n rc_tensors = comm.reduce_add_coalesced(dup_tensors, buffer_size=buffer_size)\n self.assertEqual(r_tensors, rc_tensors)\n for r, rc in zip(r_tensors, rc_tensors):\n self.assertEqualTypeString(rc, r)\n\n # Since we have both cuda:0 and cuda:1 inputs, the outputs must be new.\n # We can check that they have different version counters.\n # NOTE [ Version Counter in comm.*_coalesced ]\n versions = [t._version for t in rc_tensors]\n for old_version, t in zip(versions, rc_tensors):\n self.assertEqual(t._version, old_version)\n t.zero_()\n self.assertEqual(t._version, old_version + 1)\n\n @unittest.skipIf(not TEST_MULTIGPU, \"only one GPU detected\")\n def test_reduce_add_coalesced(self):\n numel = 5\n num_bytes = numel * 8\n tensors = [\n make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 1, 2, 3),\n torch.randn(numel).long().cuda(),\n torch.randn(numel).cuda(),\n make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 10, 2, 3),\n make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 5, 2, 3),\n make_sparse_tensor(torch.cuda.sparse.LongTensor, 7, 3, 3),\n make_sparse_tensor(torch.cuda.sparse.FloatTensor, 2, 2, 3),\n torch.randn(numel).long().cuda(),\n torch.randn(numel).long().cuda(),\n make_sparse_tensor(torch.cuda.sparse.LongTensor, 3, 2, 7),\n torch.randn(numel * 2).int().cuda(), # int is 2x shorter\n torch.randn(numel).cuda(),\n ]\n self._test_reduce_add_coalesced(tensors, num_bytes * 5 // 2)\n\n @unittest.skipIf(not TEST_MULTIGPU, \"only one GPU detected\")\n def test_reduce_add_coalesced_dense_only(self):\n numel = 5\n num_bytes = numel * 8\n tensors = [\n torch.randn(numel).long().cuda(),\n torch.randn(numel).cuda(),\n torch.randn(numel).long().cuda(),\n torch.randn(numel).long().cuda(),\n torch.randn(numel * 2).int().cuda(), # int is 2x shorter\n torch.randn(numel).cuda(),\n ]\n self._test_reduce_add_coalesced(tensors, num_bytes * 5 // 2)\n\n def _test_scatter(self, input, chunk_sizes=None, dim=0):\n if not TEST_MULTIGPU:\n raise unittest.SkipTest(\"only one GPU detected\")\n if chunk_sizes is None:\n ref_chunk_sizes = tuple(repeat(input.size(dim) // 2, 2))\n else:\n ref_chunk_sizes = chunk_sizes\n\n # test regular\n result = comm.scatter(input, (0, 1), chunk_sizes, dim)\n self.assertEqual(len(result), 2)\n chunk_start = 0\n for i, r in enumerate(result):\n chunk_end = chunk_start + ref_chunk_sizes[i]\n index = [slice(None, None) for _ in range(input.dim())]\n index[dim] = slice(chunk_start, chunk_end)\n self.assertEqual(r, input[tuple(index)], atol=0, rtol=0)\n chunk_start = chunk_end\n if r.device == input.device:\n self.assertEqual(r.data_ptr(), input.data_ptr()) # for target @ same device, a view should be returned\n\n # test out\n out = [torch.empty_like(t) for t in result]\n result = comm.scatter(input, dim=dim, out=out)\n self.assertEqual(len(result), 2)\n chunk_start = 0\n for i, r in enumerate(result):\n self.assertIs(r, out[i])\n chunk_end = chunk_start + ref_chunk_sizes[i]\n index = [slice(None, None) for _ in range(input.dim())]\n index[dim] = slice(chunk_start, chunk_end)\n self.assertEqual(r, input[tuple(index)], atol=0, rtol=0)\n chunk_start = chunk_end\n\n # test error msg\n if chunk_sizes is not None:\n with self.assertRaisesRegex(RuntimeError, r\"Expected devices and chunk_sizes to be of same length\"):\n comm.scatter(input, [0 for _ in range(len(chunk_sizes) + 1)], dim=dim, chunk_sizes=chunk_sizes)\n with self.assertRaisesRegex(RuntimeError, r\"'devices' must not be specified\"):\n comm.scatter(input, (0, 1), dim=dim, out=out)\n with 
self.assertRaisesRegex(RuntimeError, r\"Expected at least one device to scatter to\"):\n comm.scatter(input, (), dim=dim)\n with self.assertRaisesRegex(RuntimeError, r\"Expected at least one output tensor to scatter to\"):\n comm.scatter(input, dim=dim, out=[])\n with self.assertRaisesRegex(RuntimeError,\n r\"Expected all output tensors to be CUDA tensors, but output tensor at index 0\"):\n comm.scatter(input, dim=dim, out=([out[0].cpu()] + out[1:]))\n with self.assertRaisesRegex(RuntimeError, r\"Output tensor at index 0 has incorrect shape\"):\n comm.scatter(input, dim=dim, out=([out[0].unsqueeze(0)] + out[1:]))\n with self.assertRaisesRegex(RuntimeError, r\"Total size for output tensors along scatter dim \\d+ does not match\"):\n index = [slice(None, None) for _ in range(input.dim())]\n index[dim] = slice(1, None)\n comm.scatter(input, dim=dim, out=([out[0][tuple(index)]] + out[1:]))\n\n def test_scatter_cpu(self):\n self._test_scatter(torch.randn(4, 4), dim=0)\n\n def test_scatter_cpu_dim(self):\n self._test_scatter(torch.randn(4, 4), dim=1)\n\n def test_scatter_cpu_neg_dim(self):\n self._test_scatter(torch.randn(4, 4), dim=-2)\n\n def test_scatter_cpu_sizes(self):\n self._test_scatter(torch.randn(6, 4), chunk_sizes=(2, 4))\n\n def test_scatter_gpu(self):\n self._test_scatter(torch.randn(4, 4).cuda(), dim=0)\n\n def test_scatter_gpu_dim(self):\n self._test_scatter(torch.randn(4, 4).cuda(), dim=1)\n\n def test_scatter_gpu_neg_dim(self):\n self._test_scatter(torch.randn(4, 4).cuda(), dim=-2)\n\n def test_scatter_gpu_sizes(self):\n self._test_scatter(torch.randn(6, 4).cuda(), chunk_sizes=(2, 4))\n\n def _test_gather(self, dim):\n if not TEST_MULTIGPU:\n raise unittest.SkipTest(\"only one GPU detected\")\n x = torch.randn(2, 5, device=0)\n y = torch.randn(2, 5, device=1)\n expected_size = list(x.size())\n expected_size[dim] += y.size(dim)\n expected_size = torch.Size(expected_size)\n\n destinations = [None, torch.device('cuda:0'), torch.device('cpu')]\n if torch.cuda.device_count() > 2:\n destinations.append(torch.device('cuda:2'))\n with torch.cuda.device(1):\n for destination in destinations:\n if destination is None:\n expected_device = torch.device('cuda', torch.cuda.current_device())\n else:\n expected_device = destination\n for use_out in [True, False]:\n if use_out:\n out = torch.empty(expected_size, device=expected_device)\n result = comm.gather((x, y), dim, out=out)\n self.assertIs(out, result)\n else:\n result = comm.gather((x, y), dim, destination=destination)\n\n self.assertEqual(result.device, expected_device)\n self.assertEqual(result.size(), expected_size)\n\n index = [slice(None, None), slice(None, None)]\n index[dim] = slice(0, x.size(dim))\n self.assertEqual(result[tuple(index)], x)\n index[dim] = slice(x.size(dim), x.size(dim) + y.size(dim))\n self.assertEqual(result[tuple(index)], y)\n\n # test error msg\n with self.assertRaisesRegex(RuntimeError, r\"'destination' must not be specified\"):\n comm.gather((x, y), dim, destination='cpu', out=torch.empty(expected_size, device='cpu'))\n with self.assertRaisesRegex(RuntimeError, r\"Expected at least one tensor to gather from\"):\n comm.gather(())\n with self.assertRaisesRegex(RuntimeError, r\"Expected all input tensors to be CUDA tensors, \"):\n comm.gather((x.cpu(), y))\n with self.assertRaisesRegex(RuntimeError, r\"Expected all input tensors to have the same number of dimensions\"):\n comm.gather((x, y.unsqueeze(0)))\n with self.assertRaisesRegex(RuntimeError, r\"Input tensor at index 1 has invalid shape\"):\n if dim in [0, 
-2]:\n comm.gather((x, y[:, 1:]), dim=dim)\n elif dim in [1, -1]:\n comm.gather((x, y[1:, :]), dim=dim)\n\n def test_gather(self):\n self._test_gather(0)\n\n def test_gather_dim(self):\n self._test_gather(1)\n\n def test_gather_neg_dim(self):\n self._test_gather(-1)\n\n @unittest.skipIf(not TEST_MULTIGPU, \"only one GPU detected\")\n def test_memory_format_scatter_gather(self):\n nhwc = torch.randn((10, 3, 32, 32), device='cpu').contiguous(memory_format=torch.channels_last)\n results = torch.cuda.comm.scatter(nhwc, (0, 1), None, 0)\n for result in results:\n self.assertFalse(result.is_contiguous())\n self.assertTrue(result.is_contiguous(memory_format=torch.channels_last))\n\n gathered = torch.cuda.comm.gather(results)\n self.assertTrue(gathered.is_contiguous(memory_format=torch.channels_last))\n\n\n def test_matmul_device_mismatch(self):\n cpu = torch.rand((10, 10))\n cuda = cpu.cuda()\n with self.assertRaisesRegex(RuntimeError, \"Expected all tensors to be on the same device\"):\n cpu @ cuda\n with self.assertRaisesRegex(RuntimeError, \"Expected all tensors to be on the same device\"):\n cuda @ cpu\n\n for s, m1, m2 in product((cpu, cuda), repeat=3):\n if s.device == m1.device == m2.device:\n torch.addmm(s, m1, m2)\n else:\n with self.assertRaisesRegex(RuntimeError, \"Expected all tensors to be on the same device\"):\n torch.addmm(s, m1, m2)\n\n @unittest.skipIf(not TEST_MULTIGPU, \"Test needs multiple GPUs\")\n def test_scatter_namedtuple(self):\n # tests ability to scatter namedtuples and retrieve a list where each\n # element is of the expected namedtuple type.\n fields = (\"a\", \"b\")\n TestNamedTupleInput_0 = collections.namedtuple(\"NamedTuple\", fields)\n num_gpus = torch.cuda.device_count()\n a = torch.rand(num_gpus * 2, device=0)\n b = torch.rand(num_gpus * 2, device=0)\n a_tensors_for_gpu = [a[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]\n b_tensors_for_gpu = [b[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]\n\n inp = TestNamedTupleInput_0(a, b)\n target_gpus = [torch.device(i) for i in range(num_gpus)]\n scatter_out = scatter_gather.scatter(inp, target_gpus)\n\n for i, x in enumerate(scatter_out):\n self.assertTrue(isinstance(x, type(inp)))\n self.assertEqual(x._fields, fields)\n expected_a = a_tensors_for_gpu[i]\n expected_b = b_tensors_for_gpu[i]\n self.assertEqual(expected_a, x.a)\n self.assertEqual(expected_b, x.b)\n\n class TestNamedTupleInput_1(NamedTuple):\n a: torch.tensor\n b: torch.tensor\n\n a = torch.rand(num_gpus * 2, device=0)\n b = torch.rand(num_gpus * 2, device=0)\n a_tensors_for_gpu = [a[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]\n b_tensors_for_gpu = [b[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]\n inp = TestNamedTupleInput_1(a, b)\n\n scatter_out = scatter_gather.scatter(inp, target_gpus)\n for i, x in enumerate(scatter_out):\n self.assertTrue(isinstance(x, type(inp)))\n self.assertEqual(x._fields, fields)\n expected_a = a_tensors_for_gpu[i]\n expected_b = b_tensors_for_gpu[i]\n self.assertEqual(expected_a, x.a)\n self.assertEqual(expected_b, x.b)\n\n @unittest.skipIf(not TEST_MULTIGPU, \"Test needs multiple GPUs\")\n def test_gather_namedtuple(self):\n # tests ability to gather a list of namedtuples and return a namedtuple where each\n # element is of the expected tensor type.\n fields = ['a', 'b']\n TestNamedTupleInput_0 = collections.namedtuple('NamedTuple', fields)\n\n num_gpus = torch.cuda.device_count()\n a = torch.rand(num_gpus * 2, device=0)\n b = torch.rand(num_gpus * 2, device=1)\n out1 = TestNamedTupleInput_0(a, b)\n\n a 
= torch.rand(num_gpus * 2, device=1)\n b = torch.rand(num_gpus * 2, device=0)\n out2 = TestNamedTupleInput_0(a, b)\n\n outputs = [out1, out2]\n\n out = scatter_gather.gather(outputs, 'cpu') # test on CPU\n for i, x in enumerate(out):\n self.assertTrue(isinstance(x, type(out2[-1]))) # x must be a tensor\n cat = torch.cat((outputs[0][i].to('cpu'), outputs[1][i].to('cpu')))\n self.assertTrue(torch.equal(x, cat))\n\n out = scatter_gather.gather(outputs, 0) # test on GPU\n for i, x in enumerate(out):\n self.assertTrue(isinstance(x, type(out2[-1])))\n cat = torch.cat((outputs[0][i].to(0), outputs[1][i].to(0)))\n self.assertTrue(torch.equal(x, cat))\n\n class TestNamedTupleInput_1(NamedTuple):\n a: torch.tensor\n b: torch.tensor\n\n a = torch.rand(num_gpus * 2, device=0)\n b = torch.rand(num_gpus * 2, device=1)\n out1 = TestNamedTupleInput_1(a, b)\n\n a = torch.rand(num_gpus * 2, device=1)\n b = torch.rand(num_gpus * 2, device=0)\n out2 = TestNamedTupleInput_1(a, b)\n\n outputs = [out1, out2]\n\n out = scatter_gather.gather(outputs, 0) # test on GPU\n for i, x in enumerate(out):\n self.assertTrue(isinstance(x, type(out2[-1])))\n cat = torch.cat((outputs[0][i].to(0), outputs[1][i].to(0)))\n self.assertTrue(torch.equal(x, cat))\n\n out = scatter_gather.gather(outputs, 'cpu') # test on CPU\n for i, x in enumerate(out):\n self.assertTrue(isinstance(x, type(out2[-1])))\n cat = torch.cat((outputs[0][i].to('cpu'), outputs[1][i].to('cpu')))\n self.assertTrue(torch.equal(x, cat))\n\nif __name__ == '__main__':\n run_tests()\n", "# Owner(s): [\"oncall: quantization\"]\n\nimport numpy as np\nimport math\nimport torch\nimport io\nimport unittest\nfrom copy import deepcopy\nfrom hypothesis import given\nfrom hypothesis import strategies as st\nfrom torch.testing._internal.common_utils import TemporaryFileName\nfrom torch.testing._internal.common_cuda import TEST_CUDA\nfrom torch.testing._internal.common_utils import TestCase, TEST_WITH_ROCM\nimport torch.testing._internal.hypothesis_utils as hu\n\nhu.assert_deadline_disabled()\n\nimport itertools\nimport tempfile\n\nclass Foo(torch.nn.Module):\n def __init__(self):\n super(Foo, self).__init__()\n self.qscheme = torch.per_tensor_symmetric\n\ndef _calculate_dynamic_qparams(X, dtype, reduce_range=False):\n \"\"\"Calculate the dynamic quantization parameters (scale, zero_point)\n according to the min and max element of the tensor\"\"\"\n if isinstance(X, torch.Tensor):\n X = X.cpu().data.numpy()\n if dtype == torch.qint8:\n if reduce_range:\n qmin, qmax = -64, 63\n else:\n qmin, qmax = -128, 127\n else: # dtype == torch.quint8\n if reduce_range:\n qmin, qmax = 0, 127\n else:\n qmin, qmax = 0, 255\n\n min_val = X.min().astype(dtype=np.float32)\n max_val = X.max().astype(dtype=np.float32)\n min_val = min(0.0, min_val)\n max_val = max(0.0, max_val)\n scale = (np.float64(max_val) - min_val) / (qmax - qmin)\n if scale == 0.0 or math.isinf(1.0 / scale):\n scale = np.float64(0.1)\n zero_point = 0\n\n zero_point_from_min = qmin - min_val / float(scale)\n zero_point_from_max = qmax - max_val / float(scale)\n zero_point_from_min_error = abs(qmin) - abs(min_val / float(scale))\n zero_point_from_max_error = abs(qmax) - abs(max_val / float(scale))\n if zero_point_from_min_error < zero_point_from_max_error:\n initial_zero_point = zero_point_from_min\n else:\n initial_zero_point = zero_point_from_max\n nudged_zero_point = 0\n\n if initial_zero_point < qmin:\n nudged_zero_point = qmin\n elif initial_zero_point > qmax:\n nudged_zero_point = qmax\n else:\n nudged_zero_point = 
int(round(initial_zero_point))\n\n return [scale.astype(np.float32), int(nudged_zero_point)]\n\ndef get_supported_device_types():\n return ['cpu', 'cuda'] if torch.cuda.is_available() and not TEST_WITH_ROCM else ['cpu']\n\n# Note we explicitly cast variables to np.float32 in a couple of places to avoid\n# the default casting in Python often resuling in double precision and to make\n# sure we're doing the same numerics as C++ code.\ndef param_search_greedy(x, bit_rate, n_bins=200, ratio=0.16):\n xmin, xmax = np.min(x), np.max(x)\n stepsize = (xmax - xmin) / np.float32(n_bins)\n min_bins = np.float32(n_bins) * (np.float32(1) - np.float32(ratio))\n xq, loss = _compress_uniform_simplified(x, bit_rate, xmin, xmax)\n\n solutions = [] # [(left, right, loss)] # local optima solution\n\n cur_min, cur_max, cur_loss = xmin, xmax, loss\n thr = min_bins * stepsize\n while cur_min + thr < cur_max:\n # move left\n xq, loss1 = _compress_uniform_simplified(\n x, bit_rate, cur_min + stepsize, cur_max\n )\n # move right\n xq, loss2 = _compress_uniform_simplified(\n x, bit_rate, cur_min, cur_max - stepsize\n )\n\n if cur_loss < loss1 and cur_loss < loss2:\n # found a local optima\n solutions.append((cur_min, cur_max, cur_loss))\n if loss1 < loss2:\n cur_min, cur_max, cur_loss = cur_min + stepsize, cur_max, loss1\n else:\n cur_min, cur_max, cur_loss = cur_min, cur_max - stepsize, loss2\n if len(solutions):\n best = solutions[0]\n for solution in solutions:\n if solution[-1] < best[-1]:\n best = solution\n return best[1], best[0] # xmax, xmin\n return xmax, xmin\n\n\ndef _compress_uniform_simplified(X, bit_rate, xmin, xmax, fp16_scale_bias=True):\n # affine transform to put Xq in [0,2**bit_rate - 1]\n # Xq = (2 ** bit_rate - 1) * (Xq - xmin) / data_range\n if fp16_scale_bias:\n xmin = xmin.astype(np.float16).astype(np.float32)\n data_range = xmax - xmin\n scale = np.where(\n data_range == 0, np.float32(1), data_range / np.float32(2 ** bit_rate - 1)\n )\n if fp16_scale_bias:\n scale = scale.astype(np.float16).astype(np.float32)\n inverse_scale = np.float32(1) / scale\n Xq = np.clip(np.round((X - xmin) * inverse_scale), 0, np.float32(2 ** bit_rate - 1))\n Xq = Xq * scale + xmin\n\n # Manually compute loss instead of using np.linalg.norm to use the same\n # accumulation order used by C++ code\n vlen = 8\n loss_v = np.zeros(vlen).astype(np.float32)\n for i in range(len(Xq) // vlen * vlen):\n loss_v[i % vlen] += (X[i] - Xq[i]) * (X[i] - Xq[i])\n loss = np.float32(0)\n for i in range(vlen):\n loss += loss_v[i]\n for i in range(len(Xq) // vlen * vlen, len(Xq)):\n loss += (X[i] - Xq[i]) * (X[i] - Xq[i])\n loss = np.sqrt(loss)\n\n return Xq, loss\n\nclass TestQuantizedTensor(TestCase):\n def test_per_tensor_qtensor_to_memory_format(self):\n n = np.random.randint(1, 10)\n c = np.random.randint(2, 10)\n h = np.random.randint(2, 10)\n w = np.random.randint(2, 10)\n x = torch.rand(n, c, h, w)\n scale = np.random.uniform(0.1, 1.0)\n zero_point = np.random.randint(0.0, 10)\n qints = [torch.qint8, torch.quint8, torch.qint32]\n dtype = qints[np.random.randint(0, len(qints))]\n qx = torch.quantize_per_tensor(x, scale=scale, zero_point=zero_point, dtype=dtype)\n x_nhwc = x.to(memory_format=torch.channels_last)\n qx_nhwc_using_to = qx.to(memory_format=torch.channels_last)\n qx_nhwc_using_contiguous = qx.contiguous(memory_format=torch.channels_last)\n self.assertEqual(qx_nhwc_using_to.stride(), qx_nhwc_using_contiguous.stride())\n self.assertEqual(qx_nhwc_using_to.stride(), x_nhwc.stride())\n\n # When the last two dimensions of a 
4D tensor are both size 1 or if c == 1, we have a degenerate case\n # see https://pytorch.org/tutorials/intermediate/memory_format_tutorial.html\n # In this case, the output of torch.Tensor.to and torch.Tensor.contiguous should not be the same\n x = torch.rand(10, 2, 1, 1)\n qx = torch.quantize_per_tensor(x, scale=scale, zero_point=zero_point, dtype=dtype)\n qx_nhwc_using_to = qx.to(memory_format=torch.channels_last)\n qx_nhwc_using_contiguous = qx.contiguous(memory_format=torch.channels_last)\n self.assertNotEqual(qx_nhwc_using_to.stride(), qx_nhwc_using_contiguous.stride())\n\n x = torch.rand(10, 1, 2, 2)\n qx = torch.quantize_per_tensor(x, scale=scale, zero_point=zero_point, dtype=dtype)\n qx_nhwc_using_to = qx.to(memory_format=torch.channels_last)\n qx_nhwc_using_contiguous = qx.contiguous(memory_format=torch.channels_last)\n self.assertNotEqual(qx_nhwc_using_to.stride(), qx_nhwc_using_contiguous.stride())\n\n def test_per_channel_qtensor_to_memory_format(self):\n n = np.random.randint(1, 10)\n c = np.random.randint(2, 10)\n h = np.random.randint(2, 10)\n w = np.random.randint(2, 10)\n x = torch.rand(n, c, h, w)\n x_nhwc = x.to(memory_format=torch.channels_last)\n scale = np.random.uniform(0.1, 1.0)\n zero_point = np.random.randint(0.0, 10)\n qints = [torch.qint8, torch.quint8, torch.qint32]\n dtype = qints[np.random.randint(0, len(qints))]\n for axis in range(x.ndim):\n scales = torch.rand(x.size(axis)) + 0.00001\n zero_points = torch.randint(low=0, high=10, size=(x.size(axis), ))\n qx = torch.quantize_per_channel(x, scales=scales, zero_points=zero_points, dtype=dtype, axis=axis)\n qx_nhwc_using_to = qx.to(memory_format=torch.channels_last)\n self.assertEqual(qx_nhwc_using_to.stride(), x_nhwc.stride())\n\n @unittest.skipIf(not TEST_CUDA, \"No gpu is available.\")\n def test_qtensor_cuda(self):\n self._test_qtensor(torch.device('cuda'))\n self._test_qtensor_dynamic(torch.device('cuda'))\n\n def test_qtensor_cpu(self):\n self._test_qtensor(torch.device('cpu'))\n self._test_qtensor_dynamic(torch.device('cpu'))\n\n def _test_qtensor_dynamic(self, device):\n # max number of tensor dimensions\n max_tensor_order = 4\n # max size for any tensor dimension\n max_dim_sz = 20\n\n num_dim = np.random.randint(low=1, high=max_tensor_order)\n dims = np.random.randint(low=1, high=max_dim_sz, size=num_dim)\n mat2quant = torch.randn(*dims, dtype=torch.float, device=device)\n reduce_flag = False\n\n for dtype in [torch.qint8, torch.quint8]:\n q_d = torch.quantize_per_tensor_dynamic(mat2quant, dtype, reduce_flag)\n scale, zero_pt = _calculate_dynamic_qparams(mat2quant, dtype, reduce_flag)\n q_s = torch.quantize_per_tensor(mat2quant, scale, zero_pt, dtype)\n\n self.assertEqual(q_d, q_s)\n\n def _test_qtensor(self, device):\n device = str(device)\n num_elements = 10\n scale = 1.0\n zero_point = 2\n for dtype in [torch.qint8, torch.quint8, torch.qint32]:\n r = torch.ones(num_elements, dtype=torch.float, device=device)\n qr = torch.quantize_per_tensor(r, scale, zero_point, dtype)\n self.assertEqual(qr.q_scale(), scale)\n self.assertEqual(qr.q_zero_point(), zero_point)\n self.assertTrue(qr.is_quantized)\n self.assertFalse(r.is_quantized)\n self.assertEqual(qr.qscheme(), torch.per_tensor_affine)\n self.assertTrue(isinstance(qr.qscheme(), torch.qscheme))\n # slicing and int_repr\n int_repr = qr.int_repr()\n for num in int_repr:\n self.assertEqual(num, 3)\n for num in qr[2:].int_repr():\n self.assertEqual(num, 3)\n # dequantize\n rqr = qr.dequantize()\n for i in range(num_elements):\n self.assertEqual(r[i], 
rqr[i])\n # we can also print a qtensor\n empty_r = torch.ones((0, 1), dtype=torch.float, device=device)\n empty_qr = torch.quantize_per_tensor(empty_r, scale, zero_point, dtype)\n\n device_msg = \"\" if device == 'cpu' else \"device='\" + device + \":0', \"\n dtype_msg = str(dtype) + \", \"\n self.assertEqual(' '.join(str(empty_qr).split()),\n \"tensor([], \" + device_msg + \"size=(0, 1), dtype=\" + dtype_msg +\n \"quantization_scheme=torch.per_tensor_affine, \" +\n \"scale=1.0, zero_point=2)\")\n\n def test_qtensor_int_repr(self):\n # to catch edge case when num elements * bit rate < 8, make sure at lease allocate one byte to hold the int repr\n num_elements = 1\n device = torch.device('cpu')\n scale = 1.0\n zero_point = 2\n dtype = torch.quint2x4\n r = torch.ones(num_elements, dtype=torch.float, device=device)\n qr = torch.quantize_per_tensor(r, scale, zero_point, dtype)\n int_repr = qr.int_repr()\n self.assertEqual(int_repr.numel(), 1)\n # Packed one entry looks like 00000011\n self.assertEqual(int_repr[0], 3)\n\n def test_qtensor_sub_byte_aligned_cols(self):\n # Packed 4 entries, each of value 3, look like 00110011, 00110011 for torch.qunit4x2, or 11111111 for torch.quint2x4\n self._test_qtensor_sub_byte(1, 4, torch.quint4x2, 2, [51, 51])\n self._test_qtensor_sub_byte(1, 4, torch.quint2x4, 4, [255])\n\n def test_qtensor_sub_byte_not_aligned_cols(self):\n # Packed 5 entries, each of value 3, look like 00110011, 00110011, 00000011 for torch.qunit4x2,\n # or 11111111, 00000011 for torch.quint2x4\n self._test_qtensor_sub_byte(1, 5, torch.quint4x2, 2, [51, 51, 3])\n self._test_qtensor_sub_byte(1, 5, torch.quint2x4, 4, [255, 3])\n\n def _test_qtensor_sub_byte(self, rows, cols, dtype, elements_per_byte, expected_packed_vals):\n num_elements = rows * cols\n scale = 1.0\n zero_point = 2\n\n r = torch.ones((rows, cols), dtype=torch.float)\n qr = torch.quantize_per_tensor(r, scale, zero_point, dtype)\n self.assertEqual(qr.q_scale(), scale)\n self.assertEqual(qr.q_zero_point(), zero_point)\n self.assertTrue(qr.is_quantized)\n self.assertFalse(r.is_quantized)\n self.assertEqual(qr.storage().size(), rows * math.ceil(cols / elements_per_byte), f\"with {dtype}, {elements_per_byte}\")\n\n int_repr = qr.int_repr()\n self.assertEqual(int_repr.numel(), len(expected_packed_vals))\n for num, expected in zip(int_repr, expected_packed_vals):\n self.assertEqual(num, expected, f\"with dtype={dtype}, elements_per_byte={elements_per_byte}, rows={rows}, cols={cols}\")\n\n # Test tensor creation\n q = torch._empty_affine_quantized([num_elements], scale=scale, zero_point=zero_point, dtype=dtype)\n self.assertEqual(q.storage().size(), math.ceil(num_elements / elements_per_byte), f\"with {dtype}, {elements_per_byte}\")\n\n # Test save/load\n with tempfile.NamedTemporaryFile() as f:\n torch.save(qr, f)\n f.seek(0)\n loaded_q = torch.load(f)\n loaded_int_repr = loaded_q.int_repr()\n self.assertEqual(int_repr, loaded_int_repr)\n\n def test_qtensor_channel_float_assignment(self):\n t1 = torch.rand(2, 3, 5, 5)\n t2 = torch.rand(2, 3, 5, 5)\n for axis in range(t1.ndim):\n scales = np.random.rand(t1.size()[axis])\n zero_points = np.random.randint(low=0, high=50, size=t1.size()[axis])\n for dtype in [torch.qint8, torch.quint8, torch.qint32]:\n qt1 = torch.quantize_per_channel(t1, scales=torch.tensor(scales),\n zero_points=torch.tensor(zero_points), dtype=dtype, axis=axis)\n qt2 = torch.quantize_per_channel(t2, scales=torch.tensor(scales),\n zero_points=torch.tensor(zero_points), dtype=dtype, axis=axis)\n i = 0\n j = 1\n k = 
2\n l = 4\n # scalar assignment verification\n qt1[i][j][k][l] = t2[i][j][k][l]\n self.assertEqual(qt1[i][j][k][l], qt2[i][j][k][l])\n # 1D tensor assignment verification\n qt1[i][j][k][2:l] = t2[i][j][k][2:l]\n self.assertEqual(qt1[i][j][k][2:l], qt2[i][j][k][2:l])\n qt1[i][j][k] = t2[i][j][k]\n self.assertEqual(qt1[i][j][k], qt2[i][j][k])\n # 2D tensor assignment verification\n qt1[i][j][k:] = t2[i][j][k:]\n self.assertEqual(qt1[i][j][k:], qt2[i][j][k:])\n qt1[i][j] = t2[i][j]\n self.assertEqual(qt1[i][j], qt2[i][j])\n # 3D tensor assignment verification\n qt1[i][j:] = t2[i][j:]\n self.assertEqual(qt1[i][j:], qt2[i][j:])\n qt1[i] = t2[i]\n self.assertEqual(qt1[i], qt2[i])\n # 4D tensor assignment verification\n qt1[:1] = t2[:1]\n self.assertEqual(qt1[:1], qt2[:1])\n qt1[:] = t2[:]\n self.assertEqual(qt1[:], qt2[:])\n # non-contiguous case **this should raise an exception**\n with self.assertRaisesRegex(RuntimeError, \"Quantized copy only works with contiguous Tensors\"):\n qt1[:, 0] = t2[:, 0]\n\n def test_qtensor_float_assignment(self):\n # Scalar Tensor\n # item\n scale = 1.0\n zero_point = 2\n devices = [\"cpu\", \"cuda\"] if torch.cuda.is_available() else [\"cpu\"]\n for device in devices:\n r = torch.ones(1, dtype=torch.float).to(device=device)\n for dtype in [torch.qint8, torch.quint8, torch.qint32]:\n qr = torch.quantize_per_tensor(r, scale, zero_point, dtype=dtype)\n self.assertEqual(qr.item(), 1)\n self.assertEqual(qr[0].item(), 1)\n # assignment\n self.assertTrue(qr[0].is_quantized)\n qr[0] = torch.Tensor([11.3]).to(device=device) # float assignment\n self.assertEqual(qr.item(), 11)\n x = torch.ones(1, dtype=torch.float).to(device=device) * 15.3\n # Copying from a float Tensor\n qr[:] = x\n self.assertEqual(qr.item(), 15)\n\n dtype_msg = str(dtype) + \", \"\n if device == \"cuda\":\n self.assertEqual(' '.join(str(qr).split()),\n \"tensor([15.], device='\" + str(qr.device) + \"', size=(1,), dtype=\" + dtype_msg +\n \"quantization_scheme=torch.per_tensor_affine, \" +\n \"scale=1.0, zero_point=2)\")\n else:\n self.assertEqual(' '.join(str(qr).split()),\n \"tensor([15.], size=(1,), dtype=\" + dtype_msg +\n \"quantization_scheme=torch.per_tensor_affine, \" +\n \"scale=1.0, zero_point=2)\")\n\n def test_qtensor_quant_dequant(self):\n scale = 0.02\n zero_point = 2\n for device in get_supported_device_types():\n r = torch.rand(3, 2, 4, 5, dtype=torch.float, device=device) * 4 - 2\n for memory_format in [torch.contiguous_format, torch.channels_last]:\n r = r.contiguous(memory_format=memory_format)\n for dtype in [torch.qint8, torch.quint8, torch.qint32]:\n qr = torch.quantize_per_tensor(r, scale, zero_point, dtype)\n rqr = qr.dequantize()\n self.assertTrue(np.allclose(r.cpu().numpy(), rqr.cpu().numpy(), atol=2 / scale))\n # Also check 5D tensors work.\n for device in get_supported_device_types():\n r = torch.rand(3, 2, 4, 5, 6, dtype=torch.float, device=device) * 4 - 2\n for dtype in [torch.qint8, torch.quint8, torch.qint32]:\n qr = torch.quantize_per_tensor(r, scale, zero_point, dtype)\n rqr = qr.dequantize()\n self.assertTrue(np.allclose(r.cpu().numpy(), rqr.cpu().numpy(), atol=2 / scale))\n\n # legacy constructor/new doesn't support qtensors\n def test_qtensor_legacy_new_failure(self):\n r = torch.rand(3, 2, dtype=torch.float) * 4 - 2\n scale = 0.02\n zero_point = 2\n qr = torch.quantize_per_tensor(r, scale, zero_point, torch.quint8)\n self.assertRaises(RuntimeError, lambda: qr.new(device='cpu'))\n self.assertRaises(RuntimeError, lambda: qr.new(r.storage()))\n 
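# the supported construction paths are the factory functions used throughout this file, e.g.\n        # torch.quantize_per_tensor(...) and torch._empty_affine_quantized(...); every legacy new()\n        # overload, including the remaining ones below, is expected to raise.\n        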
self.assertRaises(RuntimeError, lambda: qr.new(r))\n self.assertRaises(RuntimeError, lambda: qr.new(torch.Size([2, 3])))\n self.assertRaises(RuntimeError, lambda: qr.new([6]))\n\n def test_per_channel_qtensor_creation_cpu(self):\n self._test_per_channel_qtensor_creation(torch.device('cpu'))\n\n def _test_dequantize_fp16(self, device):\n data_orig = torch.randn(1, 2, 4, 4, dtype=torch.float, device=device)\n data_fp16 = data_orig.to(torch.float16)\n data_fp16_dequant = data_fp16.dequantize()\n data_fp16_fp32 = data_fp16.to(torch.float)\n self.assertTrue(data_fp16_dequant.dtype == torch.float)\n self.assertTrue(torch.allclose(data_fp16_fp32, data_fp16_dequant))\n\n def test_dequantize_fp16_cpu(self):\n self._test_dequantize_fp16(torch.device('cpu'))\n\n @unittest.skipIf(not TEST_CUDA, \"No gpu is available.\")\n def test_dequantize_fp16_cuda(self):\n self._test_dequantize_fp16(torch.device('cuda'))\n\n @unittest.skipIf(not TEST_CUDA, \"No gpu is available.\")\n def test_per_channel_qtensor_creation_cuda(self):\n self._test_per_channel_qtensor_creation(torch.device('cuda'))\n\n def _test_per_channel_qtensor_creation(self, device):\n numel = 10\n ch_axis = 0\n scales = torch.rand(numel, device=device)\n zero_points_int = torch.randint(0, 10, size=(numel,), device=device)\n zero_points_float = torch.randn(numel, device=device)\n for dtype, zero_points in itertools.product([torch.qint8, torch.quint8], [zero_points_float, zero_points_int]):\n q = torch._empty_per_channel_affine_quantized(\n [numel], scales=scales, zero_points=zero_points, axis=ch_axis, dtype=dtype, device=device)\n # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095\n self.assertEqualIgnoreType(scales, q.q_per_channel_scales())\n self.assertEqual(zero_points, q.q_per_channel_zero_points())\n self.assertEqual(ch_axis, q.q_per_channel_axis())\n\n # create Tensor from uint8_t Tensor, scales and zero_points\n for zero_points in [zero_points_float, zero_points_int]:\n int_tensor = torch.randint(0, 100, size=(numel,), dtype=torch.uint8, device=device)\n q = torch._make_per_channel_quantized_tensor(int_tensor, scales, zero_points, ch_axis)\n self.assertEqual(int_tensor, q.int_repr())\n # TODO(#38095): Replace assertEqualIgnoreType. 
See issue #38095\n self.assertEqualIgnoreType(scales, q.q_per_channel_scales())\n self.assertEqual(zero_points, q.q_per_channel_zero_points())\n self.assertEqual(ch_axis, q.q_per_channel_axis())\n\n def test_qtensor_creation(self):\n scale = 0.5\n zero_point = 10\n numel = 10\n for device in get_supported_device_types():\n q = torch._empty_affine_quantized([numel], scale=scale, zero_point=zero_point,\n device=device, dtype=torch.quint8)\n self.assertEqual(scale, q.q_scale())\n self.assertEqual(zero_point, q.q_zero_point())\n\n # create Tensor from uint8_t Tensor, scale and zero_point\n int_tensor = torch.randint(0, 100, size=(10,), device=device, dtype=torch.uint8)\n q = torch._make_per_tensor_quantized_tensor(int_tensor, scale, zero_point)\n self.assertEqual(int_tensor, q.int_repr())\n self.assertEqual(scale, q.q_scale())\n self.assertEqual(zero_point, q.q_zero_point())\n\n # create via empty_like\n q = torch._empty_affine_quantized([numel], scale=scale, zero_point=zero_point,\n device=device, dtype=torch.quint8)\n q_el = torch.empty_like(q)\n self.assertEqual(q.q_scale(), q_el.q_scale())\n self.assertEqual(q.q_zero_point(), q_el.q_zero_point())\n self.assertEqual(q.dtype, q_el.dtype)\n\n # create via empty_like but change the dtype (currently not supported)\n with self.assertRaises(RuntimeError):\n torch.empty_like(q, dtype=torch.qint8)\n\n def test_qtensor_dtypes(self):\n r = torch.rand(3, 2, dtype=torch.float) * 4 - 2\n scale = 0.2\n zero_point = 2\n for dtype in [torch.qint8, torch.quint8, torch.qint32, torch.quint4x2, torch.quint2x4]:\n qr = torch.quantize_per_tensor(r, scale, zero_point, dtype)\n rqr = qr.dequantize()\n self.assertTrue(np.allclose(r.numpy(), rqr.numpy(), atol=2 / scale))\n\n @unittest.skipIf(not TEST_CUDA, \"No gpu is available.\")\n def test_per_tensor_to_device(self):\n dtypes = [\n torch.quint8,\n torch.qint8,\n torch.qint32,\n ]\n device = torch.device('cuda')\n for dtype in dtypes:\n r = torch.rand(2, 2, dtype=torch.float) * 10\n scale = torch.rand(2).abs().max().item()\n zero_point = (torch.rand(2) * 10).round().to(torch.long).max().item()\n\n qr = torch.quantize_per_tensor(r, scale, zero_point, dtype)\n qr = qr.to(device)\n qr_cuda = torch.quantize_per_tensor(r.to(device), scale, zero_point, dtype)\n qr_cuda = qr_cuda.to('cpu')\n self.assertEqual('cuda', qr.device.type)\n self.assertEqual('cpu', qr_cuda.device.type)\n\n @unittest.skipIf(not TEST_CUDA, \"No gpu is available.\")\n def test_per_channel_to_device(self):\n dtype_and_zero_types = [\n (torch.quint8, torch.float),\n (torch.qint8, torch.float),\n # (torch.qint32, torch.float) not supported for quantize_per_channel\n (torch.quint8, torch.long),\n (torch.qint8, torch.long),\n (torch.qint32, torch.long),\n ]\n axis = 1\n device = torch.device('cuda')\n for dtype, zero_type in dtype_and_zero_types:\n r = torch.rand(2, 2, dtype=torch.float) * 10\n scales = torch.rand(2).abs()\n zero_points = (torch.rand(2) * 10).round().to(zero_type)\n\n dqr = torch.quantize_per_channel(r, scales, zero_points, axis, dtype)\n dqr = dqr.to(device)\n dqr_cuda = torch.quantize_per_channel(r.to(device), scales.to(\n device), zero_points.to(device), axis, dtype)\n dqr_cuda = dqr_cuda.to('cpu')\n\n self.assertEqual('cuda', dqr.device.type)\n self.assertEqual('cuda', dqr.q_per_channel_scales().device.type)\n self.assertEqual('cuda', dqr.q_per_channel_zero_points().device.type)\n\n self.assertEqual('cpu', dqr_cuda.device.type)\n self.assertEqual('cpu', dqr_cuda.q_per_channel_scales().device.type)\n self.assertEqual('cpu', 
dqr_cuda.q_per_channel_zero_points().device.type)\n\n @unittest.skipIf(not torch.cuda.is_available(), 'CUDA is not available')\n def test_compare_per_tensor_device_numerics(self):\n dtypes = [\n torch.quint8,\n torch.qint8,\n torch.qint32,\n ]\n device = torch.device('cuda')\n for dtype in dtypes:\n r = torch.rand(2, 2) * 10\n r[0, 0] = 2.5\n scale = torch.rand(2).abs().max().item()\n zero_point = (torch.rand(2) * 10).round().to(torch.long).max().item()\n\n qtr = torch.quantize_per_tensor(r, scale, zero_point, dtype)\n dqtr = qtr.dequantize()\n qtr_cuda = torch.quantize_per_tensor(r.to(device), scale, zero_point, dtype)\n dqtr_cuda = qtr_cuda.dequantize()\n self.assertEqual(qtr.int_repr(), qtr_cuda.int_repr())\n self.assertTrue(np.allclose(dqtr, dqtr_cuda.cpu()))\n\n @unittest.skipIf(not torch.cuda.is_available(), 'CUDA is not available')\n def test_compare_per_channel_device_numerics(self):\n dtype_and_zero_types = [\n (torch.quint8, torch.float),\n (torch.qint8, torch.float),\n # (torch.qint32, torch.float) not supported for quantize_per_channel\n (torch.quint8, torch.long),\n (torch.qint8, torch.long),\n (torch.qint32, torch.long),\n ]\n axis = 1\n device = torch.device('cuda')\n for i in range(20):\n for dtype, zero_type in dtype_and_zero_types:\n r = torch.rand(2, 2) * 10\n r[0, 0] = 2.5\n scales = torch.rand(2).abs()\n zero_points = (torch.rand(2) * 10).round().to(zero_type)\n\n qr = torch.quantize_per_channel(r, scales, zero_points, axis, dtype)\n dqr = qr.dequantize()\n qr_cuda = torch.quantize_per_channel(r.to(device), scales.to(\n device), zero_points.to(device), axis, dtype)\n dqr_cuda = qr_cuda.dequantize()\n self.assertEqual(qr.int_repr(), qr_cuda.int_repr())\n self.assertTrue(np.allclose(dqr, dqr_cuda.cpu()))\n\n def _test_quantize_per_channel(self, r, scales, zero_points, axis, float_params):\n\n def _quantize_per_channel_ref_nd(data, scales, zero_points, float_params):\n dims = data.size()\n data = data.view(-1, dims[axis], np.prod(dims[axis + 1:]))\n res = torch.empty_like(data)\n quant_min, quant_max = 0, 255\n for i in range(res.size()[0]):\n for j in range(res.size()[1]):\n for k in range(res.size()[2]):\n if float_params:\n inv_scale = 1.0 / scales[j]\n res[i][j][k] = np.clip(\n np.round(data[i][j][k] * inv_scale + zero_points[j]), quant_min, quant_max)\n else:\n res[i][j][k] = np.clip(\n np.round(data[i][j][k] / scales[j]) + zero_points[j], quant_min, quant_max)\n res = res.view(*dims)\n return res\n\n contig_format = torch.channels_last if r.ndim == 4 else torch.channels_last_3d\n for memory_format in [torch.contiguous_format, contig_format]:\n ref_res = _quantize_per_channel_ref_nd(r, scales, zero_points, float_params)\n r_contig = r.contiguous(memory_format=memory_format)\n qr = torch.quantize_per_channel(r_contig, scales, zero_points, axis, torch.quint8)\n rqr = qr.dequantize()\n self.assertTrue(np.allclose(qr.int_repr(), ref_res))\n self.assertTrue(np.allclose(r.numpy(), rqr.numpy(), atol=2 / np.min(scales.numpy())))\n\n def test_qtensor_quantize_per_channel(self):\n r = torch.rand(3, 2, dtype=torch.float) * 4 - 2\n scales = torch.tensor([0.2, 0.03], dtype=torch.double)\n zero_points = torch.tensor([5, 10], dtype=torch.long)\n axis = 1\n\n def quantize_c(data, scales, zero_points):\n res = torch.empty((3, 2))\n quant_min, quant_max = 0, 255\n for i in range(3):\n for j in range(2):\n res[i][j] = np.clip(np.round(data[i][j] / scales[j]) + zero_points[j], quant_min, quant_max)\n return res\n qr = torch.quantize_per_channel(r, scales, zero_points, axis, 
torch.quint8)\n rqr = qr.dequantize()\n self.assertTrue(np.allclose(qr.int_repr(), quantize_c(r, scales, zero_points)))\n self.assertTrue(np.allclose(r.numpy(), rqr.numpy(), atol=2 / np.min(scales.numpy())))\n\n # Check 4D tensor with 2 different memory formats.\n r = torch.rand(3, 2, 4, 5, dtype=torch.float) * 4 - 2\n scales = torch.tensor([0.2, 0.03], dtype=torch.double)\n zero_points = torch.tensor([5, 10], dtype=torch.long)\n self._test_quantize_per_channel(r, scales, zero_points, 1 , False)\n\n scales = torch.tensor([0.2, 0.03, 0.5], dtype=torch.double)\n zero_points = torch.tensor([5, 10, 7], dtype=torch.long)\n self._test_quantize_per_channel(r, scales, zero_points, 0, False)\n\n # Check 5D tensor.\n r = torch.rand(3, 2, 4, 5, 7, dtype=torch.float) * 4 - 2\n scales = torch.tensor([0.2, 0.03], dtype=torch.double)\n zero_points = torch.tensor([5, 10], dtype=torch.long)\n self._test_quantize_per_channel(r, scales, zero_points, 1, False)\n\n scales = torch.tensor([0.2, 0.03, 0.5], dtype=torch.double)\n zero_points = torch.tensor([5, 10, 7], dtype=torch.long)\n self._test_quantize_per_channel(r, scales, zero_points, 0, False)\n\n def test_quantize_per_channel_float_qparams(self):\n r = torch.rand(3, 2, dtype=torch.float) * 4\n scales = torch.tensor([0.2, 0.03], dtype=torch.float)\n zero_points = torch.tensor([0.1, 0.2], dtype=torch.float)\n axis = 1\n\n # Reference quantize function with FP zero_point.\n def quantize_ref(data, scales, zero_points):\n res = torch.empty((3, 2))\n quant_min, quant_max = 0, 255\n for i in range(3):\n for j in range(2):\n inv_scale = 1.0 / scales[j]\n res[i][j] = np.clip(np.round(data[i][j] * inv_scale + zero_points[j]), quant_min, quant_max)\n return res\n\n qr = torch.quantize_per_channel(r, scales, zero_points, axis, torch.quint8)\n dequant_tensor = qr.dequantize()\n ref = quantize_ref(r, scales, zero_points)\n self.assertTrue(np.allclose(qr.int_repr(), ref))\n self.assertTrue(np.allclose(r.numpy(), dequant_tensor.numpy(), atol=1))\n\n # Check 4D tensor with 2 different memory formats.\n r = torch.rand(3, 2, 4, 5, dtype=torch.float) * 4\n scales = torch.tensor([0.2, 0.03], dtype=torch.float)\n zero_points = torch.tensor([0.1, 0.2], dtype=torch.float)\n self._test_quantize_per_channel(r, scales, zero_points, 1, True)\n\n scales = torch.tensor([0.2, 0.03, 0.5], dtype=torch.float)\n zero_points = torch.tensor([0.1, 0.2, 1.], dtype=torch.float)\n self._test_quantize_per_channel(r, scales, zero_points, 0, True)\n\n # Check 5D tensor.\n r = torch.rand(3, 2, 4, 5, 7, dtype=torch.float) * 4 - 2\n scales = torch.tensor([0.2, 0.03], dtype=torch.float)\n zero_points = torch.tensor([0.1, 0.2], dtype=torch.float)\n self._test_quantize_per_channel(r, scales, zero_points, 1, True)\n\n scales = torch.tensor([0.2, 0.03, 0.5], dtype=torch.float)\n zero_points = torch.tensor([0.1, 0.2, 1.], dtype=torch.float)\n self._test_quantize_per_channel(r, scales, zero_points, 0, True)\n\n def test_quantize_per_channel_sub_byte(self):\n \"\"\" Tests the per channel quantization scheme for 4-bit qtensors.\n The scale and zero point for this have to be in floating point. 
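Two 4-bit values are packed into each byte, so a tensor with N elements needs ceil(N / 2) bytes of underlying storage. 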
\"\"\"\n r = torch.rand(3, 2, dtype=torch.float) * 4\n scales = torch.tensor([0.2, 0.3, 0.1], dtype=torch.float)\n zero_points = torch.tensor([0.1, 0.2, 0.3], dtype=torch.float)\n qr = torch.quantize_per_channel(r, scales, zero_points, 0, torch.quint4x2)\n dequant_tensor = qr.dequantize()\n\n def _get_qranges(bit_width):\n if bit_width == 4:\n return 0, 15\n\n def _quantize_per_channel_sub_byte_ref(data, scales, zero_points, axis, bit_width):\n dims = data.size()\n data = data.view(-1, dims[axis], np.prod(dims[axis + 1:]))\n qtensor_size = math.ceil(data.numel() / 2)\n res = torch.empty(qtensor_size, dtype=torch.uint8)\n elem_per_byte = 8 // bit_width\n quant_min, quant_max = _get_qranges(bit_width)\n for i in range(data.size()[0]):\n for j in range(data.size()[1]):\n for k in range(data.size()[2]):\n inv_scale = 1.0 / scales[j]\n index = i * data.size()[1] * data.size()[2] + j * data.size()[2] + k\n qvalue = np.clip(\n np.round(data[i][j][k] * inv_scale + zero_points[j]), quant_min, quant_max).to(dtype=torch.int)\n res_idx = int(index / elem_per_byte)\n if (index % elem_per_byte == 0):\n res[res_idx] = qvalue\n else:\n res[res_idx] |= (qvalue << ((index % elem_per_byte) * bit_width))\n return res\n\n ref_res = _quantize_per_channel_sub_byte_ref(r, scales, zero_points, 0, 4)\n self.assertTrue(np.allclose(qr.int_repr(), ref_res))\n self.assertTrue(np.allclose(r.numpy(), dequant_tensor.numpy(), atol=1 / np.min(scales.numpy())))\n\n # Check 4D tensor with non-zero axis.\n r = torch.rand(3, 2, 4, 5, dtype=torch.float) * 4\n scales = torch.tensor([0.2, 0.03], dtype=torch.float)\n zero_points = torch.tensor([0.1, 0.2], dtype=torch.float)\n qr = torch.quantize_per_channel(r, scales, zero_points, axis=1, dtype=torch.quint4x2)\n ref_res = _quantize_per_channel_sub_byte_ref(r, scales, zero_points, 1, 4)\n self.assertTrue(np.allclose(qr.int_repr(), ref_res))\n\n def test_qtensor_permute(self):\n scale = 0.02\n zero_point = 1\n for device in get_supported_device_types():\n r = torch.rand(10, 30, 2, 2, device=device, dtype=torch.float) * 4 - 2\n for dtype in [torch.qint8, torch.quint8, torch.qint32]:\n qr = torch.quantize_per_tensor(r, scale, zero_point, dtype=dtype)\n qr = qr.transpose(0, 1)\n rqr = qr.dequantize()\n # compare transpose + dequantized result with orignal transposed result\n self.assertTrue(np.allclose(r.cpu().numpy().transpose([1, 0, 2, 3]), rqr.cpu().numpy(), atol=2 / scale))\n\n qr = torch.quantize_per_tensor(r, scale, zero_point, dtype=dtype)\n qr1 = qr.permute([1, 0, 2, 3])\n qr2 = qr.transpose(0, 1)\n # compare int representation after transformations\n self.assertEqual(qr1.int_repr(), qr2.int_repr())\n self.assertEqual(qr1.q_scale(), qr2.q_scale())\n self.assertEqual(qr1.q_zero_point(), qr2.q_zero_point())\n # compare dequantized result\n self.assertEqual(qr1.dequantize(), qr2.dequantize())\n # compare permuted + dequantized result with original transposed result\n self.assertTrue(np.allclose(qr2.dequantize().cpu().numpy(),\n r.cpu().numpy().transpose([1, 0, 2, 3]), atol=2 / scale))\n # make permuted result contiguous\n self.assertEqual(qr2.contiguous().int_repr(), qr2.int_repr())\n\n # change memory format\n qlast = qr.contiguous(memory_format=torch.channels_last)\n self.assertEqual(qr.stride(), list(reversed(sorted(qr.stride()))))\n self.assertNotEqual(qlast.stride(), list(reversed(sorted(qlast.stride()))))\n self.assertEqual(qr.int_repr(), qlast.int_repr())\n self.assertEqual(qr.q_scale(), qlast.q_scale())\n self.assertEqual(qr.q_zero_point(), qlast.q_zero_point())\n 
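# contiguous(memory_format=torch.channels_last) re-lays out the data but leaves logical values and\n                # quantization params untouched: e.g. the contiguous (10, 30, 2, 2) tensor has strides\n                # (120, 4, 2, 1) while the channels_last copy has (120, 1, 60, 30), so dequantize() must agree.\n                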
self.assertEqual(qlast.dequantize(), qr.dequantize())\n\n # permuting larger tensors\n x = torch.randn(64, 64, device=device)\n qx = torch.quantize_per_tensor(x, 1.0, 0, dtype)\n # should work\n qx.permute([1, 0])\n\n def test_qtensor_per_channel_permute(self):\n for device in get_supported_device_types():\n r = torch.rand(20, 10, 2, 2, dtype=torch.float, device=device) * 4 - 2\n dtype = torch.qint8\n scales = torch.rand(10, device=device) * 0.02 + 0.01\n zero_points = torch.round(torch.rand(10, device=device) * 2 - 1).to(torch.long)\n qr = torch.quantize_per_channel(r, scales, zero_points, 1, dtype)\n\n # we can't reorder the axis\n with self.assertRaises(RuntimeError):\n qr.transpose(0, 1)\n\n # but we can change memory format\n qlast = qr.contiguous(memory_format=torch.channels_last)\n self.assertEqual(qr.stride(), list(reversed(sorted(qr.stride()))))\n self.assertNotEqual(qlast.stride(), list(reversed(sorted(qlast.stride()))))\n self.assertEqual(qr.int_repr(), qlast.int_repr())\n # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095\n self.assertEqualIgnoreType(scales, qlast.q_per_channel_scales())\n self.assertEqual(zero_points, qlast.q_per_channel_zero_points())\n self.assertEqual(1, qlast.q_per_channel_axis())\n self.assertEqual(qlast.dequantize(), qr.dequantize())\n\n def test_qtensor_load_save(self):\n scale = 0.2\n zero_point = 10\n # storage is not accessible on the cuda right now\n device = \"cpu\"\n r = torch.rand(15, 2, dtype=torch.float32, device=device) * 2\n for dtype in [torch.qint8, torch.quint8, torch.qint32]:\n qr = torch.quantize_per_tensor(r, scale, zero_point, dtype=dtype)\n qrv = qr[:, 1]\n with tempfile.NamedTemporaryFile() as f:\n # Serializing and Deserializing Tensor\n torch.save((qr, qrv), f)\n f.seek(0)\n qr2, qrv2 = torch.load(f)\n self.assertEqual(qr, qr2)\n self.assertEqual(qrv, qrv2)\n self.assertEqual(qr2.storage().data_ptr(), qrv2.storage().data_ptr())\n\n def test_qtensor_per_channel_load_save(self):\n r = torch.rand(20, 10, dtype=torch.float) * 4 - 2\n scales = torch.rand(10, dtype=torch.double) * 0.02 + 0.01\n zero_points = torch.round(torch.rand(10) * 20 + 1).to(torch.long)\n # quint32, cuda is not supported yet\n for dtype in [torch.quint8, torch.qint8, torch.quint4x2]:\n if dtype == torch.quint4x2:\n zero_points = torch.ones(10, dtype=torch.float)\n qr = torch.quantize_per_channel(r, scales, zero_points, 1, dtype)\n with tempfile.NamedTemporaryFile() as f:\n # Serializing and Deserializing Tensor\n torch.save(qr, f)\n f.seek(0)\n qr2 = torch.load(f)\n self.assertEqual(qr, qr2)\n\n def test_qtensor_copy(self):\n scale = 0.5\n zero_point = 10\n numel = 10\n for dtype in [torch.qint8, torch.quint8, torch.qint32]:\n for device in get_supported_device_types():\n # copy from same scale and zero_point\n q = torch._empty_affine_quantized([numel], scale=scale,\n zero_point=zero_point, device=device, dtype=dtype)\n q2 = torch._empty_affine_quantized([numel], scale=scale,\n zero_point=zero_point, device=device, dtype=dtype)\n q.copy_(q2)\n self.assertEqual(q.int_repr(), q2.int_repr())\n self.assertEqual(q.q_scale(), q2.q_scale())\n self.assertEqual(q.q_zero_point(), q2.q_zero_point())\n # copying from different scale and zero_point\n new_scale = 3.2\n new_zero_point = 5\n q = torch._empty_affine_quantized([numel], scale=new_scale,\n zero_point=new_zero_point, device=device, dtype=dtype)\n # check original scale and zero_points are set correctly\n self.assertEqual(q.q_scale(), new_scale)\n self.assertEqual(q.q_zero_point(), new_zero_point)\n 
q.copy_(q2)\n # check scale and zero_points has been copied\n self.assertEqual(q, q2)\n # can't copy from quantized tensor to non-quantized tensor\n r = torch.empty([numel], dtype=torch.float)\n q = torch._empty_affine_quantized([numel], scale=scale, zero_point=zero_point, dtype=dtype)\n with self.assertRaisesRegex(RuntimeError, \"please use dequantize\"):\n r.copy_(q)\n # copy from float doesn't support cuda\n device = 'cpu'\n # check copy from non-quantized to quantized\n r = torch.randn([numel], dtype=torch.float, device=device)\n q = torch._empty_affine_quantized([numel], scale=scale, zero_point=zero_point, dtype=dtype, device=device)\n q.copy_(r)\n qr = torch.quantize_per_tensor(r, scale=scale, zero_point=zero_point, dtype=dtype)\n self.assertEqual(q, qr)\n\n def test_torch_qtensor_deepcopy(self):\n # cuda is not supported yet\n device = \"cpu\"\n q_int = torch.randint(0, 100, [3, 5], device=device, dtype=torch.uint8)\n scale, zero_point = 2.0, 3\n q = torch._make_per_tensor_quantized_tensor(q_int, scale=scale, zero_point=zero_point)\n qc = deepcopy(q)\n self.assertEqual(qc, q)\n\n def test_clone(self):\n numel = 10\n scale = 0.5\n zero_point = 10\n\n options = itertools.product(\n get_supported_device_types(),\n [torch.qint8, torch.quint8, torch.qint32])\n\n for device, dtype in options:\n per_tensor_quantized = torch._empty_affine_quantized(\n [numel], scale=scale, zero_point=zero_point,\n device=device, dtype=dtype)\n per_channel_quantized = torch._empty_per_channel_affine_quantized(\n [numel],\n scales=torch.tensor([scale] * numel, device=device),\n zero_points=torch.tensor([zero_point] * numel, device=device),\n axis=0,\n device=device,\n dtype=dtype\n )\n qtensors = [per_tensor_quantized, per_channel_quantized]\n\n for q in qtensors:\n q2 = q.clone()\n # Check to make sure the scale and zero_point has been copied.\n self.assertEqual(q, q2)\n\n def test_qtensor_fill_per_tensor(self):\n numel = 10\n scale = 0.5\n zero_point = 10\n\n ones = torch.ones(numel).to(torch.float)\n\n qtypes = [torch.qint8, torch.quint8, torch.qint32]\n vals2fill = [-1, 1, 2**32] # positive, negative, overflow\n\n devices = get_supported_device_types()\n for qtype, val2fill, device in itertools.product(qtypes, vals2fill, devices):\n ones = ones.to(device)\n q_filled = torch._empty_affine_quantized(\n [numel], scale=scale, zero_point=zero_point, device=device,\n dtype=qtype)\n q_filled.fill_(val2fill)\n # reference tensor for comparing q_filled\n q_ref = torch.quantize_per_tensor(ones * val2fill, scale,\n zero_point, qtype)\n self.assertEqual(q_filled.int_repr(), q_ref.int_repr())\n self.assertEqual(q_filled.dequantize(), q_ref.dequantize())\n # Make sure the scale and zero_point don't change\n self.assertEqual(q_filled.q_scale(), scale)\n self.assertEqual(q_filled.q_zero_point(), zero_point)\n\n # adapted from test_qtensor_fill_per_tensor\n def test_qtensor_fill_per_channel(self):\n dims = [4, 5]\n axis = 0\n # adding a constant to avoid too small of a scale\n scales = torch.rand(dims[axis], dtype=torch.float64) + 0.1\n zero_points = torch.randint(low=0, high=10, size=(dims[axis], ))\n\n ones = torch.ones(dims).to(torch.float)\n\n qtypes = [torch.qint8, torch.quint8, torch.qint32]\n vals2fill = [-1, 1, 2**32] # positive, negative, overflow\n\n devices = get_supported_device_types()\n for qtype, val2fill, device in itertools.product(qtypes, vals2fill, devices):\n scales = scales.to(device)\n zero_points = zero_points.to(device)\n ones = ones.to(device)\n q_filled = 
torch._empty_per_channel_affine_quantized(\n dims, scales=scales, zero_points=zero_points, device=device,\n axis=axis, dtype=qtype)\n q_filled.fill_(val2fill)\n # reference tensor for comparing q_filled\n q_ref = torch.quantize_per_channel(ones * val2fill, scales=scales,\n zero_points=zero_points, axis=axis, dtype=qtype)\n self.assertEqual(q_filled.int_repr(), q_ref.int_repr())\n self.assertEqual(q_filled.dequantize(), q_ref.dequantize())\n # Make sure the scale and zero_point don't change\n self.assertEqual(q_filled.q_per_channel_scales(), scales)\n self.assertEqual(q_filled.q_per_channel_zero_points(), zero_points)\n\n # adapted from test_qtensor_fill_per_tensor\n def test_qtensor_masked_fill(self):\n numel = 10\n scale = 0.5\n zero_point = 10\n\n ones = torch.ones(numel).to(torch.float)\n\n types = [torch.qint8, torch.quint8, torch.qint32]\n fills = [-1, 1, 2**32] # positive, negative, overflow\n\n device = 'cpu'\n ones = ones.to(device)\n for qtype, fill_with in itertools.product(types, fills):\n q_filled = torch._empty_affine_quantized(\n [numel], scale=scale, zero_point=zero_point, device=device,\n dtype=qtype)\n q_filled.fill_(fill_with)\n q_masked_fill = torch._empty_affine_quantized(\n [numel], scale=scale, zero_point=zero_point, device=device,\n dtype=qtype)\n # mask fill the whole tensor, equivalent to calling plain vanilla fill\n mask = torch.tensor(True)\n torch.tensor(fill_with)\n q_masked_fill.masked_fill_(mask, fill_with)\n int_repr = torch.quantize_per_tensor(ones * fill_with, scale,\n zero_point, qtype)\n fill_with = int_repr.dequantize()\n int_repr = int_repr.int_repr()\n\n self.assertEqual(q_filled, q_masked_fill)\n self.assertEqual(q_masked_fill.int_repr(), int_repr)\n self.assertEqual(q_masked_fill.dequantize(), fill_with)\n # Make sure the scale and zero_point don't change\n self.assertEqual(q_masked_fill.q_scale(), scale)\n self.assertEqual(q_masked_fill.q_zero_point(), zero_point)\n\n # the above loop does the same test as test_qtensor_fill\n # now we will check masked_fill for subset of indices\n mask = torch.randint(0, 2, (numel, ))\n mask = mask.bool()\n x = torch.rand(numel)\n qx = torch.quantize_per_tensor(x, scale=scale, zero_point=zero_point, dtype=qtype)\n for qtype, fill_with in itertools.product(types, fills):\n q_masked_fill = qx.clone()\n q_masked_fill.masked_fill_(mask, fill_with)\n ref = qx.clone()\n for i in range(numel):\n if mask[i]:\n # this assignment doesn't end up calling masked_fill, allowing us to compare the different implementations\n ref[i] = fill_with\n\n self.assertEqual(q_masked_fill, ref)\n self.assertEqual(q_masked_fill.int_repr(), ref.int_repr())\n self.assertEqual(q_masked_fill.dequantize(), ref.dequantize())\n\n def test_qtensor_index_put(self):\n n = 10\n m = 10\n x_orig = torch.rand(n, m)\n indices = tuple(torch.tensor([[0, 0], [1, 1], [5, 5], [7, 3], [0, 5], [6, 9], [-1, -1]]).t())\n # for the scalar tensor case, index_put routes to masked_fill\n values_list = [torch.tensor(2.5), torch.rand(len(indices[0])) * 1000]\n scale = 0.5\n zero_point = 10\n types = [torch.qint8, torch.quint8, torch.qint32]\n fills = [-1, 1, 2**32] # positive, negative, overflow\n for qtype, values in itertools.product(types, values_list):\n x_ref = x_orig.clone()\n x_ref[indices] = values.to(dtype=x_ref.dtype)\n qx_ref = torch.quantize_per_tensor(x_ref, scale=scale, zero_point=zero_point, dtype=qtype)\n\n x = x_orig.clone()\n qx = torch.quantize_per_tensor(x, scale=scale, zero_point=zero_point, dtype=qtype)\n qx[indices] = values\n\n 
self.assertEqual(qx_ref, qx)\n\n @unittest.skipIf(not TEST_CUDA, \"No gpu is available.\")\n def test_qtensor_index_select_cuda(self):\n self._test_qtensor_index_select('cuda')\n\n def test_qtensor_index_select_cpu(self):\n self._test_qtensor_index_select('cpu')\n\n def _test_qtensor_index_select(self, device):\n for quant_type in [torch.quint8, torch.qint8]:\n dims = 3\n index = torch.randint(dims, [1]).item()\n selected = torch.randperm(dims)[:2].to(device)\n scale = 1\n zp = 0\n x = torch.randn([3] * dims, device=device) * 10\n\n x_selected = torch.index_select(x, index, selected)\n x_selected_quantized = torch.quantize_per_tensor(x_selected, scale, zp, quant_type)\n\n x_quantized = torch.quantize_per_tensor(x, scale, zp, quant_type)\n x_quantized_selected = torch.index_select(x_quantized, index, selected)\n\n self.assertEqual(x_quantized_selected, x_selected_quantized)\n\n def test_qtensor_view(self):\n scale, zero_point, dtype = 1.0, 2, torch.uint8\n for device in get_supported_device_types():\n q_int = torch.randint(0, 100, [1, 2, 3], device=device, dtype=dtype)\n q = torch._make_per_tensor_quantized_tensor(q_int, scale=scale, zero_point=zero_point)\n q2 = q.view(1, 3, 2)\n self.assertEqual(q.numel(), q2.numel())\n # testing -1\n self.assertEqual(q, q2.view(1, -1, 3))\n\n a_int = torch.randint(0, 100, [1, 2, 3, 4], device=device, dtype=dtype)\n a = torch._make_per_tensor_quantized_tensor(a_int, scale=scale, zero_point=zero_point)\n b = a.transpose(1, 2) # swaps 2nd and 3rd dimension\n c = a.view(1, 3, 2, 4) # does not change tensor layout in memory\n self.assertEqual(b.size(), c.size())\n self.assertEqual(b.q_scale(), c.q_scale())\n self.assertEqual(b.q_zero_point(), c.q_zero_point())\n self.assertNotEqual(b.stride(), c.stride())\n # size is the same but the underlying data is different\n self.assertNotEqual(b.int_repr(), c.int_repr())\n # torch.equal is not supported for the cuda backend\n if device == 'cpu':\n self.assertFalse(torch.equal(b, c))\n\n # a case can't view non-contiguos Tensor\n a_int = torch.randint(0, 100, [1, 2, 3, 4], device=device, dtype=dtype)\n a = torch._make_per_tensor_quantized_tensor(a_int, scale=scale, zero_point=zero_point)\n b = a.transpose(1, 2) # swaps 2nd and 3rd dimension\n err_str = \"view size is not compatible with input tensor's size and stride*\"\n with self.assertRaisesRegex(RuntimeError, err_str):\n b.view(1, 4, 2, 3)\n # view on contiguous tensor is fine\n b.contiguous().view(1, 4, 2, 3)\n\n def test_qtensor_resize(self):\n for device in get_supported_device_types():\n scale, zero_point, dtype = 1.0, 2, torch.uint8\n sizes1 = [1, 2, 3, 4]\n sizes2 = [1 * 2, 3 * 4]\n sizes3 = [1, 2 * 3, 4]\n sizes4 = [1 * 2 * 3 * 4]\n sizes5 = [1, 2, 1, 3, 1, 4]\n\n q1_int = torch.randint(0, 100, sizes1, dtype=dtype, device=device)\n q1 = torch._make_per_tensor_quantized_tensor(q1_int, scale=scale, zero_point=zero_point)\n q2 = q1.resize(*sizes2)\n q3 = q2.resize(*sizes3)\n q4 = q3.resize(*sizes4)\n q5 = q4.resize(*sizes5)\n\n self.assertEqual(q1.numel(), q2.numel())\n self.assertEqual(q1.numel(), q3.numel())\n self.assertEqual(q1.numel(), q4.numel())\n self.assertEqual(q1.numel(), q5.numel())\n\n # Compare original and post-transpose\n a_int = torch.randint(0, 100, sizes1, dtype=dtype, device=device)\n a = torch._make_per_tensor_quantized_tensor(a_int, scale=scale, zero_point=zero_point)\n b = a.transpose(1, 2) # swaps 2nd and 3rd dimension\n c = b.resize(*sizes1) # Change the sizes back to the original\n\n self.assertEqual(a.size(), c.size())\n 
self.assertEqual(b.q_scale(), c.q_scale())\n self.assertEqual(b.q_zero_point(), c.q_zero_point())\n self.assertNotEqual(b.stride(), c.stride())\n # size is the same but the underlying data is different\n self.assertNotEqual(b.int_repr(), c.int_repr())\n # torch.equal is not supported for the cuda backend\n if device == 'cpu':\n self.assertFalse(torch.equal(b, c))\n\n # Throws an error if numel is wrong\n q1_int = torch.randint(0, 100, sizes1, dtype=dtype, device=device)\n q1 = torch._make_per_tensor_quantized_tensor(a_int, scale=scale, zero_point=zero_point)\n err_str = \"requested resize to*\"\n with self.assertRaisesRegex(RuntimeError, err_str):\n q2 = q1.resize(*sizes1[:-1])\n # resize on both contiguous and non-contiguous tensor should be fine\n q3 = q1.resize(*sizes2)\n q4 = q1.contiguous().resize(*sizes2)\n\n def test_qtensor_reshape(self):\n scale, zero_point, dtype = 1.0, 2, torch.uint8\n for device in get_supported_device_types():\n q_int = torch.randint(0, 100, [3, 5], dtype=dtype, device=device)\n q = torch._make_per_tensor_quantized_tensor(q_int, scale=scale, zero_point=zero_point)\n q2 = q.reshape([15])\n self.assertEqual(q.numel(), q2.numel())\n self.assertEqual(q2.size(), [15])\n # testing -1\n self.assertEqual(q, q2.reshape([3, -1]))\n\n a_int = torch.randint(0, 100, [1, 2, 3, 4], dtype=dtype, device=device)\n a = torch._make_per_tensor_quantized_tensor(a_int, scale=scale, zero_point=zero_point)\n b = a.transpose(1, 2) # swaps 2nd and 3rd dimension\n c = a.reshape(1, 3, 2, 4) # does not change tensor layout\n self.assertEqual(b.size(), c.size())\n self.assertEqual(b.q_scale(), c.q_scale())\n self.assertEqual(b.q_zero_point(), c.q_zero_point())\n self.assertNotEqual(b.stride(), c.stride())\n self.assertNotEqual(b.int_repr(), c.int_repr())\n # torch.equal is not supported for the cuda backend\n if device == 'cpu':\n self.assertFalse(torch.equal(b, c))\n\n # we can use reshape for non-contiguous Tensor\n a_int = torch.randint(0, 100, [1, 2, 3, 4], dtype=dtype, device=device)\n a = torch._make_per_tensor_quantized_tensor(a_int, scale=scale, zero_point=zero_point)\n b = a.transpose(1, 2) # swaps 2nd and 3rd dimension\n c = b.reshape(1, 4, 2, 3)\n\n def test_qtensor_unsqueeze(self):\n for device in get_supported_device_types():\n x = torch.randn((1, 3, 4), device=device)\n qx = torch.quantize_per_tensor(x, scale=1.0, zero_point=0, dtype=torch.quint8)\n qy = qx.unsqueeze(2)\n self.assertEqual(qy.size(), (1, 3, 1, 4))\n qy = qy.squeeze(2)\n self.assertEqual(qy.size(), qx.size())\n\n # Per channel qtensor\n scales = torch.tensor([1.0], device=device)\n zero_points = torch.tensor([0], device=device)\n qx = torch.quantize_per_channel(x, scales=scales, zero_points=zero_points, dtype=torch.quint8, axis=0)\n qy = qx.unsqueeze(0)\n self.assertEqual(qy.size(), (1, 1, 3, 4))\n self.assertEqual(qy.q_per_channel_axis(), 1)\n\n qz = qy.squeeze(0)\n self.assertEqual(qz.size(), x.size())\n self.assertEqual(qz.q_per_channel_axis(), 0)\n with self.assertRaisesRegex(RuntimeError, \"Squeeze is only possible on non-axis dimension for Per-Channel\"):\n qz = qy.squeeze(1)\n\n # squeeze without dim specified\n x = torch.randn((3, 1, 2, 1, 4), device=device)\n scales = torch.tensor([1.0, 1.0], device=device)\n zero_points = torch.tensor([0, 0], device=device)\n qx = torch.quantize_per_channel(x, scales=scales, zero_points=zero_points, dtype=torch.quint8, axis=2)\n qz = qx.squeeze()\n self.assertEqual(qz.size(), (3, 2, 4))\n self.assertEqual(qz.q_per_channel_axis(), 1)\n with 
self.assertRaisesRegex(RuntimeError, \"Squeeze is only possible on non-axis dimension for Per-Channel\"):\n qz = qy.squeeze()\n\n def test_repeat(self):\n scale, zero_point, dtype = 1.0, 2, torch.uint8\n for device in get_supported_device_types():\n q_int = torch.randint(0, 100, [3], dtype=dtype, device=device)\n q_int_repeat = q_int.repeat(4, 2)\n q_ref = torch._make_per_tensor_quantized_tensor(q_int_repeat, scale=scale, zero_point=zero_point)\n\n q = torch._make_per_tensor_quantized_tensor(q_int, scale=scale, zero_point=zero_point)\n q_repeat = q.repeat(4, 2)\n self.assertEqual(q_ref, q_repeat)\n\n def test_qscheme_pickle(self):\n f = Foo()\n buf = io.BytesIO()\n torch.save(f, buf)\n\n buf.seek(0)\n f2 = torch.load(buf)\n\n self.assertEqual(f2.qscheme, torch.per_tensor_symmetric)\n\n @given(X=hu.tensor(shapes=hu.array_shapes(min_dims=2, max_dims=4,\n min_side=1, max_side=10),\n qparams=hu.qparams()),\n reduce_range=st.booleans()\n )\n @unittest.skip(\n \"this is broken without changes to any relevant code, \"\n \"we need to remove hypothesis testing in CI\")\n def test_choose_qparams(self, X, reduce_range):\n X, (scale, zero_point, torch_type) = X\n X = torch.from_numpy(X)\n X_scale, X_zp = _calculate_dynamic_qparams(X, torch.quint8, reduce_range=reduce_range)\n qparams = torch._choose_qparams_per_tensor(X, reduce_range)\n np.testing.assert_array_almost_equal(X_scale, qparams[0], decimal=3)\n self.assertEqual(X_zp, qparams[1])\n\n @unittest.skipIf(not torch.cuda.is_available(), 'CUDA is not available')\n def test_cuda_quantization_does_not_pin_memory(self):\n # Context - https://github.com/pytorch/pytorch/issues/41115\n x = torch.randn(3)\n self.assertEqual(x.is_pinned(), False)\n\n q_int = torch.randint(0, 100, [1, 2, 3], device=\"cuda\", dtype=torch.uint8)\n q = torch._make_per_tensor_quantized_tensor(q_int, scale=0.1, zero_point=0)\n\n x = torch.randn(3)\n self.assertEqual(x.is_pinned(), False)\n\n # There's no way to actually pin the memory of a quantized tensor\n @unittest.skipIf(not torch.cuda.is_available(), 'CUDA is not available')\n def test_quant_pin_memory(self):\n x = torch.randn(3).pin_memory()\n self.assertEqual(x.is_pinned(), True)\n x_q = torch.quantize_per_tensor(x, 1, 0, torch.quint8)\n self.assertEqual(x_q.is_pinned(), False)\n x_pin = torch.empty_quantized([3], x_q, pin_memory=True, dtype=torch.quint8)\n self.assertEqual(x_pin.is_pinned(), False)\n self.assertRaises(RuntimeError, lambda: x_q.pin_memory())\n\n def test_fp16_saturate_op(self):\n x = torch.ones(5, 5, dtype=torch.float32) * 65532\n x[0] = torch.ones(5) * -65532\n # range of fp16 value is [-65504, + 65504]\n ref = torch.ones(5, 5) * 65504\n ref[0] = torch.ones(5) * -65504\n y = torch._saturate_weight_to_fp16(x)\n self.assertEqual(y, ref)\n\n def test_choose_qparams_optimized(self):\n for bit_width in [4, 2]:\n x = torch.randn(64, dtype=torch.float)\n y = torch.choose_qparams_optimized(x, numel=64, n_bins=200, ratio=0.16, bit_width=bit_width)\n ref = param_search_greedy(x.numpy(), bit_rate=bit_width)\n self.assertEqual(y[0].numpy(), ref[0])\n self.assertEqual(y[1].numpy(), ref[1])\n\n def _test_pickle_checkpoint_qtensor(self, device):\n with TemporaryFileName() as fname:\n class M(torch.jit.ScriptModule):\n __constants__ = ['fname']\n\n def __init__(self):\n super(M, self).__init__()\n self.fname = fname\n\n @torch.jit.script_method\n def forward(self, x, y):\n torch.save((x, y), self.fname)\n return y\n\n q = torch.quantize_per_tensor(\n torch.rand(2, 3, dtype=torch.float), scale=0.1, zero_point=10, 
dtype=torch.quint8).to(device)\n qc = torch.quantize_per_channel(\n torch.rand(2, 3, dtype=torch.float),\n scales=torch.tensor([0.1, 0.5, 0.01]),\n zero_points=torch.tensor([10, 0, 20]),\n axis=1, dtype=torch.quint8).to(device)\n m = M()\n m(q, qc)\n with open(fname, \"rb\") as handle:\n loaded_q, loaded_qc = torch.load(fname)\n self.assertEqual(loaded_q, q)\n self.assertEqual(loaded_qc, qc)\n\n def test_pickle_checkpoint_qtensor(self):\n self._test_pickle_checkpoint_qtensor('cpu')\n\n def test_jit_serialization(self):\n class SimpleQTensor(torch.jit.ScriptModule):\n def __init__(self, per_channel):\n super(SimpleQTensor, self).__init__()\n x = torch.rand(5, 5).float()\n if not per_channel:\n x_q = torch.quantize_per_tensor(x, 0.2, 10, torch.quint8)\n else:\n s = torch.rand(5, dtype=torch.float64) + 0.1\n zp = torch.randint(5, 15, (5,))\n x_q = torch.quantize_per_channel(x, s, zp, 1, torch.quint8)\n self.register_buffer('x', x_q)\n\n @torch.jit.script_method\n def forward(self):\n return self.x\n\n for per_channel in [False, True]:\n model = SimpleQTensor(per_channel)\n buffer = io.BytesIO()\n torch.jit.save(model, buffer)\n buffer.seek(0)\n model_loaded = torch.jit.load(buffer)\n self.assertEqual(model_loaded(), model())\n\n def test_bfp16_quantize(self):\n X = torch.randn(5 , 10)\n quantized_X = X.to(torch.bfloat16)\n dedequantized_X = quantized_X.to(torch.float32)\n torch.testing.assert_allclose(X, dedequantized_X, rtol=1e-4, atol=5e-3)\n\nif __name__ == '__main__':\n raise RuntimeError(\"This test file is not meant to be run directly, use:\\n\\n\"\n \"\\tpython test/test_quantization.py TESTNAME\\n\\n\"\n \"instead.\")\n" ]
[ [ "torch.cuda.graph", "torch.cat", "torch.cuda.amp.autocast", "torch.triu_indices", "torch.cuda.comm.broadcast", "torch.cuda.device", "torch.bmm", "torch.cuda.default_stream", "torch.cuda.comm.gather", "torch.load", "torch.nn.parallel.scatter_gather.gather", "torch.get_float32_matmul_precision", "torch._amp_foreach_non_finite_check_and_unscale_", "torch.utils.checkpoint.checkpoint_sequential", "torch.tensor", "torch.cuda.memory_snapshot", "torch.nn.functional.conv2d", "torch.testing._internal.common_methods_invocations._compare_trilu_indices", "torch._amp_update_scale_", "torch.save", "torch.cuda.device_count", "torch.full_like", "torch.autograd.Variable._execution_engine.queue_callback", "torch.cuda.empty_cache", "torch.testing._internal.common_utils.freeze_rng_state", "torch.cuda.CUDAGraph", "torch.cuda.nvtx.range_end", "torch.backends.cudnn.flags", "torch.pow", "torch.cuda.synchronize", "torch.cuda.ByteTensor", "torch.nn.MSELoss", "torch.cuda.get_device_capability", "torch.cuda.nvtx.range_start", "torch.testing._internal.common_utils.run_tests", "torch.nn.Linear", "torch.cuda.current_device", "torch._C._get_cublas_allow_tf32", "torch.multinomial", "torch.autocast", "torch.sum", "torch.Size", "torch.cuda.ExternalStream", "torch.autograd.backward", "torch.cuda.current_stream", "torch.set_float32_matmul_precision", "torch.IntTensor", "torch.cuda.amp.custom_fwd", "torch.randint", "torch.cuda.graph_pool_handle", "torch.randn_like", "torch.equal", "torch.cuda.Stream", "torch.zeros", "torch.device", "torch.cuda.get_rng_state_all", "torch.cuda.get_device_name", "torch.cuda.is_bf16_supported", "torch.cuda.is_current_stream_capturing", "torch.cuda.nvtx.mark", "torch.cuda.amp.GradScaler", "torch.matmul", "torch.cuda.mem_get_info", "torch.rand", "torch.rand_like", "torch.cuda._sleep", "torch.cuda.nvtx.range_pop", "torch.cuda.manual_seed", "torch.stack", "torch.cuda._utils._get_device_index", "torch.nn.parallel.scatter_gather.scatter", "torch.cuda.FloatTensor", "torch.cuda.stream", "torch.sparse_coo_tensor", "torch.cuda.caching_allocator_delete", "torch.norm", "torch.FloatTensor", "torch.tril_indices", "torch.manual_seed", "torch.autograd.grad", "torch.cuda.initial_seed", "torch.testing._internal.common_methods_invocations._compare_large_trilu_indices", "torch.cuda.comm.scatter", "torch.jit.script", "torch.cartesian_prod", "torch.nn.Sequential", "torch.cuda.max_memory_allocated", "torch._C._rocm_is_backward_pass", "torch.cuda.nvtx.range_push", "torch.cuda.IntStorage", "torch.testing._internal.common_utils.skipCUDAMemoryLeakCheckIf", "torch.cuda.cudart", "torch.cuda.reset_peak_memory_stats", "torch.no_grad", "torch.cuda.memory_allocated", "torch.cuda.max_memory_reserved", "torch.cuda.Stream.priority_range", "torch.cuda.comm.broadcast_coalesced", "torch.cuda.check_error", "torch.nn.LSTM", "torch.cuda.Event", "torch.cuda.make_graphed_callables", "torch._C._get_cublas_allow_fp16_reduced_precision_reduction", "torch.randperm", "torch.ones", "torch.cuda.caching_allocator_alloc", "torch.cuda.is_available", "torch.LongTensor", "torch.testing._internal.common_utils.get_cycles_per_ms", "torch.cuda.memory_stats", "torch.nn.functional.avg_pool2d", "torch.ByteTensor", "torch.cuda.comm.reduce_add", "torch.empty", "torch.cuda.comm.reduce_add_coalesced", "torch.cuda.set_rng_state_all", "torch.cuda.LongTensor", "torch.optim.SGD", "torch.mm", "torch.cuda.set_device", "torch.full", "torch.cuda.memory_reserved", "torch.addmm", "torch.nn.Dropout", "torch.arange", "torch.cuda.get_device_properties", 
"torch.testing._internal.common_utils.skipCUDANonDefaultStreamIf", "torch.is_autocast_enabled", "torch.ones_like", "torch.version.cuda.split", "torch.cuda.set_per_process_memory_fraction", "torch.randn", "torch.empty_like" ], [ "torch._empty_per_channel_affine_quantized", "torch._saturate_weight_to_fp16", "torch._make_per_channel_quantized_tensor", "torch.randperm", "numpy.min", "torch.ones", "torch.testing._internal.hypothesis_utils.array_shapes", "torch._empty_affine_quantized", "torch.cuda.is_available", "torch.load", "torch.allclose", "torch.testing._internal.hypothesis_utils.assert_deadline_disabled", "numpy.max", "torch.Size", "torch.testing._internal.common_utils.TemporaryFileName", "torch.quantize_per_channel", "numpy.testing.assert_array_almost_equal", "torch.jit.load", "torch.randint", "torch._choose_qparams_per_tensor", "numpy.random.randint", "torch.tensor", "numpy.sqrt", "torch.jit.save", "numpy.prod", "torch.index_select", "torch.empty", "torch.equal", "torch.Tensor", "torch.device", "numpy.zeros", "numpy.round", "torch.save", "torch.quantize_per_tensor_dynamic", "numpy.float64", "torch.randn", "numpy.float32", "torch.testing._internal.hypothesis_utils.qparams", "torch.testing.assert_allclose", "torch.empty_quantized", "torch.rand", "torch.from_numpy", "numpy.random.uniform", "torch.choose_qparams_optimized", "torch._make_per_tensor_quantized_tensor", "torch.quantize_per_tensor", "torch.empty_like" ] ]
consbio/clover
[ "f8da46fa67240cc3f1ef0460e3a249e57e5f7224" ]
[ "trefoil/netcdf/variable.py" ]
[ "from bisect import bisect_left, bisect_right\n\nimport numpy\nfrom datetime import date, datetime\nimport pytz\nfrom affine import Affine\nfrom netCDF4 import num2date, date2num, Variable\nfrom pyproj import Proj\nimport six\n\nfrom trefoil.geometry.bbox import BBox\nfrom trefoil.utilities.proj import is_latlong\nfrom trefoil.utilities.window import Window\nfrom trefoil.netcdf.utilities import get_ncattrs\nfrom trefoil.netcdf.crs import PROJ4_GEOGRAPHIC\n\n\nclass CoordinateVariable(object):\n \"\"\"\n Wraps a one-dimensional variable with the same name as a dimension\n (http://www.unidata.ucar.edu/software/netcdf/docs/BestPractices.html).\n \"\"\"\n\n def __init__(self, input):\n \"\"\"\n A Coordinate Variable can be created from a netCDF dataset variable or a numpy array.\n\n :param input: variable in a netCDF dataset or a numpy array\n \"\"\"\n\n self._ncattrs = dict()\n\n if isinstance(input, Variable):\n self.values = input[:]\n for attr in input.ncattrs():\n if not attr == '_FillValue':\n self._ncattrs[attr] = input.getncattr(attr)\n else:\n self.values = input[:].copy()\n\n def __len__(self):\n return self.values.shape[0]\n\n def is_ascending_order(self):\n return self.values[0] < self.values[1]\n\n def indices_for_range(self, start, stop):\n \"\"\"\n Returns the indices in this variable for the start and stop values\n :param start: start value\n :param stop: stop value\n :return: start and stop indices\n \"\"\"\n\n assert stop > start\n\n if start > self.values.max():\n return self.values.size - 1, self.values.size - 1\n elif stop < self.values.min():\n return 0, 0\n\n if self.is_ascending_order():\n start_index = min(self.values.searchsorted(start), self.values.size - 1)\n\n # Need to move 1 index to the left unless we matched an index closely (allowing for precision errors)\n if start_index > 0 and not numpy.isclose(start, self.values[start_index]):\n start_index -= 1\n\n stop_index = min(self.values.searchsorted(stop), self.values.size - 1)\n if not numpy.isclose(stop, self.values[stop_index]) and stop < self.values[stop_index]:\n stop_index -= 1\n\n return start_index, stop_index\n else:\n # If values are not ascending, they need to be reversed\n temp = self.values[::-1]\n start_index = min(temp.searchsorted(start), temp.size - 1)\n\n if start_index > 0 and not numpy.isclose(start, temp[start_index]):\n start_index -= 1\n\n stop_index = min(temp.searchsorted(stop), temp.size - 1)\n if not numpy.isclose(stop, temp[stop_index]) and stop < temp[stop_index]:\n stop_index -= 1\n\n size = self.values.size - 1\n return max(size - stop_index, 0), max(size - start_index, 0)\n\n def slice_by_range(self, start, stop):\n \"\"\"\n Slices a subset of values between start and stop values.\n\n :param start: start value\n :param stop: stop value\n :return: sliced view of self.values. Make sure to copy this before altering it!\n \"\"\"\n assert stop > start\n if start >= self.values.max() or stop <= self.values.min():\n return numpy.array([])\n\n start_index, stop_index = self.indices_for_range(start, stop)\n return self.values[start_index:stop_index+1]\n\n def add_to_dataset(self, dataset, name, is_unlimited=False, **kwargs):\n \"\"\"\n :param dataset: name of the dataset to add the dimension and variable to\n :param name: name of the dimension and variable\n :param is_unlimited: set the dimension as unlimited\n :param kwargs: creation options for output variable. 
Should be limited to compression info.\n :return: the newly created variable\n \"\"\"\n\n if name in dataset.variables:\n raise Exception(\"Variable already exists in dataset\")\n\n if name in dataset.dimensions:\n dimension = dataset.dimensions[name]\n if is_unlimited != dimension.isunlimited() or len(self) != len(dimension):\n raise Exception(\"Dimension already exists in dataset, but has different size\")\n else:\n dimension_length = None if is_unlimited else len(self)\n dataset.createDimension(name, dimension_length)\n\n if 'fill_value' not in kwargs:\n fill_value = getattr(self.values, 'fill_value', None)\n if fill_value is not None:\n kwargs['fill_value'] = fill_value\n\n if self.values.dtype.char == 'S':\n variable = dataset.createVariable(name, 'string', (name,), **kwargs)\n # Have to write each index at a time, and cast to string. Not optimal but seems to be the only way allowed by netCDF4.\n for index, value in enumerate(self.values):\n variable[index] = str(value)\n else:\n variable = dataset.createVariable(name, self.values.dtype, (name,), **kwargs)\n variable[:] = self.values[:]\n\n for att, value in six.iteritems(self._ncattrs):\n variable.setncattr(att, value)\n\n return variable\n\n\nclass BoundsCoordinateVariable(CoordinateVariable):\n \"\"\"\n Wraps a two-dimensional variable representing bounds. Shape is always (N, 2).\n\n Useful for representing time ranges, etc.\n\n Example: http://www.cgd.ucar.edu/cms/eaton/netcdf/CF-20010629.htm#grid_ex4\n \"\"\"\n\n def is_ascending_order(self):\n return self.values[0][0] < self.values[1][0]\n\n def indices_for_range(self, start, stop):\n raise NotImplementedError(\"Not yet implemented\")\n\n def add_to_dataset(self, dataset, name, is_unlimited=False, **kwargs):\n \"\"\"\n :param dataset: name of the dataset to add the dimension and variable to\n :param name: name of the dimension and variable. Note: a new dimension for the bounds '_bnds' will be created.\n :param is_unlimited: set the dimension as unlimited\n :param kwargs: creation options for output variable. 
Should be limited to compression info.\n :return: the newly created variable\n \"\"\"\n\n if name in dataset.variables:\n raise Exception(\"Variable already exists in dataset\")\n\n bounds_dimension_name = '_bnds'\n if bounds_dimension_name in dataset.dimensions:\n if len(dataset.dimensions[bounds_dimension_name]) != 2:\n raise ValueError('Bounds dimension _bnds is already present in dataset and not of size 2')\n else:\n dataset.createDimension(bounds_dimension_name, 2)\n\n if name in dataset.dimensions:\n dimension = dataset.dimensions[name]\n if is_unlimited != dimension.isunlimited() or len(self) != len(dimension):\n raise Exception(\"Dimension already exists in dataset, but has different size\")\n else:\n dimension_length = None if is_unlimited else len(self)\n dataset.createDimension(name, dimension_length)\n\n fill_value = getattr(self.values, 'fill_value', None)\n if fill_value is not None:\n kwargs['fill_value'] = fill_value\n\n variable = dataset.createVariable(name, self.values.dtype, (name,bounds_dimension_name), **kwargs)\n variable[:] = self.values[:]\n\n for att, value in six.iteritems(self._ncattrs):\n variable.setncattr(att, value)\n\n return variable\n\n\nclass SpatialCoordinateVariable(CoordinateVariable):\n \"\"\"\n Abstracts properties for a given spatial dimension (e.g., longitude).\n Assumes that pixels follow a regular grid, and that dimension values represent centroids\n \"\"\"\n\n @property\n def min(self):\n return self.values.min()\n\n @property\n def max(self):\n return self.values.max()\n\n @property\n def pixel_size(self):\n return float(abs(self.values[1] - self.values[0]))\n\n @property\n def edges(self):\n \"\"\"\n Return coordinates of pixel edges from the min to the max\n \"\"\"\n\n pixel_size = self.pixel_size\n\n if self.is_ascending_order():\n temp = numpy.append(self.values, self.values[-1] + pixel_size)\n else:\n temp = numpy.append(self.values[0] + pixel_size, self.values)\n return temp - (pixel_size / 2.0)\n\n def get_offset_for_subset(self, coordinate_variable):\n \"\"\"\n Find the offset index of coordinate_variable within this coordinate variable.\n This assumes that coordinate_variable is a subset of this one, and that coordinates and projections match.\n \"\"\"\n\n assert len(coordinate_variable) <= self.values.shape[0]\n #TODO: make this a fuzzy match within a certain decimal precision\n return list(self.values).index(coordinate_variable.values[0])\n\n\nclass SpatialCoordinateVariables(object):\n \"\"\"\n Encapsulates x and y coordinates with projection information\n \"\"\"\n\n def __init__(self, x, y, projection):\n assert isinstance(x, SpatialCoordinateVariable)\n assert isinstance(y, SpatialCoordinateVariable)\n if projection is not None:\n assert isinstance(projection, Proj)\n\n self.x = x\n self.y = y\n self.projection = projection\n\n @property\n def shape(self):\n return (len(self.y), len(self.x))\n\n @property\n def bbox(self):\n\n half_x_pixel_size = self.x.pixel_size / 2.0\n half_y_pixel_size = self.y.pixel_size / 2.0\n\n return BBox(\n (\n self.x.min - half_x_pixel_size,\n self.y.min - half_y_pixel_size,\n self.x.max + half_x_pixel_size,\n self.y.max + half_y_pixel_size\n ),\n self.projection\n )\n\n @property\n def affine(self):\n bbox = self.bbox\n\n return Affine(\n self.x.pixel_size,\n 0, # Not used\n bbox.xmin,\n 0, # Not used\n self.y.values[1] - self.y.values[0], # Negative if y is descending\n bbox.ymin if self.y.is_ascending_order() else bbox.ymax\n )\n\n @classmethod\n def from_dataset(cls, dataset, x_name='longitude', 
y_name='latitude', projection=None):\n \"\"\"\n Return a SpatialCoordinateVariables object for a dataset\n\n :param dataset: netCDF dataset\n :param x_varname: name of the x dimension\n :param y_varname: name of the y dimension\n :param projection: pyproj Proj object\n :return: CoordinateVariables instance\n \"\"\"\n\n #TODO: detect x and y names, and projection\n if projection is None and x_name == 'longitude':\n projection = Proj(PROJ4_GEOGRAPHIC)\n\n\n return cls(\n SpatialCoordinateVariable(dataset.variables[x_name]),\n SpatialCoordinateVariable(dataset.variables[y_name]),\n projection\n )\n\n @staticmethod\n def from_bbox(bbox, x_size, y_size, dtype='float32', y_ascending=False):\n \"\"\"\n Return a SpatialCoordinateVariables object from BBox and dimensions\n\n :param bbox: instance of a BBox, must have a projection\n :param x_size: number of pixels in x dimension (width or number of columns)\n :param y_size: number of pixels in y dimension (height or number of rows)\n :param dtype: data type (string or numpy dtype object) of values\n :param y_ascending: by default, y values are anchored from top left and are descending; if True, this inverts that order\n :return: CoordinateVariables instance, assuming that rows are ordered in decreasing value\n \"\"\"\n\n assert isinstance(bbox, BBox)\n if not bbox.projection:\n raise ValueError('bbox projection must be defined')\n\n x_pixel_size = (bbox.xmax - bbox.xmin) / float(x_size)\n y_pixel_size = (bbox.ymax - bbox.ymin) / float(y_size)\n\n x_arr = numpy.arange(x_size, dtype=dtype)\n x_arr *= x_pixel_size\n x_arr += (bbox.xmin + x_pixel_size / 2.0)\n\n if y_ascending:\n y_arr = numpy.arange(y_size, dtype=dtype)\n y_arr *= y_pixel_size\n y_arr += (bbox.ymin + y_pixel_size / 2.0)\n\n else:\n y_arr = numpy.arange(0, -y_size, -1, dtype=dtype)\n y_arr *= y_pixel_size\n y_arr += (bbox.ymax - y_pixel_size / 2.0)\n\n x = SpatialCoordinateVariable(x_arr)\n y = SpatialCoordinateVariable(y_arr)\n\n return SpatialCoordinateVariables(x, y, bbox.projection)\n\n def add_to_dataset(self, dataset, x_name, y_name, **kwargs):\n x_var = self.x.add_to_dataset(dataset, x_name, **kwargs)\n y_var = self.y.add_to_dataset(dataset, y_name, **kwargs)\n\n x_var.setncattr('axis', 'X')\n y_var.setncattr('axis', 'Y')\n\n if self.projection:\n if is_latlong(self.projection):\n x_var.setncattr('standard_name', 'longitude')\n x_var.setncattr('long_name', 'longitude')\n x_var.setncattr('units', 'degrees_east')\n y_var.setncattr('standard_name', 'latitude')\n y_var.setncattr('long_name', 'latitude')\n y_var.setncattr('units', 'degrees_north')\n\n else:\n x_var.setncattr('standard_name', 'projection_x_coordinate')\n x_var.setncattr('long_name', 'x coordinate of projection')\n y_var.setncattr('standard_name', 'projection_y_coordinate')\n y_var.setncattr('long_name', 'y coordinate of projection')\n\n\n def slice_by_bbox(self, bbox):\n assert isinstance(bbox, BBox)\n\n x_half_pixel_size = float(self.x.pixel_size)/2\n y_half_pixel_size = float(self.y.pixel_size)/2\n\n # Note: this is very sensitive to decimal precision.\n x = SpatialCoordinateVariable(\n self.x.slice_by_range(bbox.xmin + x_half_pixel_size, bbox.xmax - x_half_pixel_size)\n )\n y = SpatialCoordinateVariable(\n self.y.slice_by_range(bbox.ymin + y_half_pixel_size, bbox.ymax - y_half_pixel_size)\n )\n return SpatialCoordinateVariables(x, y, self.projection)\n\n def slice_by_window(self, window):\n assert isinstance(window, Window)\n\n x = SpatialCoordinateVariable(self.x.values[window.x_slice])\n y = 
SpatialCoordinateVariable(self.y.values[window.y_slice])\n return SpatialCoordinateVariables(x, y, self.projection)\n\n def get_window_for_subset(self, subset_coordinates):\n \"\"\"\n return a Window representing offsets of subset_coordinates self within subset_coordinates.\n Assumed to be in same projection, etc.\n\n :param subset_coordinates: the coordinates of the subset within self\n \"\"\"\n\n assert isinstance(subset_coordinates, SpatialCoordinateVariables)\n\n y_offset = self.y.get_offset_for_subset(subset_coordinates.y)\n x_offset = self.x.get_offset_for_subset(subset_coordinates.x)\n return Window((y_offset, len(subset_coordinates.y) + y_offset),\n (x_offset, len(subset_coordinates.x) + x_offset))\n\n def get_window_for_bbox(self, bbox):\n \"\"\"\n return a Window representing offsets of bbox within self\n :param bbox: instance of bounding box representing coordinates to use for Window\n :return: Window instance to extract data from within coordinate range of self\n \"\"\"\n\n assert isinstance(bbox, BBox)\n\n y_half_pixel_size = float(self.y.pixel_size)/2\n x_half_pixel_size = float(self.x.pixel_size)/2\n\n y_offset, y_max = self.y.indices_for_range(bbox.ymin + y_half_pixel_size, bbox.ymax - y_half_pixel_size)\n x_offset, x_max = self.x.indices_for_range(bbox.xmin + x_half_pixel_size, bbox.xmax - x_half_pixel_size)\n return Window((y_offset, y_max + 1), (x_offset, x_max + 1))\n\n\nclass DateVariable(CoordinateVariable):\n \"\"\"\n Provides utility wrapper of a date variable, especially when stored according to CF convention.\n If variable conforms to CF convention pattern (has units with 'since' in label and calendar) then\n dates are extracted and converted to python date objects.\n\n Dates are assumed to be sorted in ascending order.\n \"\"\"\n\n def __init__(self, input, units_start_date=date(2000, 1, 1), calendar='360_day'):\n \"\"\"\n Create from a variable with CF Convention units and calendar, or\n an array of years.\n\n If created from years, values are recorded on the first day of the month\n for each year, and are exported using units of days (years not allowed\n by CF convention. 
Lame).\n \"\"\"\n\n assert calendar in ('360_day', 'gregorian', 'standard', 'julian', '360', 'noleap')\n\n super(DateVariable, self).__init__(input)\n\n if isinstance(input, Variable):\n attributes = get_ncattrs(input)\n self.units = attributes.get('units', '').lower()\n self.calendar = attributes.get('calendar', '').lower()\n if self.units and self.calendar and 'since' in self.units.lower():\n self.dates = num2date(self.values, self.units, self.calendar)\n elif (self.units and 'year' in self.units) or 'year' in input._name.lower():\n self.dates = numpy.array([datetime(y, 1, 1, tzinfo=pytz.UTC) for y in self.values.astype('int')])\n else:\n raise ValueError('Variable is missing required attributes: units, calendar')\n else:\n self.units = 'year' if self.unit == 'year' else '{0}s since {1}'.format(self.unit, str(units_start_date))\n self.calendar = calendar\n\n if self.values.dtype.kind in ('i', 'u', 'f'):\n self.dates = numpy.array([datetime(y, 1, 1) for y in self.values])\n elif isinstance(self.values[0], datetime):\n self.dates = self.values.copy()\n\n if self.unit == 'year':\n self.values = numpy.array([x.year for x in self.values], dtype='int32')\n else:\n self.values = numpy.array(\n date2num(self.dates, units=self.units, calendar=self.calendar), dtype=numpy.int32\n )\n\n @property\n def datetimes(self):\n \"\"\"\n Convert to python datetimes if not done automatically (calendar not compatible with python datetimes).\n Use with caution\n \"\"\"\n\n if isinstance(self.dates[0], datetime):\n return self.dates\n else:\n return numpy.array([datetime(*d.timetuple()[:6], tzinfo=pytz.UTC) for d in self.dates])\n\n @property\n def unit(self):\n def varies_by_year(x, y):\n if y.year == x.year or (y - x).seconds != 0 or x.month != y.month or x.day != y.day:\n return False\n\n return True\n\n def varies_by_month(x, y):\n if x.month == y.month or (y - x).seconds != 0 or x.day != y.day:\n return False\n\n return True\n\n datetimes = self.datetimes if not self.values.dtype == datetime else self.values\n\n if all(varies_by_year(datetimes[i], datetimes[i-1]) for i in range(1, len(datetimes))):\n return 'year'\n elif all(varies_by_month(datetimes[i], datetimes[i-1]) for i in range(1, len(datetimes))):\n return 'month'\n\n deltas = datetimes[1:] - datetimes[:-1]\n\n for unit, seconds in (('day', 86400), ('hour', 3600), ('minute', 60), ('second', 1)):\n if any(x.seconds % seconds != 0 for x in deltas):\n continue\n break\n\n return unit\n\n def add_to_dataset(self, dataset, name, **kwargs):\n variable = super(DateVariable, self).add_to_dataset(dataset, name, **kwargs)\n for att in ('units', 'calendar'):\n variable.setncattr(att, getattr(self, att))\n\n def indices_for_range(self, start, stop):\n \"\"\"\n Returns the indices in this variable for the start and stop values. Data must be in ascending order\n :param start: start value. Can be a date object or a year.\n :param stop: stop value. Can be a date object or a year.\n :return: start and stop indices\n \"\"\"\n\n if not self.is_ascending_order():\n raise ValueError(\"Dates must be in ascending order\")\n\n if not isinstance(start, date):\n start = date(start, 1, 1)\n\n if not isinstance(stop, date):\n stop = date(stop, 12, 31)\n\n return numpy.searchsorted(self.dates, start), numpy.searchsorted(self.dates, stop)\n" ]
[ [ "numpy.array", "numpy.isclose", "numpy.arange", "numpy.append", "numpy.searchsorted" ] ]
VitaminBrad/AMICI
[ "d220128f262747ad9f15db83377e4f8a5d2006d3" ]
[ "python/amici/pandas.py" ]
[ "\"\"\"\nPandas Wrappers\n---------------\nThis modules contains convenience wrappers that allow for easy interconversion\nbetween C++ objects from :mod:`amici.amici` and pandas DataFrames\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport math\nimport copy\n\nfrom typing import List, Union, Optional, Dict, SupportsFloat\nfrom .numpy import ExpDataView\nimport amici\n\nExpDatas = Union[\n List[amici.amici.ExpData], List[amici.ExpDataPtr],\n amici.amici.ExpData, amici.ExpDataPtr\n]\nReturnDatas = Union[\n List[amici.ReturnDataView], amici.ReturnDataView\n]\n\nAmiciModel = Union[amici.ModelPtr, amici.Model]\n\n\ndef _process_edata_list(edata_list: ExpDatas) -> List[amici.amici.ExpData]:\n \"\"\"\n Maps single instances of :class:`amici.amici.ExpData` to lists of\n :class:`amici.amici.ExpData`\n\n :param edata_list:\n list of instances or single instance\n\n :return:\n list of instance(s)\n \"\"\"\n if isinstance(edata_list, (amici.amici.ExpData, amici.ExpDataPtr)):\n return [edata_list]\n else:\n return edata_list\n\n\ndef _process_rdata_list(rdata_list: ReturnDatas) -> List[amici.ReturnDataView]:\n \"\"\"\n Maps single instances of :class:`amici.ReturnData` to lists of\n :class:`amici.ReturnData`\n\n :param rdata_list:\n list of instances or single instance\n\n :return:\n list of instance(s)\n \"\"\"\n if isinstance(rdata_list, amici.ReturnDataView):\n return [rdata_list]\n else:\n return rdata_list\n\n\ndef getDataObservablesAsDataFrame(\n model: AmiciModel,\n edata_list: ExpDatas,\n by_id: Optional[bool] = False) -> pd.DataFrame:\n \"\"\"\n Write Observables from experimental data as DataFrame.\n\n :param model:\n Model instance.\n\n :param edata_list:\n list of ExpData instances with experimental data.\n May also be a single ExpData instance.\n\n :param by_id:\n If True, uses observable ids as column names in the generated\n DataFrame, otherwise the possibly more descriptive observable names\n are used.\n\n :return:\n pandas DataFrame with conditions/timepoints as rows and observables as\n columns.\n \"\"\"\n edata_list = _process_edata_list(edata_list)\n\n # list of all column names using either ids or names\n cols = _get_extended_observable_cols(model, by_id=by_id)\n\n # aggregate recrods\n dicts = []\n for edata in edata_list:\n npdata = ExpDataView(edata)\n for i_time, timepoint in enumerate(edata.getTimepoints()):\n datadict = {\n 'time': timepoint,\n 'datatype': 'data'\n }\n # add observables and noises\n for i_obs, obs in enumerate(_get_names_or_ids(\n model, 'Observable', by_id=by_id)):\n datadict[obs] = npdata['observedData'][i_time, i_obs]\n datadict[obs + '_std'] = \\\n npdata['observedDataStdDev'][i_time, i_obs]\n\n # add conditions\n _fill_conditions_dict(datadict, model, edata, by_id=by_id)\n\n dicts.append(datadict)\n\n return pd.DataFrame.from_records(dicts, columns=cols)\n\n\ndef getSimulationObservablesAsDataFrame(\n model: amici.Model,\n edata_list: ExpDatas,\n rdata_list: ReturnDatas,\n by_id: Optional[bool] = False\n) -> pd.DataFrame:\n \"\"\"\n Write Observables from simulation results as DataFrame.\n\n :param model:\n Model instance.\n\n :param edata_list:\n list of ExpData instances with experimental data.\n May also be a single ExpData instance.\n\n :param rdata_list:\n list of ReturnData instances corresponding to ExpData.\n May also be a single ReturnData instance.\n\n :param by_id:\n If True, ids are used as identifiers, otherwise the possibly more\n descriptive names.\n\n :return:\n pandas DataFrame with conditions/timepoints as rows and state\n 
variables as columns.\n \"\"\"\n edata_list = _process_edata_list(edata_list)\n rdata_list = _process_rdata_list(rdata_list)\n\n # list of all column names using either names or ids\n cols = _get_extended_observable_cols(model, by_id=by_id)\n\n # aggregate recrods\n dicts = []\n for edata, rdata in zip(edata_list, rdata_list):\n for i_time, timepoint in enumerate(rdata['t']):\n datadict = {\n 'time': timepoint,\n 'datatype': 'simulation',\n }\n # append simulations\n for i_obs, obs in enumerate(_get_names_or_ids(\n model, 'Observable', by_id=by_id)):\n datadict[obs] = rdata['y'][i_time, i_obs]\n datadict[obs + '_std'] = rdata['sigmay'][i_time, i_obs]\n\n # use edata to fill conditions columns\n _fill_conditions_dict(datadict, model, edata, by_id=by_id)\n\n # append to dataframe\n dicts.append(datadict)\n\n return pd.DataFrame.from_records(dicts, columns=cols)\n\n\ndef getSimulationStatesAsDataFrame(\n model: amici.Model,\n edata_list: ExpDatas,\n rdata_list: ReturnDatas,\n by_id: Optional[bool] = False) -> pd.DataFrame:\n \"\"\"\n Compute model residuals according to lists of ReturnData and ExpData.\n\n :param model:\n Model instance.\n\n :param edata_list:\n list of ExpData instances with experimental data.\n May also be a single ExpData instance.\n\n :param rdata_list:\n list of ReturnData instances corresponding to ExpData.\n May also be a single ReturnData instance.\n\n :param by_id:\n If True, ids are used as identifiers, otherwise the possibly more\n descriptive names.\n\n :return: pandas DataFrame with conditions/timpoints as rows and\n observables as columns.\n \"\"\"\n edata_list = _process_edata_list(edata_list)\n rdata_list = _process_rdata_list(rdata_list)\n\n # get conditions and state column names by name or id\n cols = _get_state_cols(model, by_id=by_id)\n\n # aggregate recrods\n dicts = []\n for edata, rdata in zip(edata_list, rdata_list):\n for i_time, timepoint in enumerate(rdata['t']):\n datadict = {\n 'time': timepoint,\n }\n\n # append states\n for i_state, state in enumerate(\n _get_names_or_ids(model, 'State', by_id=by_id)):\n datadict[state] = rdata['x'][i_time, i_state]\n\n # use data to fill condition columns\n _fill_conditions_dict(datadict, model, edata, by_id=by_id)\n\n # append to dataframe\n dicts.append(datadict)\n\n return pd.DataFrame.from_records(dicts, columns=cols)\n\n\ndef getResidualsAsDataFrame(model: amici.Model,\n edata_list: ExpDatas,\n rdata_list: ReturnDatas,\n by_id: Optional[bool] = False) -> pd.DataFrame:\n \"\"\"\n Convert a list of ExpData to pandas DataFrame.\n\n :param model:\n Model instance.\n\n :param edata_list:\n list of ExpData instances with experimental data. May also be a\n single ExpData instance.\n\n :param rdata_list:\n list of ReturnData instances corresponding to ExpData. 
May also be a\n single ReturnData instance.\n\n :param by_id: bool, optional (default = False)\n If True, ids are used as identifiers, otherwise the possibly more\n descriptive names.\n\n :return:\n pandas DataFrame with conditions and observables.\n \"\"\"\n edata_list = _process_edata_list(edata_list)\n rdata_list = _process_rdata_list(rdata_list)\n\n # create observable and simulation dataframes\n df_edata = getDataObservablesAsDataFrame(\n model, edata_list, by_id=by_id)\n df_rdata = getSimulationObservablesAsDataFrame(\n model, edata_list, rdata_list, by_id=by_id)\n\n # get all column names using names or ids\n cols = _get_observable_cols(model, by_id=by_id)\n\n # aggregate recrods\n dicts = []\n for row in df_rdata.index:\n datadict = {\n 'time': df_rdata.loc[row]['time'],\n 't_presim': df_rdata.loc[row]['t_presim']\n }\n\n # iterate over observables\n for obs in _get_names_or_ids(model, 'Observable', by_id=by_id):\n # compute residual and append to dict\n datadict[obs] = abs(\n (df_edata.loc[row][obs] - df_rdata.loc[row][obs]) /\n df_rdata.loc[row][obs + '_std'])\n\n # iterate over fixed parameters\n for par in _get_names_or_ids(model, 'FixedParameter', by_id=by_id):\n # fill in conditions\n datadict[par] = df_rdata.loc[row][par]\n datadict[par + '_preeq'] = df_rdata.loc[row][par + '_preeq']\n datadict[par + '_presim'] = df_rdata.loc[row][par + '_presim']\n\n # append to dataframe\n dicts.append(datadict)\n\n return pd.DataFrame.from_records(dicts, columns=cols)\n\n\ndef _fill_conditions_dict(datadict: Dict[str, float],\n model: AmiciModel,\n edata: amici.amici.ExpData,\n by_id: bool) -> Dict[str, float]:\n \"\"\"\n Helper function that fills in condition parameters from model and\n edata.\n\n :param datadict:\n dictionary in which condition parameters will be inserted\n as key value pairs.\n\n :param model:\n Model instance.\n\n :param edata:\n ExpData instance.\n\n :param by_id:\n If True, ids are used as identifiers, otherwise the possibly more\n descriptive names.\n\n :return:\n dictionary with filled condition parameters.\n\n \"\"\"\n datadict['t_presim'] = edata.t_presim\n\n for i_par, par in enumerate(\n _get_names_or_ids(model, 'FixedParameter', by_id=by_id)):\n if len(edata.fixedParameters):\n datadict[par] = edata.fixedParameters[i_par]\n else:\n datadict[par] = model.getFixedParameters()[i_par]\n\n if len(edata.fixedParametersPreequilibration):\n datadict[par + '_preeq'] = \\\n edata.fixedParametersPreequilibration[i_par]\n else:\n datadict[par + '_preeq'] = np.nan\n\n if len(edata.fixedParametersPresimulation):\n datadict[par + '_presim'] = \\\n edata.fixedParametersPresimulation[i_par]\n else:\n datadict[par + '_presim'] = np.nan\n return datadict\n\n\ndef _get_extended_observable_cols(model: AmiciModel,\n by_id: bool) -> List[str]:\n \"\"\"\n Construction helper for extended observable dataframe headers.\n\n :param model:\n Model instance.\n\n :param by_id:\n If True, ids are used as identifiers, otherwise the possibly more\n descriptive names.\n\n :return:\n column names as list.\n \"\"\"\n return \\\n ['time', 'datatype', 't_presim'] + \\\n _get_names_or_ids(model, 'FixedParameter', by_id=by_id) + \\\n [name + '_preeq' for name in\n _get_names_or_ids(model, 'FixedParameter', by_id=by_id)] + \\\n [name + '_presim' for name in\n _get_names_or_ids(model, 'FixedParameter', by_id=by_id)] + \\\n _get_names_or_ids(model, 'Observable', by_id=by_id) + \\\n [name + '_std' for name in\n _get_names_or_ids(model, 'Observable', by_id=by_id)]\n\n\ndef _get_observable_cols(model: 
AmiciModel,\n by_id: bool) -> List[str]:\n \"\"\"\n Construction helper for observable dataframe headers.\n\n :param model:\n Model instance.\n\n :param by_id:\n If True, ids are used as identifiers, otherwise the possibly more\n descriptive names.\n\n :return:\n column names as list.\n \"\"\"\n return \\\n ['time', 't_presim'] + \\\n _get_names_or_ids(model, 'FixedParameter', by_id=by_id) + \\\n [name + '_preeq' for name in\n _get_names_or_ids(model, 'FixedParameter', by_id=by_id)] + \\\n [name + '_presim' for name in\n _get_names_or_ids(model, 'FixedParameter', by_id=by_id)] + \\\n _get_names_or_ids(model, 'Observable', by_id=by_id)\n\n\ndef _get_state_cols(model: AmiciModel,\n by_id: bool) -> List[str]:\n \"\"\"\n Construction helper for state dataframe headers.\n\n :param model:\n Model instance.\n\n :param by_id:\n If True, ids are used as identifiers, otherwise the possibly more\n descriptive names.\n\n :return:\n column names as list.\n \"\"\"\n return \\\n ['time', 't_presim'] + \\\n _get_names_or_ids(model, 'FixedParameter', by_id=by_id) + \\\n [name + '_preeq' for name in\n _get_names_or_ids(model, 'FixedParameter', by_id=by_id)] + \\\n [name + '_presim' for name in\n _get_names_or_ids(model, 'FixedParameter', by_id=by_id)] + \\\n _get_names_or_ids(model, 'State', by_id=by_id)\n\n\ndef _get_names_or_ids(model: AmiciModel,\n variable: str,\n by_id: bool) -> List[str]:\n \"\"\"\n Obtains a unique list of identifiers for the specified variable.\n First tries model.getVariableNames and then uses model.getVariableIds.\n \n :param model:\n Model instance.\n \n :param variable:\n variable name.\n \n :param by_id:\n If True, ids are used as identifiers, otherwise first the possibly\n more descriptive names are used.\n\n :return:\n column names as list.\n \"\"\"\n # check whether variable type permitted\n variable_options = ['Parameter', 'FixedParameter', 'Observable', 'State']\n if variable not in variable_options:\n raise ValueError('Variable must be in ' + str(variable_options))\n\n # extract attributes\n names = list(getattr(model, f'get{variable}Names')())\n ids = list(getattr(model, f'get{variable}Ids')())\n\n # find out if model has names and ids\n has_names = getattr(model, f'has{variable}Names')()\n has_ids = getattr(model, f'has{variable}Ids')()\n\n # extract labels\n if not by_id and has_names and len(set(names)) == len(names):\n # use variable names\n return names\n elif has_ids:\n # use variable ids\n return ids\n else:\n # unable to create unique labels\n if by_id:\n msg = f\"Model {variable} ids are not set.\"\n else:\n msg = f\"Model {variable} names are not unique and \" \\\n f\"{variable} ids are not set.\"\n raise ValueError(msg)\n\n\ndef _get_specialized_fixed_parameters(\n model: AmiciModel,\n condition: Union[Dict[str, SupportsFloat], pd.Series],\n overwrite: Union[Dict[str, SupportsFloat], pd.Series],\n by_id: bool\n) -> List[float]:\n \"\"\"\n Copies values in condition and overwrites them according to key\n value pairs specified in overwrite.\n\n :param model:\n Model instance.\n :param condition:\n fixedParameter values.\n :param overwrite:\n dict specifying which values in condition are to be replaced.\n :param by_id:\n bool\n If True, ids are used as identifiers, otherwise the possibly more\n descriptive names.\n\n :return:\n overwritten FixedParameter as list.\n\n Raises:\n\n \"\"\"\n cond = copy.deepcopy(condition)\n for field in overwrite:\n cond[field] = overwrite[field]\n return [float(cond[name]) for name in _get_names_or_ids(\n model, 'FixedParameter', 
by_id=by_id)]\n\n\ndef constructEdataFromDataFrame(\n df: pd.DataFrame,\n model: AmiciModel,\n condition: pd.Series,\n by_id: Optional[bool] = False\n) -> amici.amici.ExpData:\n \"\"\"\n Constructs an ExpData instance according to the provided Model\n and DataFrame.\n\n :param df:\n pd.DataFrame with Observable Names/Ids as columns.\n Standard deviations may be specified by appending '_std' as suffix.\n\n :param model:\n Model instance.\n\n :param condition:\n pd.Series with FixedParameter Names/Ids as columns.\n Preequilibration conditions may be specified by appending\n '_preeq' as suffix. Presimulation conditions may be specified by\n appending '_presim' as suffix.\n\n :param by_id:\n Indicate whether in the arguments, column headers are based on ids or\n names. This should correspond to the way `df` and `condition` was\n created in the first place.\n\n :return:\n ExpData instance.\n \"\"\"\n # initialize edata\n edata = amici.ExpData(model.get())\n\n # timepoints\n df = df.sort_values(by='time', ascending=True)\n edata.setTimepoints(df['time'].values.astype(float))\n\n # get fixed parameters from condition\n overwrite_preeq = {}\n overwrite_presim = {}\n for par in list(_get_names_or_ids(model, 'FixedParameter', by_id=by_id)):\n if par + '_preeq' in condition.keys() \\\n and not math.isnan(condition[par + '_preeq']):\n overwrite_preeq[par] = condition[par + '_preeq']\n if par + '_presim' in condition.keys() \\\n and not math.isnan(condition[par + '_presim']):\n overwrite_presim[par] = condition[par + '_presim']\n\n # fill in fixed parameters\n edata.fixedParameters = condition[\n _get_names_or_ids(model, 'FixedParameter', by_id=by_id)\n ].values\n\n # fill in preequilibration parameters\n if any([overwrite_preeq[key] != condition[key] for key in\n overwrite_preeq.keys()]):\n edata.fixedParametersPreequilibration = \\\n _get_specialized_fixed_parameters(\n model, condition, overwrite_preeq, by_id=by_id)\n elif len(overwrite_preeq.keys()):\n edata.fixedParametersPreequilibration = copy.deepcopy(\n edata.fixedParameters\n )\n\n # fill in presimulation parameters\n if any([overwrite_presim[key] != condition[key] for key in\n overwrite_presim.keys()]):\n edata.fixedParametersPresimulation = _get_specialized_fixed_parameters(\n model, condition, overwrite_presim, by_id=by_id\n )\n elif len(overwrite_presim.keys()):\n edata.fixedParametersPresimulation = copy.deepcopy(\n edata.fixedParameters\n )\n\n # fill in presimulation time\n if 't_presim' in condition.keys():\n edata.t_presim = float(condition['t_presim'])\n\n # fill in data and stds\n for obs_index, obs in enumerate(\n _get_names_or_ids(model, 'Observable', by_id=by_id)):\n if obs in df.keys():\n edata.setObservedData(df[obs].values.astype(float), obs_index)\n if obs + '_std' in df.keys():\n edata.setObservedDataStdDev(\n df[obs + '_std'].values.astype(float), obs_index\n )\n\n return edata\n\n\ndef getEdataFromDataFrame(\n model: AmiciModel,\n df: pd.DataFrame,\n by_id: Optional[bool] = False\n) -> List[amici.amici.ExpData]:\n \"\"\"\n Constructs a ExpData instance according to the provided Model and\n DataFrame.\n\n :param df:\n dataframe with Observable Names/Ids, FixedParameter Names/Ids\n and time as columns. Standard deviations may be specified by\n appending '_std' as suffix. Preequilibration fixedParameters may be\n specified by appending '_preeq' as suffix. 
Presimulation\n fixedParameters may be specified by appending '_presim' as suffix.\n Presimulation time may be specified as 't_presim' column.\n\n :param model:\n Model instance.\n\n :param by_id:\n Whether the column names in `df` are based on ids or names,\n corresponding to how the dataframe was created in the first place.\n\n :return:\n list of ExpData instances.\n \"\"\"\n edata_list = []\n\n # aggregate features that define a condition\n\n # fixed parameters\n condition_parameters = _get_names_or_ids(model, 'FixedParameter',\n by_id=by_id)\n # preeq and presim parameters\n for par in _get_names_or_ids(model, 'FixedParameter', by_id=by_id):\n if par + '_preeq' in df.columns:\n condition_parameters.append(par + '_preeq')\n if par + '_presim' in df.columns:\n condition_parameters.append(par + '_presim')\n # presimulation time\n if 't_presim' in df.columns:\n condition_parameters.append('t_presim')\n # drop duplicates to create final conditions\n conditions = df[condition_parameters].drop_duplicates()\n\n # iterate over conditions\n for ir, row in conditions.iterrows():\n # subselect rows that match condition\n selected = np.ones((len(df),), dtype=bool)\n for par_label, par in row.iteritems():\n if math.isnan(par):\n selected = selected & np.isnan(df[par_label].values)\n else:\n selected = selected & (df[par_label] == par)\n edata_df = df[selected]\n\n edata_list.append(\n constructEdataFromDataFrame(edata_df, model, row, by_id=by_id)\n )\n\n return edata_list\n" ]
[ [ "pandas.DataFrame.from_records", "numpy.isnan" ] ]
bklebel/keras
[ "f2af6049de7152fde27c54b8f7b0925793e049a5" ]
[ "tests/keras/backend/backend_test.py" ]
[ "import pytest\nfrom numpy.testing import assert_allclose\nimport numpy as np\nimport scipy.signal as signal\nimport scipy.sparse as sparse\nimport warnings\nfrom keras.utils.test_utils import keras_test\n\nfrom keras import backend as K\nfrom keras.backend import floatx, set_floatx, variable\nfrom keras.utils.conv_utils import convert_kernel\n\nBACKENDS = [] # Holds a list of all available back-ends\n\ntry:\n from keras.backend import cntk_backend as KC\n BACKENDS.append(KC)\nexcept ImportError:\n KC = None\n warnings.warn('Could not import the CNTK backend')\n\ntry:\n from keras.backend import tensorflow_backend as KTF\n BACKENDS.append(KTF)\nexcept ImportError:\n KTF = None\n warnings.warn('Could not import the TensorFlow backend.')\n\ntry:\n from keras.backend import theano_backend as KTH\n BACKENDS.append(KTH)\nexcept ImportError:\n KTH = None\n warnings.warn('Could not import the Theano backend')\n\n\ndef check_dtype(var, dtype):\n if K._BACKEND == 'theano':\n assert var.dtype == dtype\n else:\n assert var.dtype.name == '%s_ref' % dtype\n\n\ndef cntk_func_single_tensor(function_name, x_shape, **kwargs):\n xc = KC.placeholder(x_shape)\n output_cntk = getattr(KC, function_name)(xc, **kwargs)\n return KC.function([xc], [output_cntk])\n\n\ndef cntk_func_two_tensor(function_name, x_shape, y, **kwargs):\n if isinstance(y, (np.generic, np.ndarray)):\n xc = KC.placeholder(x_shape)\n output_cntk = getattr(KC, function_name)(xc, KC.variable(y), **kwargs)\n return KC.function([xc], [output_cntk])\n else:\n xc = KC.placeholder(ndim=len(x_shape))\n yc = KC.placeholder(y)\n output_cntk = getattr(KC, function_name)(xc, yc, **kwargs)\n return KC.function([xc, yc], [output_cntk])\n\n\ndef cntk_func_three_tensor(function_name, x_shape, y, z, **kwargs):\n xc = KC.placeholder(x_shape)\n output_cntk = getattr(KC, function_name)(xc, KC.variable(y), KC.variable(z), **kwargs)\n return KC.function([xc], [output_cntk])\n\n\ndef parse_shape_or_val(shape_or_val):\n if isinstance(shape_or_val, np.ndarray):\n return shape_or_val.shape, shape_or_val\n else:\n return shape_or_val, np.random.random(shape_or_val).astype(np.float32) - 0.5\n\n\ndef assert_list_pairwise(z_list, shape=True, allclose=True, itself=False, atol=1e-05):\n for (z1, z2) in zip(z_list[1:], z_list[:-1]):\n if shape:\n assert z1.shape == z2.shape\n if allclose:\n assert_allclose(z1, z2, atol=atol)\n if itself:\n assert z1 == z2\n\n\ndef assert_list_with_ref(z_list, ref):\n for z in z_list:\n assert z.shape == ref.shape\n assert_allclose(z, ref, atol=1e-05)\n\n\ndef assert_list_keras_shape(z_list):\n for z in z_list:\n if hasattr(z, '_keras_shape'):\n assert z._keras_shape == z.shape\n\n\n@keras_test\ndef check_single_tensor_operation(function_name, x_shape_or_val, backend_list, **kwargs):\n shape_or_val = kwargs.pop('shape_or_val', True)\n assert_value_equality = kwargs.pop('assert_value_equality', True)\n assert_value_with_ref = kwargs.pop('assert_value_with_ref', None)\n cntk_dynamicity = kwargs.pop('cntk_dynamicity', False)\n return_results = kwargs.pop('return_results', False)\n\n if shape_or_val:\n x_shape, x_val = parse_shape_or_val(x_shape_or_val)\n\n z_list = []\n for k in backend_list:\n if shape_or_val:\n if (k == KC) & (cntk_dynamicity):\n z = cntk_func_single_tensor(function_name, x_shape,\n **kwargs)([x_val])[0]\n else:\n z = k.eval(getattr(k, function_name)(k.variable(x_val), **kwargs))\n else:\n z = k.eval(getattr(k, function_name)(x_shape_or_val, **kwargs))\n z_list += [z]\n\n if return_results:\n if len(z_list) > 1:\n return 
z_list\n else:\n return z_list[0]\n\n if assert_value_with_ref is not None:\n assert_list_with_ref(z_list, assert_value_with_ref)\n else:\n assert_list_pairwise(z_list, allclose=assert_value_equality)\n assert_list_keras_shape(z_list)\n\n\n@keras_test\ndef check_two_tensor_operation(function_name, x_shape_or_val,\n y_shape_or_val, backend_list, **kwargs):\n shape_or_val = kwargs.pop('shape_or_val', True)\n concat_args = kwargs.pop('concat_args', False)\n cntk_dynamicity = kwargs.pop('cntk_dynamicity', False)\n cntk_two_dynamicity = kwargs.pop('cntk_two_dynamicity', False)\n return_results = kwargs.pop('return_results', False)\n\n if shape_or_val:\n x_shape, x_val = parse_shape_or_val(x_shape_or_val)\n y_shape, y_val = parse_shape_or_val(y_shape_or_val)\n\n z_list = []\n for k in backend_list:\n if shape_or_val:\n if (k == KC) & (cntk_dynamicity):\n z = cntk_func_two_tensor(function_name, x_shape,\n y=y_val, **kwargs)([x_val])[0]\n elif (k == KC) & (cntk_two_dynamicity):\n z = cntk_func_two_tensor(function_name, x_shape,\n y=y_shape, **kwargs)([x_val, y_val])[0]\n elif (k == KTH) & (function_name[:4] == 'conv'):\n z = k.eval(getattr(k, function_name)(\n k.variable(x_val), k.variable(convert_kernel(y_val)), **kwargs))\n elif concat_args:\n z = k.eval(getattr(k, function_name)(\n [k.variable(x_val), k.variable(y_val)], **kwargs))\n else:\n z = k.eval(getattr(k, function_name)(\n k.variable(x_val), k.variable(y_val), **kwargs))\n else:\n z = k.eval(getattr(k, function_name)(\n x_shape_or_val, y_shape_or_val, **kwargs))\n z_list += [z]\n\n if return_results:\n if len(z_list) > 1:\n return z_list\n else:\n return z_list[0]\n\n assert_list_pairwise(z_list)\n assert_list_keras_shape(z_list)\n\n\n@keras_test\ndef check_composed_tensor_operations(first_function_name, first_function_args,\n second_function_name, second_function_args,\n input_shape, backend_list):\n val = np.random.random(input_shape) - 0.5\n\n z_list = []\n for k in backend_list:\n x = k.variable(val)\n y = getattr(k, first_function_name)(x, **first_function_args)\n z = k.eval(getattr(k, second_function_name)(y, **second_function_args))\n z_list += [z]\n\n assert_list_pairwise(z_list)\n\n\ndef normalize_ref_conv(func):\n def wrapper(*args):\n x = args[0]\n w = args[1]\n if x.ndim == 3:\n w = np.flipud(w)\n w = np.transpose(w, (1, 2, 0))\n if args[3] == 'channels_last':\n x = np.transpose(x, (0, 2, 1))\n elif x.ndim == 4:\n w = np.fliplr(np.flipud(w))\n w = np.transpose(w, (2, 3, 0, 1))\n if args[3] == 'channels_last':\n x = np.transpose(x, (0, 3, 1, 2))\n else:\n w = np.flip(np.fliplr(np.flipud(w)), axis=2)\n w = np.transpose(w, (3, 4, 0, 1, 2))\n if args[3] == 'channels_last':\n x = np.transpose(x, (0, 4, 1, 2, 3))\n\n y = func(x, w, args[2], args[3])\n\n if args[3] == 'channels_last':\n if y.ndim == 3:\n y = np.transpose(y, (0, 2, 1))\n elif y.ndim == 4:\n y = np.transpose(y, (0, 2, 3, 1))\n else:\n y = np.transpose(y, (0, 2, 3, 4, 1))\n\n return y\n\n return wrapper\n\n\n@normalize_ref_conv\ndef ref_conv(x, w, padding, data_format):\n y = []\n for i in range(x.shape[0]):\n _y = []\n for j in range(w.shape[1]):\n __y = []\n for k in range(w.shape[0]):\n __y.append(signal.convolve(x[i, k], w[k, j], mode=padding))\n _y.append(np.sum(np.stack(__y, axis=-1), axis=-1))\n y.append(_y)\n y = np.array(y)\n return y\n\n\n@normalize_ref_conv\ndef ref_depthwise_conv(x, w, padding, data_format):\n y = []\n for i in range(x.shape[0]):\n _y = []\n for j in range(w.shape[0]):\n __y = []\n for k in range(w.shape[1]):\n 
__y.append(signal.convolve(x[i, j], w[j, k], mode=padding))\n _y.append(np.stack(__y, axis=0))\n y.append(np.concatenate(_y, axis=0))\n y = np.array(y)\n return y\n\n\ndef ref_separable_conv(x, w1, w2, padding, data_format):\n x2 = ref_depthwise_conv(x, w1, padding, data_format)\n return ref_conv(x2, w2, padding, data_format)\n\n\ndef ref_pool(x, pool_size, strides, padding, data_format, pool_mode):\n if data_format == 'channels_last':\n if x.ndim == 3:\n x = np.transpose(x, (0, 2, 1))\n elif x.ndim == 4:\n x = np.transpose(x, (0, 3, 1, 2))\n else:\n x = np.transpose(x, (0, 4, 1, 2, 3))\n\n if padding == 'same':\n pad = [(0, 0), (0, 0)] + [(s // 2, s // 2) for s in pool_size]\n x = np.pad(x, pad, 'constant', constant_values=-np.inf)\n\n # indexing trick\n x = np.pad(x, [(0, 0), (0, 0)] + [(0, 1) for _ in pool_size],\n 'constant', constant_values=0)\n\n if x.ndim == 3:\n y = [x[:, :, k:k1:strides[0]]\n for (k, k1) in zip(range(pool_size[0]), range(-pool_size[0], 0))]\n elif x.ndim == 4:\n y = []\n for (k, k1) in zip(range(pool_size[0]), range(-pool_size[0], 0)):\n for (l, l1) in zip(range(pool_size[1]), range(-pool_size[1], 0)):\n y.append(x[:, :, k:k1:strides[0], l:l1:strides[1]])\n else:\n y = []\n for (k, k1) in zip(range(pool_size[0]), range(-pool_size[0], 0)):\n for (l, l1) in zip(range(pool_size[1]), range(-pool_size[1], 0)):\n for (m, m1) in zip(range(pool_size[2]), range(-pool_size[2], 0)):\n y.append(x[:, :, k:k1:strides[0], l:l1:strides[1], m:m1:strides[2]])\n y = np.stack(y, axis=-1)\n if pool_mode == 'avg':\n y = np.mean(np.ma.masked_invalid(y), axis=-1).data\n elif pool_mode == 'max':\n y = np.max(y, axis=-1)\n\n if data_format == 'channels_last':\n if y.ndim == 3:\n y = np.transpose(y, (0, 2, 1))\n elif y.ndim == 4:\n y = np.transpose(y, (0, 2, 3, 1))\n else:\n y = np.transpose(y, (0, 2, 3, 4, 1))\n\n return y\n\n\ndef ref_rnn(x, w, init, go_backwards=False, mask=None, unroll=False, input_length=None):\n w_i, w_h, w_o = w\n h = []\n o = []\n\n if go_backwards:\n t_list = range(x.shape[1] - 1, -1, -1)\n else:\n t_list = range(x.shape[1])\n\n if mask is not None:\n np_mask = K.eval(mask)\n else:\n np_mask = None\n\n for (i, t) in enumerate(t_list):\n h_t = np.dot(x[:, t], w_i)\n\n if w_h is not None:\n prev = h[i - 1] if i > 0 else init\n h_t1 = np.dot(prev, w_h)\n if np_mask is not None:\n h_t1[np_mask[:, t] == 0] = prev[np_mask[:, t] == 0]\n else:\n h_t1 = 0\n\n o_t = h_t + h_t1\n if w_o is not None:\n o_t = np.dot(o_t, w_o)\n o.append(o_t)\n\n if np_mask is not None:\n h_t = h_t * np_mask[:, t].reshape(-1, 1)\n h.append(h_t + h_t1)\n\n return o[-1], np.stack(o, axis=1), np.stack(h, axis=1)\n\n\nclass TestBackend(object):\n\n def test_is_keras_tensor(self):\n for k in BACKENDS:\n np_var = np.array([1, 2])\n with pytest.raises(ValueError):\n k.is_keras_tensor(np_var)\n\n keras_var = k.variable(np_var)\n assert k.is_keras_tensor(keras_var) is False\n keras_placeholder = k.placeholder(shape=(2, 4, 5))\n assert k.is_keras_tensor(keras_placeholder) is False\n\n def test_set_learning_phase(self):\n # not supported learning_phase\n for k in BACKENDS:\n with pytest.raises(ValueError):\n k.set_learning_phase(2)\n\n def test_eye(self):\n z_list = [k.eval(k.eye(3)) for k in BACKENDS]\n assert_list_pairwise(z_list)\n\n def test_linear_operations(self):\n check_two_tensor_operation('dot', (4, 2), (2, 4), BACKENDS)\n check_two_tensor_operation('dot', (4, 2), (5, 2, 3), BACKENDS)\n\n check_two_tensor_operation('batch_dot', (4, 2, 3), (4, 5, 3),\n BACKENDS, cntk_two_dynamicity=True, axes=(2, 
2))\n check_two_tensor_operation('batch_dot', (4, 2, 3), (4, 3),\n BACKENDS, cntk_two_dynamicity=True, axes=(2, 1))\n check_two_tensor_operation('batch_dot', (4, 2), (4, 2, 3),\n BACKENDS, cntk_two_dynamicity=True, axes=(1, 1))\n check_two_tensor_operation('batch_dot', (32, 20), (32, 20),\n BACKENDS, cntk_two_dynamicity=True, axes=1)\n check_two_tensor_operation('batch_dot', (32, 20), (32, 20),\n BACKENDS, cntk_two_dynamicity=True, axes=(1, 1))\n\n check_single_tensor_operation('transpose', (4, 2), BACKENDS)\n check_single_tensor_operation('reverse', (4, 3, 2), BACKENDS, axes=1)\n check_single_tensor_operation('reverse', (4, 3, 2), [KTH, KTF], axes=(1, 2))\n\n def test_random_variables(self):\n check_single_tensor_operation('random_uniform_variable', (2, 3), BACKENDS,\n low=0., high=1.,\n shape_or_val=False, assert_value_equality=False)\n check_single_tensor_operation('random_normal_variable', (2, 3), BACKENDS,\n mean=0., scale=1.,\n shape_or_val=False, assert_value_equality=False)\n\n @pytest.mark.skipif(K.backend() != 'tensorflow', reason='Not supported.')\n def test_batch_dot_shape(self):\n x_batch = K.ones(shape=(32, 20))\n y_batch = K.ones(shape=(32, 20))\n xy_batch_dot = K.batch_dot(x_batch, y_batch, axes=1)\n assert_allclose(K.eval(xy_batch_dot), np.ones((32, 1)) * 20, atol=1e-05)\n xy_batch_dot = K.batch_dot(x_batch, y_batch, axes=0)\n assert_allclose(K.eval(xy_batch_dot), np.ones((20, 1)) * 32, atol=1e-05)\n # making sure swapping axes when ndim == 2 works\n x_batch = K.ones(shape=(32, 20))\n y_batch = K.ones(shape=(20, 32))\n xy_batch_dot = K.batch_dot(x_batch, y_batch, axes=(0, 1))\n assert_allclose(K.eval(xy_batch_dot), np.ones((20, 1)) * 32, atol=1e-05)\n xy_batch_dot = K.batch_dot(x_batch, y_batch, axes=(1, 0))\n assert_allclose(K.eval(xy_batch_dot), np.ones((32, 1)) * 20, atol=1e-05)\n\n def test_shape_operations(self):\n check_two_tensor_operation('concatenate', (4, 3), (4, 2), BACKENDS,\n axis=-1, concat_args=True)\n\n check_single_tensor_operation('reshape', (4, 2), BACKENDS, shape=(8, 1))\n check_single_tensor_operation('permute_dimensions', (4, 2, 3), BACKENDS,\n pattern=(2, 0, 1))\n check_single_tensor_operation('repeat', (4, 1), BACKENDS, n=3)\n check_single_tensor_operation('flatten', (4, 1), BACKENDS)\n check_single_tensor_operation('batch_flatten', (20, 2, 5), BACKENDS,\n cntk_dynamicity=True)\n check_single_tensor_operation('expand_dims', (4, 3), BACKENDS, axis=-1)\n check_single_tensor_operation('expand_dims', (4, 3, 2), BACKENDS, axis=1)\n check_single_tensor_operation('squeeze', (4, 3, 1), BACKENDS, axis=2)\n check_single_tensor_operation('squeeze', (4, 1, 1), BACKENDS, axis=1)\n check_composed_tensor_operations('reshape', {'shape': (4, 3, 1, 1)},\n 'squeeze', {'axis': 2},\n (4, 3, 1, 1), BACKENDS)\n\n def test_none_shape_operations(self):\n # Test shape inference when input\n # shape has `None` entries\n if K.backend() == 'theano':\n x = KTH.placeholder((3, None, 4))\n\n y = KTH.batch_flatten(x)\n if hasattr(y, '_keras_shape'):\n assert y._keras_shape == (3, None)\n\n y = KTH.flatten(x)\n if hasattr(y, '_keras_shape'):\n assert y._keras_shape == (None, )\n\n def test_repeat_elements(self):\n reps = 3\n for ndims in [1, 2, 3]:\n shape = np.arange(2, 2 + ndims)\n arr = np.arange(np.prod(shape)).reshape(shape)\n\n for rep_axis in range(ndims):\n np_rep = np.repeat(arr, reps, axis=rep_axis)\n check_single_tensor_operation('repeat_elements', arr, BACKENDS,\n rep=reps, axis=rep_axis,\n assert_value_with_ref=np_rep)\n\n if K.backend() != 'cntk':\n shape = 
list(shape)\n shape[rep_axis] = None\n x = K.placeholder(shape=shape)\n y = K.repeat_elements(x, reps, axis=rep_axis)\n assert y._keras_shape == tuple(shape)\n assert y._keras_shape == K.int_shape(y)\n\n def test_tile(self):\n shape = (3, 4)\n arr = np.arange(np.prod(shape)).reshape(shape)\n check_single_tensor_operation('tile', arr, BACKENDS, n=[2, 1])\n check_single_tensor_operation('tile', (2, 5), BACKENDS, n=[5, 2])\n\n # test theano shape inference when\n # input shape has None entries\n if K.backend() == 'theano':\n x = K.placeholder(shape=(None, 4))\n n = 2\n y = K.tile(x, n)\n assert y._keras_shape == (None, 8)\n n = (4, 3)\n y = K.tile(x, n)\n assert y._keras_shape == (None, 12)\n\n def test_gather(self):\n shape = (10, 2, 3)\n ref = np.arange(np.prod(shape)).reshape(shape)\n inds = [1, 3, 7, 9]\n z_list = [k.eval(k.gather(k.variable(ref), k.variable(inds, dtype='int32')))\n for k in BACKENDS]\n\n assert_list_pairwise(z_list)\n assert_list_keras_shape(z_list)\n\n # test theano shape inference when\n # input shape has None entries\n if K.backend() == 'theano':\n x = K.placeholder(shape=(None, 3, 4))\n indices = K.placeholder(shape=(5, 6), dtype='int32')\n y = K.gather(x, indices)\n assert y._keras_shape == (5, 6, 3, 4)\n\n def test_value_manipulation(self):\n val = np.random.random((4, 2))\n for function_name in ['get_value', 'count_params',\n 'int_shape', 'get_variable_shape']:\n v_list = [getattr(k, function_name)(k.variable(val))\n for k in BACKENDS]\n\n if function_name == 'get_value':\n assert_list_pairwise(v_list)\n else:\n assert_list_pairwise(v_list, shape=False, allclose=False, itself=True)\n\n # print_tensor\n check_single_tensor_operation('print_tensor', (), BACKENDS)\n check_single_tensor_operation('print_tensor', (2,), BACKENDS)\n check_single_tensor_operation('print_tensor', (4, 3), BACKENDS)\n check_single_tensor_operation('print_tensor', (1, 2, 3), BACKENDS)\n\n def test_elementwise_operations(self):\n check_single_tensor_operation('max', (4, 2), BACKENDS)\n check_single_tensor_operation('max', (4, 2), BACKENDS, axis=1, keepdims=True)\n\n check_single_tensor_operation('min', (4, 2), BACKENDS)\n check_single_tensor_operation('min', (4, 2), BACKENDS, axis=1, keepdims=True)\n check_single_tensor_operation('min', (4, 2, 3), BACKENDS, axis=[1, -1])\n\n check_single_tensor_operation('mean', (4, 2), BACKENDS)\n check_single_tensor_operation('mean', (4, 2), BACKENDS, axis=1, keepdims=True)\n check_single_tensor_operation('mean', (4, 2, 3), BACKENDS, axis=-1, keepdims=True)\n check_single_tensor_operation('mean', (4, 2, 3), BACKENDS, axis=[1, -1])\n\n check_single_tensor_operation('std', (4, 2), BACKENDS)\n check_single_tensor_operation('std', (4, 2), BACKENDS, axis=1, keepdims=True)\n check_single_tensor_operation('std', (4, 2, 3), BACKENDS, axis=[1, -1])\n\n check_single_tensor_operation('prod', (4, 2), BACKENDS)\n check_single_tensor_operation('prod', (4, 2), BACKENDS, axis=1, keepdims=True)\n check_single_tensor_operation('prod', (4, 2, 3), BACKENDS, axis=[1, -1])\n\n # cntk does not support cumsum and cumprod yet\n check_single_tensor_operation('cumsum', (4, 2), [KTF, KTH])\n check_single_tensor_operation('cumsum', (4, 2), [KTF, KTH], axis=1)\n\n check_single_tensor_operation('cumprod', (4, 2), [KTF, KTH])\n check_single_tensor_operation('cumprod', (4, 2), [KTF, KTH], axis=1)\n\n check_single_tensor_operation('any', (4, 2), BACKENDS)\n check_single_tensor_operation('any', (4, 2), BACKENDS, axis=1, keepdims=True)\n\n check_single_tensor_operation('all', (4, 2), 
BACKENDS)\n check_single_tensor_operation('all', (4, 2), BACKENDS, axis=1, keepdims=True)\n\n check_single_tensor_operation('argmax', (4, 2), BACKENDS)\n check_single_tensor_operation('argmax', (4, 2), BACKENDS, axis=1)\n\n check_single_tensor_operation('argmin', (4, 2), BACKENDS)\n check_single_tensor_operation('argmin', (4, 2), BACKENDS, axis=1)\n\n check_single_tensor_operation('square', (4, 2), BACKENDS)\n check_single_tensor_operation('abs', (4, 2), BACKENDS)\n check_single_tensor_operation('sqrt', (4, 2), BACKENDS)\n check_single_tensor_operation('exp', (4, 2), BACKENDS)\n # cntk return -85.1 for zero or negative number, not nan, so can't compare with other backend.\n check_single_tensor_operation('log', (4, 2), [KTH, KTF])\n check_single_tensor_operation('round', (4, 2), BACKENDS)\n check_single_tensor_operation('sign', (4, 2), BACKENDS)\n check_single_tensor_operation('pow', (4, 2), BACKENDS, a=3)\n check_single_tensor_operation('clip', (4, 2), BACKENDS, min_value=0.4,\n max_value=0.6)\n\n # two-tensor ops\n check_two_tensor_operation('equal', (4, 2), (4, 2), BACKENDS)\n check_two_tensor_operation('not_equal', (4, 2), (4, 2), BACKENDS)\n check_two_tensor_operation('greater', (4, 2), (4, 2), BACKENDS)\n check_two_tensor_operation('greater_equal', (4, 2), (4, 2), BACKENDS)\n check_two_tensor_operation('less', (4, 2), (4, 2), BACKENDS)\n check_two_tensor_operation('less_equal', (4, 2), (4, 2), BACKENDS)\n check_two_tensor_operation('maximum', (4, 2), (4, 2), BACKENDS)\n check_two_tensor_operation('minimum', (4, 2), (4, 2), BACKENDS)\n\n # cntk doesn't support gradient in this way\n def test_gradient(self):\n val = np.random.random((4, 2))\n x_list = [k.variable(val) for k in [KTH, KTF]]\n z_list = []\n zero_list = []\n for x, k in zip(x_list, [KTH, KTF]):\n exp = x * k.exp(x)\n loss = k.sum(exp)\n zero_loss = k.stop_gradient(loss)\n grad = k.gradients(loss, [exp])\n zero_grad = k.gradients(loss + zero_loss, [exp])\n z_list.append(k.eval(grad[0]))\n zero_list.append(k.eval(zero_grad[0]))\n\n assert_list_pairwise(z_list)\n assert_list_pairwise(zero_list)\n for i in range(len(z_list)):\n assert_allclose(zero_list[i], z_list[i], atol=1e-05)\n\n def test_stop_gradient(self):\n # This test checks the consistency of the stop_gradient backend API.\n # It doesn't check the functionality (which is checked at the\n # test_gradient test).\n val = np.random.random((4, 2))\n for k in BACKENDS:\n a = k.variable(val)\n b = k.square(a)\n c, d = k.stop_gradient([a, b])\n e = k.stop_gradient(b)\n\n # cntk currently not support function in this way, so can't test as this\n def test_function(self):\n test_backend = [KTH, KTF]\n val = np.random.random((4, 2))\n input_val = np.random.random((4, 2))\n\n f_list = []\n x_list = []\n for k in test_backend:\n x = k.variable(val)\n x_list.append(x)\n y = k.placeholder(ndim=2)\n exp = k.square(x) + y\n update = x * 2\n f = k.function([y], [exp], updates=[(x, update)])\n f_list.append(f)\n\n function_outputs_list = [f([input_val])[0] for f in f_list]\n assert_list_pairwise(function_outputs_list)\n\n new_val_list = [k.get_value(x) for x, k in zip(x_list, test_backend)]\n assert_list_pairwise(new_val_list)\n\n def test_function_tf_fetches(self):\n # Additional operations can be passed to tf.Session().run() via its\n # `fetches` arguments. In contrast to `updates` argument of\n # KTF.function() these do not have control dependency on `outputs`, so\n # they can run in parallel. 
Also they should not contribute to output of\n # KTF.function().\n\n x = KTF.variable(0.)\n y = KTF.variable(0.)\n x_placeholder = KTF.placeholder(shape=())\n y_placeholder = KTF.placeholder(shape=())\n\n f = KTF.function(inputs=[x_placeholder, y_placeholder],\n outputs=[x_placeholder + y_placeholder],\n updates=[(x, x_placeholder + 1.)],\n fetches=[KTF.update(y, 5.)])\n output = f([10., 20.])\n assert output == [30.]\n assert KTF.get_session().run(fetches=[x, y]) == [11., 5.]\n\n def test_function_tf_feed_dict(self):\n # Additional substitutions can be passed to `tf.Session().run()` via its\n # `feed_dict` arguments. Note that the feed_dict is passed once in the\n # constructor but we can modify the values in the dictionary. Through\n # this feed_dict we can provide additional substitutions besides Keras\n # inputs.\n\n x = KTF.variable(0.)\n y = KTF.variable(0.)\n x_placeholder = KTF.placeholder(shape=())\n y_placeholder = KTF.placeholder(shape=())\n\n feed_dict = {y_placeholder: 3.}\n\n f = KTF.function(inputs=[x_placeholder],\n outputs=[x_placeholder + 1.],\n updates=[(x, x_placeholder + 10.)],\n feed_dict=feed_dict,\n fetches=[KTF.update(y, y_placeholder * 10.)])\n output = f([10.])\n assert output == [11.]\n assert KTF.get_session().run(fetches=[x, y]) == [20., 30.]\n\n # updated value in feed_dict will be modified within the K.function()\n feed_dict[y_placeholder] = 4.\n output = f([20.])\n assert output == [21.]\n assert KTF.get_session().run(fetches=[x, y]) == [30., 40.]\n\n def test_rnn(self):\n # implement a simple RNN\n num_samples = 4\n input_dim = 5\n output_dim = 3\n timesteps = 6\n\n _, x = parse_shape_or_val((num_samples, timesteps, input_dim))\n _, h0 = parse_shape_or_val((num_samples, output_dim))\n _, wi = parse_shape_or_val((input_dim, output_dim))\n _, wh = parse_shape_or_val((output_dim, output_dim))\n mask = np.random.randint(2, size=(num_samples, timesteps))\n\n x_k = K.variable(x)\n h0_k = [K.variable(h0)]\n wi_k = K.variable(wi)\n wh_k = K.variable(wh)\n mask_k = K.variable(mask)\n\n def rnn_fn(x_k, h_k):\n assert len(h_k) == 1\n y_k = K.dot(x_k, wi_k) + K.dot(h_k[0], wh_k)\n return y_k, [y_k]\n\n # test default setup\n last_output_list = []\n outputs_list = []\n state_list = []\n\n kwargs_list = [\n {'go_backwards': False, 'mask': None},\n {'go_backwards': False, 'mask': None, 'unroll': True, 'input_length': timesteps},\n {'go_backwards': True, 'mask': None},\n {'go_backwards': True, 'mask': None, 'unroll': True, 'input_length': timesteps},\n {'go_backwards': False, 'mask': mask_k},\n {'go_backwards': False, 'mask': mask_k, 'unroll': True, 'input_length': timesteps},\n ]\n\n for (i, kwargs) in enumerate(kwargs_list):\n last_y1, y1, h1 = ref_rnn(x, [wi, wh, None], h0, **kwargs)\n last_y2, y2, h2 = K.rnn(rnn_fn, x_k, h0_k, **kwargs)\n\n assert len(h2) == 1\n last_y2 = K.eval(last_y2)\n y2 = K.eval(y2)\n h1 = h1[:, -1]\n h2 = K.eval(h2[0])\n\n if kwargs['mask'] is not None:\n last_y1 = last_y1 * np.expand_dims(mask[:, -1], -1)\n last_y2 = last_y2 * np.expand_dims(mask[:, -1], -1)\n y1 = y1 * np.expand_dims(mask, -1)\n y2 = y2 * np.expand_dims(mask, -1)\n h1 = h1 * np.expand_dims(mask[:, -1], -1)\n h2 = h2 * np.expand_dims(mask[:, -1], -1)\n\n last_output_list.append(last_y2)\n outputs_list.append(y2)\n state_list.append(h2)\n\n if i % 2 == 0:\n assert_allclose(last_y1, last_y2, atol=1e-05)\n assert_allclose(y1, y2, atol=1e-05)\n assert_allclose(h1, h2, atol=1e-05)\n else:\n assert_allclose(last_output_list[i - 1], last_output_list[i], atol=1e-05)\n 
assert_allclose(outputs_list[i - 1], outputs_list[i], atol=1e-05)\n assert_allclose(state_list[i - 1], state_list[i], atol=1e-05)\n\n def test_rnn_no_states(self):\n # implement a simple RNN without states\n input_dim = 8\n output_dim = 4\n timesteps = 5\n\n _, x = parse_shape_or_val((32, timesteps, input_dim))\n _, wi = parse_shape_or_val((input_dim, output_dim))\n\n x_k = K.variable(x)\n wi_k = K.variable(wi)\n\n def rnn_fn(x_k, h_k):\n assert len(h_k) == 0\n y_k = K.dot(x_k, wi_k)\n return y_k, []\n\n last_y1, y1, h1 = ref_rnn(x, [wi, None, None], None,\n go_backwards=False, mask=None)\n last_y2, y2, h2 = K.rnn(rnn_fn, x_k, [],\n go_backwards=False, mask=None)\n\n assert len(h2) == 0\n last_y2 = K.eval(last_y2)\n y2 = K.eval(y2)\n\n assert_allclose(last_y1, last_y2, atol=1e-05)\n assert_allclose(y1, y2, atol=1e-05)\n\n def legacy_test_rnn(self):\n # implement a simple RNN\n num_samples = 4\n input_dim = 5\n output_dim = 3\n timesteps = 6\n\n input_val = np.random.random((num_samples, timesteps, input_dim)).astype(np.float32)\n init_state_val = np.random.random((num_samples, output_dim)).astype(np.float32)\n W_i_val = np.random.random((input_dim, output_dim)).astype(np.float32)\n W_o_val = np.random.random((output_dim, output_dim)).astype(np.float32)\n np_mask = np.random.randint(2, size=(num_samples, timesteps))\n\n def rnn_step_fn(k):\n W_i = k.variable(W_i_val)\n W_o = k.variable(W_o_val)\n\n def step_function(x, states):\n assert len(states) == 1\n prev_output = states[0]\n output = k.dot(x, W_i) + k.dot(prev_output, W_o)\n return output, [output]\n\n return step_function\n\n # test default setup\n last_output_list = [[], [], [], [], [], []]\n outputs_list = [[], [], [], [], [], []]\n state_list = [[], [], [], [], [], []]\n\n for k in BACKENDS:\n rnn_fn = rnn_step_fn(k)\n inputs = k.variable(input_val)\n initial_states = [k.variable(init_state_val)]\n mask = k.variable(np_mask)\n\n kwargs_list = [\n {'go_backwards': False, 'mask': None},\n {'go_backwards': False, 'mask': None, 'unroll': True, 'input_length': timesteps},\n {'go_backwards': True, 'mask': None},\n {'go_backwards': True, 'mask': None, 'unroll': True, 'input_length': timesteps},\n {'go_backwards': False, 'mask': mask},\n {'go_backwards': False, 'mask': mask, 'unroll': True, 'input_length': timesteps},\n ]\n\n for (i, kwargs) in enumerate(kwargs_list):\n last_output, outputs, new_states = k.rnn(rnn_fn, inputs,\n initial_states,\n **kwargs)\n\n last_output_list[i].append(k.eval(last_output))\n outputs_list[i].append(k.eval(outputs))\n assert len(new_states) == 1\n state_list[i].append(k.eval(new_states[0]))\n\n assert_list_pairwise(last_output_list[0], shape=False, atol=1e-04)\n assert_list_pairwise(outputs_list[0], shape=False, atol=1e-04)\n assert_list_pairwise(state_list[0], shape=False, atol=1e-04)\n assert_list_pairwise(last_output_list[2], shape=False, atol=1e-04)\n assert_list_pairwise(outputs_list[2], shape=False, atol=1e-04)\n assert_list_pairwise(state_list[2], shape=False, atol=1e-04)\n\n for l, u_l in zip(last_output_list[0], last_output_list[1]):\n assert_allclose(l, u_l, atol=1e-04)\n\n for o, u_o in zip(outputs_list[0], outputs_list[1]):\n assert_allclose(o, u_o, atol=1e-04)\n\n for s, u_s in zip(state_list[0], state_list[1]):\n assert_allclose(s, u_s, atol=1e-04)\n\n for b_l, b_u_l in zip(last_output_list[2], last_output_list[3]):\n assert_allclose(b_l, b_u_l, atol=1e-04)\n\n for b_o, b_u_o in zip(outputs_list[2], outputs_list[3]):\n assert_allclose(b_o, b_u_o, atol=1e-04)\n\n for b_s, b_u_s in 
zip(state_list[2], state_list[3]):\n assert_allclose(b_s, b_u_s, atol=1e-04)\n\n for m_l, u_m_l, k in zip(last_output_list[4], last_output_list[5], BACKENDS):\n if k == KTF:\n m_l = m_l * np.expand_dims(np_mask[:, -1], -1)\n u_m_l = u_m_l * np.expand_dims(np_mask[:, -1], -1)\n assert_allclose(m_l, u_m_l, atol=1e-04)\n\n for m_o, u_m_o, k in zip(outputs_list[4], outputs_list[5], BACKENDS):\n if k == KTF:\n m_o = m_o * np.expand_dims(np_mask, -1)\n u_m_o = u_m_o * np.expand_dims(np_mask, -1)\n assert_allclose(m_o, u_m_o, atol=1e-04)\n\n for m_s, u_m_s, k in zip(state_list[4], state_list[5], BACKENDS):\n assert_allclose(m_s, u_m_s, atol=1e-04)\n\n def legacy_test_rnn_no_states(self):\n # implement a simple RNN without states\n input_dim = 8\n output_dim = 4\n timesteps = 5\n\n input_val = np.random.random((32, timesteps, input_dim))\n W_i_val = np.random.random((input_dim, output_dim))\n\n def rnn_step_fn(k):\n W_i = k.variable(W_i_val)\n\n def step_function(x, states):\n assert len(states) == 0\n output = k.dot(x, W_i)\n return output, []\n\n return step_function\n\n # test default setup\n last_output_list = []\n outputs_list = []\n\n for k in BACKENDS:\n rnn_fn = rnn_step_fn(k)\n inputs = k.variable(input_val)\n initial_states = []\n last_output, outputs, new_states = k.rnn(rnn_fn, inputs,\n initial_states,\n go_backwards=False,\n mask=None)\n last_output_list.append(k.eval(last_output))\n outputs_list.append(k.eval(outputs))\n assert len(new_states) == 0\n\n assert_list_pairwise(last_output_list, shape=False)\n assert_list_pairwise(outputs_list, shape=False)\n\n @pytest.mark.parametrize('x_np,axis,keepdims', [\n (np.array([1.1, 0.8, 0.9]), 0, False),\n (np.array([[1.1, 0.8, 0.9]]), 0, False),\n (np.array([[1.1, 0.8, 0.9]]), 1, False),\n (np.array([[1.1, 0.8, 0.9]]), -1, False),\n (np.array([[1.1, 0.8, 0.9]]), 1, True),\n (np.array([[1.1], [1.2]]), 0, False),\n (np.array([[1.1], [1.2]]), 1, False),\n (np.array([[1.1], [1.2]]), -1, False),\n (np.array([[1.1], [1.2]]), -1, True),\n (np.array([[1.1, 1.2, 1.3], [0.9, 0.7, 1.4]]), None, False),\n (np.array([[1.1, 1.2, 1.3], [0.9, 0.7, 1.4]]), 0, False),\n (np.array([[1.1, 1.2, 1.3], [0.9, 0.7, 1.4]]), 1, False),\n (np.array([[1.1, 1.2, 1.3], [0.9, 0.7, 1.4]]), -1, False),\n ])\n def test_logsumexp(self, x_np, axis, keepdims):\n '''\n Check if K.logsumexp works properly for values close to one.\n '''\n for k in BACKENDS:\n x = k.variable(x_np)\n assert_allclose(k.eval(k.logsumexp(x, axis=axis, keepdims=keepdims)),\n np.log(np.sum(np.exp(x_np), axis=axis, keepdims=keepdims)),\n rtol=1e-5)\n\n def test_logsumexp_optim(self):\n '''\n Check if optimization works.\n '''\n for k in [KTF]:\n x_np = np.array([1e+4, 1e-4])\n assert_allclose(k.eval(k.logsumexp(k.variable(x_np), axis=0)),\n 1e4,\n rtol=1e-5)\n\n def test_switch(self):\n # scalar\n val = np.random.random()\n z_list = []\n for k in BACKENDS:\n x = k.variable(val)\n x = k.switch(k.greater_equal(x, 0.5), x * 0.1, x * 0.2)\n z_list.append(k.eval(x))\n assert_list_pairwise(z_list)\n # non scalar\n shapes = []\n shapes.append([(4, 3, 2), (4, 3, 2), (4, 3, 2)])\n shapes.append([(4, 3,), (4, 3, 2), (4, 3, 2)])\n shapes.append([(4,), (4, 3, 2), (4, 3, 2)])\n for s in shapes:\n z_list = []\n arrays = list(map(np.random.random, s))\n for k in BACKENDS:\n x, then_expr, else_expr = map(k.variable, arrays)\n cond = k.greater_equal(x, 0.5)\n z_list.append(k.eval(k.switch(cond, then_expr, else_expr)))\n assert_list_pairwise(z_list)\n\n def test_dropout(self):\n val = np.random.random((100, 100))\n z_list = 
[k.eval(k.dropout(k.variable(val), level=0.2))\n for k in BACKENDS]\n assert_list_pairwise(z_list, allclose=False)\n # dropout patterns are different, only check mean\n for i in range(len(z_list) - 1):\n assert np.abs(z_list[i].mean() - z_list[i + 1].mean()) < 0.05\n\n z_list = [k.eval(k.dropout(k.variable(val), level=0.2,\n noise_shape=list(val.shape)))\n for k in BACKENDS]\n assert_list_pairwise(z_list, allclose=False)\n # dropout patterns are different, only check mean\n for i in range(len(z_list) - 1):\n assert np.abs(z_list[i].mean() - z_list[i + 1].mean()) < 0.05\n\n # Test invalid use cases\n for k in BACKENDS:\n with pytest.raises(ValueError):\n z = k.dropout(k.variable(val), level=-0.5)\n\n def test_nn_operations(self):\n check_single_tensor_operation('relu', (4, 2), BACKENDS, alpha=0.1, max_value=0.5)\n check_single_tensor_operation('softplus', (4, 10), BACKENDS)\n check_single_tensor_operation('elu', (4, 10), BACKENDS, alpha=0.5)\n\n check_single_tensor_operation('sigmoid', (4, 2), BACKENDS)\n check_single_tensor_operation('hard_sigmoid', (4, 2), BACKENDS)\n check_single_tensor_operation('tanh', (4, 2), BACKENDS)\n\n check_single_tensor_operation('softmax', (4, 10), BACKENDS)\n check_single_tensor_operation('softmax', (4, 5, 3, 10), BACKENDS, axis=2)\n\n check_two_tensor_operation('binary_crossentropy', (4, 2), (4, 2), BACKENDS, from_logits=True)\n # cross_entropy call require the label is a valid probability distribution,\n # otherwise it is garbage in garbage out...\n # due to the algo difference, we can't guarantee CNTK has the same result on the garbage input.\n # so create a separate test case for valid label input\n check_two_tensor_operation('categorical_crossentropy', (4, 2), (4, 2), [KTH, KTF], from_logits=True)\n xval = np.asarray([[0.26157712, 0.0432167], [-0.43380741, 0.30559841],\n [0.20225059, -0.38956559], [-0.13805378, 0.08506755]], dtype=np.float32)\n yval = np.asarray([[0.46221867, 0.53778133], [0.51228984, 0.48771016],\n [0.64916514, 0.35083486], [0.47028078, 0.52971922]], dtype=np.float32)\n check_two_tensor_operation('categorical_crossentropy', yval, xval,\n BACKENDS, cntk_two_dynamicity=True, from_logits=True)\n check_two_tensor_operation('binary_crossentropy', (4, 2), (4, 2), BACKENDS, from_logits=False)\n check_two_tensor_operation('categorical_crossentropy', (4, 2), (4, 2), BACKENDS, from_logits=False)\n\n check_single_tensor_operation('l2_normalize', (4, 3), BACKENDS, axis=-1)\n check_single_tensor_operation('l2_normalize', (4, 3), BACKENDS, axis=1)\n\n def test_in_top_k(self):\n batch_size = 20\n num_classes = 10\n\n # Random prediction test case\n predictions = np.random.random((batch_size, num_classes)).astype('float32')\n targets = np.random.randint(num_classes, size=batch_size, dtype='int32')\n\n # (k == 0 or k > num_classes) does not raise an error but just return an unmeaningful tensor.\n for k in range(num_classes + 1):\n z_list = [b.eval(b.in_top_k(b.variable(predictions, dtype='float32'),\n b.variable(targets, dtype='int32'), k))\n for b in [KTH, KTF]]\n assert_list_pairwise(z_list)\n\n # Identical prediction test case:\n # randomly set half of the predictions to an identical value\n num_identical = num_classes // 2\n for i in range(batch_size):\n idx_identical = np.random.choice(num_classes, size=num_identical, replace=False)\n predictions[i, idx_identical] = predictions[i, 0]\n targets = np.zeros(batch_size, dtype='int32')\n\n for k in range(1, num_classes + 1):\n z_list = [b.eval(b.in_top_k(b.variable(predictions, dtype='float32'),\n 
b.variable(targets, dtype='int32'), k))\n for b in [KTH, KTF]]\n assert_list_pairwise(z_list)\n\n @pytest.mark.parametrize('op,input_shape,kernel_shape,padding,data_format', [\n ('conv1d', (2, 8, 2), (3, 2, 3), 'same', 'channels_last'),\n ('conv1d', (1, 8, 2), (3, 2, 3), 'valid', 'channels_last'),\n ('conv2d', (2, 3, 4, 5), (3, 3, 3, 2), 'same', 'channels_first'),\n ('conv2d', (2, 3, 5, 6), (4, 3, 3, 4), 'valid', 'channels_first'),\n ('conv2d', (1, 6, 5, 3), (3, 4, 3, 2), 'valid', 'channels_last'),\n ('conv2d', (1, 7, 6, 3), (3, 3, 3, 4), 'same', 'channels_last'),\n ('conv3d', (2, 3, 4, 5, 4), (3, 3, 3, 3, 4), 'same', 'channels_first'),\n ('conv3d', (2, 3, 5, 4, 6), (3, 2, 4, 3, 4), 'valid', 'channels_first'),\n ('conv3d', (1, 2, 2, 2, 1), (2, 2, 2, 1, 1), 'valid', 'channels_last'),\n ('conv3d', (1, 3, 5, 4, 2), (3, 3, 3, 2, 3), 'same', 'channels_last'),\n ])\n def test_conv(self, op, input_shape, kernel_shape, padding, data_format):\n k = K.backend()\n _, x = parse_shape_or_val(input_shape)\n _, w = parse_shape_or_val(kernel_shape)\n y1 = ref_conv(x, w, padding, data_format)\n y2 = check_two_tensor_operation(\n op, x, w, [KTH if k == 'theano' else KC if k == 'cntk' else KTF],\n padding=padding, data_format=data_format,\n cntk_dynamicity=True, return_results=True)\n assert_allclose(y1, y2, atol=1e-05)\n\n @pytest.mark.parametrize('op,input_shape,kernel_shape,padding,data_format', [\n ('depthwise_conv2d', (2, 3, 4, 5), (3, 3, 3, 2), 'same', 'channels_first'),\n ('depthwise_conv2d', (2, 3, 5, 6), (4, 3, 3, 4), 'valid', 'channels_first'),\n ('depthwise_conv2d', (1, 6, 5, 3), (3, 4, 3, 2), 'valid', 'channels_last'),\n ('depthwise_conv2d', (1, 7, 6, 3), (3, 3, 3, 4), 'same', 'channels_last'),\n ])\n def test_depthwise_conv(self, op, input_shape, kernel_shape, padding, data_format):\n k = K.backend()\n _, x = parse_shape_or_val(input_shape)\n _, w = parse_shape_or_val(kernel_shape)\n y1 = ref_depthwise_conv(x, w, padding, data_format)\n y2 = check_two_tensor_operation(\n op, x, w, [KTH if k == 'theano' else KC if k == 'cntk' else KTF],\n padding=padding, data_format=data_format,\n cntk_dynamicity=True, return_results=True)\n assert_allclose(y1, y2, atol=1e-05)\n\n @pytest.mark.parametrize('op,input_shape,pool_size,strides,padding,data_format,pool_mode', [\n ('pool2d', (2, 3, 7, 7), (3, 3), (1, 1), 'same', 'channels_first', 'avg'),\n ('pool2d', (3, 3, 8, 5), (2, 3), (1, 1), 'valid', 'channels_first', 'max'),\n ('pool2d', (2, 9, 5, 3), (3, 2), (1, 1), 'valid', 'channels_last', 'avg'),\n ('pool2d', (3, 6, 7, 3), (3, 3), (1, 1), 'same', 'channels_last', 'max'),\n ('pool3d', (2, 3, 7, 7, 7), (3, 3, 3), (1, 1, 1), 'same', 'channels_first', 'avg'),\n ('pool3d', (3, 3, 8, 5, 9), (2, 3, 2), (1, 1, 1), 'valid', 'channels_first', 'max'),\n ('pool3d', (2, 8, 9, 5, 3), (3, 2, 3), (1, 1, 1), 'valid', 'channels_last', 'avg'),\n ('pool3d', (3, 5, 6, 7, 3), (3, 3, 3), (1, 1, 1), 'same', 'channels_last', 'max'),\n ])\n def test_pool(self, op, input_shape, pool_size, strides, padding, data_format, pool_mode):\n k = K.backend()\n _, x = parse_shape_or_val(input_shape)\n y1 = ref_pool(x, pool_size, strides, padding, data_format, pool_mode)\n y2 = check_single_tensor_operation(\n op, x, [KTH if k == 'theano' else KC if k == 'cntk' else KTF],\n pool_size=pool_size, strides=strides,\n padding=padding, data_format=data_format, pool_mode=pool_mode,\n cntk_dynamicity=True, return_results=True)\n assert_allclose(y1, y2, atol=1e-05)\n\n def legacy_test_conv1d(self):\n # channels_last input shape: (n, length, input_depth)\n 
input_shape = (4, 8, 2)\n kernel_shape = (3, 2, 3)\n for strides in [1, 2]:\n check_two_tensor_operation('conv1d', input_shape, kernel_shape,\n BACKENDS, cntk_dynamicity=True,\n strides=strides,\n data_format='channels_last')\n\n def legacy_test_conv2d(self):\n # TF kernel shape: (rows, cols, input_depth, depth)\n # channels_first input shape: (n, input_depth, rows, cols)\n for (input_shape, kernel_shape, data_format) in [\n ((2, 3, 4, 5), (2, 2, 3, 4), 'channels_first'),\n ((2, 3, 5, 6), (4, 3, 3, 4), 'channels_first'),\n ((1, 6, 5, 3), (3, 3, 3, 2), 'channels_last')]:\n check_two_tensor_operation('conv2d', input_shape, kernel_shape,\n BACKENDS, cntk_dynamicity=True,\n data_format=data_format)\n\n def legacy_test_depthwise_conv_2d(self):\n # TF kernel shape: (rows, cols, input_depth, depth_multiplier)\n # channels_first input shape: (n, input_depth, rows, cols)\n for (input_shape, kernel_shape, data_format) in [\n ((2, 3, 4, 5), (2, 2, 3, 4), 'channels_first'),\n ((2, 3, 5, 6), (4, 3, 3, 4), 'channels_first'),\n ((1, 6, 5, 3), (3, 3, 3, 2), 'channels_last')]:\n check_two_tensor_operation('depthwise_conv2d',\n input_shape, kernel_shape,\n BACKENDS, cntk_dynamicity=True,\n data_format=data_format)\n\n def legacy_test_conv3d(self):\n # TH input shape: (samples, input_depth, conv_dim1, conv_dim2, conv_dim3)\n # TF input shape: (samples, conv_dim1, conv_dim2, conv_dim3, input_depth)\n # TH kernel shape: (depth, input_depth, x, y, z)\n # TF kernel shape: (x, y, z, input_depth, depth)\n for (input_shape, kernel_shape, data_format) in [\n ((2, 3, 4, 5, 4), (2, 2, 2, 3, 4), 'channels_first'),\n ((2, 3, 5, 4, 6), (3, 2, 4, 3, 4), 'channels_first'),\n ((1, 2, 2, 2, 1), (2, 2, 2, 1, 1), 'channels_last')]:\n check_two_tensor_operation('conv3d', input_shape, kernel_shape,\n BACKENDS, cntk_dynamicity=True,\n data_format=data_format)\n\n @pytest.mark.skipif(K.backend() == 'theano', reason='Not supported.')\n @pytest.mark.parametrize('op,input_shape,kernel_shape,depth_multiplier,padding,data_format', [\n ('separable_conv2d', (2, 3, 4, 5), (3, 3), 1, 'same', 'channels_first'),\n ('separable_conv2d', (2, 3, 5, 6), (4, 3), 2, 'valid', 'channels_first'),\n ('separable_conv2d', (1, 6, 5, 3), (3, 4), 1, 'valid', 'channels_last'),\n ('separable_conv2d', (1, 7, 6, 3), (3, 3), 2, 'same', 'channels_last'),\n ])\n def test_separable_conv2d(self, op, input_shape, kernel_shape, depth_multiplier, padding, data_format):\n input_depth = input_shape[1] if data_format == 'channels_first' else input_shape[-1]\n _, x = parse_shape_or_val(input_shape)\n _, depthwise = parse_shape_or_val(kernel_shape + (input_depth, depth_multiplier))\n _, pointwise = parse_shape_or_val((1, 1) + (input_depth * depth_multiplier, 7))\n y1 = ref_separable_conv(x, depthwise, pointwise, padding, data_format)\n if K.backend() == 'cntk':\n y2 = cntk_func_three_tensor(\n op, input_shape,\n depthwise, pointwise,\n padding=padding, data_format=data_format)([x])[0]\n else:\n y2 = K.eval(getattr(K, op)(\n K.variable(x),\n K.variable(depthwise), K.variable(pointwise),\n padding=padding, data_format=data_format))\n assert_allclose(y1, y2, atol=1e-05)\n\n def legacy_test_pool2d(self):\n check_single_tensor_operation('pool2d', (5, 10, 12, 3),\n BACKENDS, cntk_dynamicity=True,\n pool_size=(2, 2), strides=(1, 1), padding='valid')\n\n check_single_tensor_operation('pool2d', (5, 9, 11, 3),\n BACKENDS, cntk_dynamicity=True,\n pool_size=(2, 2), strides=(1, 1), padding='valid')\n\n check_single_tensor_operation('pool2d', (5, 9, 11, 3),\n BACKENDS, 
cntk_dynamicity=True,\n pool_size=(2, 2), strides=(1, 1), pool_mode='avg')\n\n check_single_tensor_operation('pool2d', (5, 9, 11, 3),\n BACKENDS, cntk_dynamicity=True,\n pool_size=(2, 3), strides=(1, 1), padding='valid')\n\n check_single_tensor_operation('pool2d', (2, 7, 7, 5),\n BACKENDS, cntk_dynamicity=True,\n pool_size=(3, 3), strides=(1, 1),\n padding='same', pool_mode='avg')\n\n def legacy_test_pool3d(self):\n check_single_tensor_operation('pool3d', (5, 10, 12, 5, 3),\n BACKENDS, cntk_dynamicity=True,\n pool_size=(2, 2, 2), strides=(1, 1, 1), padding='valid')\n\n check_single_tensor_operation('pool3d', (5, 9, 11, 5, 3),\n BACKENDS, cntk_dynamicity=True,\n pool_size=(2, 2, 2), strides=(1, 1, 1), padding='valid')\n\n check_single_tensor_operation('pool3d', (5, 9, 11, 5, 3),\n BACKENDS, cntk_dynamicity=True,\n pool_size=(2, 2, 2), strides=(1, 1, 1), pool_mode='avg')\n\n check_single_tensor_operation('pool3d', (5, 9, 11, 5, 3),\n BACKENDS, cntk_dynamicity=True,\n pool_size=(2, 3, 2), strides=(1, 1, 1), padding='valid')\n\n check_single_tensor_operation('pool3d', (2, 6, 6, 6, 3), [KTH, KTF], pool_size=(3, 3, 3),\n strides=(1, 1, 1), padding='same', pool_mode='avg')\n\n def test_random_normal(self):\n mean = 0.\n std = 1.\n for k in BACKENDS:\n rand = k.eval(k.random_normal((300, 200), mean=mean, stddev=std, seed=1337))\n assert rand.shape == (300, 200)\n assert np.abs(np.mean(rand) - mean) < 0.015\n assert np.abs(np.std(rand) - std) < 0.015\n\n def test_random_uniform(self):\n min_val = -1.\n max_val = 1.\n for k in BACKENDS:\n rand = k.eval(k.random_uniform((200, 100), min_val, max_val))\n assert rand.shape == (200, 100)\n assert np.abs(np.mean(rand)) < 0.015\n assert np.max(rand) <= max_val\n assert np.min(rand) >= min_val\n\n def test_random_binomial(self):\n p = 0.5\n for k in BACKENDS:\n rand = k.eval(k.random_binomial((200, 100), p))\n assert rand.shape == (200, 100)\n assert np.abs(np.mean(rand) - p) < 0.015\n assert np.max(rand) == 1\n assert np.min(rand) == 0\n\n def test_conv_invalid_use(self):\n for k in BACKENDS:\n with pytest.raises(ValueError):\n k.conv1d(k.variable(np.ones((4, 8, 2))),\n k.variable(np.ones((3, 2, 3))),\n data_format='channels_middle')\n\n with pytest.raises(ValueError):\n k.conv2d(k.variable(np.ones((2, 3, 4, 5))),\n k.variable(np.ones((2, 2, 3, 4))),\n data_format='channels_middle')\n\n with pytest.raises(ValueError):\n k.conv3d(k.variable(np.ones((2, 3, 4, 5, 4))),\n k.variable(np.ones((2, 2, 2, 3, 4))),\n data_format='channels_middle')\n\n if k != KTH:\n with pytest.raises(ValueError):\n k.separable_conv2d(k.variable(np.ones((2, 3, 4, 5))),\n k.variable(np.ones((2, 2, 3, 4))),\n k.variable(np.ones((1, 1, 12, 7))),\n data_format='channels_middle')\n\n with pytest.raises(ValueError):\n k.depthwise_conv2d(k.variable(np.ones((2, 3, 4, 5))),\n k.variable(np.ones((2, 2, 3, 4))),\n data_format='channels_middle')\n\n def test_pooling_invalid_use(self):\n for (input_shape, pool_size) in zip([(5, 10, 12, 3), (5, 10, 12, 6, 3)], [(2, 2), (2, 2, 2)]):\n for k in BACKENDS:\n x = k.variable(np.random.random(input_shape))\n if len(pool_size) == 2:\n with pytest.raises(ValueError):\n k.pool2d(x, pool_size=pool_size, data_format='channels_middle')\n with pytest.raises(ValueError):\n k.pool2d(x, pool_size=pool_size, padding='twice')\n with pytest.raises(ValueError):\n k.pool2d(x, pool_size=pool_size, pool_mode='median')\n else:\n with pytest.raises(ValueError):\n k.pool3d(x, pool_size=pool_size, data_format='channels_middle')\n with pytest.raises(ValueError):\n 
k.pool3d(x, pool_size=pool_size, padding='twice')\n with pytest.raises(ValueError):\n k.pool3d(x, pool_size=pool_size, pool_mode='median')\n\n def test_resize_images(self):\n for data_format in ['channels_first', 'channels_last']:\n shape = (5, 5)\n if data_format == 'channels_first':\n x_shape = (2, 3) + shape\n elif data_format == 'channels_last':\n x_shape = (2,) + shape + (3,)\n check_single_tensor_operation('resize_images', x_shape,\n BACKENDS, cntk_dynamicity=True,\n height_factor=2,\n width_factor=2,\n data_format=data_format)\n\n # Test invalid use cases\n xval = np.random.random(x_shape)\n for k in BACKENDS:\n with pytest.raises(ValueError):\n k.resize_images(k.variable(xval), 2, 2,\n data_format='channels_middle')\n\n def test_resize_volumes(self):\n for data_format in ['channels_first', 'channels_last']:\n shape = (5, 5, 5)\n if data_format == 'channels_first':\n x_shape = (2, 3) + shape\n elif data_format == 'channels_last':\n x_shape = (2,) + shape + (3,)\n check_single_tensor_operation('resize_volumes', x_shape,\n BACKENDS, cntk_dynamicity=True,\n depth_factor=2,\n height_factor=2,\n width_factor=2,\n data_format=data_format)\n\n # Test invalid use cases\n xval = np.random.random(x_shape)\n for k in BACKENDS:\n with pytest.raises(ValueError):\n k.resize_volumes(k.variable(xval), 2, 2, 2,\n data_format='channels_middle')\n\n def test_temporal_padding(self):\n check_single_tensor_operation('temporal_padding', (4, 3, 3),\n BACKENDS)\n check_single_tensor_operation('temporal_padding', (2, 3, 4),\n BACKENDS, padding=(1, 2))\n\n def test_spatial_2d_padding(self):\n padding = ((1, 2), (2, 1))\n for data_format in ['channels_first', 'channels_last']:\n shape = (5, 5)\n if data_format == 'channels_first':\n x_shape = (1, 3) + shape\n else:\n x_shape = (1,) + shape + (3,)\n check_single_tensor_operation('spatial_2d_padding', x_shape, BACKENDS,\n padding=padding, data_format=data_format)\n\n # Test invalid use cases\n xval = np.random.random(x_shape)\n for k in BACKENDS:\n with pytest.raises(ValueError):\n k.spatial_2d_padding(k.variable(xval), padding=padding,\n data_format='channels_middle')\n\n def test_spatial_3d_padding(self):\n padding = ((1, 2), (2, 1), (1, 2))\n for data_format in ['channels_first', 'channels_last']:\n shape = (5, 5, 5)\n if data_format == 'channels_first':\n x_shape = (1, 3) + shape\n else:\n x_shape = (1,) + shape + (3,)\n check_single_tensor_operation('spatial_3d_padding', x_shape, BACKENDS,\n padding=padding, data_format=data_format)\n\n # Test invalid use cases\n xval = np.random.random(x_shape)\n for k in BACKENDS:\n with pytest.raises(ValueError):\n k.spatial_3d_padding(k.variable(xval), padding=padding,\n data_format='channels_middle')\n\n def test_bias_add(self):\n for data_format in ['channels_first', 'channels_last']:\n for shape in [(), (3,), (2, 3), (5, 3, 2)]:\n if data_format == 'channels_first':\n x_shape = (1, 4) + shape\n else:\n x_shape = (1,) + shape + (4,)\n bias_shape = (4,)\n check_two_tensor_operation('bias_add', x_shape, bias_shape,\n BACKENDS, cntk_dynamicity=True,\n data_format=data_format)\n\n if data_format == 'channels_first':\n x_shape = (20, 6, 10)\n else:\n x_shape = (20, 10, 6)\n check_two_tensor_operation('bias_add', x_shape, (10, 6),\n BACKENDS, cntk_dynamicity=True,\n data_format=data_format)\n\n # Test invalid use cases\n for k in BACKENDS:\n x = k.variable(np.random.random(x_shape))\n b = k.variable(np.random.random(bias_shape))\n with pytest.raises(ValueError):\n k.bias_add(x, b, data_format='channels_middle')\n\n def 
test_batchnorm(self):\n shape = (2, 3)\n for data_format in ['channels_first', 'channels_last']:\n if data_format == 'channels_first':\n x_shape = (1, 4) + shape\n else:\n x_shape = (1,) + shape + (4,)\n x_val = np.random.random(x_shape).astype(np.float32)\n xth = KTH.variable(x_val)\n xtf = KTF.variable(x_val)\n xc = KC.placeholder(x_shape)\n zth, _, _ = KTH.normalize_batch_in_training(xth, None, None,\n reduction_axes='per-activation')\n ztf, _, _ = KTF.normalize_batch_in_training(xtf, None, None,\n reduction_axes=[0, 1, 2, 3])\n zc, _, _ = KC.normalize_batch_in_training(xc, None, None,\n reduction_axes=[0, 1, 2, 3])\n zth = KTH.eval(zth)\n ztf = KTF.eval(ztf)\n zc = KC.function([xc], [zc])([x_val])[0]\n assert zth.shape == ztf.shape\n assert zth.shape == zc.shape\n\n # the Theano and TensorFlow CTC code use different methods to ensure\n # numerical stability. The Theano code subtracts out the max\n # before the final log, so the results are different but scale\n # identically and still train properly\n @pytest.mark.skipif(K.backend() == 'cntk', reason='Not supported.')\n def test_ctc(self):\n if K.backend() == 'theano':\n ref = [1.73308, 3.81351]\n else:\n ref = [3.34211, 5.42262]\n # simplified version of TensorFlow's test\n\n label_lens = np.expand_dims(np.asarray([5, 4]), 1)\n input_lens = np.expand_dims(np.asarray([5, 5]), 1) # number of timesteps\n\n # dimensions are batch x time x categories\n labels = np.asarray([[0, 1, 2, 1, 0], [0, 1, 1, 0, -1]])\n inputs = np.asarray(\n [[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],\n [0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],\n [0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],\n [0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],\n [0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],\n [[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],\n [0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],\n [0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],\n [0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],\n [0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]]],\n dtype=np.float32)\n\n k_labels = K.variable(labels, dtype=\"int32\")\n k_inputs = K.variable(inputs, dtype=\"float32\")\n k_input_lens = K.variable(input_lens, dtype=\"int32\")\n k_label_lens = K.variable(label_lens, dtype=\"int32\")\n res = K.eval(K.ctc_batch_cost(k_labels, k_inputs, k_input_lens, k_label_lens))\n assert_allclose(res[0, :] if K.backend() == 'theano' else res[:, 0], ref, atol=1e-05)\n\n # test when batch_size = 1, that is, one sample only\n # get only first sample from above test case\n if K.backend() == 'theano':\n ref = [1.73308]\n else:\n ref = [3.34211]\n\n input_lens = np.expand_dims(np.asarray([5]), 1)\n label_lens = np.expand_dims(np.asarray([5]), 1)\n\n labels = np.asarray([[0, 1, 2, 1, 0]])\n inputs = np.asarray(\n [[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],\n [0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],\n [0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],\n [0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],\n [0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]]],\n dtype=np.float32)\n\n k_labels = K.variable(labels, dtype=\"int32\")\n k_inputs = K.variable(inputs, dtype=\"float32\")\n k_input_lens = K.variable(input_lens, dtype=\"int32\")\n k_label_lens = K.variable(label_lens, dtype=\"int32\")\n res = 
K.eval(K.ctc_batch_cost(k_labels, k_inputs, k_input_lens, k_label_lens))\n assert_allclose(res[0, :] if K.backend() == 'theano' else res[:, 0], ref, atol=1e-05)\n\n '''only tensorflow tested, need special handle'''\n\n def test_ctc_decode_greedy(self):\n # Test adapted from tensorflow\n \"\"\"Test two batch entries - best path decoder.\"\"\"\n max_time_steps = 6\n\n seq_len_0 = 4\n input_prob_matrix_0 = np.asarray(\n [[1.0, 0.0, 0.0, 0.0], # t=0\n [0.0, 0.0, 0.4, 0.6], # t=1\n [0.0, 0.0, 0.4, 0.6], # t=2\n [0.0, 0.9, 0.1, 0.0], # t=3\n [0.0, 0.0, 0.0, 0.0], # t=4 (ignored)\n [0.0, 0.0, 0.0, 0.0]], # t=5 (ignored)\n dtype=np.float32)\n input_log_prob_matrix_0 = np.log(input_prob_matrix_0)\n\n seq_len_1 = 5\n # dimensions are time x depth\n\n input_prob_matrix_1 = np.asarray(\n [[0.1, 0.9, 0.0, 0.0], # t=0\n [0.0, 0.9, 0.1, 0.0], # t=1\n [0.0, 0.0, 0.1, 0.9], # t=2\n [0.0, 0.9, 0.1, 0.1], # t=3\n [0.9, 0.1, 0.0, 0.0], # t=4\n [0.0, 0.0, 0.0, 0.0]], # t=5 (ignored)\n dtype=np.float32)\n\n # len max_time_steps array of batch_size x depth matrices\n inputs = [np.vstack([input_prob_matrix_0[t, :],\n input_prob_matrix_1[t, :]])\n for t in range(max_time_steps)]\n\n # change tensorflow order to keras backend order\n inputs = KTF.variable(np.asarray(inputs).transpose((1, 0, 2)))\n # batch_size length vector of sequence_lengths\n input_length = KTF.variable(np.array([seq_len_0, seq_len_1], dtype=np.int32))\n\n # batch_size length vector of negative log probabilities\n log_prob_truth = np.array([\n np.sum(-np.log([1.0, 0.6, 0.6, 0.9])),\n np.sum(-np.log([0.9, 0.9, 0.9, 0.9, 0.9]))\n ], np.float32)[:, np.newaxis]\n\n # keras output, unlike tensorflow, is a dense (not sparse) tensor\n decode_truth = np.array([[0, 1, -1], [1, 1, 0]])\n\n decode_pred_tf, log_prob_pred_tf = KTF.ctc_decode(inputs,\n input_length,\n greedy=True)\n\n assert len(decode_pred_tf) == 1\n\n decode_pred = KTF.eval(decode_pred_tf[0])\n log_prob_pred = KTF.eval(log_prob_pred_tf)\n\n assert np.alltrue(decode_truth == decode_pred)\n assert np.allclose(log_prob_truth, log_prob_pred)\n\n '''tensorflow only, need special handle'''\n\n def test_ctc_decode_beam_search(self):\n \"\"\"Test one batch, two beams - hibernating beam search.\"\"\"\n\n depth = 6\n\n seq_len_0 = 5\n input_prob_matrix_0 = np.asarray(\n [[0.30999, 0.309938, 0.0679938, 0.0673362, 0.0708352, 0.173908],\n [0.215136, 0.439699, 0.0370931, 0.0393967, 0.0381581, 0.230517],\n [0.199959, 0.489485, 0.0233221, 0.0251417, 0.0233289, 0.238763],\n [0.279611, 0.452966, 0.0204795, 0.0209126, 0.0194803, 0.20655],\n [0.51286, 0.288951, 0.0243026, 0.0220788, 0.0219297, 0.129878],\n # Random entry added in at time=5\n [0.155251, 0.164444, 0.173517, 0.176138, 0.169979, 0.160671]],\n dtype=np.float32)\n\n # len max_time_steps array of batch_size x depth matrices\n inputs = ([input_prob_matrix_0[t, :][np.newaxis, :]\n for t in range(seq_len_0)] + # Pad to max_time_steps = 8\n 2 * [np.zeros((1, depth), dtype=np.float32)])\n\n inputs = KTF.variable(np.asarray(inputs).transpose((1, 0, 2)))\n\n # batch_size length vector of sequence_lengths\n input_length = KTF.variable(np.array([seq_len_0], dtype=np.int32))\n # batch_size length vector of negative log probabilities\n log_prob_truth = np.array([\n 0.584855, # output beam 0\n 0.389139 # output beam 1\n ], np.float32)[np.newaxis, :]\n\n decode_truth = [np.array([1, 0]), np.array([0, 1, 0])]\n\n beam_width = 2\n top_paths = 2\n\n decode_pred_tf, log_prob_pred_tf = KTF.ctc_decode(inputs,\n input_length,\n greedy=False,\n beam_width=beam_width,\n 
top_paths=top_paths)\n\n assert len(decode_pred_tf) == top_paths\n\n log_prob_pred = KTF.eval(log_prob_pred_tf)\n\n for i in range(top_paths):\n assert np.alltrue(decode_truth[i] == KTF.eval(decode_pred_tf[i]))\n\n assert np.allclose(log_prob_truth, log_prob_pred)\n\n def test_one_hot(self):\n input_length = 10\n num_classes = 20\n batch_size = 30\n indices = np.random.randint(0, num_classes, size=(batch_size, input_length))\n oh = np.eye(num_classes)[indices]\n for k in BACKENDS:\n koh = k.eval(k.one_hot(k.variable(indices, dtype='int32'), num_classes))\n assert np.all(koh == oh)\n\n def test_sparse_dot(self):\n x_d = np.array([0, 7, 2, 3], dtype=np.float32)\n x_r = np.array([0, 2, 2, 3], dtype=np.int64)\n x_c = np.array([4, 3, 2, 3], dtype=np.int64)\n\n x_sparse = sparse.csr_matrix((x_d, (x_r, x_c)), shape=(4, 5))\n x_dense = x_sparse.toarray()\n\n W = np.random.random((5, 4))\n # cntk not support it yet\n backends = [KTF]\n if KTH.th_sparse_module:\n # Theano has some dependency issues for sparse\n backends.append(KTH)\n\n for k in backends:\n t_W = k.variable(W)\n k_s = k.eval(k.dot(k.variable(x_sparse), t_W))\n k_d = k.eval(k.dot(k.variable(x_dense), t_W))\n\n assert k_s.shape == k_d.shape\n assert_allclose(k_s, k_d, atol=1e-05)\n\n def test_sparse_concat(self):\n x_d = np.array([0, 7, 2, 3], dtype=np.float32)\n x_r = np.array([0, 2, 2, 3], dtype=np.int64)\n x_c = np.array([4, 3, 2, 3], dtype=np.int64)\n\n x_sparse_1 = sparse.csr_matrix((x_d, (x_r, x_c)), shape=(4, 5))\n\n x_d = np.array([0, 7, 2, 3], dtype=np.float32)\n x_r = np.array([0, 2, 2, 3], dtype=np.int64)\n x_c = np.array([4, 3, 2, 3], dtype=np.int64)\n\n x_sparse_2 = sparse.csr_matrix((x_d, (x_r, x_c)), shape=(4, 5))\n\n x_dense_1 = x_sparse_1.toarray()\n x_dense_2 = x_sparse_2.toarray()\n\n # cntk not support it yet\n backends = [KTF]\n if KTH.th_sparse_module:\n # Theano has some dependency issues for sparse\n backends.append(KTH)\n\n for k in backends:\n k_s = k.concatenate([k.variable(x_sparse_1), k.variable(x_sparse_2)])\n assert k.is_sparse(k_s)\n\n k_s_d = k.eval(k_s)\n\n k_d = k.eval(k.concatenate([k.variable(x_dense_1), k.variable(x_dense_2)]))\n\n assert k_s_d.shape == k_d.shape\n assert_allclose(k_s_d, k_d, atol=1e-05)\n\n @pytest.mark.skipif(K.backend() == 'cntk', reason='Not supported.')\n def test_map(self):\n x = np.random.rand(10, 3).astype(np.float32)\n vx = K.variable(x)\n kx = K.eval(K.map_fn(K.sum, vx))\n # make sure we can also walk the indexes in tensorflow which we\n # can't without specifying dtype\n kx2 = K.eval(K.map_fn(\n lambda i: K.sum(vx[i]),\n K.arange(10),\n dtype=K.floatx()\n ))\n\n assert (10,) == kx.shape\n assert (10,) == kx2.shape\n assert_allclose(x.sum(axis=1), kx, atol=1e-05)\n assert_allclose(kx, kx2, atol=1e-05)\n\n @pytest.mark.skipif(K.backend() == 'cntk', reason='Not supported.')\n def test_foldl(self):\n x = np.random.rand(10, 3).astype(np.float32)\n kx = K.eval(K.foldl(lambda a, b: a + b, K.variable(x)))\n\n assert (3,) == kx.shape\n assert_allclose(x.sum(axis=0), kx, atol=1e-05)\n\n @pytest.mark.skipif(K.backend() == 'cntk', reason='Not supported.')\n def test_foldr(self):\n # This test aims to make sure that we walk the array from right to left\n # and checks it in the following way: multiplying left to right 1e-40\n # cannot be held into a float32 so it causes an underflow while from\n # right to left we have no such problem and the result is larger\n x = np.array([1e-20, 1e-20, 10, 10, 10], dtype=np.float32)\n vx = K.variable(x)\n p1 = K.eval(K.foldl(lambda a, b: a * b, 
vx))\n p2 = K.eval(K.foldr(lambda a, b: a * b, vx))\n\n assert p1 < p2\n assert 9e-38 < p2 <= 1e-37\n\n def test_arange(self):\n for test_value in (-20, 0, 1, 10):\n a_list = []\n dtype_list = []\n # cntk has issue with negative number\n for k in [KTH, KTF]:\n t = k.arange(test_value)\n a = k.eval(t)\n assert np.array_equal(a, np.arange(test_value))\n dtype_list.append(k.dtype(t))\n a_list.append(a)\n\n for i in range(len(a_list) - 1):\n assert np.array_equal(a_list[i], a_list[i + 1])\n\n for start, stop, step in ((0, 5, 1), (-5, 5, 2), (0, 1, 2)):\n a_list = []\n for k in [KTH, KTF]:\n a = k.eval(k.arange(start, stop, step))\n assert np.array_equal(a, np.arange(start, stop, step))\n a_list.append(a)\n for i in range(len(a_list) - 1):\n assert np.array_equal(a_list[i], a_list[i + 1])\n\n for dtype in ('int32', 'int64', 'float32', 'float64'):\n for k in [KTH, KTF]:\n t = k.arange(10, dtype=dtype)\n assert k.dtype(t) == dtype\n\n for k in [KTH, KTF]:\n start = k.constant(1, dtype='int32')\n t = k.arange(start)\n assert len(k.eval(t)) == 1\n\n start = k.constant(-1, dtype='int32')\n t = k.arange(start)\n assert len(k.eval(t)) == 0\n\n def test_in_train_phase(self):\n for training in [True, False]:\n check_two_tensor_operation('in_train_phase', (3, 3), (2, 2), [KTH, KTF],\n training=training)\n\n def test_setfloatx_incorrect_values(self):\n # Keep track of the old value\n old_floatx = floatx()\n # Try some incorrect values\n initial = floatx()\n for value in ['', 'beerfloat', 123]:\n with pytest.raises(ValueError):\n set_floatx(value)\n assert floatx() == initial\n # Restore old value\n set_floatx(old_floatx)\n\n def test_setfloatx_correct_values(self):\n # Keep track of the old value\n old_floatx = floatx()\n # Check correct values\n for value in ['float16', 'float32', 'float64']:\n set_floatx(value)\n assert floatx() == value\n # Restore old value\n set_floatx(old_floatx)\n\n @pytest.mark.skipif((K.backend() == 'cntk'),\n reason='cntk does not support float16')\n def test_set_floatx(self):\n \"\"\"\n Make sure that changes to the global floatx are effectively\n taken into account by the backend.\n \"\"\"\n # Keep track of the old value\n old_floatx = floatx()\n\n set_floatx('float16')\n var = variable([10])\n check_dtype(var, 'float16')\n\n set_floatx('float64')\n var = variable([10])\n check_dtype(var, 'float64')\n\n # Restore old value\n set_floatx(old_floatx)\n\n def test_variable_support_bool_dtype(self):\n # Github issue: 7819\n if K.backend() == 'tensorflow':\n assert K.dtype(K.variable(1, dtype='int16')) == 'int16'\n assert K.dtype(K.variable(False, dtype='bool')) == 'bool'\n with pytest.raises(TypeError):\n K.variable('', dtype='unsupported')\n\n\nif __name__ == '__main__':\n pytest.main([__file__])\n" ]
[ [ "numpy.repeat", "numpy.testing.assert_allclose", "numpy.dot", "numpy.random.choice", "numpy.random.rand", "numpy.array_equal", "numpy.min", "numpy.mean", "numpy.exp", "numpy.alltrue", "numpy.random.random", "scipy.signal.convolve", "numpy.concatenate", "numpy.max", "numpy.log", "numpy.eye", "numpy.flipud", "numpy.prod", "numpy.random.randint", "numpy.transpose", "numpy.arange", "scipy.sparse.csr_matrix", "numpy.vstack", "numpy.expand_dims", "numpy.pad", "numpy.array", "numpy.zeros", "numpy.ma.masked_invalid", "numpy.allclose", "numpy.std", "numpy.stack", "numpy.asarray", "numpy.ones", "numpy.all" ] ]
bvermeulen/Seistools
[ "9c6fadf92b9ac2cbd6c32ed510147525c4b5464b" ]
[ "sweep_impulse_responses..py" ]
[ "from pathlib import Path\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom segpy.reader import create_reader\n\nfile_name1 = Path('./data_files/himin.sgy')\nfile_name2 = Path('./data_files/hilin.sgy')\ncorr_file_name = Path('./data_files/hicut_corr.csv')\nwavelet_file_name = Path('./data_files/wavelet.csv')\ncorr_wavelet_file_name = Path('./data_files/wavelet_corr.csv')\ndt = 4/1000 # sampling interval is 4 ms\ndf = 1/dt # sampling frequency\npi = np.pi\nsweep_length = 14000 # ms\nmax_lag = 129\nmax_display_frequency = 90 # Hz\nphase_display = (-10, 10) # radians\n\nwith open(file_name1, 'rb') as segy_file:\n seg_y_dataset = create_reader(segy_file)\n\n pilot_samples = []\n for i, val in enumerate(seg_y_dataset.trace_samples(3)):\n pilot_samples.append((i * int(1000 * dt), float(val)))\n\nwith open(file_name2, 'rb') as segy_file:\n seg_y_dataset = create_reader(segy_file)\n\n gf_samples = []\n for i, val in enumerate(seg_y_dataset.trace_samples(3)):\n gf_samples.append((i * int(1000 * dt), float(val)))\n\npilot_df = pd.DataFrame(pilot_samples, columns=['Time', 'Amplitude'])\ngf_df = pd.DataFrame(gf_samples, columns=['Time', 'Amplitude'])\ntime = pilot_df['Time']\npilot = pilot_df['Amplitude']\ngf = gf_df['Amplitude']\n\nfig, ax = plt.subplots(nrows=5, ncols=1, figsize=(8, 7))\n\nax[0].set_title('Pilot')\nax[0].set_xlim(0, sweep_length)\nax[0].plot(time, pilot)\n\nax[1].set_title('GF')\nax[1].set_xlim(0, sweep_length)\nax[1].plot(time, gf)\n\nax[2].set_title('correlation pilot with GF')\ncorr_function = np.correlate(pilot, gf, mode='full') / len(pilot)\ncorr_function = corr_function[(len(pilot)-1)-(max_lag-1):(len(pilot)-1)+max_lag]\ntime_lags = np.arange(-(max_lag-1), max_lag)\n\ncorr_function_df = pd.DataFrame(zip(time_lags, corr_function), columns=['Time', 'Values'])\ncorr_function_df.to_csv(corr_file_name, index=False)\nax[2].plot(time_lags, corr_function)\n\nwavelet_df = corr_function_df[corr_function_df['Time'] >= 0]\nwavelet_df.to_csv(wavelet_file_name, index=False)\nwavelet_length = len(wavelet_df)\nwavelet_values = wavelet_df['Values'].to_list()\n\nax[3].set_title('Minimum phase wavelet')\nax[3].plot(time[0:wavelet_length], wavelet_values)\n\ncorr_wavelet = np.correlate(wavelet_values, wavelet_values, mode='full') / wavelet_length\ncorr_wavelet = corr_wavelet[(wavelet_length-1)-(max_lag-1):(wavelet_length-1)+max_lag]\ntime_lags = np.arange(-(max_lag-1), max_lag)\nax[4].plot(time_lags, corr_wavelet)\n\ncorr_wavelet_df = pd.DataFrame(zip(time_lags, corr_wavelet), columns=['Time', 'Values'])\ncorr_wavelet_df.to_csv(corr_wavelet_file_name, index=False)\n\n# ax[3].set_title('autocorrelation re-ordered')\n# cf_reordered = np.concatenate((corr_function[max_lag-1:], corr_function[0:max_lag-1]))\n# time_lags = np.arange(0, 2*max_lag-1)\n# ax[3].plot(time_lags, cf_reordered)\n# print(len(corr_function))\n# print(len(cf_reordered))\n\n# ax[3].set_title('magnitude')\n# ax[3].set_xlim(0, max_display_frequency)\n# scale = 'linear' # 'dB' # or 'default'\n# ax[3].magnitude_spectrum(corr_function, Fs=df, scale=scale, window=window_none)\n\n# ax[4].set_title('phase')\n# ax[4].set_ylim(phase_display[0], phase_display[1])\n# ax[4].set_xlim(0, max_display_frequency)\n# get the phase spectrum values and frequencies values;\n# plot invisible and use a non default color\n# cf_phase_values, cf_freq, _ = ax[4].phase_spectrum(\n# cf_reordered, Fs=df, window=window_none, visible=False, color='r')\n\n# check for modulus 2*pi and keep values between -pi and pi\n# cf_phase_values 
= np.mod(cf_phase_values, 2 * pi)\n# cf_phase_values[cf_phase_values > pi] -= 2 * pi\n\n# cf_phase_values -= 2 * pi\n# ax[4].plot(cf_freq, cf_phase_values)\n\nplt.tight_layout()\nplt.show()\n" ]
[ [ "numpy.correlate", "pandas.DataFrame", "matplotlib.pyplot.subplots", "numpy.arange", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.show" ] ]
JokerYan/pytorch_image_classification
[ "e080a065c8afd3a96fa459c64442efd072464f40" ]
[ "pytorch_image_classification/losses/ricap.py" ]
[ "from typing import List, Tuple\n\nimport torch\nimport torch.nn as nn\n\n\nclass RICAPLoss:\n def __init__(self, reduction: str):\n self.loss_func = nn.CrossEntropyLoss(reduction=reduction)\n\n def __call__(\n self, predictions: torch.Tensor,\n targets: Tuple[List[torch.Tensor], List[float]]) -> torch.Tensor:\n target_list, weights = targets\n return sum([\n weight * self.loss_func(predictions, targets)\n for targets, weight in zip(target_list, weights)\n ])\n" ]
[ [ "torch.nn.CrossEntropyLoss" ] ]
old-school-kid/astro-projects
[ "e82dfe68488ef1f072711871ad20178a23f89019" ]
[ "threebody.py" ]
[ "import os\nos.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = \"hide\"\nimport pygame\nfrom random import *\nimport numpy as np\nimport time\nimport sys\n\n# initiate pygame and clock\npygame.init()\nclock = pygame.time.Clock()\ngame_font = pygame.font.SysFont('ubuntu', 15)\n\n# dimensions\nWIDTH = 1540\nHEIGHT = 865\n\n# gravitational constant\ng = 4.4\n\n#Color Section\nBLACK = (0,0,0)\nGREY = (128,128,128) #mercury\nYELLOWISH = (165,124,27) #venus\nBLUE = (0,0,225) #for earth\nRED = (198, 123, 92) #mars\nBROWN = (144, 97, 77) #jupiter\nCARMEL = (195, 161, 113) #saturn\nURANUS_BLUE = (79, 208, 231) #uranus\nNEPTUNE = (62, 84, 232) #neptune\nWHITE = (255, 255, 255) #for text\nYELLOW = (255, 255, 0) #for sun\nDARK_GREY = (80,78,81) #orbit\n\n# set up surface plane\nsurface = pygame.display.set_mode((WIDTH, HEIGHT)) # ((width, height))\npygame.display.set_caption('3 body')\nsurface.fill(BLACK)\n\n# trails\nglobal trails_active\ntrails_active = True\n\n# trails button\ntrails_button = pygame.Rect(0, 0, 100, 50)\ntrails_button_surface = game_font.render(\"TRAILS\", True, (0, 0, 0))\npygame.draw.rect(surface, WHITE, trails_button)\nsurface.blit(trails_button_surface, (50, 10))\n\n# exit button\nexit_button = pygame.Rect(WIDTH-100, 0, 100, 50)\nexit_button_surface = game_font.render(\"EXIT\", True, (0, 0, 0))\npygame.draw.rect(surface, WHITE, exit_button)\nsurface.blit(exit_button_surface, (WIDTH-90, 10))\n\n# reset button\nreset_button = pygame.Rect(WIDTH/2 - 50, 0, 100, 50)\nreset_button_surface = game_font.render(\"RESET\", True, (0, 0, 0))\npygame.draw.rect(surface, WHITE, reset_button)\nsurface.blit(reset_button_surface, (WIDTH/2 - 30, 10))\n\n### body object\nclass Body(object):\n def __init__(self, m, x, y, c):\n \"\"\"\n mass m is passed, random at source. Position x,y is passed,\n fixed at source. Initial acceleration is not passed, set to random.\n Initial acceleration not passed, set to 0. 
Colour passed.\n Radius passed, fixed at source.\n \"\"\"\n self.mass = m\n self.position = np.array([x, y])\n self.last_position = np.array([x, y])\n self.velocity = np.array([randint(-1,1), randint(-1,1)])\n self.accel = np.array([0, 0])\n self.color = c\n self.radius = m * 1 # density is 1\n\n def applyForce(self, force):\n # apply forces to a body\n f = force / self.mass\n self.accel = np.add(self.accel, f)\n\n def update(self):\n # update position based on velocity and reset accel\n self.velocity = np.add(self.velocity, self.accel)\n self.last_position = self.position\n self.position = np.add(self.position, self.velocity)\n self.accel = 0\n if(self.position[0] > WIDTH) or (self.position[0] < 0) or (self.position[1] > HEIGHT) or (self.position[1] < 0):\n self.randomize_position()\n print(\"object left screen\")\n\n def display(self):\n # draw over old object location\n pygame.draw.circle(surface, BLACK, (int(self.last_position[0]), int(self.last_position[1])), self.radius) \t# (drawLayer, color, (coordinates), radius)\n\n # draw trail (Thickness set to 5, color white)\n if trails_active == True:\n pygame.draw.line(surface, WHITE, (int(self.last_position[0]), int(self.last_position[1])), (int(self.position[0]), int(self.position[1])), 5)\n\n # draw new object location\n pygame.draw.circle(surface, self.color, (int(self.position[0]), int(self.position[1])), self.radius)\n\n\n def attract(self, m, g):\n # gravitational code rewritten from Daniel Shiffman's \"Nature of Code\"\n force = self.position - m.position\n distance = np.linalg.norm(force)\n distance = constrain(distance, 5.0, 25.0)\n force = normalize(force)\n strength = (g * self.mass * m.mass) / float(distance * distance)\n force = force * strength\n return force\n\n def randomize_position(self):\n self.position[0] = randrange(1000)\n self.position[1] = randrange(600)\n self.velocity = np.array([0, 0])\n return\n\n############################## set up and draw\ndef setup():\n # 3bodies\n body1 = Body(randint(0, 10), 700, 200, BLUE)\n body2 = Body(randint(0, 10), 600, 200, RED)\n body3 = Body(randint(0, 10), 500, 286, YELLOW)\n\n # list of all bodies\n global bodies\n bodies = [body1, body2, body3]\n return\n\n\ndef draw():\n # for each body: apply forces, update position, and draw\n for body in bodies:\n for other_body in bodies:\n if (body != other_body):\n global g\n force = other_body.attract(body, g)\n body.applyForce(force)\n body.update()\n body.display()\n\n # Re-draw buttons\n pygame.draw.rect(surface, WHITE, trails_button)\n surface.blit(trails_button_surface, (10, 10))\n pygame.draw.rect(surface, WHITE, exit_button)\n surface.blit(exit_button_surface, (WIDTH-90, 10))\n pygame.draw.rect(surface, WHITE, reset_button)\n surface.blit(reset_button_surface, (WIDTH/2 - 30, 10))\n\n return\n\n\n############################## mathematical functions\n\ndef constrain(val, min_val, max_val):\n return min(max_val, max(min_val, val))\n\n\ndef normalize(force):\n normal = np.linalg.norm(force, ord=1)\n if normal == 0:\n normal = np.finfo(force.dtype).eps\n return force / normal\n\n\n############################## main loop\n\nif __name__ == \"__main__\":\n # initial set up\n setup()\n while True:\n # draw bodies to screen\n draw()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n if event.type == pygame.MOUSEBUTTONDOWN:\n mouse_pos = event.pos\n # trails button\n if trails_button.collidepoint(mouse_pos):\n #print(\"trails button pushed\")\n if trails_active == True:\n trails_active = 
False\n surface.fill(BLACK)\n else:\n trails_active = True\n if exit_button.collidepoint(mouse_pos):\n pygame.quit()\n sys.exit()\n if reset_button.collidepoint(mouse_pos):\n for body in bodies:\n body.randomize_position()\n surface.fill(BLACK)\n\n pygame.display.update()\n time.sleep(0.05)" ]
[ [ "numpy.finfo", "numpy.array", "numpy.linalg.norm", "numpy.add" ] ]
martinlarsalbert/rolldecay-estimators
[ "4d70da6058720ecbcecba3ed94c40f287a115e05", "4d70da6058720ecbcecba3ed94c40f287a115e05" ]
[ "rolldecayestimators/measure.py", "rolldecayestimators/ikeda_estimator.py" ]
[ "import numpy as np\nimport pandas as pd\nfrom rolldecayestimators import lambdas\n\ndef sample_increase(X, increase=5):\n N = len(X) * increase\n t_interpolated = np.linspace(X.index[0], X.index[-1], N)\n X_interpolated = pd.DataFrame(index=t_interpolated)\n\n for key, values in X.items():\n X_interpolated[key] = np.interp(t_interpolated, values.index, values)\n\n return X_interpolated\n\ndef get_peaks(X:pd.DataFrame, key='phi1d')->pd.DataFrame:\n \"\"\"\n Find the peaks in the signal by finding zero roll angle velocity\n\n Parameters\n ----------\n X\n DataFrame with roll signal as \"phi\"\n key = 'phi1d'\n\n Returns\n -------\n Dataframe with rows from X where phi1d is close to 0.\n\n \"\"\"\n\n phi1d = np.array(X[key])\n\n index = np.arange(0, len(X.index))\n index_later = np.roll(index, shift=-1)\n index_later[-1] = index[-1]\n mask = (\n ((phi1d[index] > 0) &\n (phi1d[index_later] < 0)) |\n ((phi1d[index] < 0) &\n (phi1d[index_later] > 0))\n )\n\n index_first = index[mask]\n index_second = index[mask] + 1\n\n # y = m + k*x\n # k = (y2-y1)/(x2-x1)\n # m = y1 - k*x1\n # y = 0 --> x = -m/k\n X_1 = X.iloc[index_first].copy()\n X_2 = X.iloc[index_second].copy()\n rows, cols = X_1.shape\n\n x1 = np.array(X_1.index)\n x2 = np.array(X_2.index)\n y1 = np.array(X_1['phi1d'])\n y2 = np.array(X_2['phi1d'])\n k = (y2 - y1) / (x2 - x1)\n m = y1 - k * x1\n x = -m / k\n\n X_1 = np.array(X_1)\n X_2 = np.array(X_2)\n\n factor = (x - x1) / (x2 - x1)\n factor = np.tile(factor, [cols, 1]).T\n X_zero = X_1 + (X_2 - X_1) * factor\n\n X_zerocrossings = pd.DataFrame(data=X_zero, columns=X.columns, index=x)\n\n return X_zerocrossings\n\ndef calculate_amplitudes(X_zerocrossings):\n\n X_amplitudes = pd.DataFrame()\n for i in range(len(X_zerocrossings) - 1):\n s1 = X_zerocrossings.iloc[i]\n s2 = X_zerocrossings.iloc[i + 1]\n\n amplitude = (s2 - s1).abs()\n amplitude.name = (s1.name + s2.name)/2 # mean time\n X_amplitudes = X_amplitudes.append(amplitude)\n\n X_amplitudes['phi']/=2\n X_amplitudes['phi_a'] = X_amplitudes['phi']\n\n return X_amplitudes\n\ndef calculate_amplitudes_and_damping(X:pd.DataFrame):\n X_interpolated = sample_increase(X=X)\n X_zerocrossings = get_peaks(X=X_interpolated)\n X_amplitudes = calculate_amplitudes(X_zerocrossings=X_zerocrossings)\n X_amplitudes = calculate_damping(X_amplitudes=X_amplitudes)\n T0 = 2*X_amplitudes.index\n X_amplitudes['omega0'] = 2 * np.pi/np.gradient(T0)\n #X_amplitudes['time'] = np.cumsum(X_amplitudes.index)\n return X_amplitudes\n\ndef calculate_damping(X_amplitudes):\n\n df_decrements = pd.DataFrame()\n\n for i in range(len(X_amplitudes) - 2):\n s1 = X_amplitudes.iloc[i]\n s2 = X_amplitudes.iloc[i + 2]\n\n decrement = s1 / s2\n decrement.name = s1.name\n df_decrements = df_decrements.append(decrement)\n\n df_decrements['zeta_n'] = 1 / (2 * np.pi) * np.log(df_decrements['phi'])\n\n X_amplitudes_new = X_amplitudes.copy()\n X_amplitudes_new = X_amplitudes_new.iloc[0:-1].copy()\n X_amplitudes_new['zeta_n'] = df_decrements['zeta_n'].copy()\n X_amplitudes_new['B_n'] = 2*X_amplitudes_new['zeta_n'] # [Nm*s]\n\n return X_amplitudes_new\n\n\ndef fft_omega0(frequencies, dft):\n\n index = np.argmax(dft)\n natural_frequency = frequencies[index]\n omega0 = 2 * np.pi * natural_frequency\n return omega0\n\ndef fft(series):\n \"\"\"\n FFT of a series\n Parameters\n ----------\n series\n\n Returns\n -------\n\n \"\"\"\n\n signal = series.values\n time = series.index\n\n dt = np.mean(np.diff(time))\n #n = 11*len(time)\n n = 50000\n frequencies = np.fft.rfftfreq(n=n, d=dt) # 
[Hz]\n\n dft = np.abs(np.fft.rfft(signal, n=n))\n\n return frequencies, dft\n\n\ndef linearized_matrix(df_rolldecay, df_ikeda, phi_as = np.deg2rad(np.linspace(1,10,10)), g=9.81, rho=1000,\n do_hatify=True, suffixes=('','_ikeda')):\n \"\"\"\n Calculate B_e equivalent linearized damping for a range of roll amplitudes for both model tests and simplified ikeda.\n\n Parameters\n ----------\n df_rolldecay\n df_ikeda\n phi_as\n\n Returns\n -------\n\n \"\"\"\n\n\n df = pd.DataFrame()\n\n for phi_a in phi_as:\n df_ = linearize(phi_a=phi_a, df_rolldecay=df_rolldecay, df_ikeda=df_ikeda, g=g, rho=rho, do_hatify=do_hatify,\n suffixes=suffixes)\n df_['phi_a']=phi_a\n df =df.append(df_, ignore_index=True)\n\n return df\n\n\ndef linearize_si(phi_a, df_ikeda, components = ['B_44', 'B_F', 'B_W', 'B_E', 'B_BK', 'B_L'], do_hatify=True):\n \"\"\"\n Calculate the equivalent linearized damping B_e\n\n Parameters\n ----------\n phi_a\n df_ikeda\n g\n rho\n\n Returns\n -------\n\n \"\"\"\n\n df_ikeda = df_ikeda.copy()\n\n for component in components:\n new_key = '%s_e' % component\n B1_key = '%s_1' % component\n B2_key = '%s_2' % component\n\n df_ikeda[new_key] = lambdas.B_e_lambda(B_1=df_ikeda[B1_key],\n B_2=df_ikeda[B2_key],\n omega0=df_ikeda['omega0'],\n phi_a=phi_a)\n\n if do_hatify:\n df_ikeda['B_e'] = df_ikeda['B_44_e']\n else:\n df_ikeda['B_e_hat'] = df_ikeda['B_44_hat_e']\n\n return df_ikeda\n\ndef hatify(df_ikeda, g=9.81, rho=1000, components = ['B','B_44', 'B_F', 'B_W', 'B_E', 'B_BK', 'B_L']):\n\n df_ikeda=df_ikeda.copy()\n new_keys = ['%s_e' % key for key in components]\n new_hat_keys = ['%s_e_hat' % key for key in components]\n\n Disp = np.tile(df_ikeda['Disp'],[len(components),1]).T\n beam = np.tile(df_ikeda['b'],[len(components),1]).T\n\n df_ikeda[new_hat_keys] = lambdas.B_e_hat_lambda(B_e=df_ikeda[new_keys],\n Disp=Disp,\n beam=beam,\n g=g, rho=rho)\n\n df_ikeda['B_e_hat'] = df_ikeda['B_44_e_hat']\n\n return df_ikeda\n\n\ndef linearize_model_test(phi_a, df_rolldecay, g=9.81, rho=1000):\n \"\"\"\n Calculate the equivalent linearized damping B_e\n\n Parameters\n ----------\n phi_a\n df_rolldecay\n g\n rho\n\n Returns\n -------\n\n \"\"\"\n\n df_rolldecay = df_rolldecay.copy()\n\n df_rolldecay['B_e'] = lambdas.B_e_lambda(B_1=df_rolldecay['B_1'],\n B_2=df_rolldecay['B_2'],\n omega0=df_rolldecay['omega0'],\n phi_a=phi_a)\n\n df_rolldecay['B_e_hat'] = lambdas.B_e_hat_lambda(B_e=df_rolldecay['B_e'],\n Disp=df_rolldecay['Disp'],\n beam=df_rolldecay['b'],\n g=g, rho=rho)\n\n return df_rolldecay\n\ndef linearize(phi_a:float, df_rolldecay:pd.DataFrame, df_ikeda:pd.DataFrame, g=9.81, rho=1000,\n components = ['B_44', 'B_F', 'B_W', 'B_E', 'B_BK', 'B_L'], do_hatify=True, suffixes=('','_ikeda')):\n\n if not do_hatify:\n components = ['%s_hat' % key for key in components]\n\n df_rolldecay = linearize_model_test(phi_a=phi_a, df_rolldecay=df_rolldecay, g=g, rho=rho)\n df_ikeda = linearize_si(phi_a=phi_a, df_ikeda=df_ikeda, components=components, do_hatify=do_hatify)\n\n if do_hatify:\n df_ikeda = hatify(df_ikeda=df_ikeda,g=g, rho=rho, components=components)\n\n\n df_compare = pd.merge(left=df_rolldecay, right=df_ikeda, how='inner', left_index=True, right_index=True,\n suffixes=suffixes)\n return df_compare\n", "import pandas as pd\nimport numpy as np\nimport sympy as sp\nimport matplotlib.pyplot as plt\nfrom rolldecayestimators.estimator import RollDecay\nfrom rolldecayestimators.direct_estimator import DirectEstimator\n\nfrom rolldecayestimators.simplified_ikeda import calculate_roll_damping\nfrom 
rolldecayestimators import equations\nfrom rolldecayestimators import symbols\nfrom rolldecayestimators.substitute_dynamic_symbols import lambdify\nfrom rolldecayestimators.sensitivity import variate_ship, plot_variation, calculate, calculate_variation, _plot_result\n\n\nfrom scipy.optimize import curve_fit\n\nclass IkedaEstimatorFitError(ValueError): pass\n\nclass IkedaEstimator(DirectEstimator):\n\n eqs = [equations.zeta_equation, # 0\n equations.omega0_equation_linear] # 1\n functions_ikeda = [lambdify(sp.solve(eqs, symbols.A_44, symbols.zeta)[0][1]),\n lambdify(sp.solve(equations.B44_equation, symbols.B_44)[0]),\n ]\n\n def __init__(self, lpp:float, TA, TF, beam, BKL, BKB, A0, kg, Volume, gm, V, rho=1000, g=9.81, phi_max=8, omega0=None,\n verify_input = True, limit_inputs=False, **kwargs):\n \"\"\"\n Estimate a roll decay test using the Simplified Ikeda Method to predict roll damping.\n NOTE! This method is currently only valid for zero speed!\n\n Parameters\n ----------\n lpp\n Ship perpendicular length [m]\n TA\n Draught aft [m]\n TF\n Draught forward [m]\n beam\n Ship b [m]\n BKL\n Bilge keel length [m]\n BKB\n Bilge keel height [m]\n A0\n Middship coefficient (A_m/(B*d) [-]\n kg\n Vertical centre of gravity [m]\n Volume\n Displacement of ship [m3]\n gm\n metacentric height [m]\n V\n ship speed [m/s]\n rho\n Density of water [kg/m3]\n g\n acceleration of gravity [m/s**2]\n phi_max\n max roll angle during test [deg]\n omega0\n Natural frequency of motion [rad/s], if None it will be calculated with fft of signal\n\n For more info see: \"rolldecaysestimators/simplified_ikeda.py\"\n \"\"\"\n super().__init__(omega0=omega0)\n\n self.lpp=lpp\n self.TA=TA\n self.TF=TF\n self.beam=beam\n self.BKL=BKL\n self.BKB=BKB\n self.A0=A0\n self.kg=kg\n self.Volume=Volume\n self.V = V\n self.rho=rho\n self.g=g\n self.gm=gm\n self.phi_max=phi_max\n self.two_point_regression=True\n self.verify_input = verify_input\n self.limit_inputs = limit_inputs\n\n @property\n def zeta_lambda(self):\n return self.functions_ikeda[0]\n\n @property\n def B44_lambda(self):\n return self.functions_ikeda[1]\n\n #def simulate(self, t :np.ndarray, phi0 :float, phi1d0 :float,omega0:float, zeta:float)->pd.DataFrame:\n # \"\"\"\n # Simulate a roll decay test using the quadratic method.\n # :param t: time vector to be simulated [s]\n # :param phi0: initial roll angle [rad]\n # :param phi1d0: initial roll speed [rad/s]\n # :param omega0: roll natural frequency[rad/s]\n # :param zeta:linear roll damping [-]\n # :return: pandas data frame with time series of 'phi' and 'phi1d'\n # \"\"\"\n # parameters={\n # 'omega0':omega0,\n # 'zeta':zeta,\n # }\n # return self._simulate(t=t, phi0=phi0, phi1d0=phi1d0, parameters=parameters)\n\n def fit(self, X, y=None, **kwargs):\n self.X = X\n\n self.phi_max = np.rad2deg(self.X[self.phi_key].abs().max()) ## Initial roll angle in [deg]\n\n DRAFT=(self.TA + self.TF) / 2\n omega0=self.omega0\n\n if (self.lpp*self.beam*DRAFT > 0):\n CB = self.Volume / (self.lpp*self.beam*DRAFT)\n else:\n raise IkedaEstimatorFitError('lpp, b or DRAFT is zero or nan!')\n\n self.ikeda_parameters = {\n\n 'LPP' : self.lpp,\n 'Beam' : self.beam,\n 'DRAFT' : DRAFT,\n\n 'PHI' : self.phi_max,\n 'BKL' : self.BKL,\n 'BKB' : self.BKB,\n 'OMEGA' : omega0,\n 'OG' : (-self.kg + DRAFT),\n 'CB' : CB,\n 'CMID' : self.A0,\n 'V':self.V,\n 'verify_input':self.verify_input,\n 'limit_inputs':self.limit_inputs,\n\n }\n\n self.result=self.calculate()\n\n m = self.Volume * self.rho\n B_44 = self.B44_lambda(B_44_hat=self.result.B_44_HAT, 
Disp=self.Volume, b=self.beam, g=self.g, rho=self.rho)\n zeta = self.zeta_lambda(B_1=B_44, GM=self.gm, g=self.g, m=m, omega0=omega0)\n self.parameters={\n 'zeta':zeta,\n 'omega0':omega0,\n 'd':0,\n }\n\n self.is_fitted_ = True\n\n\n def calculate(self,**kwargs):\n\n B44HAT, BFHAT, BWHAT, BEHAT, BBKHAT, BLHAT = calculate_roll_damping(**self.ikeda_parameters, **kwargs)\n s = pd.Series()\n s['B_44_HAT'] = B44HAT\n s['B_F_HAT'] = BFHAT\n s['B_W_HAT'] = BWHAT\n s['B_E_HAT'] = BEHAT\n s['B_BK_HAT'] = BBKHAT\n s['B_L_HAT'] = BLHAT\n\n return s\n\n def result_for_database(self, score=True, **kwargs):\n\n s = super().result_for_database(score=score, **kwargs)\n s.update(self.result)\n\n return s\n\nclass IkedaQuadraticEstimator(IkedaEstimator):\n\n functions_ikeda = IkedaEstimator.functions_ikeda\n functions_ikeda.append(lambdify(sp.solve(equations.B_e_equation, symbols.B_e)[0])) # 2\n functions_ikeda.append(lambdify(sp.solve(equations.zeta_B1_equation, symbols.zeta)[0])) # 3\n functions_ikeda.append(lambdify(sp.solve(equations.d_B2_equation, symbols.d)[0])) # 4\n\n @property\n def B_e_lambda(self):\n return self.functions_ikeda[2]\n\n @property\n def zeta_B1_lambda(self):\n return self.functions_ikeda[3]\n\n @property\n def d_B2_lambda(self):\n return self.functions_ikeda[4]\n\n def fit(self, X=None, y=None, **kwargs):\n self.X = X\n\n if not self.X is None:\n self.phi_max = np.rad2deg(self.X[self.phi_key].abs().max()) ## Initial roll angle in [deg]\n\n\n DRAFT=(self.TA + self.TF) / 2\n omega0=self.omega0\n\n if (self.lpp*self.beam*DRAFT > 0):\n CB = self.Volume / (self.lpp*self.beam*DRAFT)\n else:\n raise IkedaEstimatorFitError('lpp, b or DRAFT is zero or nan!')\n\n self.ikeda_parameters = {\n\n 'LPP' : self.lpp,\n 'Beam' : self.beam,\n 'DRAFT' : DRAFT,\n\n 'PHI' : self.phi_max,\n 'BKL' : self.BKL,\n 'BKB' : self.BKB,\n 'OMEGA' : omega0,\n 'OG' : (-self.kg + DRAFT),\n 'CB' : CB,\n 'V' : self.V,\n 'CMID' : self.A0,\n 'verify_input': self.verify_input,\n 'limit_inputs': self.limit_inputs,\n }\n\n #self.result=self.calculate(**kwargs)\n self.result = {}\n\n self.result_variation=self.calculate_phi_a_variation()\n\n if self.two_point_regression:\n B_1_,B_2_ = self.calculate_two_point_regression(**kwargs)\n self.result['B_1'] = B_1 = B_1_['B_44']\n self.result['B_2'] = B_2 = B_2_['B_44']\n\n B_1_, B_2_ = self.fit_Bs() # Not used...\n else:\n B_1, B_2 = self.fit_Bs()\n self.result['B_1'] = B_1\n self.result['B_2'] = B_2\n\n zeta, d = self.Bs_to_zeta_d(B_1=B_1, B_2=B_2)\n factor = 1.0 # Factor\n phi_a = np.abs(np.deg2rad(self.phi_max))/ factor # [Radians]\n self.result['B_e'] = self.B_e_lambda(B_1=B_1, B_2=B_2, omega0=self.ikeda_parameters['OMEGA'],phi_a=phi_a)\n\n self.parameters={\n 'zeta':zeta,\n 'd':d,\n 'omega0':omega0,\n }\n\n self.is_fitted_ = True\n\n def calculate_two_point_regression(self, **kwargs):\n # Two point regression:\n data = {\n 'lpp': self.lpp,\n 'b': self.beam,\n 'DRAFT': (self.TA + self.TF) / 2,\n 'phi_max': self.phi_max,\n 'BKL': self.BKL,\n 'BKB': self.BKB,\n 'omega0': self.ikeda_parameters['OMEGA'],\n 'kg': self.kg,\n 'CB': self.ikeda_parameters['CB'],\n 'A0': self.A0,\n 'V': self.V,\n 'Volume': self.Volume,\n }\n row1 = pd.Series(data)\n row1.phi_max *= 0.5\n row2 = pd.Series(data)\n s1_hat = calculate(row1, verify_input=self.verify_input, limit_inputs=self.limit_inputs, **kwargs)\n s2_hat = calculate(row2, verify_input=self.verify_input, limit_inputs=self.limit_inputs, **kwargs)\n\n s1_hat = self.B44_lambda(B_44_hat=s1_hat, Disp=row1.Volume, b=row1.b, g=self.g, 
rho=self.rho)\n s2_hat = self.B44_lambda(B_44_hat=s2_hat, Disp=row2.Volume, b=row2.b, g=self.g, rho=self.rho)\n\n s1=pd.Series()\n s2=pd.Series()\n for key,value in s1_hat.items():\n new_key = key.replace('_hat','')\n s1[new_key]=s1_hat[key]\n s2[new_key] = s2_hat[key]\n\n x = np.deg2rad([row1.phi_max, row2.phi_max]) * 8 * row1.omega0 / (3 * np.pi)\n B_2 = (s2 - s1) / (x[1] - x[0])\n B_1 = s1 - B_2 * x[0]\n\n # Save all of the component as one linear term: _1 and a quadratic term: _2\n for key in s1.index:\n new_name_1 = '%s_1' % key\n self.result[new_name_1] = s1[key]\n\n new_name_2 = '%s_2' % key\n self.result[new_name_2] = s2[key]\n\n return B_1,B_2\n\n def calculate_phi_a_variation(self):\n\n data={\n 'lpp':self.lpp,\n 'b' : self.beam,\n 'DRAFT' : (self.TA+self.TF)/2,\n 'phi_max' : self.phi_max,\n 'BKL' : self.BKL,\n 'BKB': self.BKB,\n 'omega0' : self.ikeda_parameters['OMEGA'],\n 'kg' :self.kg,\n 'CB':self.ikeda_parameters['CB'],\n 'A0' : self.A0,\n 'V' : self.V,\n 'Volume':self.Volume,\n }\n self.ship = ship = pd.Series(data)\n\n N = 40\n changes = np.linspace(1, 0.0001, N)\n df_variation = variate_ship(ship=ship, key='phi_max', changes=changes)\n result = calculate_variation(df=df_variation, limit_inputs=self.limit_inputs, verify_input=self.verify_input)\n df_variation['g'] = 9.81\n df_variation['rho'] = 1000\n result = pd.concat((result, df_variation), axis=1)\n\n result['B_44'] = self.B44_lambda(B_44_hat=result.B_44_hat, Disp=ship.Volume, b=ship.b, g=result.g, rho=result.rho)\n result.dropna(inplace=True)\n return result\n\n def fit_Bs(self):\n\n def fit(df, B_1, B_2):\n omega0 = df['omega0']\n phi_a = np.deg2rad(df['phi_max']) # Deg or rad (Radians gave better results actually)???\n #phi_a = df['phi_max'] # Deg or rad???\n return self.B_e_lambda(B_1, B_2, omega0, phi_a)\n\n coeffs, _ = curve_fit(f=fit, xdata=self.result_variation, ydata=self.result_variation['B_44'])\n B_1 = coeffs[0]\n B_2 = coeffs[1]\n self.result_variation['B_44_fit'] = fit(self.result_variation, *coeffs)\n return B_1,B_2\n\n def Bs_to_zeta_d(self, B_1, B_2):\n m = self.Volume*self.rho\n zeta = self.zeta_B1_lambda(B_1=B_1, GM=self.gm, g=self.g, m=m, omega0=self.ikeda_parameters['OMEGA'])\n d = self.d_B2_lambda(B_2=B_2, GM=self.gm, g=self.g, m=m, omega0=self.ikeda_parameters['OMEGA'])\n return zeta,d\n\n def plot_variation(self,ax=None):\n\n if ax is None:\n fig,ax=plt.subplots()\n\n self.result_variation.plot(y = ['B_44_hat'], ax=ax)\n\n def plot_B_fit(self,ax=None):\n\n if ax is None:\n fig,ax=plt.subplots()\n\n self.result_variation.plot(y='B_44', ax=ax)\n self.result_variation.plot(y='B_44_fit', ax=ax, style='--')\n\n @classmethod\n def load(cls, data: {}, X=None):\n \"\"\"\n Load data and parameters from an existing fitted estimator\n\n Parameters\n ----------\n data : dict\n Dict containing data for this estimator such as parameters\n X : pd.DataFrame\n DataFrame containing the measurement that this estimator fits (optional).\n Returns\n -------\n estimator\n Loaded with parameters from data and maybe also a loaded measurement X\n \"\"\"\n estimator = cls(**data)\n estimator.load_data(data=data)\n estimator.load_X(X=X)\n return estimator\n\n\n" ]
[ [ "numpy.array", "numpy.fft.rfft", "pandas.merge", "numpy.log", "pandas.DataFrame", "numpy.roll", "numpy.fft.rfftfreq", "numpy.interp", "numpy.tile", "numpy.diff", "numpy.argmax", "numpy.linspace", "numpy.gradient" ], [ "scipy.optimize.curve_fit", "matplotlib.pyplot.subplots", "pandas.concat", "numpy.deg2rad", "numpy.linspace", "pandas.Series" ] ]
khuship-wmt/incubator-mxnet
[ "dfc47be76d97640d9209f601a6c89809eeafae96" ]
[ "python/mxnet/numpy/multiarray.py" ]
[ "#!/usr/bin/env python\n\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# pylint: disable=too-many-lines, unused-argument\n\"\"\"numpy ndarray and util functions.\"\"\"\n\n\ntry:\n from __builtin__ import all as py_all\n from __builtin__ import slice as py_slice\nexcept ImportError:\n from builtins import all as py_all\n from builtins import slice as py_slice\n\nfrom array import array as native_array\nimport functools\nimport ctypes\nimport sys\nimport datetime\nimport warnings\nimport numpy as _np\nfrom .. import _deferred_compute as dc\nfrom ..autograd import is_recording\nfrom ..ndarray import NDArray, _DTYPE_NP_TO_MX, _GRAD_REQ_MAP\nfrom ..ndarray import indexing_key_expand_implicit_axes, get_indexing_dispatch_code,\\\n get_oshape_of_gather_nd_op\nfrom ..ndarray._internal import _set_np_ndarray_class\nfrom . import _op as _mx_np_op\nfrom ..base import check_call, _LIB, NDArrayHandle, c_array, mx_int, mx_int64\nfrom ..base import mx_real_t, c_array_buf, mx_uint, numeric_types, integer_types\nfrom ..runtime import Features\nfrom ..device import Device\nfrom ..util import set_module, wrap_np_unary_func, wrap_np_binary_func,\\\n is_np_default_dtype, wrap_ctx_to_device_func,\\\n dtype_from_number, wrap_data_api_statical_func,\\\n wrap_sort_functions\nfrom ..device import current_device\nfrom ..ndarray import numpy as _mx_nd_np\nfrom ..ndarray.numpy import _internal as _npi\nfrom ..ndarray.ndarray import _storage_type\nfrom ..dlpack import ndarray_from_numpy, ndarray_to_dlpack_for_write, DLDeviceType,\\\n ndarray_from_dlpack\nfrom .utils import _get_np_op\nfrom .fallback import * # pylint: disable=wildcard-import,unused-wildcard-import\nfrom . 
import fallback\n\n\n__all__ = ['ndarray', 'empty', 'empty_like', 'array', 'shape', 'median',\n 'zeros', 'zeros_like', 'ones', 'ones_like', 'full', 'full_like', 'all', 'any', 'broadcast_to',\n 'add', 'subtract', 'multiply', 'divide', 'mod', 'remainder', 'fmod', 'pow', 'power', 'bitwise_not',\n 'delete', 'trace', 'transpose', 'copy', 'moveaxis', 'reshape', 'dot',\n 'arctan2', 'atan2', 'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'log10', 'bitwise_invert', 'invert',\n 'sqrt', 'cbrt', 'abs', 'absolute', 'fabs', 'exp', 'expm1', 'arcsin', 'asin', 'arccos', 'acos', 'arctan',\n 'atan', 'sign', 'log', 'degrees', 'log2', 'log1p', 'rint', 'radians', 'reciprocal', 'square',\n 'negative', 'histogram', 'fix', 'ceil', 'floor', 'trunc', 'logical_not', 'arcsinh', 'asinh',\n 'arccosh', 'acosh', 'arctanh', 'atanh', 'append', 'argsort', 'sort', 'tensordot', 'eye', 'linspace',\n 'logspace', 'expand_dims', 'tile', 'arange', 'array_split', 'split', 'hsplit', 'vsplit',\n 'dsplit', 'flatnonzero', 'tril_indices', 'concatenate', 'concat', 'stack', 'vstack', 'row_stack',\n 'column_stack', 'hstack', 'dstack', 'average', 'mean', 'maximum', 'fmax', 'minimum', 'fmin',\n 'amax', 'amin', 'max', 'min', 'swapaxes', 'clip', 'argmax', 'argmin', 'std', 'var', 'insert',\n 'indices', 'copysign', 'ravel', 'unravel_index', 'diag_indices_from', 'hanning', 'hamming', 'blackman',\n 'logical_and', 'logical_or', 'logical_xor',\n 'flip', 'flipud', 'fliplr', 'around', 'round', 'round_', 'arctan2', 'hypot',\n 'triu_indices_from', 'triu_indices', 'tri',\n 'bitwise_and', 'bitwise_xor', 'bitwise_or', 'rad2deg', 'deg2rad',\n 'unique', 'lcm', 'gcd', 'tril', 'triu', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer',\n 'cross', 'kron', 'equal', 'not_equal', 'interp',\n 'greater', 'less', 'greater_equal', 'less_equal', 'roll', 'rot90', 'einsum', 'true_divide', 'nonzero',\n 'quantile', 'percentile', 'shares_memory', 'may_share_memory', 'diff', 'ediff1d', 'resize', 'matmul',\n 'nan_to_num', 'isnan', 'isinf', 'isposinf', 'isneginf', 'isfinite', 'polyval', 'where', 'bincount',\n 'atleast_1d', 'atleast_2d', 'atleast_3d', 'fill_diagonal', 'squeeze',\n 'diagflat', 'repeat', 'prod', 'pad', 'cumsum', 'sum', 'rollaxis', 'diag', 'diagonal',\n 'positive', 'logaddexp', 'floor_divide', 'permute_dims', 'bitwise_left_shift', 'bitwise_right_shift',\n 'asarray', 'from_dlpack']\n\n__all__ += fallback.__all__\n\n# Return code for dispatching indexing function call\n_NDARRAY_UNSUPPORTED_INDEXING = -1\n_NDARRAY_BASIC_INDEXING = 0\n_NDARRAY_ADVANCED_INDEXING = 1\n_NDARRAY_EMPTY_TUPLE_INDEXING = 2\n\n# Return code for 0-d boolean array handler\n_NDARRAY_NO_ZERO_DIM_BOOL_ARRAY = -1\n_NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE = 0\n_NDARRAY_ZERO_DIM_BOOL_ARRAY_TRUE = 1\n_SIGNED_INT32_UPPER_LIMIT = (2**31 - 1)\n\n# Caching whether MXNet was built with INT64 support or not\n_INT64_TENSOR_SIZE_ENABLED = None\n\ndef _int64_enabled():\n global _INT64_TENSOR_SIZE_ENABLED\n if _INT64_TENSOR_SIZE_ENABLED is None:\n _INT64_TENSOR_SIZE_ENABLED = Features().is_enabled('INT64_TENSOR_SIZE')\n return _INT64_TENSOR_SIZE_ENABLED\n\n# This function is copied from ndarray.py since pylint\n# keeps giving false alarm error of undefined-all-variable\ndef _new_alloc_handle(shape, device, delay_alloc, dtype=mx_real_t): # pylint: disable=redefined-outer-name\n \"\"\"Return a new handle with specified shape and device.\n\n Empty handle is only used to hold results.\n\n Returns\n -------\n handle\n A new empty `ndarray` handle.\n \"\"\"\n hdl = NDArrayHandle()\n if _int64_enabled():\n 
check_call(_LIB.MXNDArrayCreate64(\n c_array_buf(mx_int64, native_array('q', shape)),\n ctypes.c_int(len(shape)),\n ctypes.c_int(device.device_typeid),\n ctypes.c_int(device.device_id),\n ctypes.c_int(int(delay_alloc)),\n ctypes.c_int(int(_DTYPE_NP_TO_MX[_np.dtype(dtype).type])),\n ctypes.byref(hdl)))\n else:\n # When shape is larger than uint32 then there is an overflow error at python end itself.\n # It needs to be caught here since the call doesn't even reach backend.\n array_size = 1\n for idx in shape:\n array_size = array_size * idx\n if array_size > _SIGNED_INT32_UPPER_LIMIT:\n raise Exception(\"[_new_alloc_handle] Size of tensor you are trying to allocate is \" +\n \"larger than 2^31 elements. Please build with flag \" +\n \"USE_INT64_TENSOR_SIZE=1\")\n if _np.dtype(dtype) == _np.dtype([('bfloat16', _np.uint16)]):\n dtype_type = _np.dtype(dtype)\n else:\n dtype_type = _np.dtype(dtype).type\n check_call(_LIB.MXNDArrayCreate(\n c_array_buf(mx_uint, native_array('I', shape)),\n mx_uint(len(shape)),\n ctypes.c_int(device.device_typeid),\n ctypes.c_int(device.device_id),\n ctypes.c_int(int(delay_alloc)),\n ctypes.c_int(int(_DTYPE_NP_TO_MX[dtype_type])),\n ctypes.byref(hdl)))\n return hdl\n\n\ndef _reshape_view(a, *shape): # pylint: disable=redefined-outer-name\n \"\"\"Returns a **view** of this array with a new shape without altering any data.\n\n Parameters\n ----------\n shape : tuple of int, or n ints\n The new shape should not change the array size, namely\n ``np.prod(new_shape)`` should be equal to ``np.prod(a.shape)``.\n Some dimensions of the shape can take special value -1, which\n infers the dimension of the output shape by using the remainder of the\n input dimensions keeping the size of the new array same as that of the input array.\n At most one dimension of shape can be -1.\n\n Returns\n -------\n ndarray\n An array with desired shape that shares data with this array.\n \"\"\"\n if len(shape) == 1 and isinstance(shape[0], (list, tuple)):\n shape = shape[0]\n handle = NDArrayHandle()\n check_call(_LIB.MXNDArrayReshape64(a.handle,\n len(shape),\n c_array(ctypes.c_int64, shape),\n False,\n ctypes.byref(handle)))\n return ndarray(handle=handle, writable=a.writable)\n\ndef _as_mx_np_array(object, device=None, zero_copy=False):\n \"\"\"Convert arrays or any array member of container to mxnet.numpy.ndarray on device.\"\"\"\n if object is None or isinstance(object, ndarray):\n return object\n elif isinstance(object, _np.ndarray):\n from_numpy = ndarray_from_numpy(ndarray, array)\n return from_numpy(object, zero_copy and object.flags['C_CONTIGUOUS'])\n elif isinstance(object, (integer_types, numeric_types)):\n return object\n elif isinstance(object, (_np.bool_, _np.bool)):\n return array(object, dtype=_np.bool_, device=device)\n elif isinstance(object, (list, tuple)):\n tmp = [_as_mx_np_array(arr, device=device, zero_copy=zero_copy) for arr in object]\n return object.__class__(tmp)\n else:\n raise TypeError('Does not support converting {} to mx.np.ndarray.'.format(str(type(object))))\n\n\ndef _as_onp_array(object, cur_device=None):\n \"\"\"Convert object to numpy.ndarray.\"\"\"\n def _update_device(cur_device, tmp_device):\n if cur_device is None:\n cur_device = tmp_device\n elif tmp_device is not None and cur_device != tmp_device:\n raise ValueError('Ambiguous to set the device for the output ndarray since' # pylint: disable=too-few-format-args\n ' input ndarrays are allocated on different devices: {} and {}'\n .format(str(cur_device, tmp_device)))\n return cur_device\n\n if 
isinstance(object, ndarray):\n return object.asnumpy(), object.device\n elif isinstance(object, (list, tuple)):\n tmp = []\n for arr in object:\n arr, tmp_device = _as_onp_array(arr, cur_device)\n tmp.append(arr)\n cur_device = _update_device(cur_device, tmp_device)\n return object.__class__(tmp), cur_device\n elif isinstance(object, dict):\n tmp = dict()\n for key, value in object.items():\n value, tmp_device = _as_onp_array(value, cur_device)\n tmp[key] = value\n cur_device = _update_device(cur_device, tmp_device)\n return object.__class__(tmp), cur_device\n else:\n return object, cur_device\n\n\n# Have to use 0 as default value for stype since pylint does not allow\n# importing _STORAGE_TYPE_DEFAULT from ndarray.py.\ndef _np_ndarray_cls(handle, writable=True, stype=0):\n if stype == -1:\n stype = _storage_type(handle)\n if stype != 0:\n raise ValueError('_np_ndarray_cls currently only supports default storage '\n 'type, while received stype = {}'.format(stype))\n return ndarray(handle, writable=writable)\n\n\n_set_np_ndarray_class(_np_ndarray_cls)\n\n_NUMPY_ARRAY_FUNCTION_DICT = {}\n_NUMPY_ARRAY_UFUNC_DICT = {}\n_FALLBACK_ARRAY_FUNCTION_WARNED_RECORD = {}\n_FALLBACK_ARRAY_UFUNC_WARNED_RECORD = {}\n\ndef wrap_mxnp_np_ufunc(func):\n \"\"\"\n A convenience decorator for wrapping for python overload-able ops to provide type\n casting for mixed use of mx_np and onp inputs.\n\n Parameters\n ----------\n func : a python overload-able binary function to be wrapped for type casting.\n\n Returns\n -------\n Function\n A function wrapped with type casted.\n \"\"\"\n @functools.wraps(func)\n def _wrap_mxnp_np_ufunc(x1, x2):\n if isinstance(x2, _np.ndarray):\n x2 = _as_mx_np_array(x2, device=x1.device)\n return func(x1, x2)\n return _wrap_mxnp_np_ufunc\n\n@set_module('mxnet.numpy')\nclass ndarray(NDArray): # pylint: disable=invalid-name\n \"\"\"\n ndarray(handle, writable=True):\n\n An array object represents a multidimensional, homogeneous array of fixed-size items.\n An associated data-type object describes the format of each element in the array\n (its byte-order, how many bytes it occupies in memory, whether it is an integer, a\n floating point number, or something else, etc.). Arrays should be constructed using\n `array`, `zeros` or `empty`. Currently, only c-contiguous arrays are supported.\n\n Arrays should be constructed using `array`, `zeros` or `empty` (refer\n to the See Also section below). 
The parameters given here refer to\n a low-level method (`ndarray(...)`) for instantiating an array.\n\n For more information, refer to the `mxnet.numpy` module and examine the\n methods and attributes of an array.\n\n Parameters\n ----------\n handle: int\n The ndarray handle in backend (C++).\n writable: bool\n Indicates whether inplace-assignment is allowed for the array.\n\n Attributes\n ----------\n T : ndarray\n Transpose of the array.\n dtype : dtype object\n Describes the format of the elements in the array.\n size : int\n Number of elements in the array.\n ndim : int\n The array's number of dimensions.\n shape : tuple of ints\n Shape of the array.\n\n See Also\n --------\n array : Construct an array.\n zeros : Create an array, each element of which is zero.\n empty : Create an array, but leave its allocated memory unchanged (i.e.,\n it contains \"garbage\").\n \"\"\"\n\n @staticmethod\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): # pylint: disable=bad-staticmethod-argument\n \"\"\"\n Dispatch official NumPy unary/binary operator calls on mxnet.numpy.ndarray\n to this function. The operators must comply with the ufunc definition in NumPy.\n The following code is adapted from CuPy.\n Casting rules for operator with mx_np and onp (inplace op will keep its type)\n | Expression | a type | b type | out type|\n | --- | --- | --- | --- |\n | `a += b` | onp | mx_np | onp |\n | `a += b` | mx_np | onp | mx_np |\n | `c = a + b` | onp | mx_np | mx_np |\n | `c = a + b` | mx_np | onp | mx_np |\n \"\"\"\n ufunc_list = [\"add\", \"subtract\", \"multiply\", \"divide\", \"true_divide\", \"floor_divide\", \"power\",\n \"remainder\", \"bitwise_and\", \"bitwise_or\", \"bitwise_xor\", \"left_shift\", \"right_shift\",\n \"greater\", \"greater_equal\", \"less\", \"less_equal\", \"not_equal\", \"equal\", \"matmul\"]\n if 'out' in kwargs:\n # need to unfold tuple argument in kwargs\n out = kwargs['out']\n if len(out) != 1:\n raise ValueError('The `out` parameter must have exactly one ndarray')\n kwargs['out'] = out[0]\n\n if method == '__call__':\n name = ufunc.__name__\n mx_ufunc = _NUMPY_ARRAY_UFUNC_DICT.get(name, None)\n onp_op = _get_np_op(name)\n if mx_ufunc is None:\n # try to fallback to official NumPy op\n if is_recording():\n raise ValueError(\"Falling back to NumPy operator {} with autograd active is not supported.\"\n \"Please consider moving the operator to the outside of the autograd scope.\")\\\n .format(name)\n new_inputs = [arg.asnumpy() if isinstance(arg, ndarray) else arg for arg in inputs]\n if onp_op not in _FALLBACK_ARRAY_UFUNC_WARNED_RECORD:\n import logging\n logging.warning(\"np.%s is a fallback operator, \"\n \"which is actually using official numpy's implementation\", name)\n _FALLBACK_ARRAY_UFUNC_WARNED_RECORD[onp_op] = True\n out = onp_op(*new_inputs, **kwargs)\n return _as_mx_np_array(out, device=inputs[0].device)\n # ops with np mx_np\n elif name in ufunc_list and isinstance(inputs[0], _np.ndarray):\n # inplace\n if 'out' in kwargs:\n new_inputs = [arg.asnumpy() if isinstance(arg, ndarray) else arg for arg in inputs]\n return onp_op(*new_inputs, **kwargs)\n else:\n new_inputs = [_as_mx_np_array(arg, device=inputs[1].device)\n if isinstance(arg, _np.ndarray) else arg for arg in inputs]\n return mx_ufunc(*new_inputs, **kwargs)\n else:\n return mx_ufunc(*inputs, **kwargs)\n else:\n return NotImplemented\n\n @staticmethod\n def __array_function__(self, func, types, args, kwargs): # pylint: disable=bad-staticmethod-argument\n \"\"\"\n Dispatch official NumPy 
operators that comply with the array function protocol to\n this function.\n \"\"\"\n mx_np_func = _NUMPY_ARRAY_FUNCTION_DICT.get(func, None)\n func_name = func.__name__\n if mx_np_func is None:\n # try to fallback to official NumPy op\n if is_recording():\n raise ValueError(\"Falling back to NumPy operator {} with autograd active is not supported.\"\n \"Please consider moving the operator to the outside of the autograd scope.\")\\\n .format(func)\n cur_device = None\n new_args, cur_device = _as_onp_array(args, cur_device)\n new_kwargs, cur_device = _as_onp_array(kwargs, cur_device)\n if cur_device is None:\n raise ValueError('Unknown device for the input ndarrays. It is probably a bug. Please'\n ' create an issue on GitHub.')\n if func not in _FALLBACK_ARRAY_FUNCTION_WARNED_RECORD:\n import logging\n logging.warning(\"np.%s is a fallback operator, \"\n \"which is actually using official numpy's implementation.\", func_name)\n _FALLBACK_ARRAY_FUNCTION_WARNED_RECORD[func] = True\n out = func(*new_args, **new_kwargs)\n return _as_mx_np_array(out, device=cur_device)\n else:\n if py_all(issubclass(t, ndarray) for t in types):\n return mx_np_func(*args, **kwargs)\n else:\n try:\n cur_device = next(a.device for a in args if hasattr(a, 'device'))\n except StopIteration:\n cur_device = next(a.device for a in kwargs.values() if hasattr(a, 'device'))\n new_args = _as_mx_np_array(args, device=cur_device,\n zero_copy=func_name in {'may_share_memory', 'shares_memory'})\n new_kwargs = {k: _as_mx_np_array(v, cur_device) for k, v in kwargs.items()}\n return mx_np_func(*new_args, **new_kwargs)\n\n\n def __array_namespace__(self, api_version=None):\n \"\"\"\n Returns an object that has all the array API functions on it.\n\n Notes\n -----\n This is a standard API in\n https://data-apis.org/array-api/latest/API_specification/array_object.html#array-namespace-self-api-version-none.\n\n Parameters\n ----------\n self : ndarray\n The indexing key.\n api_version : Optional, string\n string representing the version of the array API specification to be returned, in `YYYY.MM` form.\n If it is None, it should return the namespace corresponding to latest version of the array API\n specification.\n \"\"\"\n if api_version is not None:\n try:\n date = datetime.datetime.strptime(api_version, '%Y.%m')\n if date.year != 2021:\n raise ValueError\n except ValueError:\n raise ValueError(f\"Unrecognized array API version: {api_version!r}\")\n return sys.modules[self.__module__]\n\n\n def __dlpack__(self, stream=None):\n \"\"\"Exports the array for consumption by from_dlpack() as a DLPack capsule.\n\n Parameters\n ----------\n stream : int, optional\n A Python integer representing a pointer to a stream (CUDA or ROCm).\n Stream is provided by the consumer to the producer to instruct the producer\n to ensure that operations can safely be performed on the array. The pointer must\n be positive integer or -1. If stream is -1, the value must be used by the consumer\n to signal \"producer must not perform any synchronization\". 
\n\n Returns\n -------\n capsule : PyCapsule\n A DLPack capsule for the array, containing a DLPackManagedTensor.\n \"\"\"\n if stream is not None:\n if type(stream) is not int:\n raise TypeError('The input stream must be int or None')\n if self.device.device_type != \"gpu\":\n raise ValueError('Stream {} is not supported in current device {}'\\\n .format(stream, self.device.device_type))\n if stream != -1:\n check_call(_LIB.MXPushStreamDep(self.handle, ctypes.c_int64(stream)))\n to_dlpack_write = ndarray_to_dlpack_for_write()\n return to_dlpack_write(self)\n\n\n def __dlpack_device__(self):\n \"\"\"Returns device type and device ID in DLPack format\"\"\"\n devtype_map = {'cpu': DLDeviceType.DLCPU,\n 'gpu': DLDeviceType.DLGPU,\n 'cpu_pinned': DLDeviceType.DLCPUPINNED}\n if self.device.device_type not in devtype_map:\n raise ValueError('Unkown device type {} for DLPack'.format(self.device.device_type))\n return (devtype_map[self.device.device_type], self.device.device_id)\n\n\n def _get_np_basic_indexing(self, key):\n \"\"\"\n This function indexes ``self`` with a tuple of `slice` objects only.\n \"\"\"\n key_nd = tuple(idx for idx in key if idx is not None)\n if len(key_nd) < self.ndim:\n raise RuntimeError(\n 'too few indices after normalization: expected `ndim` ({}) '\n 'but got {}. This is a bug, please report it!'\n ''.format(self.ndim, len(key_nd))\n )\n if len(key_nd) > self.ndim:\n raise IndexError(\n 'too many indices ({}) for array with {} dimensions'\n ''.format(len(key_nd), self.ndim)\n )\n\n none_axes = [ax for ax in range(len(key)) if key[ax] is None] # pylint: disable=invalid-name\n slc_key, int_axes = self._basic_indexing_key_int_to_slice(key_nd)\n new_axes = self._new_axes_after_basic_indexing(none_axes, key)\n\n # Check bounds for integer axes\n for ax in int_axes: # pylint: disable=invalid-name\n if not -self.shape[ax] <= key_nd[ax] < self.shape[ax]:\n raise IndexError(\n 'index {} is out of bounds for axis {} with size {}'\n ''.format(key_nd[ax], ax, self.shape[ax]))\n\n if self._basic_indexing_slice_is_contiguous(slc_key, self.shape):\n # Create a shared-memory view by using low-level flat slicing\n flat_begin, flat_end = self._basic_indexing_contiguous_flat_begin_end(\n slc_key, self.shape\n )\n handle = NDArrayHandle()\n flat_self = self.reshape_view(-1)\n if _int64_enabled():\n check_call(\n _LIB.MXNDArraySlice64(\n flat_self.handle,\n ctypes.c_int64(flat_begin),\n ctypes.c_int64(flat_end),\n ctypes.byref(handle),\n )\n )\n else:\n check_call(\n _LIB.MXNDArraySlice(\n flat_self.handle,\n ctypes.c_uint32(flat_begin),\n ctypes.c_uint32(flat_end),\n ctypes.byref(handle),\n )\n )\n sliced_shape = self._basic_indexing_sliced_shape(slc_key, self.shape)\n sliced = self.__class__(handle=handle, writable=self.writable)\n if 0 in sliced_shape:\n sliced = sliced.reshape(sliced_shape)\n else:\n sliced = sliced.reshape_view(sliced_shape)\n\n else:\n begin, end, step = self._basic_indexing_key_to_begin_end_step(\n slc_key, self.shape, keep_none=True\n )\n sliced = _npi.slice(self, begin, end, step)\n\n # Reshape to final shape due to integer and `None` entries in `key`.\n final_shape = [sliced.shape[i] for i in range(sliced.ndim) if i not in int_axes]\n for ax in new_axes: # pylint: disable=invalid-name\n final_shape.insert(ax, 1)\n\n if sliced.size == 0:\n return sliced.reshape(tuple(final_shape))\n else:\n return sliced.reshape_view(tuple(final_shape))\n\n def _get_np_empty_tuple_indexing(self, key):\n new_shape = []\n num_none = 0\n for i, idx in enumerate(key):\n if idx is 
None:\n new_shape.append(1) # expand dimension\n num_none += 1\n elif idx == ():\n new_shape.append(0) # 0 shape\n elif idx == slice(None, None, None):\n new_shape.append(self.shape[i - num_none])\n return empty(new_shape, dtype=self.dtype)\n\n def _get_np_advanced_indexing(self, key):\n idcs, new_axes = self._get_index_nd(key)\n if type(idcs) == NDArray: # pylint: disable=unidiomatic-typecheck\n idcs = idcs.as_np_ndarray()\n else:\n idcs = _mx_nd_np.stack([i if isinstance(i, self.__class__) else i.as_np_ndarray() for i in idcs])\n sliced = _npi.gather_nd(self, idcs)\n # Reshape due to `None` entries in `key`.\n if new_axes:\n final_shape = [sliced.shape[i] for i in range(sliced.ndim)]\n for ax in new_axes: # pylint: disable=invalid-name\n final_shape.insert(ax, 1)\n return sliced.reshape(tuple(final_shape))\n else:\n return sliced\n\n def _set_np_advanced_indexing(self, key, value):\n \"\"\"This function is called by __setitem__ when key is an advanced index.\"\"\"\n idcs, new_axes = self._get_index_nd(key)\n if type(idcs) == NDArray: # pylint: disable=unidiomatic-typecheck\n idcs = idcs.as_np_ndarray()\n else:\n idcs = _mx_nd_np.stack([i if isinstance(i, self.__class__) else i.as_np_ndarray() for i in idcs])\n vshape = get_oshape_of_gather_nd_op(self.shape, idcs.shape)\n value_nd = self._prepare_value_nd(value, bcast_shape=vshape, squeeze_axes=new_axes)\n self._scatter_set_nd(value_nd, idcs)\n\n # pylint: disable=redefined-outer-name\n def _get_np_boolean_indexing(self, key, ndim, shape):\n \"\"\"\n There are two types of boolean indices (which are equivalent,\n for the most part though). This function will handle single\n boolean indexing for higher speed.\n If this is not the case, it is instead expanded into (multiple)\n integer array indices and will be handled by advanced indexing.\n \"\"\"\n key_shape = key.shape\n key_ndim = len(key_shape)\n if ndim < key_ndim:\n raise IndexError('too many indices, whose ndim = {}, for array with ndim = {}'\n .format(key_ndim, ndim))\n for i in range(key_ndim):\n if key_shape[i] != shape[i]:\n raise IndexError('boolean index did not match indexed array along dimension {};'\n ' dimension is {} but corresponding boolean dimension is {}'\n .format(i, shape[i], key_shape[i]))\n remaining_dims = shape[key_ndim:]\n data = _reshape_view(self, -1, *remaining_dims)\n key = _reshape_view(key, -1)\n if data.size == 0 and key.size == 0:\n return data\n return _reshape_view(_npi.boolean_mask(data, key), -1, *remaining_dims)\n\n def _set_np_boolean_indexing(self, key, value):\n \"\"\"\n There are two types of boolean indices (which are equivalent,\n for the most part though). 
This function will handle single boolean assign for higher speed.\n If this is not the case, it is instead expanded into (multiple)\n integer array indices and will be handled by advanced assign.\n \"\"\"\n if isinstance(value, numeric_types):\n _npi.boolean_mask_assign_scalar(data=self, mask=key,\n value=int(value) if isinstance(value, bool) else value,\n start_axis=0, out=self)\n elif isinstance(value, ndarray):\n _npi.boolean_mask_assign_tensor(data=self, mask=key, value=value, start_axis=0, out=self)\n else:\n raise NotImplementedError('type %s is not supported.'%(type(value)))\n\n # pylint: disable=too-many-return-statements\n def __getitem__(self, key):\n \"\"\"Return self[key].\n\n Returns a sliced view of this array if the elements fetched are contiguous in memory;\n otherwise, returns a newly created NDArray.\n This functions supports advanced indexing defined in the following reference with\n some restrictions. Boolean indexing is supported only for a single boolean ndarray\n as a key. Mixing boolean ndarray with other index types is not supported in ``advanced``\n indexing.\n\n For basic indexing, i.e., if ``key`` consists only of integers,\n ``slice``, ``Ellipsis`` (``...``) and ``None``, a mutable view is\n returned that shares memory with this array if the accessed portion is\n contiguous in memory.\n Otherwise, a newly created ``ndarray`` is returned.\n\n This functions supports advanced indexing as defined in `the NumPy\n advanced indexing documentation\n <https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing>`_.\n\n Parameters\n ----------\n key : int, slice, list, np.ndarray, mx.np.ndarray, or tuple of all previous types\n Indexing key.\n\n Examples\n --------\n The default is to give explicit indices for all axes:\n\n >>> x = np.arange(6).reshape(2, 3)\n >>> x\n array([[0., 1., 2.],\n [3., 4., 5.]])\n >>> x[0, :2]\n array([0., 1.])\n >>> x[:, :-1]\n array([[0., 1.],\n [3., 4.]])\n\n If fewer indices are given, they are automatically supplemented by an\n appropriate number of ``slice(None)`` (\"``:``\") to the right. For\n instance, a single integer indexes along the first axis:\n\n >>> x[0]\n array([0., 1., 2.])\n >>> x[1:]\n array([[3., 4., 5.]])\n\n To omit a range of axes that should be kept as-is, an `Ellipsis`\n (\"``...``\") can be used:\n\n >>> x = np.arange(16).reshape(2, 2, 2, 2)\n >>> x[0, ..., 1]\n array([[1., 3.],\n [5., 7.]])\n >>> x[0, :, :, 1] # equivalent\n array([[1., 3.],\n [5., 7.]])\n\n New axes of length 1 can be created by inserting ``None``\n (`numpy.newaxis`) in the index:\n\n >>> x = np.arange(6).reshape(2, 3)\n >>> x[None, :, :]\n array([[[0., 1., 2.],\n [3., 4., 5.]]])\n >>> x[None, :, :].shape\n (1, 2, 3)\n\n If the indexed portion of the array is contiguous in memory, no data\n is copied. 
Instead, a shared-memory view of the original array is\n returned, and changes to that view affect the original array:\n\n >>> x = np.arange(8).reshape(2, 2, 2)\n >>> y = x[0] # contiguous\n >>> y\n array([[0., 1.],\n [2., 3.]])\n >>> y[:] = -1\n >>> x\n array([[[-1., -1.],\n [-1., -1.]],\n [[ 4., 5.],\n [ 6., 7.]]])\n >>> x = np.arange(8).reshape(2, 2, 2)\n >>> y = x[1, :1, :] # contiguous\n >>> y\n array([[4., 5.]])\n >>> y[:] = -1\n >>> x\n array([[[ 0., 1.],\n [ 2., 3.]],\n [[-1., -1.],\n [ 6., 7.]]])\n >>> x = np.arange(0, 8).reshape(2, 2, 2)\n >>> y = x[:, :, 1] # not contiguous\n >>> y\n array([[1., 3.],\n [5., 7.]])\n >>> y[:] = -1\n >>> x\n array([[[0., 1.],\n [2., 3.]],\n [[4., 5.],\n [6., 7.]]])\n\n If the indexing key contains `list`, `numpy.ndarray` or `NDArray`\n objects, advanced indexing is triggered, which always returns a\n copy:\n\n >>> x = np.arange(8).reshape(2, 2, 2)\n >>> x[[0, 1]]\n array([[[0., 1.],\n [2., 3.]],\n [[4., 5.],\n [6., 7.]]])\n >>> x[[0, 1], :] # equivalent\n array([[[0., 1.],\n [2., 3.]],\n [[4., 5.],\n [6., 7.]]])\n >>> y = np.array([0, 1], dtype='int32')\n >>> x[1:, y]\n array([[[4., 5.],\n [6., 7.]]])\n >>> y = np.array([0, 1], dtype='int32')\n >>> x[1:, y]\n array([[[4., 5.],\n [6., 7.]]])\n\n Get negative elements in an ndarray through boolean array indexing\n >>> x = np.array([1., -1., -2., 3])\n >>> x[x < 0]\n array([-1., -2.])\n\n For more imformation related to boolean indexing, please refer to\n https://docs.scipy.org/doc/numpy-1.17.0/reference/arrays.indexing.html.\n \"\"\"\n ndim = self.ndim # pylint: disable=redefined-outer-name\n shape = self.shape # pylint: disable=redefined-outer-name\n if isinstance(key, bool): # otherwise will be treated as 0 and 1\n key = array(key, dtype=_np.bool, device=self.device)\n if isinstance(key, list):\n try:\n new_key = _np.array(key)\n if new_key.dtype == _np.bool_:\n key = new_key\n except Exception as err:\n raise TypeError('{}'.format(str(err)))\n if isinstance(key, _np.ndarray):\n if dc.is_deferred_compute():\n raise TypeError('Indexing with a numpy array is not supported in HybridBlock.')\n if key.dtype == _np.bool_:\n key = array(key, dtype='bool', device=self.device)\n\n # Handle single boolean index of matching dimensionality and size first for higher speed\n # If the boolean array is mixed with other idices, it is instead expanded into (multiple)\n # integer array indices and will be handled by advanced indexing.\n # Come before the check self.dim == 0 as it also handle the 0-dim case.\n if isinstance(key, ndarray) and key.dtype == _np.bool_:\n return self._get_np_boolean_indexing(key, ndim, shape)\n\n all = __builtins__['all'] # `def all` below shadows the all builtin\n if ndim == 0 and key != ():\n raise IndexError('scalar tensor can only accept `()` as index')\n # Handle simple cases for higher speed\n if isinstance(key, tuple) and len(key) == 0:\n return self\n if isinstance(key, tuple) and len(key) == ndim\\\n and py_all(isinstance(idx, integer_types) for idx in key):\n out = self\n for idx in key:\n out = out[idx]\n return out\n if isinstance(key, integer_types):\n # Equivalent to isinstance(key, integer_types) case in numpy/_symbol.py\n if key > shape[0] - 1:\n raise IndexError(\n 'index {} is out of bounds for axis 0 with size {}'.format(\n key, shape[0]))\n return self._at(key)\n elif isinstance(key, py_slice):\n # Unlike numpy/_symbol.py, calls MXNDArraySlice64 writable memory\n # sharing if key.step not in [None, 1]. 
Equivalent otherwise to\n # isinstance(key, py_slice) case in _symbol.py otherwise.\n if key.step is None or key.step == 1:\n if key.start is not None or key.stop is not None:\n return self._slice(key.start, key.stop)\n else:\n return self\n elif key.step != 0:\n start = [None] if key.start is None else key.start\n stop = [None] if key.stop is None else key.stop\n return _npi.slice(self, start, stop, key.step)\n else:\n raise ValueError(\"slice step cannot be zero\")\n elif isinstance(key, tuple) and \\\n all((isinstance(arr, NDArray) and _np.issubdtype(arr.dtype, _np.integer) and \\\n arr.ndim > 0) for arr in key):\n # Equivalent case in numpy/_symbol.py\n return _npi.advanced_indexing_multiple(self, _mx_nd_np.stack(key))\n elif isinstance(key, tuple) and dc.is_deferred_compute():\n # Equivalent to isinstance(key, tuple) case in numpy/_symbol.py\n # Only enabled in deferred compute mode, as this codepath prevents\n # memory sharing which may be desired in non-deferred compute\n # imperative mode.\n begin = []\n end = []\n step = []\n new_shape = ()\n assert len(key) # len(key) == 0 is handled a above\n unsupported = False\n for index in key:\n if isinstance(index, py_slice):\n if index.step is not None and index.step == 0:\n raise ValueError(\"slice step cannot be zero\")\n begin.append(index.start)\n end.append(index.stop)\n step.append(index.step)\n new_shape += (-2,)\n elif isinstance(index, integer_types):\n if index >= 0:\n begin.append(index)\n end.append(index+1)\n step.append(1)\n else:\n begin.append(index)\n end.append(index - 1)\n step.append(-1)\n new_shape += (-3,)\n else:\n unsupported = True\n break\n if not unsupported:\n new_shape += (-4,)\n sliced = _npi.slice(self, begin, end, step)\n return _mx_nd_np.reshape(sliced, new_shape)\n\n # Special handling for cases only supported in imperative mode\n if dc.is_deferred_compute():\n raise TypeError('The type of indexing used is not supported in HybridBlock.')\n # For 0-d boolean indices: A new axis is added,\n # but at the same time no axis is \"used\". So if we have True,\n # we add a new axis (a bit like with np.newaxis). 
If it is\n # False, we add a new axis, but this axis has 0 entries.\n # prepend is defined to handle this case.\n # prepend = _NDARRAY_NO_ZERO_DIM_BOOL_ARRAY/-1 means there is no 0-d boolean scalar\n # prepend = _NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE/0 means an zero dim must be expanded\n # prepend = _NDARRAY_ZERO_DIM_BOOL_ARRAY_TRUE/1 means a new axis must be prepended\n key, prepend = indexing_key_expand_implicit_axes(key, self.shape)\n indexing_dispatch_code = get_indexing_dispatch_code(key)\n if indexing_dispatch_code == _NDARRAY_EMPTY_TUPLE_INDEXING:\n # won't be affected by zero-dim boolean indices\n return self._get_np_empty_tuple_indexing(key)\n elif indexing_dispatch_code == _NDARRAY_BASIC_INDEXING:\n if prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE:\n return empty((0,) + self._get_np_basic_indexing(key).shape,\n dtype=self.dtype, device=self.device)\n if prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_TRUE:\n key = (_np.newaxis,) + key\n return self._get_np_basic_indexing(key)\n elif indexing_dispatch_code == _NDARRAY_ADVANCED_INDEXING:\n if prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE:\n return empty((0,) + self._get_np_adanced_indexing(key).shape,\n dtype=self.dtype, device=self.device)\n if prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_TRUE:\n key = (_np.newaxis,) + key\n return self._get_np_advanced_indexing(key)\n else:\n raise RuntimeError\n\n # pylint: disable=inconsistent-return-statements\n def __setitem__(self, key, value):\n \"\"\"Sets ``self[key]`` to ``value``.\n\n This functions supports advanced indexing as defined in `the NumPy\n advanced indexing documentation\n <https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing>`_,\n with the restriction that boolean array indexing is not supported.\n\n Parameters\n ----------\n key : int, slice, list, np.ndarray, mx.np.ndarray, or tuple of all previous types\n The indexing key.\n value : scalar or array-like object that can be broadcast to the shape of self[key]\n The value to set.\n\n Examples\n --------\n >>> x = np.zeros((2, 3))\n >>> x[:] = 1\n >>> x\n array([[ 1., 1., 1.],\n [ 1., 1., 1.]])\n >>> x[:, 1:2] = 2\n >>> x\n array([[ 1., 2., 1.],\n [ 1., 2., 1.]])\n >>> x[1:2, 1:] = 3\n >>> x\n array([[ 1., 2., 1.],\n [ 1., 3., 3.]])\n >>> x[1:, 0:2] = np.zeros((1, 2))\n >>> x\n array([[ 1., 2., 1.],\n [ 0., 0., 3.]])\n >>> x[1, 2] = 4\n >>> x\n array([[ 1., 2., 1.],\n [ 0., 0., 4.]])\n >>> x[[0], [1, 2]] = 5\n >>> x\n array([[ 1., 5., 5.],\n [ 0., 0., 4.]])\n >>> x[::-1, 0:2:2] = [6]\n >>> x\n array([[ 6., 5., 5.],\n [ 6., 0., 4.]])\n\n For imformation related to boolean indexing, please refer to\n https://docs.scipy.org/doc/numpy-1.17.0/reference/arrays.indexing.html.\n \"\"\"\n if isinstance(value, NDArray) and not isinstance(value, ndarray):\n raise TypeError('Cannot assign mx.nd.NDArray to mxnet.numpy.ndarray')\n if isinstance(key, bool): # otherwise will be treated as 0 and 1\n key = array(key, dtype=_np.bool)\n\n # Handle single boolean assign of matching dimensionality and size first for higher speed\n # If the boolean array is mixed with other idices, it is instead expanded into (multiple)\n # integer array indices and will be handled by advanced assign.\n # Come before the check self.dim == 0 as it also handle the 0-dim case.\n if isinstance(key, ndarray) and key.dtype == _np.bool:\n return self._set_np_boolean_indexing(key, value)\n\n # handle basic and advanced indexing\n if self.ndim == 0:\n if not isinstance(key, tuple) or len(key) != 0:\n raise IndexError('scalar tensor can only accept `()` as 
index')\n if isinstance(value, numeric_types):\n self._full(value)\n elif isinstance(value, ndarray) and value.size == 1:\n if value.shape != self.shape:\n value = value.reshape(self.shape)\n value.copyto(self)\n elif isinstance(value, (_np.ndarray, _np.generic)) and value.size == 1:\n if isinstance(value, _np.generic) or value.shape != self.shape:\n value = value.reshape(self.shape)\n self._sync_copyfrom(value)\n else:\n raise ValueError('setting an array element with a sequence.')\n else:\n # For 0-d boolean indices: A new axis is added,\n # but at the same time no axis is \"used\". So if we have True,\n # we add a new axis (a bit like with np.newaxis). If it is\n # False, we add a new axis, but this axis has 0 entries.\n # prepend is defined to handle this case.\n # prepend == _NDARRAY_NO_ZERO_DIM_BOOL_ARRAY/-1 means there is no 0-d boolean scalar\n # prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE/0 means an zero dim must be expanded\n # prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_TRUE/1 means a new axis must be expanded\n # prepend actually has no influence on __setitem__\n key, prepend = indexing_key_expand_implicit_axes(key, self.shape)\n if prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE:\n return # no action is needed\n slc_key = tuple(idx for idx in key if idx is not None)\n if len(slc_key) < self.ndim:\n raise RuntimeError(\n 'too few indices after normalization: expected `ndim` ({}) '\n 'but got {}. This is a bug, please report it!'\n ''.format(self.ndim, len(slc_key))\n )\n if len(slc_key) > self.ndim and self.ndim != 0:\n raise IndexError(\n 'too many indices ({}) for array with {} dimensions'\n ''.format(len(slc_key), self.ndim)\n )\n indexing_dispatch_code = get_indexing_dispatch_code(slc_key)\n if indexing_dispatch_code == _NDARRAY_BASIC_INDEXING:\n self._set_nd_basic_indexing(key, value) # function is inheritated from NDArray class\n elif indexing_dispatch_code == _NDARRAY_EMPTY_TUPLE_INDEXING:\n pass # no action needed\n elif indexing_dispatch_code == _NDARRAY_ADVANCED_INDEXING:\n self._set_np_advanced_indexing(key, value)\n else:\n raise ValueError(\n 'Indexing NDArray with index {} of type {} is not supported'\n ''.format(key, type(key))\n )\n\n def _prepare_value_nd(self, value, bcast_shape, squeeze_axes=None):\n \"\"\"Return a broadcast `ndarray` with same device and dtype as ``self``.\n For setting item, The returned `ndarray` is squeezed according to squeeze_axes since the\n value_nd is assigned to not yet expanded space in original array.\n `value`: numeric types or array like.\n `bcast_shape`: a shape tuple.\n `squeeze_axes`: a sequence of axes to squeeze in the value array.\n Note: mxnet.numpy.ndarray not support NDArray as assigned value.\n \"\"\"\n if isinstance(value, numeric_types):\n value_nd = full(bcast_shape, value, device=self.device, dtype=self.dtype)\n elif isinstance(value, self.__class__):\n value_nd = value.to_device(self.device)\n if value_nd.dtype != self.dtype:\n value_nd = value_nd.astype(self.dtype)\n else:\n try:\n value_nd = array(value, device=self.device, dtype=self.dtype)\n except:\n raise TypeError('mxnet.np.ndarray does not support assignment with non-array-like '\n 'object {} of type {}'.format(value, type(value)))\n\n # For advanced indexing setitem, if there is None in indices, we need to squeeze the\n # assigned value_nd since None is also ignored in slicing the original array.\n if squeeze_axes and value_nd.ndim > len(bcast_shape):\n squeeze_axes = tuple([ax for ax in squeeze_axes if ax < len(value_nd.shape)])\n value_nd = 
value_nd.squeeze(axis=tuple(squeeze_axes))\n\n # handle the cases like the following\n # a = np.zeros((3, 3)), b = np.ones((1, 1, 1, 1, 3)), a[0] = b\n # b cannot broadcast directly to a[0].shape unless its leading 1-size axes are trimmed\n if value_nd.ndim > len(bcast_shape):\n squeeze_axes = []\n for i in range(value_nd.ndim - len(bcast_shape)):\n if value_nd.shape[i] == 1:\n squeeze_axes.append(i)\n else:\n break\n if squeeze_axes:\n value_nd = value_nd.squeeze(squeeze_axes)\n\n if value_nd.shape != bcast_shape:\n if value_nd.size == 0:\n value_nd = value_nd.reshape(bcast_shape)\n else:\n value_nd = value_nd.broadcast_to(bcast_shape)\n return value_nd\n\n @wrap_mxnp_np_ufunc\n def __add__(self, other):\n \"\"\"x.__add__(y) <=> x + y\"\"\"\n return add(self, other)\n\n @wrap_mxnp_np_ufunc\n def __iadd__(self, other):\n \"\"\"x.__iadd__(y) <=> x += y\"\"\"\n if not self.writable:\n raise ValueError('trying to add to a readonly ndarray')\n return add(self, other, out=self)\n\n @wrap_mxnp_np_ufunc\n def __radd__(self, other):\n \"\"\"x.__radd__(y) <=> y + x\"\"\"\n return add(other, self)\n\n def __invert__(self):\n \"\"\"x.__invert__() <=> ~x\"\"\"\n return invert(self)\n\n @wrap_mxnp_np_ufunc\n def __and__(self, other):\n \"\"\"x.__and__(y) <=> x & y\"\"\"\n return bitwise_and(self, other)\n\n @wrap_mxnp_np_ufunc\n def __rand__(self, other):\n \"\"\"x.__rand__(y) <=> y & x\"\"\"\n return bitwise_and(other, self)\n\n @wrap_mxnp_np_ufunc\n def __or__(self, other):\n \"\"\"x.__or__(y) <=> x | y\"\"\"\n return bitwise_or(self, other)\n\n @wrap_mxnp_np_ufunc\n def __ror__(self, other):\n \"\"\"x.__ror__(y) <=> y | x\"\"\"\n return bitwise_or(other, self)\n\n @wrap_mxnp_np_ufunc\n def __xor__(self, other):\n \"\"\"x.__xor__(y) <=> x ^ y\"\"\"\n return bitwise_xor(self, other)\n\n @wrap_mxnp_np_ufunc\n def __rxor__(self, other):\n \"\"\"x.__rxor__(y) <=> y ^ x\"\"\"\n return bitwise_xor(other, self)\n\n @wrap_mxnp_np_ufunc\n def __lshift__(self, other):\n \"\"\"x.__lshift__(y) <=> x << y\"\"\"\n return bitwise_left_shift(self, other)\n\n @wrap_mxnp_np_ufunc\n def __rshift__(self, other):\n \"\"\"x.__rshift__(y) <=> x >> y\"\"\"\n return bitwise_right_shift(self, other)\n\n @wrap_mxnp_np_ufunc\n def __iand__(self, other):\n \"\"\"x.__iand__(y) <=> x &= y\"\"\"\n return bitwise_and(self, other, out=self)\n\n @wrap_mxnp_np_ufunc\n def __ior__(self, other):\n r\"\"\"x.__ior__(y) <=> x \\|= y\"\"\"\n return bitwise_or(self, other, out=self)\n\n @wrap_mxnp_np_ufunc\n def __ixor__(self, other):\n \"\"\"x.__ixor__(y) <=> x ^= y\"\"\"\n return bitwise_xor(self, other, out=self)\n\n @wrap_mxnp_np_ufunc\n def __ilshift__(self, other):\n \"\"\"x.__ilshift__(y) <=> x <<= y\"\"\"\n return bitwise_left_shift(self, other, out=self)\n\n @wrap_mxnp_np_ufunc\n def __irshift__(self, other):\n \"\"\"x.__irshift__(y) <=> x >>= y\"\"\"\n return bitwise_right_shift(self, other, out=self)\n\n @wrap_mxnp_np_ufunc\n def __rlshift__(self, other):\n \"\"\"x.__rlshift__(y) <=> y << x\"\"\"\n return bitwise_left_shift(other, self)\n\n @wrap_mxnp_np_ufunc\n def __rrshift__(self, other):\n \"\"\"x.__rrshift__(y) <=> y >> x\"\"\"\n return bitwise_right_shift(other, self)\n\n def __round__(self, n=0):\n \"\"\"x.__round__(n)\"\"\"\n return round(self, decimals=n)\n\n def __abs__(self):\n \"\"\"x.__abs__()\"\"\"\n return absolute(self)\n\n def __ceil__(self):\n \"\"\"x.__ceil__()\"\"\"\n return ceil(self)\n\n def __floor__(self):\n \"\"\"x.__floor__()\"\"\"\n return floor(self)\n\n def __trunc__(self):\n \"\"\"x.__trunc__()\"\"\"\n 
return trunc(self)\n\n @wrap_mxnp_np_ufunc\n def __sub__(self, other):\n \"\"\"x.__sub__(y) <=> x - y\"\"\"\n return subtract(self, other)\n\n @wrap_mxnp_np_ufunc\n def __isub__(self, other):\n \"\"\"x.__isub__(y) <=> x -= y\"\"\"\n if not self.writable:\n raise ValueError('trying to subtract from a readonly ndarray')\n return subtract(self, other, out=self)\n\n @wrap_mxnp_np_ufunc\n def __rsub__(self, other):\n \"\"\"x.__rsub__(y) <=> y - x\"\"\"\n return subtract(other, self)\n\n @wrap_mxnp_np_ufunc\n def __mul__(self, other):\n \"\"\"x.__mul__(y) <=> x * y\"\"\"\n return multiply(self, other)\n\n @wrap_mxnp_np_ufunc\n def __floordiv__(self, other):\n \"\"\"x.__floordiv__(y) <=> x // y\"\"\"\n return floor_divide(self, other)\n\n @wrap_mxnp_np_ufunc\n def __ifloordiv__(self, other):\n \"\"\"x.__ifloordiv__(y) <=> x //= y\"\"\"\n if not self.writable:\n raise ValueError('trying to divide from a readonly ndarray')\n return floor_divide(self, other, out=self)\n\n @wrap_mxnp_np_ufunc\n def __rfloordiv__(self, other):\n \"\"\"x.__rfloordiv__(y) <=> y // x\"\"\"\n return floor_divide(other, self)\n\n def __neg__(self):\n \"\"\"x.__neg__() <=> -x\"\"\"\n return negative(self)\n\n def __pos__(self):\n \"\"\"x.__pos__() <=> +x\"\"\"\n return positive(self)\n\n @wrap_mxnp_np_ufunc\n def __imul__(self, other):\n r\"\"\"x.__imul__(y) <=> x \\*= y\"\"\"\n if not self.writable:\n raise ValueError('trying to add to a readonly ndarray')\n return multiply(self, other, out=self)\n\n @wrap_mxnp_np_ufunc\n def __rmul__(self, other):\n \"\"\"x.__rmul__(y) <=> y * x\"\"\"\n return self.__mul__(other)\n\n @wrap_mxnp_np_ufunc\n def __div__(self, other):\n \"\"\"x.__div__(y) <=> x / y\"\"\"\n return divide(self, other)\n\n @wrap_mxnp_np_ufunc\n def __rdiv__(self, other):\n \"\"\"x.__rdiv__(y) <=> y / x\"\"\"\n return divide(other, self)\n\n @wrap_mxnp_np_ufunc\n def __idiv__(self, other):\n \"\"\"x.__idiv__(y) <=> x /= y\"\"\"\n return divide(self, other, out=self)\n\n @wrap_mxnp_np_ufunc\n def __truediv__(self, other):\n \"\"\"x.__truediv__(y) <=> x / y\"\"\"\n return divide(self, other)\n\n @wrap_mxnp_np_ufunc\n def __rtruediv__(self, other):\n \"\"\"x.__rtruediv__(y) <=> y / x\"\"\"\n return divide(other, self)\n\n @wrap_mxnp_np_ufunc\n def __itruediv__(self, other):\n \"\"\"x.__itruediv__(y) <=> x /= y\"\"\"\n return divide(self, other, out=self)\n\n @wrap_mxnp_np_ufunc\n def __mod__(self, other):\n \"\"\"x.__mod__(y) <=> x % y\"\"\"\n return mod(self, other)\n\n @wrap_mxnp_np_ufunc\n def __rmod__(self, other):\n \"\"\"x.__rmod__(y) <=> y % x\"\"\"\n return mod(other, self)\n\n @wrap_mxnp_np_ufunc\n def __imod__(self, other):\n \"\"\"x.__imod__(y) <=> x %= y\"\"\"\n return mod(self, other, out=self)\n\n @wrap_mxnp_np_ufunc\n def __pow__(self, other):\n \"\"\"x.__pow__(y) <=> x ** y\"\"\"\n return power(self, other)\n\n @wrap_mxnp_np_ufunc\n def __rpow__(self, other):\n \"\"\"x.__rpow__(y) <=> y ** x\"\"\"\n return power(other, self)\n\n @wrap_mxnp_np_ufunc\n def __ipow__(self, other):\n \"\"\"x.__ipow__(y) <=> x **= y\"\"\"\n return power(self, other, out=self)\n\n @wrap_mxnp_np_ufunc\n def __eq__(self, other):\n \"\"\"x.__eq__(y) <=> x == y\"\"\"\n return equal(self, other)\n\n def __hash__(self):\n raise NotImplementedError\n\n @wrap_mxnp_np_ufunc\n def __ne__(self, other):\n \"\"\"x.__ne__(y) <=> x != y\"\"\"\n return not_equal(self, other)\n\n @wrap_mxnp_np_ufunc\n def __gt__(self, other):\n \"\"\"x.__gt__(y) <=> x > y\"\"\"\n return greater(self, other)\n\n @wrap_mxnp_np_ufunc\n def __ge__(self, 
other):\n \"\"\"x.__ge__(y) <=> x >= y\"\"\"\n return greater_equal(self, other)\n\n @wrap_mxnp_np_ufunc\n def __lt__(self, other):\n \"\"\"x.__lt__(y) <=> x < y\"\"\"\n return less(self, other)\n\n @wrap_mxnp_np_ufunc\n def __le__(self, other):\n \"\"\"x.__le__(y) <=> x <= y\"\"\"\n return less_equal(self, other)\n\n @wrap_mxnp_np_ufunc\n def __matmul__(self, other):\n \"\"\"x.__matmul__(y) <=> x @ y\"\"\"\n return matmul(self, other)\n\n @wrap_mxnp_np_ufunc\n def __rmatmul__(self, other):\n \"\"\"x.__rmatmul__(y) <=> y @ x\"\"\"\n return matmul(other, self)\n\n @wrap_mxnp_np_ufunc\n def __imatmul__(self, other):\n \"\"\"x.__imatmul__(y) <=> x @= y\"\"\"\n return matmul(self, other, out=self)\n\n def __bool__(self):\n num_elements = self.size\n if num_elements == 0:\n warnings.simplefilter('default')\n warnings.warn('The truth value of an empty array is ambiguous. Returning False, but in'\n ' future this will result in an error.', DeprecationWarning)\n return False\n elif num_elements == 1:\n return bool(self.item())\n else:\n raise ValueError(\"The truth value of an ndarray with multiple elements is ambiguous.\")\n\n __nonzero__ = __bool__\n\n def __index__(self):\n if self.ndim == 0 and _np.issubdtype(self.dtype, _np.integer):\n return self.item()\n raise TypeError('only integer scalar arrays can be converted to a scalar index')\n\n def __float__(self):\n num_elements = self.size\n if num_elements != 1:\n raise TypeError('only size-1 arrays can be converted to Python scalars')\n return float(self.item())\n\n def __int__(self):\n num_elements = self.size\n if num_elements != 1:\n raise TypeError('only size-1 arrays can be converted to Python scalars')\n return int(self.item())\n\n def __len__(self):\n \"\"\"Number of elements along the first axis.\"\"\"\n shape = self.shape # pylint: disable=redefined-outer-name\n if len(shape) == 0:\n raise TypeError('len() of unsized object')\n return self.shape[0]\n\n def __reduce__(self):\n return ndarray, (None,), self.__getstate__()\n\n def item(self, *args):\n \"\"\"Copy an element of an array to a standard Python scalar and return it.\n\n Parameters\n ----------\n *args : Arguments (variable number and type)\n none: in this case, the method only works for arrays with one element (a.size == 1),\n which element is copied into a standard Python scalar object and returned.\n\n int_type: this argument is interpreted as a flat index into the array, specifying which\n element to copy and return.\n\n tuple of int_types: functions as does a single int_type argument, except that the\n argument is interpreted as an nd-index into the array.\n\n Returns\n -------\n z : Standard Python scalar object\n A copy of the specified element of the array as a suitable Python scalar.\n \"\"\"\n # TODO(junwu): no need to call asnumpy() on the whole array.\n return self.asnumpy().item(*args)\n\n def nonzero(self):\n \"\"\"Return the indices of the elements that are non-zero.\n\n Refer to `numpy.nonzero` for full documentation.\n\n See Also\n --------\n numpy.nonzero : equivalent function\n \"\"\"\n return nonzero(self)\n\n @property\n # pylint: disable= invalid-name, undefined-variable\n def T(self):\n \"\"\"Same as self.transpose(). This always returns a copy of self.\"\"\"\n if self.ndim != 2:\n warnings.warn('x.T requires x to have 2 dimensions. 
'\n 'Use x.mT to transpose stacks of matrices and '\n 'permute_dims() to permute dimensions.')\n return self.transpose()\n # pylint: enable= invalid-name, undefined-variable\n\n @property\n # pylint: disable= invalid-name, undefined-variable\n def mT(self):\n \"\"\"Same as self.transpose(). This always returns a copy of self.\"\"\"\n if self.ndim < 2:\n raise ValueError(\"x must be at least 2-dimensional for matrix_transpose\")\n return _mx_nd_np.swapaxes(self, -1, -2)\n # pylint: enable= invalid-name, undefined-variable\n\n def all(self, axis=None, out=None, keepdims=False):\n return _mx_nd_np.all(self, axis=axis, out=out, keepdims=keepdims)\n\n def any(self, axis=None, out=None, keepdims=False):\n return _mx_nd_np.any(self, axis=axis, out=out, keepdims=keepdims)\n\n def as_nd_ndarray(self):\n \"\"\"Convert mxnet.numpy.ndarray to mxnet.ndarray.NDArray to use its fluent methods.\"\"\"\n hdl = NDArrayHandle()\n check_call(_LIB.MXShallowCopyNDArray(self.handle, ctypes.byref(hdl)))\n return NDArray(handle=hdl, writable=self.writable)\n\n def as_np_ndarray(self):\n \"\"\"A convenience function for creating a numpy ndarray from the current ndarray\n with zero copy. For this class, it just returns itself since it's already a\n numpy ndarray.\"\"\"\n return self\n\n def __repr__(self):\n \"\"\"\n Returns a string representation of the array.\n The dtype of the ndarray will be appended if it's inconsistent with current dtype.\n The device of the ndarray will be appended for devices other than CPU.\n\n Examples\n --------\n >>> from mxnet import np, npx\n >>> a = np.random.uniform(size=(2, 3))\n >>> a\n array([[0.5488135 , 0.5928446 , 0.71518934],\n [0.84426576, 0.60276335, 0.8579456 ]])\n >>> print(a)\n [[0.5488135 0.5928446 0.71518934]\n [0.84426576 0.60276335 0.8579456 ]]\n >>> a.dtype\n dtype('float32')\n >>> npx.set_np_float64()\n >>> a\n array([[0.5488135 , 0.5928446 , 0.71518934],\n [0.84426576, 0.60276335, 0.8579456 ]], dtype=float32)\n >>> npx.set_np_float64(default_float64=False)\n >>> a\n array([[0.5488135 , 0.5928446 , 0.71518934],\n [0.84426576, 0.60276335, 0.8579456 ]])\n >>> b = a.astype(np.float64)\n >>> b\n array([[0.54881352, 0.59284461, 0.71518934],\n [0.84426576, 0.60276335, 0.85794562]], dtype=float64)\n >>> print(b)\n [[0.54881352 0.59284461 0.71518934]\n [0.84426576 0.60276335 0.85794562]]\n >>> b.dtype\n dtype('float64')\n >>> c = a.copyto(npx.gpu(0))\n >>> c\n array([[0.5488135 , 0.5928446 , 0.71518934],\n [0.84426576, 0.60276335, 0.8579456 ]], device=gpu(0))\n >>> print(c)\n [[0.5488135 0.5928446 0.71518934]\n [0.84426576 0.60276335 0.8579456 ]] @gpu(0)\n >>> d = b.copyto(npx.gpu(0))\n >>> d\n array([[0.54881352, 0.59284461, 0.71518934],\n [0.84426576, 0.60276335, 0.85794562]], dtype=float64, device=gpu(0))\n >>> print(d)\n [[0.54881352 0.59284461 0.71518934]\n [0.84426576 0.60276335 0.85794562]] @gpu(0)\n\n \"\"\"\n if self._alive:\n array_str = self.asnumpy().__repr__()\n dtype = self.dtype\n default_dtype = _np.float64 if is_np_default_dtype() else _np.float32\n if 'dtype=' in array_str:\n if dtype == default_dtype:\n array_str = array_str[:array_str.rindex(',')] + ')'\n elif dtype not in (default_dtype, _np.bool_):\n array_str = array_str[:-1] + ', dtype={})'.format(dtype)\n\n device = self.device\n if device.device_type == 'cpu':\n return array_str\n return array_str[:-1] + ', device={})'.format(str(device))\n else:\n return '<FREED {}>'.format(self.__class__.__name__)\n\n def __str__(self):\n \"\"\"Returns a string representation of the array.\"\"\"\n array_str = 
self.asnumpy().__str__()\n device = self.device\n if device.device_type == 'cpu' or self.ndim == 0:\n return array_str\n return '{array} @{device}'.format(array=array_str, device=device)\n\n def __format__(self, fmt):\n \"\"\"Return value.__format__(format_spec). Overwrite to include 0-d array\"\"\"\n if self.ndim == 0:\n return self.item().__format__(fmt)\n elif len(fmt) == 0:\n return self.__str__().__format__(fmt)\n else:\n raise TypeError(\"Cannot format mxnet.numpy.ndarray with format_spec\")\n\n def attach_grad(self, grad_req='write'): # pylint: disable=arguments-differ\n \"\"\"Attach a gradient buffer to this ndarray, so that `backward`\n can compute gradient with respect to it.\n\n Parameters\n ----------\n grad_req : {'write', 'add', 'null'}\n How gradient will be accumulated.\n * 'write': gradient will be overwritten on every backward.\n * 'add': gradient will be added to existing value on every backward.\n * 'null': do not compute gradient for this NDArray.\n \"\"\"\n grad = _mx_nd_np.zeros_like(self) # pylint: disable=undefined-variable\n grad_req = _GRAD_REQ_MAP[grad_req]\n check_call(_LIB.MXAutogradMarkVariables(\n 1, ctypes.pointer(self.handle),\n ctypes.pointer(mx_uint(grad_req)),\n ctypes.pointer(grad.handle)))\n\n def drop_grad(self):\n \"\"\"Free the memory of the marked ndarray.\"\"\"\n check_call(_LIB.MXAutogradDropGrads(\n 1, ctypes.pointer(self.handle)))\n\n @property\n def grad(self):\n \"\"\"Returns gradient buffer attached to this ndarray.\"\"\"\n hdl = NDArrayHandle()\n check_call(_LIB.MXNDArrayGetGrad(self.handle, ctypes.byref(hdl)))\n if hdl.value is None:\n return None\n return _np_ndarray_cls(hdl)\n\n def detach(self):\n \"\"\"Returns a new ndarray, detached from the current graph.\"\"\"\n hdl = NDArrayHandle()\n check_call(_LIB.MXNDArrayDetach(self.handle, ctypes.byref(hdl)))\n return _np_ndarray_cls(hdl)\n\n def astype(self, dtype, order='K', casting='unsafe', subok=True, copy=True): # pylint: disable=arguments-differ,unused-argument, too-many-arguments\n \"\"\"\n Copy of the array, cast to a specified type.\n\n Parameters\n ----------\n dtype : str or dtype\n Typecode or data-type to which the array is cast.\n order : {'C', 'F', 'A', 'K'}, optional\n Controls the memory layout order of the result.\n 'C' means C order, 'F' means Fortran order, 'A'\n means 'F' order if all the arrays are Fortran contiguous,\n 'C' order otherwise, and 'K' means as close to the\n order the array elements appear in memory as possible.\n Default is 'K'.\n casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional\n Controls what kind of data casting may occur. Defaults to 'unsafe'\n for backwards compatibility.\n\n * 'no' means the data types should not be cast at all.\n * 'equiv' means only byte-order changes are allowed.\n * 'safe' means only casts which can preserve values are allowed.\n * 'same_kind' means only safe casts or casts within a kind,\n like float64 to float32, are allowed.\n * 'unsafe' means any data conversions may be done.\n subok : bool, optional\n If True, then sub-classes will be passed-through (default), otherwise\n the returned array will be forced to be a base-class array.\n copy : bool, optional\n Default `True`. By default, astype always returns a newly\n allocated ndarray on the same device. 
If this is set to\n `False`, and the dtype requested is the same as the ndarray's\n dtype, the ndarray is returned instead of a copy.\n\n Returns\n -------\n arr_t : ndarray\n Unless `copy` is False and the other conditions for returning the input\n array are satisfied (see description for `copy` input parameter), `arr_t`\n is a new array of the same shape as the input array with `dtype`.\n\n Notes\n -----\n This function differs from the official `ndarray`'s ``astype`` function in the following\n aspects:\n * `order` only supports 'C' and 'K'.\n * `casting` only supports 'unsafe'.\n * `subok` only supports ``True``.\n \"\"\"\n if order is not None and order != 'K' and order != 'C':\n raise ValueError('order must be either \\'K\\' or \\'C\\'')\n if casting != 'unsafe':\n raise ValueError('casting must be equal to \\'unsafe\\'')\n if not subok:\n raise ValueError('subok must be equal to True')\n if dtype is None:\n dtype = _np.float32\n if not copy and _np.dtype(dtype) == self.dtype:\n return self\n\n return _npi.cast(self, dtype=dtype)\n\n def copyto(self, other):\n \"\"\"Copies the value of this array to another array.\n\n If ``other`` is a ``ndarray`` object, then ``other.shape`` and\n ``self.shape`` should be the same. This function copies the value from\n ``self`` to ``other``.\n\n If ``other`` is a device, a new ``np.ndarray`` will be first created on\n the target device, and the value of ``self`` is copied.\n\n Parameters\n ----------\n other : ndarray or Device\n The destination array or device.\n\n Returns\n -------\n out: ndarray\n The copied array. If ``other`` is an ``ndarray``, then the return value\n and ``other`` will point to the same ``ndarray``.\n\n Examples\n --------\n >>> x = np.ones((2, 3))\n >>> y = np.zeros((2, 3), device=npx.gpu(0))\n >>> z = x.copyto(y)\n >>> z is y\n True\n >>> y\n array([[ 1., 1., 1.],\n [ 1., 1., 1.]])\n \"\"\"\n if isinstance(other, ndarray):\n if other.handle is self.handle:\n warnings.warn('You are attempting to copy an array to itself', RuntimeWarning)\n return False\n return _npi.copyto(self, out=other)\n elif isinstance(other, Device):\n hret = ndarray(_new_alloc_handle(self.shape, other, True, self.dtype))\n return _npi.copyto(self, out=hret)\n else:\n raise TypeError('copyto does not support type ' + str(type(other)))\n\n def asscalar(self):\n raise AttributeError('mxnet.numpy.ndarray object has no attribute asscalar')\n\n def argmax(self, axis=None, out=None, keepdims=False): # pylint: disable=arguments-differ\n \"\"\"Return indices of the maximum values along the given axis.\n Refer to `mxnet.numpy.argmax` for full documentation.\"\"\"\n return argmax(self, axis, out, keepdims)\n\n def as_in_context(self, context):\n \"\"\"This function has been deprecated. Please refer to ``ndarray.to_device``.\"\"\"\n warnings.warn('ndarray.as_in_context has been renamed to'\n ' ndarray.to_device', DeprecationWarning)\n return self.as_nd_ndarray().as_in_context(context).as_np_ndarray()\n\n def as_in_ctx(self, ctx):\n \"\"\"This function has been deprecated. Please refer to ``ndarray.to_device``.\"\"\"\n warnings.warn('ndarray.to_device has been renamed to'\n ' ndarray.to_device', DeprecationWarning)\n return self.to_device(ctx)\n\n @property\n def ctx(self):\n \"\"\"This property has been deprecated. 
Please refer to ``ndarray.device``.\"\"\"\n warnings.warn('ndarray.ctx has been renamed to ndarray.device', DeprecationWarning)\n return self.device\n\n\n def to_device(self, device):\n \"\"\"Returns an array on the target device with the same value as this array.\n\n If the target device is the same as ``self.device``, then ``self`` is\n returned. Otherwise, a copy is made.\n\n Parameters\n ----------\n device : Device\n The target device.\n\n Returns\n -------\n ndarray\n The target array.\n \"\"\"\n if self.device == device:\n return self\n return self.copyto(device)\n\n @property\n def device(self):\n \"\"\"Hardware device the array data resides on.\n\n Examples\n --------\n >>> x = np.array([1, 2, 3, 4])\n >>> x.device\n cpu(0)\n >>> type(x.device)\n <class 'mxnet.device.Device'>\n >>> y = np.zeros((2, 3), npx.gpu(0))\n >>> y.device\n gpu(0)\n \"\"\"\n dev_typeid = ctypes.c_int()\n dev_id = ctypes.c_int()\n check_call(_LIB.MXNDArrayGetContext(\n self.handle, ctypes.byref(dev_typeid), ctypes.byref(dev_id)))\n return Device(Device.devtype2str[dev_typeid.value], dev_id.value)\n\n\n @property\n def context(self):\n \"\"\"This function has been deprecated. Please refer to ``ndarray.ctx``.\"\"\"\n warnings.warn('ndarray.context has been renamed to ndarray.ctx', DeprecationWarning)\n return self.as_nd_ndarray().context\n\n def copy(self, order='C'): # pylint: disable=arguments-differ\n \"\"\"Return a coyp of the array, keeping the same device.\n\n Parameters\n ----------\n order : str\n The memory layout of the copy. Currently, only c-contiguous memory\n layout is supported.\n\n Examples\n --------\n >>> x = np.ones((2, 3))\n >>> y = x.copy()\n >>> y\n array([[ 1., 1., 1.],\n [ 1., 1., 1.]])\n \"\"\"\n if order != 'C':\n raise NotImplementedError('ndarray.copy only supports order=\\'C\\', while '\n 'received {}'.format(str(order)))\n return self.copyto(self.device)\n\n def dot(self, b, out=None):\n \"\"\"Dot product of two arrays.\n Refer to ``numpy.dot`` for full documentation.\"\"\"\n return dot(self, b, out=out)\n\n def reshape(self, *args, **kwargs): # pylint: disable=arguments-differ\n \"\"\"Returns a copy of the array with a new shape.\n\n Notes\n -----\n Unlike the free function `numpy.reshape`, this method on `ndarray` allows\n the elements of the shape parameter to be passed in as separate arguments.\n For example, ``a.reshape(10, 11)`` is equivalent to\n ``a.reshape((10, 11))``.\n \"\"\"\n order = 'C'\n if len(kwargs) > 1:\n raise TypeError('function takes at most 1 keyword argument')\n if len(kwargs) == 1:\n if 'order' not in kwargs:\n raise TypeError(\"'{}' is an invalid keyword argument for this function\"\n .format(list(kwargs.keys())[0]))\n order = kwargs.pop('order', 'C')\n if order != 'C':\n raise NotImplementedError('only supports C-order,'\n ' while received {}'.format(order))\n if len(args) == 0:\n raise TypeError('reshape() takes exactly 1 argument (0 given)')\n if len(args) == 1 and isinstance(args[0], tuple):\n return _mx_nd_np.reshape(self, newshape=args[0], order=order)\n else:\n return _mx_nd_np.reshape(self, newshape=args, order=order)\n\n def reshape_like(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`reshape_like`.\n\n The arguments are the same as for :py:func:`reshape_like`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute reshape_like')\n\n def reshape_view(self, *shape, **kwargs): # pylint: disable=redefined-outer-name\n \"\"\"Returns a **view** of this array with a new shape 
without altering any data.\n Inheritated from NDArray.reshape.\n \"\"\"\n return super(ndarray, self).reshape(*shape, **kwargs)\n\n def zeros_like(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`zeros_like`.\n\n The arguments are the same as for :py:func:`zeros_like`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute zeros_like')\n\n def ones_like(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`ones_like`.\n\n The arguments are the same as for :py:func:`ones_like`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute ones_like')\n\n def broadcast_axes(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`broadcast_axes`.\n\n The arguments are the same as for :py:func:`broadcast_axes`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute broadcast_like')\n\n def repeat(self, repeats, axis=None): # pylint: disable=arguments-differ\n \"\"\"Repeat elements of an array.\"\"\"\n return repeat(self, repeats=repeats, axis=axis)\n\n def pad(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`pad`.\n\n The arguments are the same as for :py:func:`pad`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute pad')\n\n def swapaxes(self, axis1, axis2): # pylint: disable=arguments-differ\n \"\"\"Return a copy of the array with axis1 and axis2 interchanged.\n Refer to `mxnet.numpy.swapaxes` for full documentation.\n \"\"\"\n return swapaxes(self, axis1, axis2)\n\n def split(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`split`.\n\n The arguments are the same as for :py:func:`split`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute split')\n\n def split_v2(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`split_v2`.\n\n The arguments are the same as for :py:func:`split_v2`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute split_v2')\n\n def slice(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`slice`.\n\n The arguments are the same as for :py:func:`slice`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute slice')\n\n def slice_axis(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`slice_axis`.\n\n The arguments are the same as for :py:func:`slice_axis`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute slice_axis')\n\n def slice_like(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`slice_like`.\n\n The arguments are the same as for :py:func:`slice_like`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute slice_like')\n\n def slice_assign_scalar(self, value, begin, end, step):\n \"\"\"\n Assign the scalar to a cropped subset of this ndarray. 
Value will broadcast to the shape of the cropped shape\n and will be cast to the same dtype of the ndarray.\n\n Parameters\n ----------\n value: numeric value\n Value and this ndarray should be of the same data type.\n The shape of rhs should be the same as the cropped shape of this ndarray.\n begin: tuple of begin indices\n end: tuple of end indices\n step: tuple of step lenghths\n\n Returns\n -------\n This ndarray.\n\n Examples\n --------\n >>> x = np.ones((2, 2, 2))\n >>> y = x.slice_assign_scalar(0, (0, 0, None), (1, 1, None), (None, None, None))\n >>> y\n array([[[0., 0.],\n [1., 1.]],\n\n [[1., 1.],\n [1., 1.]]])\n >>> x\n array([[[0., 0.],\n [1., 1.]],\n\n [[1., 1.],\n [1., 1.]]])\n \"\"\"\n return _npi.slice_assign_scalar(self, value, begin=begin, end=end, step=step, out=self)\n\n def slice_assign(self, rhs, begin, end, step):\n \"\"\"\n Assign the rhs to a cropped subset of this ndarray in place.\n Returns the view of this ndarray.\n\n Parameters\n ----------\n rhs: ndarray.\n rhs and this NDArray should be of the same data type, and on the same device.\n The shape of rhs should be the same as the cropped shape of this ndarray.\n begin: tuple of begin indices\n end: tuple of end indices\n step: tuple of step lenghths\n\n Returns\n -------\n out : ndarray\n This ndarray.\n\n Examples\n --------\n >>> x = np.ones((2, 2, 2))\n >>> assigned = np.zeros((1, 1, 2))\n >>> y = x.slice_assign(assigned, (0, 0, None), (1, 1, None), (None, None, None))\n >>> y\n array([[[0., 0.],\n [1., 1.]],\n\n [[1., 1.],\n [1., 1.]]])\n >>> x\n array([[[0., 0.],\n [1., 1.]],\n\n [[1., 1.],\n [1., 1.]]])\n \"\"\"\n return _npi.slice_assign(self, rhs, begin=begin, end=end, step=step, out=self)\n\n def take(self, indices, axis=None, mode='raise'): # pylint: disable=arguments-differ, redefined-outer-name\n \"\"\"Convenience fluent method for :py:func:`take`.\n\n The arguments are the same as for :py:func:`take`, with\n this array as data.\n \"\"\"\n return take(self, indices, axis, mode=mode)\n\n def one_hot(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`one_hot`.\n\n The arguments are the same as for :py:func:`one_hot`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute one_hot')\n\n def pick(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`pick`.\n\n The arguments are the same as for :py:func:`pick`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute pick')\n\n def sort(self, axis=-1, descending=False, stable=True): # pylint: disable=arguments-differ\n \"\"\"Convenience fluent method for :py:func:`sort`.\n\n The arguments are the same as for :py:func:`sort`, with\n this array as data.\n \"\"\"\n return sort(self, axis=axis, descending=descending, stable=stable)\n\n def topk(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`topk`.\n\n The arguments are the same as for :py:func:`topk`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute topk')\n\n def argsort(self, axis=-1, descending=False, stable=True): # pylint: disable=arguments-differ\n \"\"\"Convenience fluent method for :py:func:`argsort`.\n\n The arguments are the same as for :py:func:`argsort`, with\n this array as data.\n \"\"\"\n return argsort(self, axis=axis, descending=descending, stable=stable)\n\n def argmax_channel(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`argmax_channel`.\n\n The arguments 
are the same as for :py:func:`argmax_channel`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute argmax_channel')\n\n def argmin(self, axis=None, out=None, keepdims=False): # pylint: disable=arguments-differ\n \"\"\"Return indices of the minium values along the given axis.\n Refer to `mxnet.numpy.argmin` for full documentation.\"\"\"\n return argmin(self, axis, out, keepdims)\n\n def clip(self, min=None, max=None, out=None): # pylint: disable=arguments-differ\n \"\"\"Return an array whose values are limited to [min, max].\n One of max or min must be given.\n \"\"\"\n return clip(self, min, max, out=out)\n\n def abs(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`abs`.\n\n The arguments are the same as for :py:func:`abs`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute abs')\n\n def sign(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`sign`.\n\n The arguments are the same as for :py:func:`sign`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute sign')\n\n def flatten(self, order='C'): # pylint: disable=arguments-differ\n \"\"\"Return a copy of the array collapsed into one dimension.\"\"\"\n return self.reshape(-1, order=order)\n\n def shape_array(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`shape_array`.\n\n The arguments are the same as for :py:func:`shape_array`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute shape_array')\n\n def size_array(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`size_array`.\n\n The arguments are the same as for :py:func:`size_array`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute size_array')\n\n def expand_dims(self, *args, **kwargs): # pylint: disable=arguments-differ,unused-argument\n \"\"\"Convenience fluent method for :py:func:`expand_dims`.\n\n The arguments are the same as for :py:func:`expand_dims`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute expand_dims')\n\n def tile(self, reps): # pylint: disable=arguments-differ\n \"\"\"Construct an array by repeating A the number of times given by reps.\n Refer to `mxnet.numpy.tile` for full documentation.\"\"\"\n return tile(self, reps=reps)\n\n def transpose(self, *axes): # pylint: disable=arguments-differ\n \"\"\"Permute the dimensions of an array.\"\"\"\n if len(axes) == 0:\n axes = None\n elif len(axes) == 1:\n if isinstance(axes[0], (tuple, list)):\n axes = axes[0]\n elif axes[0] is None:\n axes = None\n return transpose(self, axes=axes)\n\n def flip(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`flip`.\n\n The arguments are the same as for :py:func:`flip`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute flip')\n\n def depth_to_space(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`depth_to_space`.\n\n The arguments are the same as for :py:func:`depth_to_space`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute depth_to_space')\n\n def space_to_depth(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`space_to_depth`.\n\n The arguments are the same as for :py:func:`space_to_depth`, with\n this array as 
data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute space_to_depth')\n\n def diag(self, k=0, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`diag`.\n\n The arguments are the same as for :py:func:`diag`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute diag')\n\n def diagonal(self, offset=0, axis1=0, axis2=1): # pylint: disable=arguments-differ\n \"\"\"Return the diagonal with the given offset.\n\n If array has more than two dimensions, then the axes specified by axis1 and\n axis2 are used to determine the 2-D sub-array whose diagonal is returned.\n\n Refer to `mxnet.numpy.diagonal` for full documents.\n \"\"\"\n return diagonal(self, offset=offset, axis1=axis1, axis2=axis2)\n\n def sum(self, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ\n \"\"\"Return the sum of the array elements over the given axis.\"\"\"\n return sum(self, axis=axis, dtype=dtype, out=out, keepdims=keepdims)\n\n def nansum(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`nansum`.\n\n The arguments are the same as for :py:func:`nansum`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute nansum')\n\n def prod(self, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ\n \"\"\"Return the product of the array elements over the given axis.\"\"\"\n return _mx_np_op.prod(self, axis=axis, dtype=dtype, keepdims=keepdims, out=out)\n\n def nanprod(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`nanprod`.\n\n The arguments are the same as for :py:func:`nanprod`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute nanprod')\n\n def mean(self, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ\n \"\"\"Returns the average of the array elements along given axis.\"\"\"\n return mean(self, axis=axis, dtype=dtype, out=out, keepdims=keepdims)\n\n # pylint: disable=too-many-arguments, arguments-differ\n\n @wrap_data_api_statical_func\n def std(self, axis=None, dtype=None, out=None, correction=0, keepdims=False):\n \"\"\"Returns the standard deviation of the array elements along given axis.\"\"\"\n return std(self, axis=axis, dtype=dtype, correction=correction, keepdims=keepdims, out=out)\n\n @wrap_data_api_statical_func\n def var(self, axis=None, dtype=None, out=None, correction=0, keepdims=False):\n \"\"\"Returns the variance of the array elements, along given axis.\"\"\"\n return var(self, axis=axis, dtype=dtype, out=out, correction=correction, keepdims=keepdims)\n # pylint: enable=too-many-arguments, arguments-differ\n\n def cumsum(self, axis=None, dtype=None, out=None):\n \"\"\"Return the cumulative sum of the elements along the given axis.\"\"\"\n return _mx_nd_np.cumsum(self, axis=axis, dtype=dtype, out=out)\n\n def tolist(self):\n return self.asnumpy().tolist()\n\n def max(self, axis=None, out=None, keepdims=False): # pylint: disable=arguments-differ\n \"\"\"Return the maximum along a given axis.\"\"\"\n return _mx_nd_np.max(self, axis=axis, out=out, keepdims=keepdims)\n\n def min(self, axis=None, out=None, keepdims=False): # pylint: disable=arguments-differ\n \"\"\"Convenience fluent method for :py:func:`min`.\n\n The arguments are the same as for :py:func:`min`, with\n this array as data.\n \"\"\"\n return _mx_nd_np.min(self, axis=axis, out=out, keepdims=keepdims)\n\n def norm(self, 
*args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`norm`.\n\n The arguments are the same as for :py:func:`norm`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute norm')\n\n def round(self, decimals=0, out=None, **kwargs): # pylint: disable=arguments-differ\n \"\"\"Convenience fluent method for :py:func:`round`.\n\n The arguments are the same as for :py:func:`round`, with\n this array as data.\n \"\"\"\n return round(self, decimals=decimals, out=out, **kwargs)\n\n def rint(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`rint`.\n\n The arguments are the same as for :py:func:`rint`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute rint')\n\n def fix(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`fix`.\n\n The arguments are the same as for :py:func:`fix`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute fix')\n\n def floor(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`floor`.\n\n The arguments are the same as for :py:func:`floor`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute floor')\n\n def ceil(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`ceil`.\n\n The arguments are the same as for :py:func:`ceil`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute ceil')\n\n def trunc(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`trunc`.\n\n The arguments are the same as for :py:func:`trunc`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute trunc')\n\n def sin(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`sin`.\n\n The arguments are the same as for :py:func:`sin`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute sin')\n\n def cos(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`cos`.\n\n The arguments are the same as for :py:func:`cos`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute cos')\n\n def tan(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`tan`.\n\n The arguments are the same as for :py:func:`tan`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute tan')\n\n def arcsin(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`arcsin`.\n\n The arguments are the same as for :py:func:`arcsin`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute arcsin')\n\n def arccos(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`arccos`.\n\n The arguments are the same as for :py:func:`arccos`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute arccos')\n\n def arctan(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`arctan`.\n\n The arguments are the same as for :py:func:`arctan`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute arctan')\n\n def degrees(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`degrees`.\n\n The arguments are the same as for :py:func:`degrees`, with\n 
this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute degrees')\n\n def radians(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`radians`.\n\n The arguments are the same as for :py:func:`radians`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute radians')\n\n def sinh(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`sinh`.\n\n The arguments are the same as for :py:func:`sinh`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute sinh')\n\n def cosh(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`cosh`.\n\n The arguments are the same as for :py:func:`cosh`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute cosh')\n\n def tanh(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`tanh`.\n\n The arguments are the same as for :py:func:`tanh`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute tanh')\n\n def arcsinh(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`arcsinh`.\n\n The arguments are the same as for :py:func:`arcsinh`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute arcsinh')\n\n def arccosh(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`arccosh`.\n\n The arguments are the same as for :py:func:`arccosh`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute arccosh')\n\n def arctanh(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`arctanh`.\n\n The arguments are the same as for :py:func:`arctanh`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute arctanh')\n\n def exp(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`exp`.\n\n The arguments are the same as for :py:func:`exp`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute exp')\n\n def expm1(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`expm1`.\n\n The arguments are the same as for :py:func:`expm1`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute expm1')\n\n def log(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`log`.\n\n The arguments are the same as for :py:func:`log`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute log')\n\n def log10(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`log10`.\n\n The arguments are the same as for :py:func:`log10`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute log10')\n\n def log2(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`log2`.\n\n The arguments are the same as for :py:func:`log2`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute log2')\n\n def log1p(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`log1p`.\n\n The arguments are the same as for :py:func:`log1p`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute log1p')\n\n def log_sigmoid(self, *args, 
**kwargs):\n \"\"\"Convenience fluent method for :py:func:`log_sigmoid`.\n\n The arguments are the same as for :py:func:`log_sigmoid`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute log_sigmoid')\n\n def sqrt(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`sqrt`.\n\n The arguments are the same as for :py:func:`sqrt`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute sqrt')\n\n def rsqrt(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`rsqrt`.\n\n The arguments are the same as for :py:func:`rsqrt`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute rsqrt')\n\n def cbrt(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`cbrt`.\n\n The arguments are the same as for :py:func:`cbrt`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute cbrt')\n\n def rcbrt(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`rcbrt`.\n\n The arguments are the same as for :py:func:`rcbrt`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute rcbrt')\n\n def square(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`square`.\n\n The arguments are the same as for :py:func:`square`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute square')\n\n def reciprocal(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`reciprocal`.\n\n The arguments are the same as for :py:func:`reciprocal`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute reciprocal')\n\n def relu(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`relu`.\n\n The arguments are the same as for :py:func:`relu`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute relu')\n\n def sigmoid(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`sigmoid`.\n\n The arguments are the same as for :py:func:`sigmoid`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute sigmoid')\n\n def softmax(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`softmax`.\n\n The arguments are the same as for :py:func:`softmax`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute softmax')\n\n def log_softmax(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`log_softmax`.\n\n The arguments are the same as for :py:func:`log_softmax`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute log_softmax')\n\n def softmin(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`softmin`.\n\n The arguments are the same as for :py:func:`softmin`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute softmin')\n\n def mish(self, *args, **kwargs):\n \"\"\"Convenience fluent method for :py:func:`mish`.\n\n The arguments are the same as for :py:func:`mish`, with\n this array as data.\n \"\"\"\n raise AttributeError('mxnet.numpy.ndarray object has no attribute mish')\n\n def squeeze(self, axis=None): # pylint: disable=arguments-differ\n \"\"\"Remove 
single-dimensional entries from the shape of a.\"\"\"\n return squeeze(self, axis=axis)\n\n def broadcast_to(self, shape): # pylint: disable=redefined-outer-name\n return _mx_nd_np.broadcast_to(self, shape)\n\n def broadcast_like(self, other):\n raise AttributeError('mxnet.numpy.ndarray object has no attribute broadcast_like')\n\n def _full(self, value):\n \"\"\"\n Currently for internal use only. Implemented for __setitem__.\n Assign to self an array of self's same shape and type, filled with value.\n \"\"\"\n return _mx_nd_np.full(self.shape, value, device=self.device, dtype=self.dtype, out=self)\n\n # pylint: disable=redefined-outer-name\n def _scatter_set_nd(self, value_nd, indices):\n \"\"\"\n This is added as an ndarray class method in order to support polymorphism in NDArray and numpy.ndarray indexing\n \"\"\"\n return _npi.scatter_set_nd(\n lhs=self, rhs=value_nd, indices=indices, shape=self.shape, out=self\n )\n # pylint: enable=redefined-outer-name\n\n @property\n def shape(self):\n \"\"\"Tuple of array dimensions.\n\n Examples\n --------\n >>> x = mx.np.array([1, 2, 3, 4])\n >>> x.shape\n (4L,)\n >>> y = mx.np.zeros((2, 3, 4))\n >>> y.shape\n (2L, 3L, 4L)\n >>> z = mx.np.array(3)\n >>> z.shape\n ()\n \"\"\"\n num_dim = mx_int()\n if _int64_enabled():\n pdata = ctypes.POINTER(mx_int64)()\n check_call(_LIB.MXNDArrayGetShape64(\n self.handle, ctypes.byref(num_dim), ctypes.byref(pdata)))\n else:\n pdata = ctypes.POINTER(mx_int)()\n check_call(_LIB.MXNDArrayGetShape(\n self.handle, ctypes.byref(num_dim), ctypes.byref(pdata)))\n if num_dim.value == -1:\n return None\n else:\n return tuple(pdata[:num_dim.value]) # pylint: disable=invalid-slice-index\n\n @property\n def ndim(self):\n \"\"\"Number of array dimensions.\"\"\"\n return len(self.shape)\n\n @property\n def size(self):\n \"\"\"Number of elements in the array.\"\"\"\n return super(ndarray, self).size\n\n @property\n def dtype(self):\n \"\"\"Data-type of the array's elements.\n\n Returns\n -------\n numpy.dtype\n This NDArray's data type.\n\n Examples\n --------\n >>> x = np.zeros((2,3))\n >>> x.dtype\n dtype('float32')\n >>> y = np.zeros((2,3), dtype='int32')\n >>> y.dtype\n dtype('int32')\n \"\"\"\n return _np.dtype(super(ndarray, self).dtype)\n\n def tostype(self, stype):\n raise AttributeError('mxnet.numpy.ndarray object has no attribute tostype')\n\n\n@set_module('mxnet.numpy')\n@wrap_ctx_to_device_func\ndef empty(shape, dtype=None, order='C', device=None): # pylint: disable=redefined-outer-name\n \"\"\"Return a new array of given shape and type, without initializing entries.\n\n Parameters\n ----------\n shape : int or tuple of int Shape of the empty array, e.g., ``(2, 3)`` or ``2``.\n dtype : data-type, optional\n Desired output data-type for the array, e.g, `numpy.int8`.\n Note that this behavior is different from NumPy's `empty` function where `float64`\n is the default value, here you can set your default dtype as 'float32' or 'float64'\n because `float32` is considered as the default data type in deep learning.\n When npx.is_np_default_dtype() returns False, default dtype is float32;\n When npx.is_np_default_dtype() returns True, default dtype is float64.\n order : {'C'}, optional, default: 'C'\n How to store multi-dimensional data in memory, currently only row-major\n (C-style) is supported.\n device : Device, optional\n Device context on which the memory is allocated. 
Default is\n `mxnet.device.current_device()`.\n\n Returns\n -------\n out : ndarray\n Array of uninitialized (arbitrary) data of the given shape, dtype, and order.\n\n Examples\n --------\n >>> np.empty([2, 2])\n array([[ 0.000000e+00, -2.524355e-29],\n [ nan, -8.592023e+09]]) # uninitialized\n\n >>> np.empty([2, 2], dtype=int)\n array([[8751743591039004782, 3196766424264760104],\n [7583328881310196768, 562950123910254]], dtype=int64) # uninitialized\n \"\"\"\n if order != 'C':\n raise NotImplementedError('`empty` only supports order equal to `C`, while received {}'\n .format(str(order)))\n if device is None:\n device = current_device()\n if dtype is None or dtype is float:\n dtype = _np.float64 if is_np_default_dtype() else _np.float32\n if isinstance(shape, int):\n shape = (shape,)\n return ndarray(handle=_new_alloc_handle(shape, device, False, dtype))\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.numpy')\n@wrap_ctx_to_device_func\ndef array(object, dtype=None, device=None):\n \"\"\"\n Create an array.\n\n Parameters\n ----------\n object : array_like or `numpy.ndarray` or `mxnet.numpy.ndarray`\n An array, any object exposing the array interface, an object whose\n __array__ method returns an array, or any (nested) sequence.\n dtype : data-type, optional\n The desired data-type for the array.\n The default dtype is ``object.dtype`` if `object` is an `ndarray`, `float32` otherwise.\n Default dtype can be set to be consistent with offical numpy by `npx.set_np(dtype=True)`.\n\n * When npx.is_np_default_dtype() returns False, default dtype is float32;\n * When npx.is_np_default_dtype() returns True, default dtype is float64.\n\n device : Device, optional\n Device context on which the memory is allocated. Default is\n `mxnet.device.current_device()`.\n\n Returns\n -------\n out : ndarray\n An array object satisfying the specified requirements.\n\n Examples\n --------\n >>> np.array([1, 2, 3])\n array([1., 2., 3.])\n\n >>> np.array([[1, 2], [3, 4]])\n array([[1., 2.],\n [3., 4.]])\n\n >>> np.array([[1, 0], [0, 1]], dtype=bool)\n array([[ True, False],\n [False, True]])\n\n >>> np.array([1, 2, 3]).dtype\n dtype('float32')\n\n >>> npx.set_np(dtype=True)\n >>> np.array([1, 2, 3]).dtype\n dtype('float64')\n \"\"\"\n if device is None:\n device = current_device()\n if isinstance(object, _np.ndarray):\n if is_np_default_dtype():\n dtype = object.dtype if dtype is None else dtype\n else:\n dtype = _np.float32 if dtype is None or object.dtype is _np.float64 else dtype\n if isinstance(object, ndarray):\n dtype = object.dtype if dtype is None else dtype\n elif isinstance(object, NDArray):\n raise ValueError(\"If you're trying to create a mxnet.numpy.ndarray \"\n \"from mx.nd.NDArray, please use the zero-copy as_np_ndarray function.\")\n else:\n if dtype is None:\n default_dtype = _np.float64 if is_np_default_dtype() else _np.float32\n dtype = object.dtype if hasattr(object, \"dtype\") else default_dtype\n try:\n object = _np.array(object, dtype=dtype)\n except Exception as e:\n # printing out the error raised by official NumPy's array function\n # for transparency on users' side\n raise TypeError('{}'.format(str(e)))\n ret = empty(object.shape, dtype=dtype, device=device)\n if len(object.shape) == 0:\n ret[()] = object\n else:\n ret[:] = object\n return ret\n# pylint: enable=redefined-outer-name\n\n\n@set_module('mxnet.numpy')\ndef shape(a):\n \"\"\"\n Return the shape of an array.\n\n Parameters\n ----------\n a : array_like\n Input array.\n\n Returns\n -------\n shape : tuple of 
ints\n The elements of the shape tuple give the lengths of the\n corresponding array dimensions.\n\n See Also\n --------\n ndarray.shape : Equivalent array method.\n\n Examples\n --------\n >>> np.shape(np.eye(3))\n (3, 3)\n >>> np.shape([[1, 2]])\n (1, 2)\n >>> np.shape([0])\n (1,)\n >>> np.shape(0)\n ()\n \"\"\"\n return _mx_nd_np.shape(a)\n\n\n@set_module('mxnet.numpy')\n@wrap_ctx_to_device_func\ndef zeros(shape, dtype=None, order='C', device=None): # pylint: disable=redefined-outer-name\n \"\"\"Return a new array of given shape and type, filled with zeros.\n This function currently only supports storing multi-dimensional data\n in row-major (C-style).\n\n Parameters\n ----------\n shape : int or tuple of int\n The shape of the empty array.\n dtype : str or numpy.dtype, optional\n An optional value type,\n When npx.is_np_default_dtype() returns False, default dtype is float32,\n When npx.is_np_default_dtype() returns True, default dtype is float64.\n Note that this behavior is different from NumPy's `zeros` function where `float64`\n is the default value, here we can set 'float32' or 'float64' as your default dtype,\n because `float32` is considered as the default data type in deep learning.\n order : {'C'}, optional, default: 'C'\n How to store multi-dimensional data in memory, currently only row-major\n (C-style) is supported.\n device : Device, optional\n Device context on which the memory is allocated. Default is\n `mxnet.device.current_device()`.\n\n Returns\n -------\n out : ndarray\n Array of zeros with the given shape, dtype, and device.\n\n Examples\n --------\n >>> np.zeros(5)\n array([0., 0., 0., 0., 0.])\n\n >>> np.zeros((5,), dtype=int)\n array([0, 0, 0, 0, 0], dtype=int64)\n\n >>> np.zeros((2, 1))\n array([[0.],\n [0.]])\n \"\"\"\n return _mx_nd_np.zeros(shape, dtype, order, device)\n\n\n@set_module('mxnet.numpy')\n@wrap_ctx_to_device_func\ndef ones(shape, dtype=None, order='C', device=None): # pylint: disable=redefined-outer-name\n \"\"\"Return a new array of given shape and type, filled with ones.\n This function currently only supports storing multi-dimensional data\n in row-major (C-style).\n\n Parameters\n ----------\n shape : int or tuple of int\n The shape of the empty array.\n dtype : str or numpy.dtype, optional\n An optional value type. Default is depend on your current default dtype.\n When npx.is_np_default_dtype() returns False, default dtype is float32;\n When npx.is_np_default_dtype() returns True, default dtype is float64.\n Note that this behavior is different from NumPy's `ones` function where\n `float64` is the default value.\n order : {'C'}, optional, default: 'C'\n How to store multi-dimensional data in memory, currently only row-major\n (C-style) is supported.\n device : Device, optional\n Device context on which the memory is allocated. 
Default is\n `mxnet.device.current_device()`.\n\n Returns\n -------\n out : ndarray\n Array of ones with the given shape, dtype, and device.\n\n Examples\n --------\n >>> np.ones(5)\n array([1., 1., 1., 1., 1.])\n\n >>> np.ones((5,), dtype=int)\n array([1, 1, 1, 1, 1], dtype=int64)\n\n >>> np.ones((2, 1))\n array([[1.],\n [1.]])\n\n >>> s = (2,2)\n >>> np.ones(s)\n array([[1., 1.],\n [1., 1.]])\n \"\"\"\n return _mx_nd_np.ones(shape, dtype, order, device)\n\n\n@set_module('mxnet.numpy')\ndef broadcast_to(array, shape): # pylint: disable=redefined-outer-name\n \"\"\"\n Broadcast an array to a new shape.\n\n Parameters\n ----------\n array : ndarray or scalar\n The array to broadcast.\n shape : tuple\n The shape of the desired array.\n\n Returns\n -------\n broadcast : array\n A readonly view on the original array with the given shape. It is\n typically not contiguous. Furthermore, more than one element of a\n broadcasted array may refer to a single memory location.\n\n Raises\n ------\n MXNetError\n If the array is not compatible with the new shape according to NumPy's\n broadcasting rules.\n \"\"\"\n return _mx_nd_np.broadcast_to(array, shape)\n\n\n# pylint: disable=too-many-arguments, redefined-outer-name\n@set_module('mxnet.numpy')\n@wrap_ctx_to_device_func\ndef full(shape, fill_value, dtype=None, order='C', device=None, out=None):\n r\"\"\"Return a new array of given shape and type, filled with `fill_value`.\n\n Parameters\n ----------\n shape : int or sequence of ints\n Shape of the new array, e.g., ``(2, 3)`` or ``2``.\n fill_value : scalar or ndarray\n Fill value.\n dtype : data-type, optional\n If dtype is None, the output array data type must be inferred from fill_value.\n If it’s an int, the output array dtype must be the default integer dtype;\n If it’s a float, then the output array dtype must be the default floating-point data type;\n If it’s a bool then the output array must have boolean dtype. Default: None.\n order : {'C'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory. Currently only supports C order.\n device : Device, optional\n Device context on which the memory is allocated. Default is\n `mxnet.device.current_device()`.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Array of `fill_value` with the given shape, dtype, and order.\n If `fill_value` is an ndarray, out will have the same device as `fill_value`\n regardless of the provided `device`.\n\n .. 
note::\n This function differs from the original numpy.full in the following way(s):\n\n * Has an additional `device` argument to specify the device\n * Has an additional `out` argument\n * Currently does not support `order` selection\n\n See Also\n --------\n empty : Return a new uninitialized array.\n ones : Return a new array setting values to one.\n zeros : Return a new array setting values to zero.\n\n Examples\n --------\n >>> np.full((2, 2), 10)\n array([[10., 10.],\n [10., 10.]])\n >>> np.full((2, 2), 2, dtype=np.int32, device=mx.cpu(0))\n array([[2, 2],\n [2, 2]], dtype=int32)\n \"\"\"\n return _mx_nd_np.full(shape, fill_value, order=order, device=device, dtype=dtype, out=out)\n# pylint: enable=too-many-arguments, redefined-outer-name\n\n\n# pylint: disable=redefined-outer-name, too-many-arguments\n@set_module('mxnet.numpy')\n@wrap_ctx_to_device_func\ndef empty_like(prototype, dtype=None, device=None, order='C', subok=False, shape=None): # pylint: disable=W0621\n \"\"\"\n Return a new array with the same shape and type as a given array.\n\n Parameters\n ----------\n prototype : ndarray\n The shape and data-type of `prototype` define these same attributes\n of the returned array.\n dtype : data-type, optional\n Overrides the data type of the result.\n device : Device, optional\n Device context on which the memory is allocated. Default is\n `mxnet.device.current_device()`.\n order : {'C'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory. Currently only supports C order.\n subok : {False}, optional\n If True, then the newly created array will use the sub-class\n type of 'a', otherwise it will be a base-class array. Defaults\n to False.\n (Only support False at this moment)\n shape : int or sequence of ints, optional.\n Overrides the shape of the result. If order='K' and the number of\n dimensions is unchanged, will try to keep order, otherwise,\n order='C' is implied.\n (Not supported at this moment)\n\n Returns\n -------\n out : ndarray\n Array of uninitialized (arbitrary) data with the same\n shape and type as `prototype`.\n\n See Also\n --------\n ones_like : Return an array of ones with shape and type of input.\n zeros_like : Return an array of zeros with shape and type of input.\n full_like : Return a new array with shape of input filled with value.\n empty : Return a new uninitialized array.\n\n Notes\n -----\n This function does *not* initialize the returned array; to do that use\n `zeros_like` or `ones_like` instead. 
It may be marginally faster than\n the functions that do set the array values.\n\n Examples\n --------\n >>> a = np.array([[1,2,3], [4,5,6]])\n >>> np.empty_like(a)\n array([[-5764607523034234880, -2305834244544065442, 4563075075], # uninitialized\n [ 4567052944, -5764607523034234880, 844424930131968]])\n >>> a = np.array([[1., 2., 3.],[4.,5.,6.]])\n >>> np.empty_like(a)\n array([[4.9e-324, 9.9e-324, 1.5e-323], # uninitialized\n [2.0e-323, 2.5e-323, 3.0e-323]])\n \"\"\"\n ret = _mx_nd_np.empty_like(prototype, dtype=dtype, order=order, subok=subok, shape=shape)\n if device is not None:\n ret.to_device(device)\n return ret\n# pylint: enable=redefined-outer-name\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.numpy')\ndef all(a, axis=None, out=None, keepdims=False):\n \"\"\"\n Test whether all array elements along a given axis evaluate to True.\n\n Parameters\n ----------\n a : ndarray\n Input array or object that can be converted to an array.\n axis : None or int or tuple of ints, optional\n Axis or axes along which a logical AND reduction is performed.\n The default (axis = None) is to perform a logical AND over\n all the dimensions of the input array.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. With this option,\n the result will broadcast correctly against the input array.\n out : ndarray, optional\n Alternate output array in which to place the result. It must have\n the same shape as the expected output and its type is preserved\n\n Returns\n --------\n all : ndarray, bool\n A new boolean or array is returned unless out is specified,\n in which case a reference to out is returned.\n\n Examples:\n ---------\n >>> np.all([[True,False],[True,True]])\n False\n\n >>> np.all([[True,False],[True,True]], axis=0)\n array([ True, False])\n\n >>> np.all([-1, 4, 5])\n True\n\n >>> np.all([1.0, np.nan])\n True\n\n >>> o=np.array(False)\n >>> z=np.all([-1, 4, 5], out=o)\n >>> id(z), id(o), z\n (28293632, 28293632, array(True)) # may vary\n \"\"\"\n return _mx_nd_np.all(a, axis=axis, out=out, keepdims=keepdims)\n\n\n@set_module('mxnet.numpy')\ndef any(a, axis=None, out=None, keepdims=False):\n \"\"\"\n Test whether any array element along a given axis evaluates to True.\n Returns single boolean unless axis is not None\n\n Parameters\n ----------\n a : ndarray\n Input array or object that can be converted to an array.\n axis : None or int or tuple of ints, optional\n Axis or axes along which a logical AND reduction is performed.\n The default (axis = None) is to perform a logical AND over\n all the dimensions of the input array.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. With this option,\n the result will broadcast correctly against the input array.\n out : ndarray, optional\n Alternate output array in which to place the result. 
It must have\n the same shape as the expected output and its type is preserved\n\n Returns\n --------\n any : bool or ndarray\n A new boolean or ndarray is returned unless out is specified,\n in which case a reference to out is returned.\n\n Examples:\n ---------\n >>> np.any([[True, False], [True, True]])\n True\n\n >>> np.any([[True, False], [False, False]], axis=0)\n array([ True, False])\n\n >>> np.any([-1, 0, 5])\n True\n\n >>> np.any(np.nan)\n True\n\n >>> o=np.array(False)\n >>> z=np.any([-1, 4, 5], out=o)\n >>> z, o\n (array(True), array(True))\n >>> # Check now that z is a reference to o\n >>> z is o\n True\n >>> id(z), id(o) # identity of z and o # doctest: +SKIP\n (191614240, 191614240)\n \"\"\"\n return _mx_nd_np.any(a, axis=axis, out=out, keepdims=keepdims)\n\n\n@set_module('mxnet.numpy')\n@wrap_ctx_to_device_func\ndef identity(n, dtype=None, device=None):\n \"\"\"\n Return the identity array.\n\n The identity array is a square array with ones on\n the main diagonal.\n\n Parameters\n ----------\n n : int\n Number of rows (and columns) in `n` x `n` output.\n dtype : data-type, optional\n Data-type of the output.\n When npx.is_np_default_dtype() returns False, default dtype is float32;\n When npx.is_np_default_dtype() returns True, default dtype is float64.\n device : Device, optional\n Device context on which the memory is allocated. Default is\n `mxnet.device.current_device()`.\n\n Returns\n -------\n out : ndarray\n `n` x `n` array with its main diagonal set to one,\n and all other elements 0.\n\n Examples\n --------\n >>> np.identity(3)\n >>> np.identity(3)\n array([[1., 0., 0.],\n [0., 1., 0.],\n [0., 0., 1.]])\n \"\"\"\n return _mx_nd_np.identity(n, dtype, device)\n# pylint: enable=redefined-outer-name\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.numpy')\ndef take(a, indices, axis=None, mode='raise', out=None):\n r\"\"\"\n Take elements from an array along an axis.\n\n When axis is not None, this function does the same thing as \"fancy\"\n indexing (indexing arrays using arrays); however, it can be easier to use\n if you need elements along a given axis. A call such as\n ``np.take(arr, indices, axis=3)`` is equivalent to\n ``arr[:,:,:,indices,...]``.\n\n Explained without fancy indexing, this is equivalent to the following use\n of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of\n indices::\n\n Ni, Nk = a.shape[:axis], a.shape[axis+1:]\n Nj = indices.shape\n for ii in ndindex(Ni):\n for jj in ndindex(Nj):\n for kk in ndindex(Nk):\n out[ii + jj + kk] = a[ii + (indices[jj],) + kk]\n\n Parameters\n ----------\n a : ndarray\n The source array.\n indices : ndarray\n The indices of the values to extract. Also allow scalars for indices.\n axis : int, optional\n The axis over which to select values. By default, the flattened\n input array is used.\n out : ndarray, optional\n If provided, the result will be placed in this array. It should\n be of the appropriate shape and dtype.\n mode : {'clip', 'wrap'}, optional\n Specifies how out-of-bounds indices will behave.\n\n * 'clip' -- clip to the range (default)\n * 'wrap' -- wrap around\n\n 'clip' mode means that all indices that are too large are replaced\n by the index that addresses the last element along that axis. Note\n that this disables indexing with negative numbers.\n\n Returns\n -------\n out : ndarray\n The returned array has the same type as `a`.\n\n .. 
note::\n\n This function differs from the original `numpy.take\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.take.html>`_ in\n the following way(s):\n\n * Only ndarray or scalar ndarray is accepted as valid input.\n\n Examples\n --------\n >>> a = np.array([4, 3, 5, 7, 6, 8])\n >>> indices = np.array([0, 1, 4])\n >>> np.take(a, indices)\n array([4., 3., 6.])\n\n In this example for `a` is an ndarray, \"fancy\" indexing can be used.\n\n >>> a[indices]\n array([4., 3., 6.])\n\n If `indices` is not one dimensional, the output also has these dimensions.\n\n >>> np.take(a, np.array([[0, 1], [2, 3]]))\n array([[4., 3.],\n [5., 7.]])\n \"\"\"\n return _mx_nd_np.take(a, indices, axis, mode, out)\n# pylint: enable=redefined-outer-name\n\n\n@set_module('mxnet.numpy')\ndef unique(ar, return_index=False, return_inverse=False, return_counts=False, axis=None):\n \"\"\"\n Find the unique elements of an array.\n\n Returns the sorted unique elements of an array. There are three optional\n outputs in addition to the unique elements:\n\n * the indices of the input array that give the unique values\n * the indices of the unique array that reconstruct the input array\n * the number of times each unique value comes up in the input array\n\n Parameters\n ----------\n ar : ndarray\n Input array. Unless `axis` is specified, this will be flattened if it\n is not already 1-D.\n return_index : bool, optional\n If True, also return the indices of `ar` (along the specified axis,\n if provided, or in the flattened array) that result in the unique array.\n return_inverse : bool, optional\n If True, also return the indices of the unique array (for the specified\n axis, if provided) that can be used to reconstruct `ar`.\n return_counts : bool, optional\n If True, also return the number of times each unique item appears\n in `ar`.\n axis : int or None, optional\n The axis to operate on. If None, `ar` will be flattened. If an integer,\n the subarrays indexed by the given axis will be flattened and treated\n as the elements of a 1-D array with the dimension of the given axis,\n see the notes for more details. The default is None.\n\n Returns\n -------\n unique : ndarray\n The sorted unique values.\n unique_indices : ndarray, optional\n The indices of the first occurrences of the unique values in the\n original array. Only provided if `return_index` is True.\n unique_inverse : ndarray, optional\n The indices to reconstruct the original array from the\n unique array. Only provided if `return_inverse` is True.\n unique_counts : ndarray, optional\n The number of times each of the unique values comes up in the\n original array. Only provided if `return_counts` is True.\n\n .. note::\n\n When an axis is specified the subarrays indexed by the axis are sorted.\n This is done by making the specified axis the first dimension of the array\n and then flattening the subarrays in C order. The flattened subarrays are\n then viewed as a structured type with each element given a label, with the\n effect that we end up with a 1-D array of structured types that can be\n treated in the same way as any other 1-D array. 
The result is that the\n flattened subarrays are sorted in lexicographic order starting with the\n first element.\n\n This function differs from the original `numpy.unique\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html>`_ in\n the following aspects:\n\n * Only support ndarray as input.\n * Object arrays or structured arrays are not supported.\n\n Examples\n --------\n >>> np.unique(np.array([1, 1, 2, 2, 3, 3]))\n array([1., 2., 3.])\n >>> a = np.array([[1, 1], [2, 3]])\n >>> np.unique(a)\n array([1., 2., 3.])\n\n Return the unique rows of a 2D array\n\n >>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])\n >>> np.unique(a, axis=0)\n array([[1., 0., 0.],\n [2., 3., 4.]])\n\n Return the indices of the original array that give the unique values:\n\n >>> a = np.array([1, 2, 6, 4, 2, 3, 2])\n >>> u, indices = np.unique(a, return_index=True)\n >>> u\n array([1., 2., 3., 4., 6.])\n >>> indices\n array([0, 1, 5, 3, 2], dtype=int64)\n >>> a[indices]\n array([1., 2., 3., 4., 6.])\n\n Reconstruct the input array from the unique values:\n\n >>> a = np.array([1, 2, 6, 4, 2, 3, 2])\n >>> u, indices = np.unique(a, return_inverse=True)\n >>> u\n array([1., 2., 3., 4., 6.])\n >>> indices\n array([0, 1, 4, 3, 1, 2, 1], dtype=int64)\n >>> u[indices]\n array([1., 2., 6., 4., 2., 3., 2.])\n \"\"\"\n return _mx_nd_np.unique(ar, return_index, return_inverse, return_counts, axis)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef add(x1, x2, out=None, **kwargs):\n \"\"\"\n Add arguments element-wise.\n\n Parameters\n ----------\n x1, x2 : ndarrays or scalar values\n The arrays to be added. If x1.shape != x2.shape, they must be broadcastable to\n a common shape (which may be the shape of one or the other).\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n The sum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.\n\n .. note::\n\n This operator now supports automatic type promotion. The resulting type will be determined\n according to the following rules:\n * If both inputs are of floating number types, the output is the more precise type.\n * If only one of the inputs is floating number type, the result is that type.\n * If both inputs are of integer types (including boolean), not supported yet.\n\n Examples\n --------\n >>> np.add(1.0, 4.0)\n 5.0\n >>>\n >>> x1 = np.arange(9.0).reshape((3, 3))\n >>> x2 = np.arange(3.0)\n >>> np.add(x1, x2)\n array([[ 0., 2., 4.],\n [ 3., 5., 7.],\n [ 6., 8., 10.]])\n \"\"\"\n return _mx_nd_np.add(x1, x2, out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef subtract(x1, x2, out=None, **kwargs):\n r\"\"\"Subtract arguments element-wise.\n\n Parameters\n ----------\n x1, x2 : ndarrays or scalar values\n The arrays to be subtracted from each other. If x1.shape != x2.shape,\n they must be broadcastable to a common shape (which may be the shape\n of one or the other).\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n subtract : ndarray or scalar\n The difference of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.\n\n .. note::\n This operator now supports automatic type promotion. 
The resulting type will be determined\n according to the following rules:\n * If both inputs are of floating number types, the output is the more precise type.\n * If only one of the inputs is floating number type, the result is that type.\n * If both inputs are of integer types (including boolean), not supported yet.\n\n Examples\n --------\n >>> np.subtract(1.0, 4.0)\n -3.0\n >>> x1 = np.arange(9.0).reshape((3, 3))\n >>> x2 = np.arange(3.0)\n >>> np.subtract(x1, x2)\n array([[0., 0., 0.],\n [3., 3., 3.],\n [6., 6., 6.]])\n \"\"\"\n return _mx_nd_np.subtract(x1, x2, out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef multiply(x1, x2, out=None, **kwargs):\n \"\"\"\n Multiply arguments element-wise.\n\n Parameters\n ----------\n x1, x2 : ndarrays or scalar values\n The arrays to be multiplied. If x1.shape != x2.shape, they must be broadcastable to\n a common shape (which may be the shape of one or the other).\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n The difference of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.\n\n .. note::\n This operator now supports automatic type promotion. The resulting type will be determined\n according to the following rules:\n\n * If both inputs are of floating number types, the output is the more precise type.\n * If only one of the inputs is floating number type, the result is that type.\n * If both inputs are of integer types (including boolean), not supported yet.\n\n Examples\n --------\n >>> np.multiply(2.0, 4.0)\n 8.0\n >>> x1 = np.arange(9.0).reshape((3, 3))\n >>> x2 = np.arange(3.0)\n >>> np.multiply(x1, x2)\n array([[ 0., 1., 4.],\n [ 0., 4., 10.],\n [ 0., 7., 16.]])\n \"\"\"\n return _mx_nd_np.multiply(x1, x2, out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef divide(x1, x2, out=None, **kwargs):\n \"\"\"Returns a true division of the inputs, element-wise.\n\n .. note::\n This operator now supports automatic type promotion. The resulting type will be determined\n according to the following rules:\n\n * If both inputs are of floating number types, the output is the more precise type.\n * If only one of the inputs is floating number type, the result is that type.\n * If both inputs are of integer types including boolean, the output is of float32 or\n float64 type, which depends on your current default dtype:\n\n * When ``npx.is_np_default_dtype()`` returns False, default dtype is float32.\n * When ``npx.is_np_default_dtype()`` returns True, default dtype is float64.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Dividend array.\n x2 : ndarray or scalar\n Divisor array.\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n This is a scalar if both x1 and x2 are scalars.\n\n Examples\n --------\n >>> np.true_divide(x, 4)\n array([0. , 0.25, 0.5 , 0.75, 1. ])\n \"\"\"\n return _mx_nd_np.divide(x1, x2, out=out)\n\n\n@set_module('mxnet.numpy')\ndef true_divide(x1, x2, out=None):\n \"\"\"Returns a true division of the inputs, element-wise.\n\n Instead of the Python traditional 'floor division', this returns a true\n division. 
True division adjusts the output type to present the best\n answer, regardless of input types.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Dividend array.\n x2 : ndarray or scalar\n Divisor array.\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n This is a scalar if both x1 and x2 are scalars.\n\n .. note::\n\n This operator now supports automatic type promotion. The resulting type will be determined\n according to the following rules:\n\n * If both inputs are of floating number types, the output is the more precise type.\n * If only one of the inputs is floating number type, the result is that type.\n * If both inputs are of integer types (including boolean), the output is of float32 or\n float64 type, which depends on your current default dtype.\n When npx.is_np_default_dtype() returns False, default dtype is float32;\n When npx.is_np_default_dtype() returns True, default dtype is float64.\n\n Examples\n --------\n >>> x = np.arange(5)\n >>> np.true_divide(x, 4)\n array([0. , 0.25, 0.5 , 0.75, 1. ])\n \"\"\"\n return _mx_nd_np.true_divide(x1, x2, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef floor_divide(x1, x2, out=None):\n \"\"\"Return the largest integer smaller or equal to the division of the inputs.\n\n It is equivalent to the Python // operator and pairs with the Python % (remainder),\n function so that a = a % b + b * (a // b) up to roundoff.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Dividend array.\n x2 : ndarray or scalar\n Divisor array.\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n This is a scalar if both x1 and x2 are scalars.\n\n .. note::\n\n This operator now supports automatic type promotion. The resulting type will be determined\n according to the following rules:\n\n * If both inputs are of floating number types, the output is the more precise type.\n * If only one of the inputs is floating number type, the result is that type.\n * If both inputs are of integer types (including boolean), the output is the more\n precise type\n\n Examples\n --------\n >>> np.floor_divide(7,3)\n 2\n >>> np.floor_divide([1., 2., 3., 4.], 2.5)\n array([ 0., 0., 1., 1.])\n \"\"\"\n return _mx_nd_np.floor_divide(x1, x2, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef mod(x1, x2, out=None, **kwargs):\n \"\"\"\n Return element-wise remainder of division.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Dividend array.\n\n x2 : ndarray or scalar\n Divisor array.\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. 
If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n This is a scalar if both x1 and x2 are scalars.\n\n Examples\n --------\n >>> np.mod(np.arange(7), 5)\n array([0., 1., 2., 3., 4., 0., 1.])\n \"\"\"\n return _mx_nd_np.mod(x1, x2, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef fmod(x1, x2, out=None, **kwargs):\n \"\"\"\n Return element-wise remainder of division.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Dividend array.\n\n x2 : ndarray or scalar\n Divisor array.\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n This is a scalar if both x1 and x2 are scalars.\n\n Examples\n --------\n >>> np.fmod(np.arange(7), 5)\n array([0., 1., 2., 3., 4., 0., 1.])\n \"\"\"\n return _mx_nd_np.fmod(x1, x2, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef matmul(a, b, out=None, **kwargs):\n r\"\"\"Matrix product of two arrays.\n\n Parameters\n ----------\n a, b : ndarray\n Input arrays, scalars not allowed.\n out : ndarray, optional\n A location into which the result is stored.\n If provided, it must have a shape that matches the signature (n,k),(k,m)->(n,m).\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray\n The matrix product of the inputs.\n This is a scalar only when both x1, x2 are 1-d vectors.\n\n Raises\n ------\n MXNetError\n If the last dimension of a is not the same size as the second-to-last dimension of b.\n If a scalar value is passed in.\n\n See Also\n --------\n tensordot : Sum products over arbitrary axes.\n dot : alternative matrix product with different broadcasting rules.\n einsum : Einstein summation convention.\n\n .. note::\n\n The behavior depends on the arguments in the following way.\n\n * If both arguments are ``2-D`` they are multiplied like conventional matrices.\n * If either argument is ``N-D``, ``N > 2``, it is treated as a stack of matrices\n residing in the last two indexes and broadcast accordingly.\n * If the first argument is ``1-D``, it is promoted to a matrix by prepending\n a 1 to its dimensions. After matrix multiplication the prepended 1 is removed.\n * If the second argument is ``1-D``, it is promoted to a matrix by appending a 1\n to its dimensions. After matrix multiplication the appended 1 is removed.\n\n matmul differs from dot in two important ways:\n\n * Multiplication by scalars is not allowed, use multiply instead.\n * Stacks of matrices are broadcast together as if the matrices were elements,\n respecting the signature ``(n,k),(k,m)->(n,m)``:\n\n >>> a = np.ones([9, 5, 7, 4])\n >>> c = np.ones([9, 5, 4, 3])\n >>> np.dot(a, c).shape\n (9, 5, 7, 9, 5, 3)\n >>> np.matmul(a, c).shape\n (9, 5, 7, 3)\n >>> # n is 7, k is 4, m is 3\n\n Examples\n --------\n For 2-D arrays it is the matrix product:\n\n >>> a = np.array([[1, 0],\n ... [0, 1]])\n >>> b = np.array([[4, 1],\n ... [2, 2]])\n >>> np.matmul(a, b)\n array([[4., 1.],\n [2., 2.]])\n\n For 2-D mixed with 1-D, the result is the usual.\n\n >>> a = np.array([[1, 0],\n ... 
[0, 1]])\n >>> b = np.array([1, 2])\n >>> np.matmul(a, b)\n array([1., 2.])\n >>> np.matmul(b, a)\n array([1., 2.])\n\n Broadcasting is conventional for stacks of arrays\n\n >>> a = np.arange(2 * 2 * 4).reshape((2, 2, 4))\n >>> b = np.arange(2 * 2 * 4).reshape((2, 4, 2))\n >>> np.matmul(a, b).shape\n (2, 2, 2)\n >>> np.matmul(a, b)[0, 1, 1]\n array(98.)\n >>> sum(a[0, 1, :] * b[0, :, 1])\n array(98.)\n\n Scalar multiplication raises an error.\n\n >>> np.matmul([1, 2], 3)\n Traceback (most recent call last):\n ...\n mxnet.base.MXNetError: ... : Multiplication by scalars is not allowed.\n\n \"\"\"\n return _mx_nd_np.matmul(a, b, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef remainder(x1, x2, out=None, **kwargs):\n \"\"\"\n Return element-wise remainder of division.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Dividend array.\n\n x2 : ndarray or scalar\n Divisor array.\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n This is a scalar if both x1 and x2 are scalars.\n\n Examples\n --------\n >>> np.remainder(np.arange(7), 5)\n array([0., 1., 2., 3., 4., 0., 1.])\n \"\"\"\n return _mx_nd_np.remainder(x1, x2, out=out)\n\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef power(x1, x2, out=None, **kwargs):\n \"\"\"\n First array elements raised to powers from second array, element-wise.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n The bases.\n\n x2 : ndarray or scalar\n The exponent.\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n The bases in x1 raised to the exponents in x2.\n This is a scalar if both x1 and x2 are scalars.\n\n Examples\n --------\n >>> x1 = np.arange(6)\n >>> np.power(x1, 3)\n array([ 0., 1., 8., 27., 64., 125.])\n\n Raise the bases to different exponents.\n\n >>> x2 = np.array([1.0, 2.0, 3.0, 3.0, 2.0, 1.0])\n >>> np.power(x1, x2)\n array([ 0., 1., 8., 27., 16., 5.])\n\n The effect of broadcasting.\n\n >>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])\n >>> x2\n array([[1., 2., 3., 3., 2., 1.],\n [1., 2., 3., 3., 2., 1.]])\n\n >>> np.power(x1, x2)\n array([[ 0., 1., 8., 27., 16., 5.],\n [ 0., 1., 8., 27., 16., 5.]])\n \"\"\"\n return _mx_nd_np.power(x1, x2, out=out)\n\npow = power\npow.__doc__ = \"\"\"\n First array elements raised to powers from second array, element-wise.\n \n Notes \n ----- \n `pow` is an alias for `power`. It is a standard API in \n https://data-apis.org/array-api/latest/API_specification/elementwise_functions.html#pow-x1-x2 \n instead of an official NumPy operator. \n \n >>> np.pow is np.power \n True \n\n Parameters\n ----------\n x1 : ndarray or scalar\n The bases.\n\n x2 : ndarray or scalar\n The exponent.\n\n out : ndarray\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. 
If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n out : ndarray or scalar\n The bases in x1 raised to the exponents in x2.\n This is a scalar if both x1 and x2 are scalars.\n\n Examples\n --------\n >>> x1 = np.arange(6)\n >>> np.pow(x1, 3)\n array([ 0., 1., 8., 27., 64., 125.])\n\n Raise the bases to different exponents.\n\n >>> x2 = np.array([1.0, 2.0, 3.0, 3.0, 2.0, 1.0])\n >>> np.pow(x1, x2)\n array([ 0., 1., 8., 27., 16., 5.])\n\n The effect of broadcasting.\n\n >>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])\n >>> x2\n array([[1., 2., 3., 3., 2., 1.],\n [1., 2., 3., 3., 2., 1.]])\n\n >>> np.pow(x1, x2)\n array([[ 0., 1., 8., 27., 16., 5.],\n [ 0., 1., 8., 27., 16., 5.]])\n \"\"\"\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef gcd(x1, x2, out=None, **kwargs):\n \"\"\"\n Returns the greatest common divisor of ``|x1|`` and ``|x2|``\n\n Parameters\n ----------\n x1, x2 : ndarrays or scalar values\n The arrays for computing greatest common divisor. If x1.shape != x2.shape,\n they must be broadcastable to a common shape (which may be the shape of\n one or the other).\n\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n y : ndarray or scalar\n The greatest common divisor of the absolute value of the inputs\n This is a scalar if both `x1` and `x2` are scalars.\n\n See Also\n --------\n gcd : The lowest common multiple\n\n Examples\n --------\n >>> np.gcd(12, 20)\n 4\n >>> np.gcd(np.arange(6, dtype=int), 20)\n array([20, 1, 2, 1, 4, 5], dtype=int64)\n \"\"\"\n return _mx_nd_np.gcd(x1, x2, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef lcm(x1, x2, out=None, **kwargs):\n \"\"\"\n Returns the lowest common multiple of ``|x1|`` and ``|x2|``\n\n Parameters\n ----------\n x1, x2 : ndarrays or scalar values\n The arrays for computing lowest common multiple. If x1.shape != x2.shape,\n they must be broadcastable to a common shape (which may be the shape of\n one or the other).\n\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. If not provided or None, a freshly-allocated array\n is returned.\n\n Returns\n -------\n y : ndarray or scalar\n The lowest common multiple of the absolute value of the inputs\n This is a scalar if both `x1` and `x2` are scalars.\n\n See Also\n --------\n gcd : The greatest common divisor\n\n Examples\n --------\n >>> np.lcm(12, 20)\n 60\n >>> np.lcm(np.arange(6, dtype=int), 20)\n array([ 0, 20, 20, 60, 20, 20], dtype=int64)\n \"\"\"\n return _mx_nd_np.lcm(x1, x2, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef sin(x, out=None, **kwargs):\n r\"\"\"\n Trigonometric sine, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs broadcast to. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output is the same as that of the input if the input is an ndarray.\n\n Returns\n -------\n y : ndarray or scalar\n The sine of each element of x. 
This is a scalar if `x` is a scalar.\n\n Notes\n ----\n This function only supports input type of float.\n\n Examples\n --------\n >>> np.sin(np.pi/2.)\n 1.0\n >>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180.)\n array([0. , 0.5 , 0.70710677, 0.86602545, 1. ])\n \"\"\"\n return _mx_nd_np.sin(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef cos(x, out=None, **kwargs):\n r\"\"\"\n Cosine, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs broadcast to. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output is the same as that of the input if the input is an ndarray.\n\n Returns\n -------\n y : ndarray or scalar\n The corresponding cosine values. This is a scalar if x is a scalar.\n\n Notes\n ----\n This function only supports input type of float.\n\n Examples\n --------\n >>> np.cos(np.array([0, np.pi/2, np.pi]))\n array([ 1.000000e+00, -4.371139e-08, -1.000000e+00])\n >>> # Example of providing the optional output parameter\n >>> out1 = np.array([0], dtype='f')\n >>> out2 = np.cos(np.array([0.1]), out1)\n >>> out2 is out1\n True\n \"\"\"\n return _mx_nd_np.cos(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef sinh(x, out=None, **kwargs):\n \"\"\"\n Hyperbolic sine, element-wise.\n Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or ``-1j * np.sin(1j*x)``.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array or scalar.\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs broadcast to. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output is the same as that of the input if the input is an ndarray.\n\n Returns\n -------\n y : ndarray or scalar\n The corresponding hyperbolic sine values. This is a scalar if `x` is a scalar.\n\n Notes\n ----\n This function only supports input type of float.\n\n Examples\n --------\n >>> np.sinh(0)\n 0.0\n >>> # Example of providing the optional output parameter\n >>> out1 = np.array([0], dtype='f')\n >>> out2 = np.sinh(np.array([0.1]), out1)\n >>> out2 is out1\n True\n \"\"\"\n return _mx_nd_np.sinh(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef cosh(x, out=None, **kwargs):\n \"\"\"\n Hyperbolic cosine, element-wise.\n Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array or scalar.\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs broadcast to. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output is the same as that of the input if the input is an ndarray.\n\n Returns\n -------\n y : ndarray or scalar\n The corresponding hyperbolic cosine values. 
This is a scalar if `x` is a scalar.\n\n Notes\n ----\n This function only supports input type of float.\n\n Examples\n --------\n >>> np.cosh(0)\n 1.0\n \"\"\"\n return _mx_nd_np.cosh(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef tanh(x, out=None, **kwargs):\n \"\"\"\n Compute hyperbolic tangent element-wise.\n Equivalent to ``np.sinh(x)/np.cosh(x)``.\n\n Parameters\n ----------\n x : ndarray or scalar.\n Input array.\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs broadcast to. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output and input must be the same.\n\n Returns\n -------\n y : ndarray or scalar\n The corresponding hyperbolic tangent values.\n\n .. note::\n If `out` is provided, the function writes the result into it,\n and returns a reference to `out`. (See Examples)\n\n * input x does not support complex computation (like imaginary number)\n\n >>> np.tanh(np.pi*1j)\n TypeError: type <type 'complex'> not supported\n\n Examples\n --------\n >>> np.tanh(np.array([0, np.pi]))\n array([0. , 0.9962721])\n >>> np.tanh(np.pi)\n 0.99627207622075\n >>> # Example of providing the optional output parameter illustrating\n >>> # that what is returned is a reference to said parameter\n >>> out1 = np.array(1)\n >>> out2 = np.tanh(np.array(0.1), out1)\n >>> out2 is out1\n True\n \"\"\"\n return _mx_nd_np.tanh(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef log10(x, out=None, **kwargs):\n \"\"\"\n Return the base 10 logarithm of the input array, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array or scalar.\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs broadcast to. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output is the same as that of the input if the input is an ndarray.\n\n Returns\n -------\n y : ndarray or scalar\n The logarithm to the base 10 of `x`, element-wise. NaNs are\n returned where x is negative. This is a scalar if `x` is a scalar.\n\n Notes\n ----\n This function only supports input type of float.\n\n Examples\n --------\n >>> np.log10(np.array([1e-15, -3.]))\n array([-15., nan])\n \"\"\"\n return _mx_nd_np.log10(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef sqrt(x, out=None, **kwargs):\n \"\"\"\n Return the non-negative square-root of an array, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n The values whose square-roots are required.\n out : ndarray, or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or scalar\n An array of the same shape as `x`, containing the positive\n square-root of each element in `x`. 
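# --- Added usage sketch (not part of the original docstrings) ---
# Hedged illustration of the `out` semantics described for the unary functions
# above, assuming `from mxnet import np`: when `out` is supplied, the result is
# written into it and the very same array is returned.
from mxnet import np

x = np.array([1., 4., 9.])
buf = np.zeros((3,))
ret = np.sqrt(x, out=buf)
print(ret is buf)   # True -- `ret` is only a reference to `buf`
print(buf)          # array([1., 2., 3.])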
This is a scalar if `x` is a scalar.\n\n Notes\n ----\n This function only supports input type of float.\n\n Examples\n --------\n >>> np.sqrt(np.array([1,4,9]))\n array([1., 2., 3.])\n >>> np.sqrt(np.array([4, -1, _np.inf]))\n array([ 2., nan, inf])\n \"\"\"\n return _mx_nd_np.sqrt(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef cbrt(x, out=None, **kwargs):\n \"\"\"\n Return the cube-root of an array, element-wise.\n\n Parameters\n ----------\n x : ndarray\n The values whose cube-roots are required.\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape that the\n inputs broadcast to. If not provided or None, a freshly-allocated array is returned.\n A tuple (possible only as a keyword argument) must have length equal to the number of outputs.\n\n Returns\n ----------\n y : ndarray\n An array of the same shape as x, containing the cube cube-root of each element in x.\n If out was provided, y is a reference to it. This is a scalar if x is a scalar.\n\n Examples\n ----------\n >>> np.cbrt([1,8,27])\n array([ 1., 2., 3.])\n \"\"\"\n return _mx_nd_np.cbrt(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef abs(x, out=None, **kwargs):\n r\"\"\"\n Calculate the absolute value element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n absolute : ndarray\n An ndarray containing the absolute value of\n each element in `x`. This is a scalar if `x` is a scalar.\n\n Examples\n --------\n >>> x = np.array([-1.2, 1.2])\n >>> np.abs(x)\n array([1.2, 1.2])\n \"\"\"\n return _mx_nd_np.abs(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef fabs(x, out=None, **kwargs):\n r\"\"\"\n Calculate the absolute value element-wise.\n\n This function returns the absolute values (positive magnitude) of the\n data in `x`. Complex values are not handled, use `absolute` to find the\n absolute values of complex data.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n absolute : ndarray\n An ndarray containing the absolute value of\n each element in `x`. This is a scalar if `x` is a scalar.\n\n Examples\n --------\n >>> np.fabs(-1)\n 1.0\n >>> np.fabs(np.array([-1.2, 1.2]))s\n array([ 1.2, 1.2])\n \"\"\"\n return _mx_nd_np.fabs(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef absolute(x, out=None, **kwargs):\n \"\"\"\n Calculate the absolute value element-wise.\n np.abs is a shorthand for this function.\n\n Parameters\n ----------\n x : ndarray\n Input array.\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape\n that the inputs broadcast to. 
If not provided or None, a freshly-allocated array is returned.\n A tuple (possible only as a keyword argument) must have length equal to the number of outputs.\n\n Returns\n ----------\n absolute : ndarray\n An ndarray containing the absolute value of each element in x.\n\n Examples\n ----------\n >>> x = np.array([-1.2, 1.2])\n >>> np.absolute(x)\n array([ 1.2, 1.2])\n \"\"\"\n return _mx_nd_np.absolute(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef exp(x, out=None, **kwargs):\n r\"\"\"\n Calculate the exponential of all elements in the input array.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input values.\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray or scalar\n Output array, element-wise exponential of `x`.\n This is a scalar if `x` is a scalar.\n\n Examples\n --------\n >>> np.exp(1)\n 2.718281828459045\n >>> x = np.array([-1, 1, -2, 2])\n >>> np.exp(x)\n array([0.36787945, 2.7182817 , 0.13533528, 7.389056 ])\n \"\"\"\n return _mx_nd_np.exp(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef expm1(x, out=None, **kwargs):\n r\"\"\"\n Calculate `exp(x) - 1` for all elements in the array.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input values.\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray or scalar\n Output array, element-wise exponential minus one: `out = exp(x) - 1`.\n This is a scalar if `x` is a scalar.\n\n Examples\n --------\n >>> np.expm1(1)\n 1.718281828459045\n >>> x = np.array([-1, 1, -2, 2])\n >>> np.exp(x)\n array([-0.63212056, 1.71828183, -0.86466472, 6.3890561])\n \"\"\"\n return _mx_nd_np.expm1(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef arcsin(x, out=None, **kwargs):\n r\"\"\"\n Inverse sine, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n `y`-coordinate on the unit circle.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape as the input.\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n angle : ndarray or scalar\n Output array is same shape and type as x. This is a scalar if x is a scalar.\n The inverse sine of each element in `x`, in radians and in the\n closed interval ``[-pi/2, pi/2]``.\n\n Examples\n --------\n >>> np.arcsin(1) # pi/2\n 1.5707963267948966\n >>> np.arcsin(-1) # -pi/2\n -1.5707963267948966\n >>> np.arcsin(0)\n 0.0\n\n .. note::\n `arcsin` is a multivalued function: for each `x` there are infinitely\n many numbers `z` such that :math:`sin(z) = x`. 
The convention is to\n return the angle `z` whose real part lies in [-pi/2, pi/2].\n For real-valued input data types, *arcsin* always returns real output.\n For each value that cannot be expressed as a real number or infinity,\n it yields ``nan`` and sets the `invalid` floating point error flag.\n The inverse sine is also known as `asin` or sin^{-1}.\n The output `ndarray` has the same `device` as the input `ndarray`.\n This function differs from the original `numpy.arcsin\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.arcsin.html>`_ in\n the following aspects:\n\n * Only support ndarray or scalar now.\n * `where` argument is not supported.\n * Complex input is not supported.\n\n References\n ----------\n Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,\n 10th printing, New York: Dover, 1964, pp. 79ff.\n http://www.math.sfu.ca/~cbm/aands/\n \"\"\"\n return _mx_nd_np.arcsin(x, out=out, **kwargs)\n\nasin = arcsin\nasin.__doc__ = \"\"\"\n Inverse sine, element-wise.\n \n >>>np.asin is np.asin\n True\n\n Parameters\n ----------\n x : ndarray or scalar\n `y`-coordinate on the unit circle.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape as the input.\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n angle : ndarray or scalar\n Output array is same shape and type as x. This is a scalar if x is a scalar.\n The inverse sine of each element in `x`, in radians and in the\n closed interval ``[-pi/2, pi/2]``.\n\n Examples\n --------\n >>> np.asin(1) # pi/2\n 1.5707963267948966\n >>> np.asin(-1) # -pi/2\n -1.5707963267948966\n >>> np.asin(0)\n 0.0\n\n .. note::\n `asin` is a alias for `arcsin`. It is a standard API in\n https://data-apis.org/array-api/latest/API_specification/elementwise_functions.html#asin-x\n instead of an official NumPy operator.\n \n `asin` is a multivalued function: for each `x` there are infinitely\n many numbers `z` such that :math:`sin(z) = x`. The convention is to\n return the angle `z` whose real part lies in [-pi/2, pi/2].\n For real-valued input data types, *asin* always returns real output.\n For each value that cannot be expressed as a real number or infinity,\n it yields ``nan`` and sets the `invalid` floating point error flag.\n The inverse sine is also known as `asin` or sin^{-1}.\n The output `ndarray` has the same `ctx` as the input `ndarray`.\n This function differs from the original `numpy.arcsin\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.arcsin.html>`_ in\n the following aspects:\n\n * Only support ndarray or scalar now.\n * `where` argument is not supported.\n * Complex input is not supported.\n\n References\n ----------\n Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,\n 10th printing, New York: Dover, 1964, pp. 79ff.\n http://www.math.sfu.ca/~cbm/aands/\n \"\"\"\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef arccos(x, out=None, **kwargs):\n \"\"\"\n Trigonometric inverse cosine, element-wise.\n The inverse of cos so that, if y = cos(x), then x = arccos(y).\n\n Parameters\n ----------\n x : ndarray\n x-coordinate on the unit circle. For real arguments, the domain is [-1, 1].\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape that\n the inputs broadcast to. 
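# --- Added usage sketch (not part of the original docstrings) ---
# A small check of the inverse-trig pair documented above, assuming
# `from mxnet import np`: on [-1, 1], arcsin(x) + arccos(x) equals pi/2
# (arcsin maps into [-pi/2, pi/2], arccos into [0, pi]).
from mxnet import np

x = np.array([-1.0, -0.5, 0.0, 0.5, 1.0])
s = np.arcsin(x)
c = np.arccos(x)
print(bool((np.abs(s + c - np.pi / 2) < 1e-6).all()))   # True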
If not provided or None, a freshly-allocated array is returned.\n A tuple (possible only as a keyword argument) must have length equal to the number of outputs.\n\n Returns\n ----------\n angle : ndarray\n The angle of the ray intersecting the unit circle at the given x-coordinate in radians [0, pi].\n This is a scalar if x is a scalar.\n\n Notes\n ----------\n arccos is a multivalued function: for each x there are infinitely many numbers z such that\n cos(z) = x. The convention is to return the angle z whose real part lies in [0, pi].\n For real-valued input data types, arccos always returns real output.\n For each value that cannot be expressed as a real number or infinity, it yields nan and sets\n the invalid floating point error flag.\n The inverse cos is also known as acos or cos^-1.\n\n Examples\n ----------\n >>> np.arccos([1, -1])\n array([ 0. , 3.14159265])\n \"\"\"\n return _mx_nd_np.arccos(x, out=out, **kwargs)\n\nacos = arccos\nacos.__doc__ = \"\"\"\n Trigonometric inverse cosine, element-wise.\n The inverse of cos so that, if y = cos(x), then x = acos(y).\n \n >>>np.acos is np.arccos\n True\n\n Parameters\n ----------\n x : ndarray\n x-coordinate on the unit circle. For real arguments, the domain is [-1, 1].\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape that\n the inputs broadcast to. If not provided or None, a freshly-allocated array is returned.\n A tuple (possible only as a keyword argument) must have length equal to the number of outputs.\n\n Returns\n ----------\n angle : ndarray\n The angle of the ray intersecting the unit circle at the given x-coordinate in radians [0, pi].\n This is a scalar if x is a scalar.\n\n Notes\n ----------\n `acos` is a alias for `arccos`. It is a standard API in\n https://data-apis.org/array-api/latest/API_specification/elementwise_functions.html#acos-x\n instead of an official NumPy operator.\n \n acos is a multivalued function: for each x there are infinitely many numbers z such that\n cos(z) = x. The convention is to return the angle z whose real part lies in [0, pi].\n For real-valued input data types, acos always returns real output.\n For each value that cannot be expressed as a real number or infinity, it yields nan and sets\n the invalid floating point error flag.\n The inverse cos is also known as acos or cos^-1.\n\n Examples\n ----------\n >>> np.acos([1, -1])\n array([ 0. , 3.14159265])\n \"\"\"\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef arctan(x, out=None, **kwargs):\n r\"\"\"\n Trigonometric inverse tangent, element-wise.\n The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input values.\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray or scalar\n Out has the same shape as `x`. It lies is in\n ``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n `arctan` is a multi-valued function: for each `x` there are infinitely\n many numbers `z` such that tan(`z`) = `x`. 
The convention is to return\n the angle `z` whose real part lies in [-pi/2, pi/2].\n For real-valued input data types, `arctan` always returns real output.\n For each value that cannot be expressed as a real number or infinity,\n it yields ``nan`` and sets the `invalid` floating point error flag.\n For complex-valued input, we do not have support for them yet.\n The inverse tangent is also known as `atan` or tan^{-1}.\n\n Examples\n --------\n >>> x = np.array([0, 1])\n >>> np.arctan(x)\n array([0. , 0.7853982])\n >>> np.pi/4\n 0.7853981633974483\n \"\"\"\n return _mx_nd_np.arctan(x, out=out, **kwargs)\n\natan = arctan\natan.__doc__ = \"\"\"\n Trigonometric inverse tangent, element-wise.\n The inverse of tan, so that if ``y = tan(x)`` then ``x = atan(y)``.\n \n >>>np.atan is np.arctan\n True\n \n Parameters\n ----------\n x : ndarray or scalar\n Input values.\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray or scalar\n Out has the same shape as `x`. It lies is in\n ``[-pi/2, pi/2]`` (``atan(+/-inf)`` returns ``+/-pi/2``).\n This is a scalar if `x` is a scalar.\n\n Notes\n -----\n `atan` is a alias for `arctan`. It is a standard API in\n https://data-apis.org/array-api/latest/API_specification/elementwise_functions.html#atan-x\n instead of an official NumPy operator.\n \n `atan` is a multi-valued function: for each `x` there are infinitely\n many numbers `z` such that tan(`z`) = `x`. The convention is to return\n the angle `z` whose real part lies in [-pi/2, pi/2].\n For real-valued input data types, `atan` always returns real output.\n For each value that cannot be expressed as a real number or infinity,\n it yields ``nan`` and sets the `invalid` floating point error flag.\n For complex-valued input, we do not have support for them yet.\n The inverse tangent is also known as `atan` or tan^{-1}.\n\n Examples\n --------\n >>> x = np.array([0, 1])\n >>> np.atan(x)\n array([0. , 0.7853982])\n >>> np.pi/4\n 0.7853981633974483\n \"\"\"\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef sign(x, out=None, **kwargs):\n \"\"\"\n Returns an element-wise indication of the sign of a number.\n The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``. Only supports real number.\n\n Parameters\n ----------\n x : ndarray or a scalar\n Input values.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray\n The sign of `x`.\n This is a scalar if `x` is a scalar.\n\n .. note::\n * Only supports real number as input elements.\n * Input type does not support Python native iterables(list, tuple, ...).\n * ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be\n the same as the expected output.\n * ``out`` param: cannot perform auto type cast. 
``out`` ndarray's dtype must be the\n same as the expected output.\n * ``out`` param does not support scalar input case.\n\n Examples\n --------\n >>> a = np.array([-5., 4.5])\n >>> np.sign(a)\n array([-1., 1.])\n Scalars as input:\n >>> np.sign(4.0)\n 1.0\n >>> np.sign(0)\n 0\n Use ``out`` parameter:\n >>> b = np.zeros((2, ))\n >>> np.sign(a, out=b)\n array([-1., 1.])\n >>> b\n array([-1., 1.])\n \"\"\"\n return _mx_nd_np.sign(x, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef log(x, out=None, **kwargs):\n \"\"\"\n Natural logarithm, element-wise.\n The natural logarithm `log` is the inverse of the exponential function,\n so that `log(exp(x)) = x`. The natural logarithm is logarithm in base\n `e`.\n\n Parameters\n ----------\n x : ndarray\n Input value. Elements must be of real value.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray\n The natural logarithm of `x`, element-wise.\n This is a scalar if `x` is a scalar.\n\n .. note::\n Currently only supports data of real values and ``inf`` as input. Returns data of\n real value, ``inf``, ``-inf`` and ``nan`` according to the input.\n This function differs from the original `numpy.log\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.log.html>`_ in\n the following aspects:\n\n * Does not support complex number for now\n * Input type does not support Python native iterables(list, tuple, ...).\n * ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be\n the same as the expected output.\n * ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the\n same as the expected output.\n * ``out`` param does not support scalar input case.\n\n Examples\n --------\n >>> a = np.array([1, np.exp(1), np.exp(2), 0], dtype=np.float64)\n >>> np.log(a)\n array([ 0., 1., 2., -inf], dtype=float64)\n >>> # Using the default float32 dtype leads to slightly different behavior\n >>> a = np.array([1, np.exp(1), np.exp(2), 0])\n >>> np.log(a)\n array([ 0., 0.99999994, 2., -inf])\n >>> np.log(1)\n 0.0\n \"\"\"\n return _mx_nd_np.log(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef rint(x, out=None, **kwargs):\n \"\"\"\n Round elements of the array to the nearest integer.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None\n A location into which the result is stored.\n If provided, it must have the same shape and type as the input.\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray or scalar\n Output array is same shape and type as x. This is a scalar if x is a scalar.\n\n .. 
note::\n This function differs from the original `numpy.rint\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.rint.html>`_ in\n the following way(s):\n\n * only ndarray or scalar is accpted as valid input, tuple of ndarray is not supported\n * broadcasting to `out` of different shape is currently not supported\n * when input is plain python numerics, the result will not be stored in the `out` param\n\n Examples\n --------\n >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])\n >>> np.rint(a)\n array([-2., -2., -0., 0., 1., 2., 2.])\n \"\"\"\n return _mx_nd_np.rint(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef log2(x, out=None, **kwargs):\n \"\"\"\n Base-2 logarithm of x.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input values.\n out : ndarray or None\n A location into which the result is stored.\n If provided, it must have the same shape and type as the input.\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray\n The logarithm base two of `x`, element-wise.\n This is a scalar if `x` is a scalar.\n\n .. note::\n This function differs from the original `numpy.log2\n <https://www.google.com/search?q=numpy+log2>`_ in\n the following way(s):\n\n * only ndarray or scalar is accpted as valid input, tuple of ndarray is not supported\n * broadcasting to `out` of different shape is currently not supported\n * when input is plain python numerics, the result will not be stored in the `out` param\n\n Examples\n --------\n >>> x = np.array([0, 1, 2, 2**4])\n >>> np.log2(x)\n array([-inf, 0., 1., 4.])\n \"\"\"\n return _mx_nd_np.log2(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef log1p(x, out=None, **kwargs):\n \"\"\"\n Return the natural logarithm of one plus the input array, element-wise.\n Calculates ``log(1 + x)``.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs fill into. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output and input must be the same.\n\n Returns\n -------\n y : ndarray or scalar\n Natural logarithm of 1 + x, element-wise. This is a scalar\n if x is a scalar.\n\n Notes\n -----\n For real-valued input, `log1p` is accurate also for `x` so small\n that `1 + x == 1` in floating-point accuracy.\n Logarithm is a multivalued function: for each `x` there is an infinite\n number of `z` such that `exp(z) = 1 + x`. The convention is to return\n the `z` whose imaginary part lies in `[-pi, pi]`.\n For real-valued input data types, `log1p` always returns real output.\n For each value that cannot be expressed as a real number or infinity,\n it yields ``nan`` and sets the `invalid` floating point error flag.\n cannot support complex-valued input.\n\n Examples\n --------\n >>> np.log1p(1e-99)\n 1e-99\n >>> a = np.array([3, 4, 5])\n >>> np.log1p(a)\n array([1.3862944, 1.609438 , 1.7917595])\n \"\"\"\n return _mx_nd_np.log1p(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef degrees(x, out=None, **kwargs):\n \"\"\"\n Convert angles from radians to degrees.\n\n Parameters\n ----------\n x : ndarray\n Input value. 
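# --- Added usage sketch (not part of the original docstrings) ---
# Hedged illustration of the log1p accuracy note above, assuming
# `from mxnet import np`: for tiny x in float32, 1 + x rounds to exactly 1,
# so log(1 + x) collapses to 0 while log1p(x) keeps the leading term.
from mxnet import np

x = np.array([1e-10], dtype=np.float32)
print(np.log(1 + x))    # array([0.])
print(np.log1p(x))      # approximately array([1.e-10])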
Elements must be of real value.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray\n The corresponding degree values; if `out` was supplied this is a\n reference to it.\n This is a scalar if `x` is a scalar.\n\n .. note::\n This function differs from the original `numpy.degrees\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.degrees.html>`_ in\n the following aspects:\n\n * Input type does not support Python native iterables(list, tuple, ...).\n Only ndarray is supported.\n * ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be\n the same as the expected output.\n * ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the\n same as the expected output.\n * ``out`` param does not support scalar input case.\n\n Examples\n --------\n >>> rad = np.arange(12.) * np.pi / 6\n >>> np.degrees(rad)\n array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.])\n >>> # Use specified ``out`` ndarray:\n >>> out = np.zeros((rad.shape))\n >>> np.degrees(rad, out)\n array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.])\n >>> out\n array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.])\n \"\"\"\n return _mx_nd_np.degrees(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef rad2deg(x, out=None, **kwargs):\n r\"\"\"Convert angles from radians to degrees.\n\n Parameters\n ----------\n x : ndarray or scalar\n Angles in degrees.\n out : ndarray or None, optional\n A location into which the result is stored. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or scalar\n The corresponding angle in radians.\n This is a scalar if `x` is a scalar.\n\n .. note::\n\n \"rad2deg(x)\" is \"x * 180 / pi\".\n\n This function differs from the original numpy.arange in the following aspects:\n\n * Only support float32 and float64.\n * `out` must be in the same size of input.\n\n Examples\n --------\n >>> np.rad2deg(np.pi/2)\n 90.0\n \"\"\"\n return _mx_nd_np.rad2deg(x, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef radians(x, out=None, **kwargs):\n \"\"\"\n Convert angles from degrees to radians.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array in degrees.\n out : ndarray or None\n A location into which the result is stored.\n If provided, it must have the same shape and type as the input.\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray\n The corresponding radian values. This is a scalar if x is a scalar.\n\n .. note::\n This function differs from the original `numpy.radians\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.radians.html>`_ in\n the following way(s):\n\n * only ndarray or scalar is accpted as valid input, tuple of ndarray is not supported\n * broadcasting to `out` of different shape is currently not supported\n * when input is plain python numerics, the result will not be stored in the `out` param\n\n Examples\n --------\n >>> deg = np.arange(12.) * 30.\n >>> np.radians(deg)\n array([0. 
, 0.5235988, 1.0471976, 1.5707964, 2.0943952, 2.6179938,\n 3.1415927, 3.6651914, 4.1887903, 4.712389 , 5.2359877, 5.7595863],\n dtype=float32)\n \"\"\"\n return _mx_nd_np.radians(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef deg2rad(x, out=None, **kwargs):\n r\"\"\"\n Convert angles from degrees to radians.\n\n Parameters\n ----------\n x : ndarray or scalar\n Angles in degrees.\n out : ndarray or None, optional\n A location into which the result is stored. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or scalar\n The corresponding angle in radians.\n This is a scalar if `x` is a scalar.\n\n .. note::\n \"deg2rad(x)\" is \"x * pi / 180\".\n\n This function differs from the original numpy.arange in the following aspects:\n\n * Only support float32 and float64.\n * `out` must be in the same size of input.\n\n Examples\n --------\n >>> np.deg2rad(180)\n 3.1415927\n \"\"\"\n return _mx_nd_np.deg2rad(x, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef reciprocal(x, out=None, **kwargs):\n r\"\"\"Return the reciprocal of the argument, element-wise.\n Calculates ``1/x``.\n\n Parameters\n ----------\n x : ndarray or scalar\n The values whose reciprocals are required.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape as the input.\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or scalar\n Output array is same shape and type as x. This is a scalar if x is a scalar.\n\n Examples\n --------\n >>> np.reciprocal(2.)\n 0.5\n >>> x = np.array([1, 2., 3.33])\n >>> np.reciprocal(x)\n array([1. , 0.5 , 0.3003003])\n\n .. note::\n\n This function is not designed to work with integers.\n For integer arguments with absolute value larger than 1 the result is\n always zero because of the way Python handles integer division. For\n integer zero the result is an overflow.\n The output `ndarray` has the same `device` as the input `ndarray`.\n This function differs from the original `numpy.reciprocal\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.reciprocal.html>`_ in\n the following aspects:\n\n * Only support ndarray and scalar now.\n * `where` argument is not supported.\n\n \"\"\"\n return _mx_nd_np.reciprocal(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef square(x, out=None, **kwargs):\n r\"\"\"\n Return the element-wise square of the input.\n\n Parameters\n ----------\n x : ndarray or scalar\n The values whose squares are required.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape as the input.\n If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or scalar\n Output array is same shape and type as x. This is a scalar if x is a scalar.\n\n Examples\n --------\n >>> np.square(2.)\n 4.0\n >>> x = np.array([1, 2., -1])\n >>> np.square(x)\n array([1., 4., 1.])\n\n .. 
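# --- Added usage sketch (not part of the original docstrings) ---
# Round-trip sketch for the angle converters documented above, assuming
# `from mxnet import np`: rad2deg(deg2rad(x)) recovers x up to float error.
from mxnet import np

deg = np.array([0., 30., 90., 180., 270.])
back = np.rad2deg(np.deg2rad(deg))
print(bool((np.abs(back - deg) < 1e-3).all()))   # True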
note::\n The output `ndarray` has the same `device` as the input `ndarray`.\n This function differs from the original `numpy.square\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.square.html>`_ in\n the following aspects:\n\n * Only support ndarray and scalar now.\n * `where` argument is not supported.\n * Complex input is not supported.\n\n \"\"\"\n return _mx_nd_np.square(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef negative(x, out=None, **kwargs):\n r\"\"\"\n Numerical negative, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored.\n If provided, it must have a shape that the inputs broadcast to.\n If not provided or None, a freshly-allocated array is returned.\n A tuple (possible only as a keyword argument) must have length\n equal to the number of outputs.\n\n Returns\n -------\n y : ndarray or scalar\n Returned array or scalar: y = -x. This is a scalar if x is a scalar.\n\n Examples\n --------\n >>> np.negative(1)\n -1\n \"\"\"\n return _mx_nd_np.negative(x, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef positive(x, out=None, **kwargs):\n r\"\"\"\n Computes the numerical positive of each element `x_i` (i.e.,`y_i = +x_i`)\n of the input array x .\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n\n Returns\n -------\n y : ndarray or scalar\n Returned array or scalar: y = +x. This is a scalar if x is a scalar.\n\n Notes\n -----\n Equivalent to `x.copy()`, but only defined for types that support arithmetic.\n\n Examples\n --------\n >>> x1 = np.array(([1., -1.]))\n >>> np.positive(x1)\n array([ 1., -1.])\n >>> +x1\n array([ 1., -1.])\n \"\"\"\n return _mx_nd_np.positive(x, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef fix(x, out=None, **kwargs):\n \"\"\"\n Round an array of floats element-wise to nearest integer towards zero.\n The rounded values are returned as floats.\n\n Parameters\n ----------\n x : ndarray\n An array of floats to be rounded\n out : ndarray, optional\n Output array\n\n Returns\n -------\n y : ndarray or scalar\n Returned array or scalar: y = -x. This is a scalar if x is a scalar.ndarray of floats\n\n Examples\n ---------\n >>> np.fix(3.14)\n 3\n \"\"\"\n return _mx_nd_np.fix(x, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef tan(x, out=None, **kwargs):\n r\"\"\"\n Compute tangent element-wise.\n Equivalent to np.sin(x)/np.cos(x) element-wise.\n\n Parameters\n ----------\n x : ndarray\n Input array.\n out : ndarray or none, optional\n A location into which the result is stored. If provided,\n it must have a shape that the inputs broadcast to. If not provided or None,\n a freshly-allocated array is returned. A tuple (possible only as a keyword argument)\n must have length equal to the number of outputs.\n\n Returns\n -------\n y : ndarray\n The corresponding tangent values. This is a scalar if x is a scalar.\n\n Examples\n ---------\n >>> np.tan(np.array([-np.pi, np.pi/2, np.pi]))\n array([-8.7422777e-08, -2.2877332e+07, 8.7422777e-08])\n \"\"\"\n\n return _mx_nd_np.tan(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef ceil(x, out=None, **kwargs):\n r\"\"\"\n Return the ceiling of the input, element-wise.\n The ceil of the ndarray `x` is the smallest integer `i`, such that\n `i >= x`. 
It is often denoted as :math:`\\lceil x \\rceil`.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs broadcast to. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output and input must be the same.\n\n Returns\n -------\n y : ndarray or scalar\n The ceiling of each element in `x`, with `float` dtype.\n This is a scalar if `x` is a scalar.\n\n Examples\n --------\n >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])\n >>> np.ceil(a)\n array([-1., -1., -0., 1., 2., 2., 2.])\n >>> # if you use parameter out, x and out must be ndarray.\n >>> a = np.array(1)\n >>> np.ceil(np.array(3.5), a)\n array(4.)\n >>> a\n array(4.)\n \"\"\"\n return _mx_nd_np.ceil(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef floor(x, out=None, **kwargs):\n r\"\"\"\n Return the floor of the input, element-wise.\n The floor of the ndarray `x` is the largest integer `i`, such that\n `i <= x`. It is often denoted as :math:`\\lfloor x \\rfloor`.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None\n A location into which the result is stored. If provided, it\n must have a shape that the inputs broadcast to. If not provided\n or None, a freshly-allocated array is returned. The dtype of the\n output and input must be the same.\n\n Returns\n -------\n y : ndarray or scalar\n The floor of each element in `x`, with `float` dtype.\n This is a scalar if `x` is a scalar.\n\n Examples\n --------\n >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])\n >>> np.floor(a)\n array([-2., -2., -1., 0., 1., 1., 2.])\n >>> # if you use parameter out, x and out must be ndarray.\n >>> a = np.array(1)\n >>> np.floor(np.array(3.5), a)\n array(3.)\n >>> a\n array(3.)\n \"\"\"\n return _mx_nd_np.floor(x, out=out, **kwargs)\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef bitwise_invert(x, out=None, **kwargs):\n r\"\"\"\n Compute bit-wise inversion, or bit-wise NOT, element-wise.\n Computes the bit-wise NOT of the underlying binary representation of\n the integers in the input arrays. This ufunc implements the C/Python\n operator ``~``.\n\n Parameters\n ----------\n x : array_like\n Only integer and boolean types are handled.\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned. 
A tuple (possible only as a\n keyword argument) must have length equal to the number of outputs.\n\n Returns\n -------\n out : ndarray or scalar\n Result.\n This is a scalar if `x` is a scalar.\n\n See Also\n --------\n bitwise_and, bitwise_or, bitwise_xor\n logical_not\n binary_repr :\n Return the binary representation of the input number as a string.\n\n Examples\n --------\n We've seen that 13 is represented by ``00001101``.\n The invert or bit-wise NOT of 13 is then:\n\n >>> x = np.bitwise_invert(np.array(13, dtype=np.uint8))\n >>> x\n 242\n >>> np.binary_repr(x, width=8)\n '11110010'\n\n Notes\n -----\n `bitwise_not` is an alias for `invert`:\n\n >>> np.bitwise_not is np.invert\n True\n \"\"\"\n return _mx_nd_np.bitwise_not(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef invert(x, out=None, **kwargs):\n r\"\"\"\n Compute bit-wise inversion, or bit-wise NOT, element-wise.\n Computes the bit-wise NOT of the underlying binary representation of\n the integers in the input arrays. This ufunc implements the C/Python\n operator ``~``.\n\n Parameters\n ----------\n x : array_like\n Only integer and boolean types are handled.\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned. A tuple (possible only as a\n keyword argument) must have length equal to the number of outputs.\n\n Returns\n -------\n out : ndarray or scalar\n Result.\n This is a scalar if `x` is a scalar.\n\n See Also\n --------\n bitwise_and, bitwise_or, bitwise_xor\n logical_not\n binary_repr :\n Return the binary representation of the input number as a string.\n\n Examples\n --------\n We've seen that 13 is represented by ``00001101``.\n The invert or bit-wise NOT of 13 is then:\n\n >>> x = np.invert(np.array(13, dtype=np.uint8))\n >>> x\n 242\n >>> np.binary_repr(x, width=8)\n '11110010'\n\n Notes\n -----\n `bitwise_not` is an alias for `invert`:\n\n >>> np.bitwise_not is np.invert\n True\n \"\"\"\n return _mx_nd_np.bitwise_not(x, out=out, **kwargs)\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef bitwise_not(x, out=None, **kwargs):\n r\"\"\"\n Compute bit-wise inversion, or bit-wise NOT, element-wise.\n Computes the bit-wise NOT of the underlying binary representation of\n the integers in the input arrays. This ufunc implements the C/Python\n operator ``~``.\n\n Parameters\n ----------\n x : array_like\n Only integer and boolean types are handled.\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned. 
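# --- Added usage sketch (not part of the original docstrings) ---
# Hedged sketch of bitwise inversion on unsigned 8-bit integers, as described
# above (assumes `from mxnet import np`): for uint8, invert(x) equals 255 - x,
# and bitwise_not / bitwise_invert are aliases of the same operation.
from mxnet import np

x = np.array([0, 13, 255], dtype=np.uint8)
print(np.invert(x))        # values 255, 242, 0
print(np.bitwise_not(x))   # identical values -- bitwise_not is an alias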
A tuple (possible only as a\n keyword argument) must have length equal to the number of outputs.\n\n Returns\n -------\n out : ndarray or scalar\n Result.\n This is a scalar if `x` is a scalar.\n\n See Also\n --------\n bitwise_and, bitwise_or, bitwise_xor\n logical_not\n binary_repr :\n Return the binary representation of the input number as a string.\n\n Examples\n --------\n We've seen that 13 is represented by ``00001101``.\n The invert or bit-wise NOT of 13 is then:\n\n >>> x = np.invert(np.array(13, dtype=np.uint8))\n >>> x\n 242\n >>> np.binary_repr(x, width=8)\n '11110010'\n\n Notes\n -----\n `bitwise_not` is an alias for `invert`:\n\n >>> np.bitwise_not is np.invert\n True\n \"\"\"\n return _mx_nd_np.bitwise_not(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef trunc(x, out=None, **kwargs):\n r\"\"\"\n Return the truncated value of the input, element-wise.\n The truncated value of the scalar `x` is the nearest integer `i` which\n is closer to zero than `x` is. In short, the fractional part of the\n signed number `x` is discarded.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input data.\n out : ndarray or None, optional\n A location into which the result is stored.\n\n Returns\n -------\n y : ndarray or scalar\n The truncated value of each element in `x`.\n This is a scalar if `x` is a scalar.\n\n .. note::\n This function differs from the original numpy.trunc in the following aspects:\n\n * Do not support `where`, a parameter in numpy which indicates where to calculate.\n * Cannot cast type automatically. Dtype of `out` must be same as the expected one.\n * Cannot broadcast automatically. Shape of `out` must be same as the expected one.\n * If `x` is plain python numeric, the result won't be stored in out.\n\n Examples\n --------\n >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])\n >>> np.trunc(a)\n array([-1., -1., -0., 0., 1., 1., 2.])\n \"\"\"\n return _mx_nd_np.trunc(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef logical_not(x, out=None, **kwargs):\n r\"\"\"\n Compute the truth value of NOT x element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Logical NOT is applied to the elements of `x`.\n out : ndarray or None, optional\n A location into which the result is stored.\n\n Returns\n -------\n y : bool or ndarray of bool\n Boolean result with the same shape as `x` of the NOT operation\n on elements of `x`.\n This is a scalar if `x` is a scalar.\n\n .. note::\n This function differs from the original numpy.logical_not in the following aspects:\n * Do not support `where`, a parameter in numpy which indicates where to calculate.\n * Cannot cast type automatically. Dtype of `out` must be same as the expected one.\n * Cannot broadcast automatically. 
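# --- Added usage sketch (not part of the original docstrings) ---
# A compact comparison of the rounding operators defined above (ceil, floor,
# rint, fix, trunc), assuming `from mxnet import np`. For negative inputs,
# trunc and fix round toward zero while floor rounds toward -inf.
from mxnet import np

a = np.array([-1.7, -0.2, 0.2, 1.7])
print(np.ceil(a))    # array([-1., -0.,  1.,  2.])
print(np.floor(a))   # array([-2., -1.,  0.,  1.])
print(np.trunc(a))   # array([-1., -0.,  0.,  1.])
print(np.fix(a))     # array([-1., -0.,  0.,  1.])
print(np.rint(a))    # array([-2., -0.,  0.,  2.])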
Shape of `out` must be same as the expected one.\n * If `x` is plain python numeric, the result won't be stored in out.\n\n Examples\n --------\n >>> x = np.array([True, False, 0, 1])\n >>> np.logical_not(x)\n array([False, True, True, False])\n\n >>> x = np.arange(5)\n >>> np.logical_not(x<3)\n array([False, False, False, True, True])\n \"\"\"\n return _mx_nd_np.logical_not(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef arcsinh(x, out=None, **kwargs):\n r\"\"\"\n Inverse hyperbolic sine, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n\n Returns\n -------\n arcsinh : ndarray\n Array of the same shape as `x`.\n This is a scalar if `x` is a scalar.\n\n .. note::\n `arcsinh` is a multivalued function: for each `x` there are infinitely\n many numbers `z` such that `sinh(z) = x`.\n\n For real-valued input data types, `arcsinh` always returns real output.\n For each value that cannot be expressed as a real number or infinity, it\n yields ``nan`` and sets the `invalid` floating point error flag.\n\n This function differs from the original numpy.arcsinh in the following aspects:\n\n * Do not support `where`, a parameter in numpy which indicates where to calculate.\n * Do not support complex-valued input.\n * Cannot cast type automatically. DType of `out` must be same as the expected one.\n * Cannot broadcast automatically. Shape of `out` must be same as the expected one.\n * If `x` is plain python numeric, the result won't be stored in out.\n\n Examples\n --------\n >>> a = np.array([3.2, 5.0])\n >>> np.arcsinh(a)\n array([1.8798637, 2.3124385])\n\n >>> np.arcsinh(1)\n 0.8813736\n \"\"\"\n return _mx_nd_np.arcsinh(x, out=out, **kwargs)\n\nasinh = arcsinh\nasinh.__doc__ = \"\"\"\n Inverse hyperbolic sine, element-wise.\n \n >>> np.asinh is np.arcsinh\n True\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n\n Returns\n -------\n asinh : ndarray\n Array of the same shape as `x`.\n This is a scalar if `x` is a scalar.\n\n .. note::\n `asinh` is an alias for `arcsinh`. It is a standard API in\n https://data-apis.org/array-api/latest/API_specification/elementwise_functions.html#asinh-x\n instead of an official NumPy operator.\n \n `asinh` is a multivalued function: for each `x` there are infinitely\n many numbers `z` such that `sinh(z) = x`.\n\n For real-valued input data types, `asinh` always returns real output.\n For each value that cannot be expressed as a real number or infinity, it\n yields ``nan`` and sets the `invalid` floating point error flag.\n\n This function differs from the original numpy.arcsinh in the following aspects:\n\n * Do not support `where`, a parameter in numpy which indicates where to calculate.\n * Do not support complex-valued input.\n * Cannot cast type automatically. DType of `out` must be same as the expected one.\n * Cannot broadcast automatically. 
Shape of `out` must be same as the expected one.\n * If `x` is plain python numeric, the result won't be stored in out.\n\n Examples\n --------\n >>> a = np.array([3.2, 5.0])\n >>> np.asinh(a)\n array([1.8798637, 2.3124385])\n\n >>> np.asinh(1)\n 0.8813736\n \"\"\"\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef arccosh(x, out=None, **kwargs):\n r\"\"\"\n Inverse hyperbolic cosine, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n\n Returns\n -------\n arccosh : ndarray\n Array of the same shape as `x`.\n This is a scalar if `x` is a scalar.\n\n .. note::\n `arccosh` is a multivalued function: for each `x` there are infinitely\n many numbers `z` such that `cosh(z) = x`.\n\n For real-valued input data types, `arccosh` always returns real output.\n For each value that cannot be expressed as a real number or infinity, it\n yields ``nan`` and sets the `invalid` floating point error flag.\n\n This function differs from the original numpy.arccosh in the following aspects:\n\n * Do not support `where`, a parameter in numpy which indicates where to calculate.\n * Do not support complex-valued input.\n * Cannot cast type automatically. Dtype of `out` must be same as the expected one.\n * Cannot broadcast automatically. Shape of `out` must be same as the expected one.\n * If `x` is plain python numeric, the result won't be stored in out.\n\n Examples\n --------\n >>> a = np.array([3.2, 5.0])\n >>> np.arccosh(a)\n array([1.8309381, 2.2924316])\n\n >>> np.arccosh(1)\n 0.0\n \"\"\"\n return _mx_nd_np.arccosh(x, out=out, **kwargs)\n\nacosh = arccosh\nacosh.__doc__ = \"\"\"\n Inverse hyperbolic cosine, element-wise.\n \n >>> np.acosh is np.arccosh\n True\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n\n Returns\n -------\n acosh : ndarray\n Array of the same shape as `x`.\n This is a scalar if `x` is a scalar.\n\n .. note::\n `acosh` is an alias for `arccosh`. It is a standard API in\n https://data-apis.org/array-api/latest/API_specification/elementwise_functions.html#acosh-x\n instead of an official NumPy operator.\n \n `acosh` is a multivalued function: for each `x` there are infinitely\n many numbers `z` such that `cosh(z) = x`.\n\n For real-valued input data types, `acosh` always returns real output.\n For each value that cannot be expressed as a real number or infinity, it\n yields ``nan`` and sets the `invalid` floating point error flag.\n\n This function differs from the original numpy.arccosh in the following aspects:\n\n * Do not support `where`, a parameter in numpy which indicates where to calculate.\n * Do not support complex-valued input.\n * Cannot cast type automatically. Dtype of `out` must be same as the expected one.\n * Cannot broadcast automatically. 
Shape of `out` must be same as the expected one.\n * If `x` is plain python numeric, the result won't be stored in out.\n\n Examples\n --------\n >>> a = np.array([3.2, 5.0])\n >>> np.acosh(a)\n array([1.8309381, 2.2924316])\n\n >>> np.acosh(1)\n 0.0\n \"\"\"\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef arctanh(x, out=None, **kwargs):\n r\"\"\"\n Inverse hyperbolic tangent, element-wise.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n\n Returns\n -------\n arctanh : ndarray\n Array of the same shape as `x`.\n This is a scalar if `x` is a scalar.\n\n .. note::\n `arctanh` is a multivalued function: for each `x` there are infinitely\n many numbers `z` such that `tanh(z) = x`.\n\n For real-valued input data types, `arctanh` always returns real output.\n For each value that cannot be expressed as a real number or infinity, it\n yields ``nan`` and sets the `invalid` floating point error flag.\n\n This function differs from the original numpy.arctanh in the following aspects:\n\n * Do not support `where`, a parameter in numpy which indicates where to calculate.\n * Do not support complex-valued input.\n * Cannot cast type automatically. Dtype of `out` must be same as the expected one.\n * Cannot broadcast automatically. Shape of `out` must be same as the expected one.\n * If `x` is plain python numeric, the result won't be stored in out.\n\n Examples\n --------\n >>> a = np.array([0.0, -0.5])\n >>> np.arctanh(a)\n array([0., -0.54930615])\n\n >>> np.arctanh(1)\n 0.0\n \"\"\"\n return _mx_nd_np.arctanh(x, out=out, **kwargs)\n\natanh = arctanh\natanh.__doc__ = \"\"\"\n Inverse hyperbolic tangent, element-wise.\n\n >>>np.atanh is np.arctanh\n True\n\n Parameters\n ----------\n x : ndarray or scalar\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n\n Returns\n -------\n atanh : ndarray\n Array of the same shape as `x`.\n This is a scalar if `x` is a scalar.\n\n .. note::\n `atanh` is a alias for `arctanh`. It is a standard API in\n https://data-apis.org/array-api/latest/API_specification/elementwise_functions.html#atanh-x\n instead of an official NumPy operator.\n \n `atanh` is a multivalued function: for each `x` there are infinitely\n many numbers `z` such that `tanh(z) = x`.\n\n For real-valued input data types, `atanh` always returns real output.\n For each value that cannot be expressed as a real number or infinity, it\n yields ``nan`` and sets the `invalid` floating point error flag.\n\n This function differs from the original numpy.arctanh in the following aspects:\n\n * Do not support `where`, a parameter in numpy which indicates where to calculate.\n * Do not support complex-valued input.\n * Cannot cast type automatically. Dtype of `out` must be same as the expected one.\n * Cannot broadcast automatically. 
Shape of `out` must be same as the expected one.\n * If `x` is plain python numeric, the result won't be stored in out.\n\n Examples\n --------\n >>> a = np.array([0.0, -0.5])\n >>> np.atanh(a)\n array([0., -0.54930615])\n\n >>> np.atanh(1)\n 0.0\n \"\"\"\n\n\n@set_module('mxnet.numpy')\n@wrap_sort_functions\ndef argsort(a, axis=-1, descending=False, stable=True):\n \"\"\"\n Returns the indices that sort an array `x` along a specified axis.\n\n Notes\n -----\n `argsort` is a standard API in\n https://data-apis.org/array-api/latest/API_specification/sorting_functions.html#argsort-x-axis-1-descending-false-stable-true\n instead of an official NumPy operator.\n\n Parameters\n ----------\n a : ndarray\n Array to sort.\n axis : int or None, optional\n Axis along which to sort. The default is -1 (the last axis). If None,\n the flattened array is used.\n descending : bool, optional\n sort order. If `True`, the returned indices sort x in descending order (by value).\n If `False`, the returned indices sort x in ascending order (by value).Default: False.\n stable : bool, optional\n sort stability. If `True`, the returned indices must maintain the relative order\n of x values which compare as equal. If `False`, the returned indices may or may not\n maintain the relative order of x values which compare as equal. Default: True.\n\n Returns\n -------\n index_array : ndarray, int\n Array of indices that sort `a` along the specified `axis`.\n If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.\n More generally, ``np.take_along_axis(a, index_array, axis=axis)``\n always yields the sorted `a`, irrespective of dimensionality.\n\n Notes\n -----\n This operator does not support different sorting algorithms.\n\n Examples\n --------\n One dimensional array:\n\n >>> x = np.array([3, 1, 2])\n >>> np.argsort(x)\n array([1, 2, 0])\n\n Two-dimensional array:\n\n >>> x = np.array([[0, 3], [2, 2]])\n >>> x\n array([[0, 3],\n [2, 2]])\n >>> ind = np.argsort(x, axis=0) # sorts along first axis (down)\n >>> ind\n array([[0, 1],\n [1, 0]])\n >>> np.take_along_axis(x, ind, axis=0) # same as np.sort(x, axis=0)\n array([[0, 2],\n [2, 3]])\n >>> ind = np.argsort(x, axis=1) # sorts along last axis (across)\n >>> ind\n array([[0, 1],\n [0, 1]])\n >>> np.take_along_axis(x, ind, axis=1) # same as np.sort(x, axis=1)\n array([[0, 3],\n [2, 2]])\n\n Indices of the sorted elements of a N-dimensional array:\n\n >>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape)\n >>> ind\n (array([0, 1, 1, 0]), array([0, 0, 1, 1]))\n >>> x[ind] # same as np.sort(x, axis=None)\n array([0, 2, 2, 3])\n \"\"\"\n if stable:\n warnings.warn(\"Currently, MXNet only support quicksort in backend, which is not stable\")\n return _mx_nd_np.argsort(a, axis=axis, descending=descending)\n\n\n@set_module('mxnet.numpy')\n@wrap_sort_functions\ndef sort(a, axis=-1, descending=False, stable=True):\n \"\"\"\n Return a sorted copy of an array.\n\n Notes\n -----\n `sort` is a standard API in\n https://data-apis.org/array-api/latest/API_specification/sorting_functions.html#sort-x-axis-1-descending-false-stable-true\n instead of an official NumPy operator.\n\n Parameters\n ----------\n a : ndarray\n Array to sort.\n axis : int or None, optional\n Axis along which to sort. The default is -1 (the last axis). If None,\n the flattened array is used.\n descending : bool, optional\n sort order. 
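# --- Added usage sketch (not part of the original docstrings) ---
# Hedged sketch of the array-api style `descending` flag described above,
# assuming `from mxnet import np`. For a 1-D array without ties, a descending
# argsort simply reverses the ascending order.
from mxnet import np

x = np.array([3., 1., 2.])
idx = np.argsort(x, descending=True)
print(idx)                             # indices of 3., 2., 1. -> [0, 2, 1]
print(x[idx])                          # array([3., 2., 1.])
print(np.sort(x, descending=True))     # array([3., 2., 1.])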
If `True`, the returned indices sort x in descending order (by value).\n If `False`, the returned indices sort x in ascending order (by value).Default: False.\n stable : bool, optional\n sort stability. If `True`, the returned indices must maintain the relative order\n of x values which compare as equal. If `False`, the returned indices may or may not\n maintain the relative order of x values which compare as equal. Default: True.\n\n Returns\n -------\n sorted_array : ndarray\n Array of the same type and shape as `a`.\n\n Notes\n -----\n This operator does not support different sorting algorithms.\n\n Examples\n --------\n >>> a = np.array([[1,4],[3,1]])\n >>> np.sort(a) # sort along the last axis\n array([[1, 4],\n [1, 3]])\n >>> np.sort(a, axis=None) # sort the flattened array\n array([1, 1, 3, 4])\n >>> np.sort(a, axis=0) # sort along the first axis\n array([[1, 1],\n [3, 4]])\n \"\"\"\n return _mx_nd_np.sort(a, axis=axis, descending=descending)\n\n\n@set_module('mxnet.numpy')\ndef tensordot(a, b, axes=2):\n r\"\"\"Compute tensor dot product along specified axes for arrays >= 1-D.\n Given two tensors (arrays of dimension greater than or equal to one),\n ``a`` and ``b``, and an ndarray object containing two ndarray\n objects, ``(a_axes, b_axes)``, sum the products of ``a``'s and ``b``'s\n elements (components) over the axes specified by ``a_axes`` and\n ``b_axes``. The third argument can be a single non-negative\n integer_like scalar, ``N``; if it is such, then the last ``N``\n dimensions of ``a`` and the first ``N`` dimensions of ``b`` are summed\n over.\n\n Parameters\n ----------\n a, b : ndarray, len(shape) >= 1\n Tensors to \"dot\".\n axes : int or (2,) ndarray\n\n * integer_like\n If an int N, sum over the last N axes of `a` and the first N axes\n of `b` in order. The sizes of the corresponding axes must match.\n * (2,) ndarray\n Or, a list of axes to be summed over, first sequence applying to `a`,\n second to `b`. Both elements ndarray must be of the same length.\n\n See Also\n --------\n dot, einsum\n\n .. note::\n\n Three common use cases are:\n\n * ``axes = 0`` : tensor product :math:`a\\otimes b`\n * ``axes = 1`` : tensor dot product :math:`a\\cdot b`\n * ``axes = 2`` : (default) tensor double contraction :math:`a:b`\n When `axes` is integer_like, the sequence for evaluation will be: first\n the -Nth axis in `a` and 0th axis in `b`, and the -1th axis in `a` and\n Nth axis in `b` last.\n When there is more than one axis to sum over - and they are not the last\n (first) axes of `a` (`b`) - the argument `axes` should consist of\n two sequences of the same length, with the first axis to sum over given\n first in both sequences, the second axis second, and so forth.\n\n Examples\n --------\n >>> a = np.arange(60.).reshape(3,4,5)\n >>> b = np.arange(24.).reshape(4,3,2)\n >>> c = np.tensordot(a,b, axes=([1,0],[0,1]))\n >>> c.shape\n (5, 2)\n >>> c\n array([[ 4400., 4730.],\n [ 4532., 4874.],\n [ 4664., 5018.],\n [ 4796., 5162.],\n [ 4928., 5306.]])\n \"\"\"\n return _mx_nd_np.tensordot(a, b, axes)\n\n\n@set_module('mxnet.numpy')\ndef histogram(a, bins=10, range=None, normed=None, weights=None, density=None): # pylint: disable=too-many-arguments\n \"\"\"\n Compute the histogram of a set of data.\n\n Parameters\n ----------\n a : ndarray\n Input data. The histogram is computed over the flattened array.\n bins : int or ndarray\n If `bins` is an int, it defines the number of equal-width\n bins in the given range (10, by default). 
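# --- Added usage sketch (not part of the original docstrings) ---
# A small sketch of the integer `axes` convention for tensordot documented
# above, assuming `from mxnet import np`: axes=1 contracts the last axis of `a`
# with the first axis of `b`, which reduces to an ordinary matrix product here.
from mxnet import np

a = np.arange(6.).reshape(2, 3)
b = np.arange(12.).reshape(3, 4)
c = np.tensordot(a, b, axes=1)
print(c.shape)                            # (2, 4)
print(bool((c == np.dot(a, b)).all()))    # True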
If `bins` is a\n sequence, it defines a monotonically increasing array of bin edges,\n including the rightmost edge, allowing for non-uniform bin widths.\n .. versionadded:: 1.11.0\n If `bins` is a string, it defines the method used to calculate the\n optimal bin width, as defined by `histogram_bin_edges`.\n range : (float, float)\n The lower and upper range of the bins. Required when `bins` is an integer.\n Values outside the range are ignored. The first element of the range must\n be less than or equal to the second.\n normed : bool, optional\n Not supported yet, coming soon.\n weights : array_like, optional\n Not supported yet, coming soon.\n density : bool, optional\n Not supported yet, coming soon.\n\n Examples\n --------\n >>> np.histogram(np.arange(4), bins=np.arange(5))\n [array([1, 1, 1, 1], dtype=int64), array([0., 1., 2., 3., 4.])]\n \"\"\"\n return _mx_nd_np.histogram(a, bins=bins, range=range, normed=normed, weights=weights, density=density)\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.numpy')\n@wrap_ctx_to_device_func\ndef eye(N, M=None, k=0, dtype=None, device=None, **kwargs):\n \"\"\"\n Return a 2-D array with ones on the diagonal and zeros elsewhere.\n\n Parameters\n ----------\n N : int\n Number of rows in the output.\n M : int, optional\n Number of columns in the output. If None, defaults to N.\n k : int, optional\n Index of the diagonal: 0 (the default) refers to the main diagonal,\n a positive value refers to an upper diagonal,\n and a negative value to a lower diagonal.\n dtype : data-type, optional\n Data-type of the returned array.\n When npx.is_np_default_dtype() returns False, default dtype is float32;\n When npx.is_np_default_dtype() returns True, default dtype is float64.\n device : Device, optional\n Device context on which the memory is allocated. Default is\n `mxnet.device.current_device()`.\n\n Returns\n -------\n I : ndarray of shape (N,M)\n An array where all elements are equal to zero,\n except for the k-th diagonal, whose values are equal to one.\n\n Examples\n --------\n >>> np.eye(2, dtype=int)\n array([[1, 0],\n [0, 1]], dtype=int64)\n >>> np.eye(3, k=1)\n array([[0., 1., 0.],\n [0., 0., 1.],\n [0., 0., 0.]])\n \"\"\"\n return _mx_nd_np.eye(N, M, k, dtype, device=device, **kwargs)\n# pylint: enable=redefined-outer-name\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.numpy')\n@wrap_ctx_to_device_func\ndef linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0, device=None): # pylint: disable=too-many-arguments\n r\"\"\"\n Return evenly spaced numbers over a specified interval.\n\n Returns num evenly spaced samples, calculated over the interval [start, stop].\n The endpoint of the interval can optionally be excluded.\n\n Parameters\n ----------\n start : int or float\n The starting value of the sequence.\n stop : int or float\n The end value of the sequence, unless endpoint is set to False. In\n that case, the sequence consists of all but the last of num + 1\n evenly spaced samples, so that stop is excluded. Note that the step\n size changes when endpoint is False.\n num : int, optional\n Number of samples to generate. Default is 50. Must be non-negative.\n endpoint : bool, optional\n If True, stop is the last sample. Otherwise, it is not included.\n Default is True.\n retstep : bool, optional\n If True, return (samples, step), where step is the spacing between samples.\n dtype : dtype, optional\n The type of the output array. 
If dtype is not given, infer the data\n type from the other input arguments.\n axis : int, optional\n The axis in the result to store the samples. Relevant only if start or\n stop are array-like. By default (0), the samples will be along a new\n axis inserted at the beginning. Use -1 to get an axis at the end.\n device : Device, optional\n Device context on which the memory is allocated. Default is\n `mxnet.device.current_device()`.\n\n Returns\n -------\n samples : ndarray\n There are num equally spaced samples in the closed interval\n `[start, stop]` or the half-open interval `[start, stop)`\n (depending on whether endpoint is True or False).\n step : float, optional\n Only returned if retstep is True\n Size of spacing between samples.\n\n\n See Also\n --------\n arange : Similar to `linspace`, but uses a step size (instead of the\n number of samples).\n\n Examples\n --------\n >>> np.linspace(2.0, 3.0, num=5)\n array([2. , 2.25, 2.5 , 2.75, 3. ])\n >>> np.linspace(2.0, 3.0, num=5, endpoint=False)\n array([2. , 2.2, 2.4, 2.6, 2.8])\n >>> np.linspace(2.0, 3.0, num=5, retstep=True)\n (array([2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)\n\n Graphical illustration:\n\n >>> import matplotlib.pyplot as plt\n >>> N = 8\n >>> y = np.zeros(N)\n >>> x1 = np.linspace(0, 10, N, endpoint=True)\n >>> x2 = np.linspace(0, 10, N, endpoint=False)\n >>> plt.plot(x1.asnumpy(), y.asnumpy(), 'o')\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.plot(x2.asnumpy(), (y + 0.5).asnumpy(), 'o')\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.ylim([-0.5, 1])\n (-0.5, 1)\n >>> plt.show()\n\n .. note::\n\n This function differs from the original `numpy.linspace\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html>`_ in\n the following aspects:\n\n * `start` and `stop` do not support list, numpy ndarray and mxnet ndarray\n * axis could only be 0\n * There could be an additional `device` argument to specify the device, e.g. the i-th\n GPU.\n \"\"\"\n return _mx_nd_np.linspace(start, stop, num, endpoint, retstep, dtype, axis, device)\n# pylint: enable=redefined-outer-name\n\n\n# pylint: disable=too-many-arguments, redefined-outer-name\n@set_module('mxnet.numpy')\n@wrap_ctx_to_device_func\ndef logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0, device=None):\n r\"\"\"Return numbers spaced evenly on a log scale.\n\n In linear space, the sequence starts at ``base ** start``\n (`base` to the power of `start`) and ends with ``base ** stop``\n (see `endpoint` below).\n\n Non-scalar `start` and `stop` are now supported.\n\n Parameters\n ----------\n start : int or float\n ``base ** start`` is the starting value of the sequence.\n stop : int or float\n ``base ** stop`` is the final value of the sequence, unless `endpoint`\n is False. In that case, ``num + 1`` values are spaced over the\n interval in log-space, of which all but the last (a sequence of\n length `num`) are returned.\n num : integer, optional\n Number of samples to generate. Default is 50.\n endpoint : boolean, optional\n If true, `stop` is the last sample. Otherwise, it is not included.\n Default is True.\n base : float, optional\n The base of the log space. The step size between the elements in\n ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.\n Default is 10.0.\n dtype : dtype\n The type of the output array. If `dtype` is not given, infer the data\n type from the other input arguments.\n axis : int, optional\n The axis in the result to store the samples. 
Relevant only if start\n or stop are array-like. By default (0), the samples will be along a\n new axis inserted at the beginning. Now, axis only support axis = 0.\n device : Device, optional\n Device context on which the memory is allocated. Default is\n `mxnet.device.current_device()`.\n\n Returns\n -------\n samples : ndarray\n `num` samples, equally spaced on a log scale.\n\n See Also\n --------\n arange : Similar to linspace, with the step size specified instead of the\n number of samples. Note that, when used with a float endpoint, the\n endpoint may or may not be included.\n linspace : Similar to logspace, but with the samples uniformly distributed\n in linear space, instead of log space.\n\n Notes\n -----\n Logspace is equivalent to the code\n\n >>> y = np.linspace(start, stop, num=num, endpoint=endpoint)\n ...\n >>> power(base, y).astype(dtype)\n ...\n\n Examples\n --------\n >>> np.logspace(2.0, 3.0, num=4)\n array([ 100. , 215.44347, 464.15887, 1000. ])\n >>> np.logspace(2.0, 3.0, num=4, endpoint=False)\n array([100. , 177.82794, 316.22775, 562.3413 ])\n >>> np.logspace(2.0, 3.0, num=4, base=2.0)\n array([4. , 5.0396843, 6.349604 , 8. ])\n >>> np.logspace(2.0, 3.0, num=4, base=2.0, dtype=np.int32)\n array([4, 5, 6, 8], dtype=int32)\n >>> np.logspace(2.0, 3.0, num=4, device=npx.gpu(0))\n array([ 100. , 215.44347, 464.15887, 1000. ], device=gpu(0))\n \"\"\"\n return _mx_nd_np.logspace(start, stop, num, endpoint, base, dtype, axis, device=device)\n# pylint: enable=too-many-arguments, redefined-outer-name\n\n\n@set_module('mxnet.numpy')\ndef expand_dims(a, axis):\n \"\"\"Expand the shape of an array.\n\n Insert a new axis that will appear at the `axis` position in the expanded array shape.\n\n Parameters\n ----------\n a : ndarray\n Input array.\n axis : int\n Position in the expanded axes where the new axis is placed.\n\n Returns\n -------\n res : ndarray\n Output array. The number of dimensions is one greater than that of\n the input array.\n\n See Also\n --------\n squeeze : The inverse operation, removing singleton dimensions\n reshape : Insert, remove, and combine dimensions, and resize existing ones\n\n Examples\n --------\n >>> x = np.array([1,2])\n >>> x.shape\n (2,)\n\n >>> y = np.expand_dims(x, axis=0)\n >>> y\n array([[1., 2.]])\n\n >>> y.shape\n (1, 2)\n\n >>> y = np.expand_dims(x, axis=1) # Equivalent to x[:,np.newaxis]\n >>> y\n array([[1.],\n [2.]])\n\n >>> y.shape\n (2, 1)\n\n Note that some examples may use None instead of np.newaxis. These are the same objects:\n\n >>> np.newaxis is None\n True\n \"\"\"\n return _mx_nd_np.expand_dims(a, axis)\n\n\n@set_module('mxnet.numpy')\ndef tile(A, reps):\n r\"\"\"\n Construct an array by repeating A the number of times given by reps.\n\n If `reps` has length ``d``, the result will have dimension of\n ``max(d, A.ndim)``.\n\n If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new\n axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,\n or shape (1, 1, 3) for 3-D replication. 
If this is not the desired\n behavior, promote `A` to d-dimensions manually before calling this\n function.\n\n If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it.\n Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as\n (1, 1, 2, 2).\n\n Parameters\n ----------\n A : ndarray or scalar\n An input array or a scalar to repeat.\n reps : a single integer or tuple of integers\n The number of repetitions of `A` along each axis.\n\n Returns\n -------\n c : ndarray\n The tiled output array.\n\n Examples\n --------\n >>> a = np.array([0, 1, 2])\n >>> np.tile(a, 2)\n array([0., 1., 2., 0., 1., 2.])\n >>> np.tile(a, (2, 2))\n array([[0., 1., 2., 0., 1., 2.],\n [0., 1., 2., 0., 1., 2.]])\n >>> np.tile(a, (2, 1, 2))\n array([[[0., 1., 2., 0., 1., 2.]],\n [[0., 1., 2., 0., 1., 2.]]])\n\n >>> b = np.array([[1, 2], [3, 4]])\n >>> np.tile(b, 2)\n array([[1., 2., 1., 2.],\n [3., 4., 3., 4.]])\n >>> np.tile(b, (2, 1))\n array([[1., 2.],\n [3., 4.],\n [1., 2.],\n [3., 4.]])\n\n >>> c = np.array([1,2,3,4])\n >>> np.tile(c,(4,1))\n array([[1., 2., 3., 4.],\n [1., 2., 3., 4.],\n [1., 2., 3., 4.],\n [1., 2., 3., 4.]])\n\n Scalar as input:\n\n >>> np.tile(2, 3)\n array([2, 2, 2]) # repeating integer `2`\n\n \"\"\"\n return _mx_nd_np.tile(A, reps)\n\n\n@set_module('mxnet.numpy')\ndef trace(a, offset=0, axis1=0, axis2=1, out=None):\n \"\"\"\n Return the sum along diagonals of the array.\n If `a` is 2-D, the sum along its diagonal with the given offset\n is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i.\n If `a` has more than two dimensions, then the axes specified by axis1 and\n axis2 are used to determine the 2-D sub-arrays whose traces are returned.\n The shape of the resulting array is the same as that of `a` with `axis1`\n and `axis2` removed.\n\n Parameters\n ----------\n a : ndarray\n Input array, from which the diagonals are taken.\n offset : int, optional\n Offset of the diagonal from the main diagonal. Can be both positive\n and negative. Defaults to 0.\n axis1, axis2 : int, optional\n Axes to be used as the first and second axis of the 2-D sub-arrays\n from which the diagonals should be taken. Defaults are the first two\n axes of `a`.\n out : ndarray, optional\n Array into which the output is placed. It must be of the right shape\n and right type to hold the output.\n\n Returns\n -------\n sum_along_diagonals : ndarray\n If `a` is 2-D, the sum along the diagonal is returned. If `a` has\n larger dimensions, then an array of sums along diagonals is returned.\n\n Examples\n --------\n >>> a = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n >>> np.trace(a)\n array(3.)\n >>> a = np.arange(8).reshape((2, 2, 2))\n >>> np.trace(a)\n array([6., 8.])\n >>> a = np.arange(24).reshape((2, 2, 2, 3))\n >>> np.trace(a).shape\n (2, 3)\n \"\"\"\n return _mx_nd_np.trace(a, offset, axis1, axis2, out)\n\n\n@set_module('mxnet.numpy')\ndef transpose(a, axes=None):\n \"\"\"\n Permute the dimensions of an array.\n\n Parameters\n ----------\n a : ndarray\n Input array.\n axes : list of ints, optional\n By default, reverse the dimensions,\n otherwise permute the axes according to the values given.\n\n Returns\n -------\n p : ndarray\n a with its axes permuted.\n\n .. 
note::\n\n This function differs from the original `numpy.transpose\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.transpose.html>`_ in\n the following way(s):\n\n * only ndarray is accepted as valid input, python iterables are not supported\n * the operator always returns an `ndarray` that does not share the memory with the input\n\n Examples\n --------\n >>> x = np.arange(4).reshape((2,2))\n >>> x\n array([[0., 1.],\n [2., 3.]])\n >>> np.transpose(x)\n array([[0., 2.],\n [1., 3.]])\n >>> x = np.ones((1, 2, 3))\n >>> np.transpose(x, (1, 0, 2)).shape\n (2, 1, 3)\n \"\"\"\n return _mx_nd_np.transpose(a, axes)\n\n\n@set_module('mxnet.numpy')\ndef permute_dims(a, axes=None):\n \"\"\"\n Permute the dimensions of an array.\n\n Parameters\n ----------\n a : ndarray\n Input array.\n axes : list of ints, optional\n By default, reverse the dimensions,\n otherwise permute the axes according to the values given.\n\n Returns\n -------\n p : ndarray\n a with its axes permuted.\n\n Note\n --------\n `permute_dims` is a alias for `transpose`. It is a standard API in\n https://data-apis.org/array-api/latest/API_specification/manipulation_functions.html#permute-dims-x-axes\n instead of an official NumPy operator.\n\n Examples\n --------\n >>> x = np.arange(4).reshape((2,2))\n >>> x\n array([[0., 1.],\n [2., 3.]])\n >>> np.permute_dims(x)\n array([[0., 2.],\n [1., 3.]])\n >>> x = np.ones((1, 2, 3))\n >>> np.permute_dims(x, (1, 0, 2)).shape\n (2, 1, 3)\n \"\"\"\n return _mx_nd_np.transpose(a, axes)\n\n\n@set_module('mxnet.numpy')\ndef repeat(a, repeats, axis=None):\n \"\"\"\n Repeat elements of an array.\n\n Parameters\n ----------\n a : array_like\n Input array.\n repeats : int\n The number of repetitions for each element.\n axis : int, optional\n The axis along which to repeat values. By default, use the\n flattened input array, and return a flat output array.\n\n Returns\n -------\n repeated_array : ndarray\n Output array which has the same shape as `a`, except along\n the given axis.\n\n See Also\n --------\n tile : Tile an array.\n\n Examples\n --------\n >>> np.repeat(3, 4)\n array([3, 3, 3, 3])\n >>> x = np.array([[1,2],[3,4]])\n >>> np.repeat(x, 2)\n array([1, 1, 2, 2, 3, 3, 4, 4])\n >>> np.repeat(x, 3, axis=1)\n array([[1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4]])\n >>> np.repeat(x, [1, 2], axis=0)\n array([[1, 2],\n [3, 4],\n [3, 4]])\n \"\"\"\n return _mx_nd_np.repeat(a, repeats, axis)\n\n\n@set_module('mxnet.numpy')\ndef tril(m, k=0):\n r\"\"\"\n Lower triangle of an array.\n\n Return a copy of an array with elements above the `k`-th diagonal zeroed.\n\n Parameters\n ----------\n m : ndarray, shape (M, N)\n Input array.\n k : int, optional\n Diagonal above which to zero elements. 
`k = 0` (the default) is the\n main diagonal, `k < 0` is below it and `k > 0` is above.\n\n Returns\n -------\n tril : ndarray, shape (M, N)\n Lower triangle of `m`, of same shape and data-type as `m`.\n\n See Also\n --------\n triu : same thing, only for the upper triangle\n\n Examples\n --------\n >>> a = np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]])\n >>> np.tril(a, -1)\n array([[ 0., 0., 0.],\n [ 4., 0., 0.],\n [ 7., 8., 0.],\n [10., 11., 12.]])\n \"\"\"\n return _mx_nd_np.tril(m, k)\n\n\n@set_module('mxnet.numpy')\n@wrap_ctx_to_device_func\ndef tri(N, M=None, k=0, dtype=None, device=None): # pylint: disable=redefined-outer-name\n r\"\"\"\n An array with ones at and below the given diagonal and zeros elsewhere.\n Parameters\n ----------\n N : int\n Number of rows in the array.\n M : int, optional\n Number of columns in the array.\n By default, `M` is taken equal to `N`.\n k : int, optional\n The sub-diagonal at and below which the array is filled.\n `k` = 0 is the main diagonal, while `k` < 0 is below it,\n and `k` > 0 is above. The default is 0.\n dtype : dtype, optional\n Data type of the returned array. The default is float.\n Returns\n -------\n tri : ndarray of shape (N, M)\n Array with its lower triangle filled with ones and zero elsewhere;\n in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise.\n Examples\n --------\n >>> np.tri(3, 5, 2, dtype=int)\n array([[1, 1, 1, 0, 0],\n [1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1]])\n >>> np.tri(3, 5, -1)\n array([[0., 0., 0., 0., 0.],\n [1., 0., 0., 0., 0.],\n [1., 1., 0., 0., 0.]])\n \"\"\"\n return _mx_nd_np.tri(N, M, k, dtype, device)\n\n\n@set_module('mxnet.numpy')\ndef triu_indices(n, k=0, m=None, device=None): # pylint: disable=redefined-outer-name\n r\"\"\"\n Return the indices for the upper-triangle of an (n, m) array.\n Parameters\n ----------\n n : int\n The size of the arrays for which the returned indices will\n be valid.\n k : int, optional\n Diagonal offset (see `triu` for details).\n m : int, optional\n .. versionadded:: 1.9.0\n The column dimension of the arrays for which the returned\n arrays will be valid.\n By default `m` is taken equal to `n`.\n Returns\n -------\n inds : tuple, shape(2) of ndarrays, shape(`n`)\n The indices for the triangle. The returned tuple contains two arrays,\n each with the indices along one dimension of the array. 
Can be used\n to slice a ndarray of shape(`n`, `n`).\n See also\n --------\n tril_indices : similar function, for lower-triangular.\n mask_indices : generic function accepting an arbitrary mask function.\n triu, tril\n Examples\n --------\n Compute two different sets of indices to access 4x4 arrays, one for the\n upper triangular part starting at the main diagonal, and one starting two\n diagonals further right:\n >>> iu1 = np.triu_indices(4)\n >>> iu2 = np.triu_indices(4, 2)\n Here is how they can be used with a sample array:\n >>> a = np.arange(16).reshape(4, 4)\n >>> a\n array([[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11],\n [12, 13, 14, 15]])\n Both for indexing:\n >>> a[iu1]\n array([ 0, 1, 2, ..., 10, 11, 15])\n And for assigning values:\n >>> a[iu1] = -1\n >>> a\n array([[-1, -1, -1, -1],\n [ 4, -1, -1, -1],\n [ 8, 9, -1, -1],\n [12, 13, 14, -1]])\n These cover only a small part of the whole array (two diagonals right\n of the main one):\n >>> a[iu2] = -10\n >>> a\n array([[ -1, -1, -10, -10],\n [ 4, -1, -1, -10],\n [ 8, 9, -1, -1],\n [ 12, 13, 14, -1]])\n \"\"\"\n return _mx_nd_np.triu_indices(n, k, m, device)\n\n\n@set_module('mxnet.numpy')\ndef triu_indices_from(arr, k=0):\n \"\"\"\n Return the indices for the upper-triangle of arr.\n See `triu_indices` for full details.\n Parameters\n ----------\n arr : ndarray, shape(N, N)\n The indices will be valid for square arrays.\n k : int, optional\n Diagonal offset (see `triu` for details).\n Returns\n -------\n triu_indices_from : tuple, shape(2) of ndarray, shape(N)\n Indices for the upper-triangle of `arr`.\n See Also\n --------\n triu_indices, triu\n \"\"\"\n return _mx_nd_np.triu_indices_from(arr, k)\n\n\n@set_module('mxnet.numpy')\ndef tril_indices(n, k=0, m=None):\n \"\"\"\n Return the indices for the lower-triangle of an (n, m) array.\n\n Parameters\n ----------\n n : int\n The row dimension of the arrays for which the returned\n indices will be valid.\n k : int, optional\n Diagonal offset (see `tril` for details).\n m : int, optional\n .. versionadded:: 1.9.0\n\n The column dimension of the arrays for which the returned\n arrays will be valid.\n By default `m` is taken equal to `n`.\n\n Returns\n -------\n inds : tuple of arrays\n The indices for the triangle. 
The returned tuple contains two arrays,\n each with the indices along one dimension of the array.\n\n See also\n --------\n triu_indices : similar function, for upper-triangular.\n mask_indices : generic function accepting an arbitrary mask function.\n tril, triu\n\n Examples\n --------\n Compute two different sets of indices to access 4x4 arrays, one for the\n lower triangular part starting at the main diagonal, and one starting two\n diagonals further right:\n\n >>> il1 = np.tril_indices(4)\n >>> il2 = np.tril_indices(4, 2)\n\n Here is how they can be used with a sample array:\n\n >>> a = np.arange(16).reshape(4, 4)\n >>> a\n array([[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11],\n [12, 13, 14, 15]])\n\n Both for indexing:\n\n >>> a[il1]\n array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])\n\n And for assigning values:\n\n >>> a[il1] = -1\n >>> a\n array([[-1, 1, 2, 3],\n [-1, -1, 6, 7],\n [-1, -1, -1, 11],\n [-1, -1, -1, -1]])\n\n These cover almost the whole array (two diagonals right of the main one):\n\n >>> a[il2] = -10\n >>> a\n array([[-10, -10, -10, 3],\n [-10, -10, -10, -10],\n [-10, -10, -10, -10],\n [-10, -10, -10, -10]])\n\n \"\"\"\n if m is None:\n m = n\n return _mx_nd_np.tril_indices(n, k, m)\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.numpy')\ndef triu(m, k=0):\n r\"\"\"\n Upper triangle of an array.\n\n Return a copy of a matrix with the elements below the `k`-th diagonal\n zeroed.\n\n Please refer to the documentation for `tril` for further details.\n\n See Also\n --------\n tril : lower triangle of an array\n\n Examples\n --------\n >>> np.triu(np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]]), -1)\n array([[ 1, 2, 3],\n [ 4, 5, 6],\n [ 0, 8, 9],\n [ 0, 0, 12]])\n \"\"\"\n return _mx_nd_np.triu(m, k)\n\n\n@set_module('mxnet.numpy')\n@wrap_ctx_to_device_func\ndef arange(start, stop=None, step=1, dtype=None, device=None):\n \"\"\"Return evenly spaced values within a given interval.\n\n Values are generated within the half-open interval ``[start, stop)``\n (in other words, the interval including `start` but excluding `stop`).\n For integer arguments the function is equivalent to the Python built-in\n `range` function, but returns an ndarray rather than a list.\n\n Parameters\n ----------\n start : number, optional\n Start of interval. The interval includes this value. The default\n start value is 0.\n stop : number\n End of interval. The interval does not include this value, except\n in some cases where `step` is not an integer and floating point\n round-off affects the length of `out`.\n step : number, optional\n Spacing between values. For any output `out`, this is the distance\n between two adjacent values, ``out[i+1] - out[i]``. The default\n step size is 1. If `step` is specified as a position argument,\n `start` must also be given.\n dtype : dtype\n The type of the output array.\n Default dtype can be set to be consistent with offical numpy by `npx.set_np(dtype=True)`.\n * When npx.is_np_default_dtype() returns False, default dtype is float32;\n * When npx.is_np_default_dtype() returns True, default dtype is int64.\n device : device context, optional\n Device context on which the memory is allocated. Default is\n `mxnet.device.current_device()`.\n\n Returns\n -------\n arange : ndarray\n Array of evenly spaced values.\n\n For floating point arguments, the length of the result is\n ``ceil((stop - start)/step)``. 
Because of floating point overflow,\n this rule may result in the last element of `out` being greater\n than `stop`.\n\n Examples\n --------\n >>> np.arange(3)\n array([0., 1., 2.])\n\n >>> np.arange(3.0)\n array([0., 1., 2.])\n\n >>> np.arange(3,7)\n array([3., 4., 5., 6.])\n\n >>> np.arange(3,7,2)\n array([3., 5.])\n\n >>> np.arange(3).dtype\n dtype('float32')\n >>> npx.set_np(dtype=True)\n >>> np.arange(3).dtype\n dtype('int64')\n \"\"\"\n return _mx_nd_np.arange(start, stop, step, dtype, device)\n# pylint: enable=redefined-outer-name\n\n\n@set_module('mxnet.numpy')\ndef split(ary, indices_or_sections, axis=0):\n \"\"\"Split an array into multiple sub-arrays.\n\n Parameters\n ----------\n ary : ndarray\n Array to be divided into sub-arrays.\n indices_or_sections : int or 1-D Python tuple, list or set.\n If `indices_or_sections` is an integer, N, the array will be divided\n into N equal arrays along `axis`. If such a split is not possible,\n an error is raised.\n If `indices_or_sections` is a 1-D array of sorted integers, the entries\n indicate where along `axis` the array is split. For example,\n ``[2, 3]`` would, for ``axis=0``, result in\n\n * ary[:2]\n * ary[2:3]\n * ary[3:]\n\n If an index exceeds the dimension of the array along `axis`,\n an empty sub-array is returned correspondingly.\n axis : int, optional\n The axis along which to split, default is 0.\n\n Returns\n -------\n sub-arrays : list of ndarrays\n A list of sub-arrays.\n\n Raises\n ------\n ValueError\n If `indices_or_sections` is given as an integer, but\n a split does not result in equal division.\n\n See Also\n --------\n hsplit : Split array into multiple sub-arrays horizontally (column-wise).\n vsplit : Split array into multiple sub-arrays vertically (row wise).\n dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).\n concatenate : Join a sequence of arrays along an existing axis.\n stack : Join a sequence of arrays along a new axis.\n hstack : Stack arrays in sequence horizontally (column wise).\n vstack : Stack arrays in sequence vertically (row wise).\n dstack : Stack arrays in sequence depth wise (along third dimension).\n\n Examples\n --------\n >>> x = np.arange(9.0)\n >>> np.split(x, 3)\n [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])]\n\n >>> np.split(x, [3, 5, 6, 8])\n [array([0., 1., 2.]), array([3., 4.]), array([5.]), array([6., 7.]), array([])]\n \"\"\"\n return _mx_nd_np.split(ary, indices_or_sections, axis=axis)\n\n\n@set_module('mxnet.numpy')\ndef array_split(ary, indices_or_sections, axis=0):\n \"\"\"Split an array into multiple sub-arrays.\n\n If `indices_or_sections` is an integer, N, the array will be divided\n into N equal arrays along `axis`. Unlike `split`, an equal division does not\n have to be possible: for an array of length l that should be split into\n n sections, it returns l % n sub-arrays of size l//n + 1 and the rest of size l//n.\n\n If `indices_or_sections` is a 1-D array of sorted integers, the entries\n indicate where along `axis` the array is split. 
For example, ``[2, 3]``\n would, for ``axis=0``, result in\n * ary[:2]\n * ary[2:3]\n * ary[3:]\n\n If an index exceeds the dimension of the array along `axis`,\n an empty sub-array is returned correspondingly.\n\n Parameters\n ----------\n ary : ndarray\n Array to be divided into sub-arrays.\n indices_or_sections : int or 1-D Python tuple, list or set.\n Param used to determine the number and size of the subarray.\n axis : int, optional\n The axis along which to split, default is 0.\n\n Returns\n -------\n sub-arrays : list of ndarrays\n A list of sub-arrays.\n\n Examples\n --------\n >>> x = np.arange(9.0)\n >>> np.array_split(x, 3)\n [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])]\n\n >>> np.array_split(x, [3, 5, 6, 8])\n [array([0., 1., 2.]), array([3., 4.]), array([5.]), array([6., 7.]), array([])]\n\n >>> x = np.arange(8.0)\n >>> np.array_split(x, 3)\n [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])]\n\n >>> x = np.arange(7.0)\n >>> np.array_split(x, 3)\n [array([0., 1., 2.]), array([3., 4.]), array([5., 6.])]\n \"\"\"\n return _mx_nd_np.array_split(ary, indices_or_sections, axis=axis)\n\n\n@set_module('mxnet.numpy')\ndef vsplit(ary, indices_or_sections):\n r\"\"\"Split an array into multiple sub-arrays vertically (row-wise).\n\n ``vsplit`` is equivalent to ``split`` with `axis=0` (default): the array is always split\n along the first axis regardless of the array dimension.\n\n Parameters\n ----------\n ary : ndarray\n Array to be divided into sub-arrays.\n indices_or_sections : int or 1 - D Python tuple, list or set.\n If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays\n along axis 0. If such a split is not possible, an error is raised.\n\n If `indices_or_sections` is a 1-D array of sorted integers, the entries indicate where\n along axis 0 the array is split. For example, ``[2, 3]`` would result in\n\n * ary[:2]\n * ary[2:3]\n * ary[3:]\n\n If an index exceeds the dimension of the array along axis 0, an error will be thrown.\n\n Returns\n -------\n sub-arrays : list of ndarrays\n A list of sub-arrays.\n\n See Also\n --------\n split : Split an array into multiple sub-arrays of equal size.\n\n .. note::\n This function differs from the original `numpy.vsplit\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.vsplit.html>`_ in\n the following aspects:\n\n * Currently parameter ``indices_or_sections`` does not support ndarray, but supports scalar,\n tuple and list.\n * In ``indices_or_sections``, if an index exceeds the dimension of the array along axis 0,\n an error will be thrown.\n\n\n Examples\n --------\n >>> x = np.arange(16.0).reshape(4, 4)\n >>> x\n array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.],\n [ 12., 13., 14., 15.]])\n >>> np.vsplit(x, 2)\n [array([[0., 1., 2., 3.],\n [4., 5., 6., 7.]]), array([[ 8., 9., 10., 11.],\n [12., 13., 14., 15.]])]\n\n >>> # With a higher dimensional array the split is still along the first axis.\n >>> x = np.arange(8.0).reshape(2, 2, 2)\n >>> x\n array([[[ 0., 1.],\n [ 2., 3.]],\n [[ 4., 5.],\n [ 6., 7.]]])\n >>> np.vsplit(x, 2)\n [array([[[0., 1.],\n [2., 3.]]]), array([[[4., 5.],\n [6., 7.]]])]\n\n \"\"\"\n return _mx_nd_np.vsplit(ary, indices_or_sections)\n\n\n@set_module('mxnet.numpy')\ndef dsplit(ary, indices_or_sections):\n r\"\"\"\n Split array into multiple sub-arrays along the 3rd axis (depth).\n Please refer to the `split` documentation. 
`dsplit` is equivalent\n to `split` with ``axis=2``; the array is always split along the third\n axis provided the array dimension is greater than or equal to 3.\n\n Parameters\n ----------\n ary : ndarray\n Array to be divided into sub-arrays.\n indices_or_sections : int or 1 - D Python tuple, list or set.\n If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays\n along axis 2. If such a split is not possible, an error is raised.\n\n If `indices_or_sections` is a 1-D array of sorted integers, the entries indicate where\n along axis 2 the array is split. For example, ``[2, 3]`` would result in\n\n * ary[:, :, :2]\n * ary[:, :, 2:3]\n * ary[:, :, 3:]\n\n If an index exceeds the dimension of the array along axis 2, an error will be thrown.\n\n Returns\n -------\n sub-arrays : list of ndarrays\n A list of sub-arrays.\n\n See Also\n --------\n split : Split an array into multiple sub-arrays of equal size.\n\n .. note::\n This function differs from the original `numpy.dsplit\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.dsplit.html>`_ in\n the following aspects:\n * Currently parameter ``indices_or_sections`` does not support ndarray, but supports scalar,\n tuple and list.\n * In ``indices_or_sections``, if an index exceeds the dimension of the array along axis 2,\n an error will be thrown.\n\n Examples\n --------\n >>> x = np.arange(16.0).reshape(2, 2, 4)\n >>> x\n array([[[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.]],\n [[ 8., 9., 10., 11.],\n [12., 13., 14., 15.]]])\n >>> np.dsplit(x, 2)\n [array([[[ 0., 1.],\n [ 4., 5.]],\n [[ 8., 9.],\n [12., 13.]]]), array([[[ 2., 3.],\n [ 6., 7.]],\n [[10., 11.],\n [14., 15.]]])]\n >>> np.dsplit(x, np.array([3, 6]))\n [array([[[ 0., 1., 2.],\n [ 4., 5., 6.]],\n [[ 8., 9., 10.],\n [12., 13., 14.]]]),\n array([[[ 3.],\n [ 7.]],\n [[11.],\n [15.]]]),\n array([], shape=(2, 2, 0), dtype=float64)]\n\n \"\"\"\n return _mx_nd_np.dsplit(ary, indices_or_sections)\n\n@set_module('mxnet.numpy')\ndef concat(seq, axis=0, out=None):\n \"\"\"Join a sequence of arrays along an existing axis.\n\n Parameters\n ----------\n a1, a2, ... : sequence of array_like\n The arrays must have the same shape, except in the dimension\n corresponding to `axis` (the first, by default).\n axis : int, optional\n The axis along which the arrays will be joined. If axis is None,\n arrays are flattened before use. Default is 0.\n out : ndarray, optional\n If provided, the destination to place the result. The shape must be\n correct, matching that of what concatenate would have returned if no\n out argument were specified.\n\n Returns\n -------\n res : ndarray\n The concatenated array.\n\n Note\n --------\n `concat` is an alias for `concatenate`. 
It is a standard API in\n https://data-apis.org/array-api/latest/API_specification/manipulation_functions.html#concat-arrays-axis-0\n instead of an official NumPy operator.\n\n See Also\n --------\n split : Split array into a list of multiple sub-arrays of equal size.\n hsplit : Split array into multiple sub-arrays horizontally (column wise)\n vsplit : Split array into multiple sub-arrays vertically (row wise)\n dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).\n stack : Stack a sequence of arrays along a new axis.\n hstack : Stack arrays in sequence horizontally (column wise)\n vstack : Stack arrays in sequence vertically (row wise)\n dstack : Stack arrays in sequence depth wise (along third dimension)\n\n Examples\n --------\n >>> a = np.array([[1, 2], [3, 4]])\n >>> b = np.array([[5, 6]])\n >>> np.concat((a, b), axis=0)\n array([[1., 2.],\n [3., 4.],\n [5., 6.]])\n\n >>> np.concat((a, b.T), axis=1)\n array([[1., 2., 5.],\n [3., 4., 6.]])\n\n >>> np.concat((a, b), axis=None)\n array([1., 2., 3., 4., 5., 6.])\n \"\"\"\n return _mx_nd_np.concatenate(seq, axis=axis, out=out)\n\n@set_module('mxnet.numpy')\ndef concatenate(seq, axis=0, out=None):\n \"\"\"Join a sequence of arrays along an existing axis.\n\n Parameters\n ----------\n a1, a2, ... : sequence of array_like\n The arrays must have the same shape, except in the dimension\n corresponding to `axis` (the first, by default).\n axis : int, optional\n The axis along which the arrays will be joined. If axis is None,\n arrays are flattened before use. Default is 0.\n out : ndarray, optional\n If provided, the destination to place the result. The shape must be\n correct, matching that of what concatenate would have returned if no\n out argument were specified.\n\n Returns\n -------\n res : ndarray\n The concatenated array.\n\n See Also\n --------\n split : Split array into a list of multiple sub-arrays of equal size.\n hsplit : Split array into multiple sub-arrays horizontally (column wise)\n vsplit : Split array into multiple sub-arrays vertically (row wise)\n dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).\n stack : Stack a sequence of arrays along a new axis.\n hstack : Stack arrays in sequence horizontally (column wise)\n vstack : Stack arrays in sequence vertically (row wise)\n dstack : Stack arrays in sequence depth wise (along third dimension)\n\n Examples\n --------\n >>> a = np.array([[1, 2], [3, 4]])\n >>> b = np.array([[5, 6]])\n >>> np.concatenate((a, b), axis=0)\n array([[1., 2.],\n [3., 4.],\n [5., 6.]])\n\n >>> np.concatenate((a, b.T), axis=1)\n array([[1., 2., 5.],\n [3., 4., 6.]])\n\n >>> np.concatenate((a, b), axis=None)\n array([1., 2., 3., 4., 5., 6.])\n \"\"\"\n return _mx_nd_np.concatenate(seq, axis=axis, out=out)\n\n\n@set_module('mxnet.numpy')\ndef append(arr, values, axis=None): # pylint: disable=redefined-outer-name\n \"\"\"\n Append values to the end of an array.\n\n Parameters\n ----------\n arr : ndarray\n Values are appended to a copy of this array.\n values : ndarray\n These values are appended to a copy of `arr`. It must be of the\n correct shape (the same shape as `arr`, excluding `axis`). If\n `axis` is not specified, `values` can be any shape and will be\n flattened before use.\n axis : int, optional\n The axis along which `values` are appended. If `axis` is not\n given, both `arr` and `values` are flattened before use.\n\n Returns\n -------\n append : ndarray\n A copy of `arr` with `values` appended to `axis`. 
Note that\n `append` does not occur in-place: a new array is allocated and\n filled. If `axis` is None, `out` is a flattened array.\n\n Examples\n --------\n >>> np.append(np.array([1, 2, 3]), np.array([[4, 5, 6],[7, 8, 9]]))\n array([1., 2., 3., 4., 5., 6., 7., 8., 9.])\n\n When `axis` is specified, `values` must have the correct shape.\n\n >>> np.append(np.array([[1, 2, 3], [4, 5, 6]]), np.array([[7, 8, 9]]), axis=0)\n array([[1., 2., 3.],\n [4., 5., 6.],\n [7., 8., 9.]])\n \"\"\"\n return _mx_nd_np.append(arr, values, axis=axis)\n\n\n@set_module('mxnet.numpy')\ndef stack(arrays, axis=0, out=None):\n \"\"\"Join a sequence of arrays along a new axis.\n The axis parameter specifies the index of the new axis in the dimensions of the result.\n For example, if `axis=0` it will be the first dimension and if `axis=-1` it will be the last dimension.\n\n Parameters\n ----------\n arrays : sequence of array_like\n Each array must have the same shape.\n axis : int, optional\n The axis in the result array along which the input arrays are stacked.\n out : ndarray, optional\n If provided, the destination to place the result. The shape must be correct,\n matching that of what stack would have returned if no out argument were specified.\n\n Returns\n -------\n stacked : ndarray\n The stacked array has one more dimension than the input arrays.\n\n See Also\n --------\n concatenate : Join a sequence of arrays along an existing axis.\n split : Split array into a list of multiple sub-arrays of equal size.\n\n Examples\n --------\n >>> arrays = [np.random.rand(3, 4) for _ in range(10)]\n >>> np.stack(arrays, axis=0).shape\n (10, 3, 4)\n\n >>> np.stack(arrays, axis=1).shape\n (3, 10, 4)\n\n >>> np.stack(arrays, axis=2).shape\n (3, 4, 10)\n\n >>> a = np.array([1, 2, 3])\n >>> b = np.array([2, 3, 4])\n >>> np.stack((a, b))\n array([[1., 2., 3.],\n [2., 3., 4.]])\n\n >>> np.stack((a, b), axis=-1)\n array([[1., 2.],\n [2., 3.],\n [3., 4.]])\n \"\"\"\n return _mx_nd_np.stack(arrays, axis=axis, out=out)\n\n\n@set_module('mxnet.numpy')\ndef vstack(arrays, out=None):\n r\"\"\"Stack arrays in sequence vertically (row wise).\n\n This is equivalent to concatenation along the first axis after 1-D arrays\n of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by\n `vsplit`.\n\n This function makes most sense for arrays with up to 3 dimensions. For\n instance, for pixel-data with a height (first axis), width (second axis),\n and r/g/b channels (third axis). The functions `concatenate` and `stack`\n provide more general stacking and concatenation operations.\n\n Parameters\n ----------\n tup : sequence of ndarrays\n The arrays must have the same shape along all but the first axis.\n 1-D arrays must have the same length.\n\n Returns\n -------\n stacked : ndarray\n The array formed by stacking the given arrays, will be at least 2-D.\n\n Examples\n --------\n >>> a = np.array([1, 2, 3])\n >>> b = np.array([2, 3, 4])\n >>> np.vstack((a, b))\n array([[1., 2., 3.],\n [2., 3., 4.]])\n\n >>> a = np.array([[1], [2], [3]])\n >>> b = np.array([[2], [3], [4]])\n >>> np.vstack((a, b))\n array([[1.],\n [2.],\n [3.],\n [2.],\n [3.],\n [4.]])\n \"\"\"\n return _mx_nd_np.vstack(arrays)\n\n\n@set_module('mxnet.numpy')\ndef row_stack(arrays):\n r\"\"\"Stack arrays in sequence vertically (row wise).\n This is equivalent to concatenation along the first axis after 1-D arrays\n of shape `(N,)` have been reshaped to `(1,N)`. 
Rebuilds arrays divided by\n `vsplit`.\n This function makes most sense for arrays with up to 3 dimensions. For\n instance, for pixel-data with a height (first axis), width (second axis),\n and r/g/b channels (third axis). The functions `concatenate` and `stack`\n provide more general stacking and concatenation operations.\n Parameters\n ----------\n tup : sequence of ndarrays\n The arrays must have the same shape along all but the first axis.\n 1-D arrays must have the same length.\n Returns\n -------\n stacked : ndarray\n The array formed by stacking the given arrays, will be at least 2-D.\n Examples\n --------\n >>> a = np.array([1, 2, 3])\n >>> b = np.array([2, 3, 4])\n >>> np.vstack((a, b))\n array([[1., 2., 3.],\n [2., 3., 4.]])\n >>> a = np.array([[1], [2], [3]])\n >>> b = np.array([[2], [3], [4]])\n >>> np.vstack((a, b))\n array([[1.],\n [2.],\n [3.],\n [2.],\n [3.],\n [4.]])\n \"\"\"\n return _mx_nd_np.row_stack(arrays)\n\n\n@set_module('mxnet.numpy')\ndef column_stack(tup):\n \"\"\"\n Stack 1-D arrays as columns into a 2-D array.\n\n Take a sequence of 1-D arrays and stack them as columns\n to make a single 2-D array. 2-D arrays are stacked as-is,\n just like with `hstack`. 1-D arrays are turned into 2-D columns\n first.\n\n Parameters\n ----------\n tup : sequence of 1-D or 2-D arrays.\n Arrays to stack. All of them must have the same first dimension.\n\n Returns\n --------\n stacked : 2-D array\n The array formed by stacking the given arrays.\n\n See Also\n --------\n stack, hstack, vstack, concatenate\n\n Examples\n --------\n >>> a = np.array((1,2,3))\n >>> b = np.array((2,3,4))\n >>> np.column_stack((a,b))\n array([[1., 2.],\n [2., 3.],\n [3., 4.]])\n \"\"\"\n return _mx_nd_np.column_stack(tup)\n\n\n@set_module('mxnet.numpy')\ndef hstack(arrays):\n \"\"\"\n Stack arrays in sequence horizontally (column wise).\n This is equivalent to concatenation along the second axis,\n except for 1-D arrays where it concatenates along the first axis.\n Rebuilds arrays divided by hsplit.\n This function makes most sense for arrays with up to 3 dimensions.\n For instance, for pixel-data with a height (first axis), width (second axis),\n and r/g/b channels (third axis). The functions concatenate,\n stack and block provide more general stacking and concatenation operations.\n\n Parameters\n ----------\n tup : sequence of ndarrays\n The arrays must have the same shape along all but the second axis, except 1-D arrays which can be any length.\n\n Returns\n -------\n stacked : ndarray\n The array formed by stacking the given arrays.\n\n Examples\n --------\n >>> from mxnet import np,npx\n >>> a = np.array((1,2,3))\n >>> b = np.array((2,3,4))\n >>> np.hstack((a,b))\n array([1., 2., 3., 2., 3., 4.])\n >>> a = np.array([[1],[2],[3]])\n >>> b = np.array([[2],[3],[4]])\n >>> np.hstack((a,b))\n array([[1., 2.],\n [2., 3.],\n [3., 4.]])\n \"\"\"\n return _mx_nd_np.hstack(arrays)\n\n\n@set_module('mxnet.numpy')\ndef dstack(arrays):\n \"\"\"\n Stack arrays in sequence depth wise (along third axis).\n\n This is equivalent to concatenation along the third axis after 2-D arrays\n of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape\n `(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by\n `dsplit`.\n\n This function makes most sense for arrays with up to 3 dimensions. For\n instance, for pixel-data with a height (first axis), width (second axis),\n and r/g/b channels (third axis). 
The functions `concatenate`, `stack` and\n `block` provide more general stacking and concatenation operations.\n\n Parameters\n ----------\n tup : sequence of arrays\n The arrays must have the same shape along all but the third axis.\n 1-D or 2-D arrays must have the same shape.\n\n Returns\n -------\n stacked : ndarray\n The array formed by stacking the given arrays, will be at least 3-D.\n\n Examples\n --------\n >>> a = np.array((1,2,3))\n >>> b = np.array((2,3,4))\n >>> np.dstack((a,b))\n array([[[1, 2],\n [2, 3],\n [3, 4]]])\n >>> a = np.array([[1],[2],[3]])\n >>> b = np.array([[2],[3],[4]])\n >>> np.dstack((a,b))\n array([[[1, 2]],\n [[2, 3]],\n [[3, 4]]])\n \"\"\"\n return _npi.dstack(*arrays)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef maximum(x1, x2, out=None, **kwargs):\n \"\"\"\n Returns element-wise maximum of the input arrays with broadcasting.\n\n Parameters\n ----------\n x1, x2 : scalar or mxnet.numpy.ndarray\n The arrays holding the elements to be compared. They must have the same shape,\n or shapes that can be broadcast to a single shape.\n\n Returns\n -------\n out : mxnet.numpy.ndarray or scalar\n The maximum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.\n\n Examples\n --------\n >>> np.maximum(np.array([2, 3, 4]), np.array([1, 5, 2]))\n array([2., 5., 4.])\n\n >>> np.maximum(np.eye(2), np.array([0.5, 2])) # broadcasting\n array([[1. , 2. ],\n [0.5, 2. ]])\n \"\"\"\n return _mx_nd_np.maximum(x1, x2, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef fmax(x1, x2, out=None, **kwargs):\n \"\"\"\n Returns element-wise maximum of the input arrays with broadcasting. (Ignores NaNs)\n\n Parameters\n ----------\n x1, x2 : scalar or mxnet.numpy.ndarray\n The arrays holding the elements to be compared. They must have the same shape,\n or shapes that can be broadcast to a single shape.\n\n Returns\n -------\n out : mxnet.numpy.ndarray or scalar\n The maximum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.\n\n Examples\n --------\n >>> np.fmax(np.array([2, 3, 4]), np.array([1, 5, 2]))\n array([2., 5., 4.])\n\n >>> np.fmax(np.eye(2), np.array([0.5, 2])) # broadcasting\n array([[1. , 2. ],\n [0.5, 2. ]])\n \"\"\"\n return _mx_nd_np.fmax(x1, x2, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef minimum(x1, x2, out=None, **kwargs):\n \"\"\"\n Returns element-wise minimum of the input arrays with broadcasting.\n\n Parameters\n ----------\n x1, x2 : scalar or mxnet.numpy.ndarray\n The arrays holding the elements to be compared. They must have the same shape,\n or shapes that can be broadcast to a single shape.\n\n Returns\n -------\n out : mxnet.numpy.ndarray or scalar\n The minimum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.\n\n Examples\n --------\n >>> np.minimum(np.array([2, 3, 4]), np.array([1, 5, 2]))\n array([1., 3., 2.])\n\n >>> np.minimum(np.eye(2), np.array([0.5, 2])) # broadcasting\n array([[0.5, 0. ],\n [0. , 1. ]])\n \"\"\"\n return _mx_nd_np.minimum(x1, x2, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef fmin(x1, x2, out=None, **kwargs):\n \"\"\"\n Returns element-wise minimum of the input arrays with broadcasting. (Ignores NaNs)\n\n Parameters\n ----------\n x1, x2 : scalar or mxnet.numpy.ndarray\n The arrays holding the elements to be compared. 
They must have the same shape,\n or shapes that can be broadcast to a single shape.\n\n Returns\n -------\n out : mxnet.numpy.ndarray or scalar\n The fmin of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.\n\n Examples\n --------\n >>> np.fmin(np.array([2, 3, 4]), np.array([1, 5, 2]))\n array([1., 3., 2.])\n\n >>> np.fmin(np.eye(2), np.array([0.5, 2])) # broadcasting\n array([[0.5, 0. ],\n [0. , 1. ]])\n \"\"\"\n return _mx_nd_np.fmin(x1, x2, out=out)\n\n\n@set_module('mxnet.numpy')\ndef max(a, axis=None, out=None, keepdims=False):\n \"\"\"\n Return the maximum of an array or maximum along an axis.\n\n Parameters\n ----------\n a : ndarray\n Input data.\n axis : int, optional\n Axis along which to operate. By default, flattened input is used.\n out : ndarray, optional\n Alternative output array in which to place the result. Must\n be of the same shape and buffer length as the expected output.\n See `doc.ufuncs` (Section \"Output arguments\") for more details.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the original `arr`.\n\n Returns\n -------\n max : ndarray\n Maximum of `a`. If `axis` is None, the result is an array of dimension 1.\n If `axis` is given, the result is an array of dimension\n ``a.ndim - 1``.\n\n See Also\n --------\n min :\n The minimum value of an array along a given axis, ignoring any nan.\n maximum :\n Element-wise maximum of two arrays, ignoring any nan.\n argmax :\n Return the indices of the maximum values.\n\n Notes\n -----\n NaN in the orginal `numpy` is denoted as nan and will be ignored.\n\n Don't use `max` for element-wise comparison of 2 arrays; when\n ``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than\n ``max(a, axis=0)``.\n\n Examples\n --------\n >>> a = np.arange(4).reshape((2,2))\n >>> a\n array([[0., 1.],\n [2., 3.]])\n >>> np.max(a) # Maximum of the flattened array\n array(3.)\n >>> np.max(a, axis=0) # Maxima along the first axis\n array([2., 3.])\n >>> np.max(a, axis=1) # Maxima along the second axis\n array([1., 3.])\n\n >>> b = np.arange(5, dtype=np.float32)\n >>> b[2] = np.nan\n >>> np.max(b)\n array(4.)\n \"\"\"\n return _mx_nd_np.max(a, axis=axis, out=out, keepdims=keepdims)\n\n\n@set_module('mxnet.numpy')\ndef min(a, axis=None, out=None, keepdims=False):\n \"\"\"\n Return the minimum of an array or minimum along an axis.\n\n Parameters\n ----------\n a : ndarray\n Input data.\n axis : int, optional\n Axis along which to operate. By default, flattened input is used.\n out : ndarray, optional\n Alternative output array in which to place the result. Must\n be of the same shape and buffer length as the expected output.\n See `doc.ufuncs` (Section \"Output arguments\") for more details.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the original `arr`.\n\n Returns\n -------\n min : ndarray\n Minimum of `a`. 
If `axis` is None, the result is an array of dimension 1.\n If `axis` is given, the result is an array of dimension\n ``a.ndim - 1``.\n\n See Also\n --------\n max :\n The maximum value of an array along a given axis, ignoring any nan.\n minimum :\n Element-wise minimum of two arrays, ignoring any nan.\n\n Notes\n -----\n NaN in the orginal `numpy` is denoted as nan and will be ignored.\n\n Don't use `min` for element-wise comparison of 2 arrays; when\n ``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than\n ``min(a, axis=0)``.\n\n Examples\n --------\n >>> a = np.arange(4).reshape((2,2))\n >>> a\n array([[0., 1.],\n [2., 3.]])\n >>> np.min(a) # Minimum of the flattened array\n array(0.)\n >>> np.min(a, axis=0) # Minima along the first axis\n array([0., 1.])\n >>> np.min(a, axis=1) # Minima along the second axis\n array([0., 2.])\n >>> b = np.arange(5, dtype=np.float32)\n >>> b[2] = np.nan\n >>> np.min(b)\n array(0.) # nan will be ignored\n \"\"\"\n return _mx_nd_np.min(a, axis=axis, out=out, keepdims=keepdims)\n\n\n@set_module('mxnet.numpy')\ndef swapaxes(a, axis1, axis2):\n \"\"\"Interchange two axes of an array.\n\n Parameters\n ----------\n a : ndarray\n Input array.\n axis1 : int\n First axis.\n axis2 : int\n Second axis.\n\n Returns\n -------\n a_swapped : ndarray\n Swapped array. This is always a copy of the input array.\n\n Examples\n --------\n >>> x = np.array([[1,2,3]])\n >>> np.swapaxes(x,0,1)\n array([[1.],\n [2.],\n [3.]])\n\n >>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]])\n >>> x\n array([[[0., 1.],\n [2., 3.]],\n\n [[4., 5.],\n [6., 7.]]])\n\n >>> np.swapaxes(x,0,2)\n array([[[0., 4.],\n [2., 6.]],\n\n [[1., 5.],\n [3., 7.]]])\n \"\"\"\n return _npi.swapaxes(a, dim1=axis1, dim2=axis2)\n\n\n@set_module('mxnet.numpy')\ndef clip(a, a_min, a_max, out=None):\n \"\"\"clip(a, a_min, a_max, out=None)\n\n Clip (limit) the values in an array.\n Given an interval, values outside the interval are clipped to\n the interval edges. For example, if an interval of ``[0, 1]``\n is specified, values smaller than 0 become 0, and values larger\n than 1 become 1.\n\n Parameters\n ----------\n a : ndarray\n Array containing elements to clip.\n a_min : scalar or `None`\n Minimum value. If `None`, clipping is not performed on lower\n interval edge. Not more than one of `a_min` and `a_max` may be\n `None`.\n a_max : scalar or `None`\n Maximum value. If `None`, clipping is not performed on upper\n interval edge. Not more than one of `a_min` and `a_max` may be\n `None`.\n out : ndarray, optional\n The results will be placed in this array. It may be the input\n array for in-place clipping. `out` must be of the right shape\n to hold the output. 
Its type is preserved.\n\n Returns\n -------\n clipped_array : ndarray\n An array with the elements of `a`, but where values\n < `a_min` are replaced with `a_min`, and those > `a_max`\n with `a_max`.\n\n Notes\n -----\n array_like `a_min` and `a_max` are not supported.\n\n Examples\n --------\n >>> a = np.arange(10)\n >>> np.clip(a, 1, 8)\n array([1., 1., 2., 3., 4., 5., 6., 7., 8., 8.])\n >>> a\n array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])\n >>> np.clip(a, 3, 6, out=a)\n array([3., 3., 3., 3., 4., 5., 6., 6., 6., 6.])\n \"\"\"\n from numbers import Number\n if isinstance(a, Number):\n # In case input is a scalar, the computation would fall back to native numpy.\n # The value returned would be a python scalar.\n return _np.clip(a, a_min, a_max, out=None)\n return _mx_nd_np.clip(a, a_min, a_max, out=out)\n\n\n@set_module('mxnet.numpy')\ndef argmax(a, axis=None, out=None, keepdims=False):\n r\"\"\"\n Returns the indices of the maximum values along an axis.\n\n Parameters\n ----------\n a : ndarray\n Input array. Only support ndarrays of dtype `float16`, `float32`, and `float64`.\n axis : int, optional\n By default, the index is into the flattened array, otherwise\n along the specified axis.\n out : ndarray or None, optional\n If provided, the result will be inserted into this array. It should\n be of the appropriate shape and dtype.\n keepdims : bool\n If True, the reduced axes (dimensions) must be included in the result as\n singleton dimensions, and, accordingly, the result must be compatible with\n the input array. Otherwise, if False, the reduced axes (dimensions) must\n not be included in the result. Default: False .\n\n Returns\n -------\n index_array : ndarray of indices whose dtype is same as the input ndarray.\n Array of indices into the array. It has the same shape as `a.shape`\n with the dimension along `axis` removed.\n\n .. note::\n ``keepdims`` param is part of request in data-api-standard\n <https://data-apis.org/array-api/latest/API_specification/searching_functions.html#argmax-x-axis-none-keepdims-false>`_,\n which is not the parameter in official NumPy\n\n In case of multiple occurrences of the maximum values, the indices\n corresponding to the first occurrence are returned.\n\n This function differs from the original `numpy.argmax\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmax.html>`_ in\n the following aspects:\n\n * Input type does not support Python native iterables(list, tuple, ...).\n * ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be\n the same as the expected output.\n * ``out`` param: cannot perform auto type cast. 
``out`` ndarray's dtype must be the\n same as the expected output.\n * ``out`` param does not support scalar input case.\n\n Examples\n --------\n >>> a = np.arange(6).reshape(2,3) + 10\n >>> a\n array([[10., 11., 12.],\n [13., 14., 15.]])\n >>> np.argmax(a)\n array(5.)\n >>> np.argmax(a, axis=0)\n array([1., 1., 1.])\n >>> np.argmax(a, axis=1)\n array([2., 2.])\n\n >>> b = np.arange(6)\n >>> b[1] = 5\n >>> b\n array([0., 5., 2., 3., 4., 5.])\n >>> np.argmax(b) # Only the first occurrence is returned.\n array(1.)\n\n Specify ``out`` ndarray:\n\n >>> a = np.arange(6).reshape(2,3) + 10\n >>> b = np.zeros((2,))\n >>> np.argmax(a, axis=1, out=b)\n array([2., 2.])\n >>> b\n array([2., 2.])\n \"\"\"\n return _mx_nd_np.argmax(a, axis, out, keepdims)\n\n\n@set_module('mxnet.numpy')\ndef argmin(a, axis=None, out=None, keepdims=False):\n r\"\"\"\n Returns the indices of the minimum values along an axis.\n\n Parameters\n ----------\n a : ndarray\n Input array. Only support ndarrays of dtype `float16`, `float32`, and `float64`.\n axis : int, optional\n By default, the index is into the flattened array, otherwise\n along the specified axis.\n out : ndarray or None, optional\n If provided, the result will be inserted into this array. It should\n be of the appropriate shape and dtype.\n keepdims : bool\n If True, the reduced axes (dimensions) must be included in the result as\n singleton dimensions, and, accordingly, the result must be compatible with\n the input array. Otherwise, if False, the reduced axes (dimensions) must\n not be included in the result. Default: False .\n\n Returns\n -------\n index_array : ndarray of indices whose dtype is same as the input ndarray.\n Array of indices into the array. It has the same shape as `a.shape`\n with the dimension along `axis` removed.\n\n .. note::\n ``keepdims`` param is part of request in data-api-standard\n <https://data-apis.org/array-api/latest/API_specification/searching_functions.html#argmin-x-axis-none-keepdims-false>`_,\n which is not the parameter in official NumPy\n\n In case of multiple occurrences of the minimum values, the indices\n corresponding to the first occurrence are returned.\n\n This function differs from the original `numpy.argmin\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmin.html>`_ in\n the following aspects:\n\n * Input type does not support Python native iterables(list, tuple, ...).\n * ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be\n the same as the expected output.\n * ``out`` param: cannot perform auto type cast. 
``out`` ndarray's dtype must be the\n same as the expected output.\n * ``out`` param does not support scalar input case.\n\n Examples\n --------\n >>> a = np.arange(6).reshape(2,3) + 10\n >>> a\n array([[10., 11., 12.],\n [13., 14., 15.]])\n >>> np.argmin(a)\n array(0.)\n >>> np.argmin(a, axis=0)\n array([0., 0., 0.])\n >>> np.argmin(a, axis=1)\n array([0., 0.])\n\n >>> b = np.arange(6)\n >>> b[2] = 0\n >>> b\n array([0., 1., 0., 3., 4., 5.])\n >>> np.argmin(b) # Only the first occurrence is returned.\n array(0.)\n\n Specify ``out`` ndarray:\n\n >>> a = np.arange(6).reshape(2,3) + 10\n >>> b = np.zeros((2,))\n >>> np.argmin(a, axis=1, out=b)\n array([0., 0.])\n >>> b\n array([0., 0.])\n \"\"\"\n return _mx_nd_np.argmin(a, axis, out, keepdims)\n\n\n@set_module('mxnet.numpy')\ndef amax(a, axis=None, out=None, keepdims=False):\n \"\"\"\n Return the maximum of an array or maximum along an axis.\n\n Parameters\n ----------\n a : ndarray\n Input data.\n axis : int, optional\n Axis along which to operate. By default, flattened input is used.\n out : ndarray, optional\n Alternative output array in which to place the result. Must\n be of the same shape and buffer length as the expected output.\n See `doc.ufuncs` (Section \"Output arguments\") for more details.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the original `arr`.\n\n Returns\n -------\n max : ndarray\n Maximum of `a`. If `axis` is None, the result is an array of dimension 1.\n If `axis` is given, the result is an array of dimension\n ``a.ndim - 1``.\n\n See Also\n --------\n min :\n The minimum value of an array along a given axis, ignoring any nan.\n maximum :\n Element-wise maximum of two arrays, ignoring any nan.\n argmax :\n Return the indices of the maximum values.\n\n Notes\n -----\n NaN in the original `numpy` is denoted as nan and will be ignored.\n\n Don't use `max` for element-wise comparison of 2 arrays; when\n ``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than\n ``max(a, axis=0)``.\n\n Examples\n --------\n >>> a = np.arange(4).reshape((2,2))\n >>> a\n array([[0., 1.],\n [2., 3.]])\n >>> np.max(a) # Maximum of the flattened array\n array(3.)\n >>> np.max(a, axis=0) # Maxima along the first axis\n array([2., 3.])\n >>> np.max(a, axis=1) # Maxima along the second axis\n array([1., 3.])\n\n >>> b = np.arange(5, dtype=np.float32)\n >>> b[2] = np.nan\n >>> np.max(b)\n array(4.)\n \"\"\"\n return _mx_nd_np.amax(a, axis=axis, out=out, keepdims=keepdims)\n\n\n@set_module('mxnet.numpy')\ndef amin(a, axis=None, out=None, keepdims=False):\n \"\"\"\n Return the minimum of an array or minimum along an axis.\n\n Parameters\n ----------\n a : ndarray\n Input data.\n axis : int, optional\n Axis along which to operate. By default, flattened input is used.\n out : ndarray, optional\n Alternative output array in which to place the result. Must\n be of the same shape and buffer length as the expected output.\n See `doc.ufuncs` (Section \"Output arguments\") for more details.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the original `arr`.\n\n Returns\n -------\n min : ndarray\n Minimum of `a`. 
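# A minimal sketch of the advice in the amax Notes above: use the reduction
# ``max`` along an axis, and the element-wise ``maximum`` to compare two rows.
# Assumes ``from mxnet import np``; `_sketch_max_vs_maximum` is a hypothetical
# helper, not part of this module's API.
def _sketch_max_vs_maximum():
    from mxnet import np
    a = np.array([[0., 1.], [2., 3.]])
    reduced = np.max(a, axis=0)           # reduction along axis 0 -> [2., 3.]
    elementwise = np.maximum(a[0], a[1])  # element-wise max of the two rows -> [2., 3.]
    return reduced, elementwise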
If `axis` is None, the result is an array of dimension 1.\n If `axis` is given, the result is an array of dimension\n ``a.ndim - 1``.\n\n See Also\n --------\n max :\n The maximum value of an array along a given axis, ignoring any nan.\n minimum :\n Element-wise minimum of two arrays, ignoring any nan.\n\n Notes\n -----\n NaN in the orginal `numpy` is denoted as nan and will be ignored.\n\n Don't use `min` for element-wise comparison of 2 arrays; when\n ``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than\n ``min(a, axis=0)``.\n\n Examples\n --------\n >>> a = np.arange(4).reshape((2,2))\n >>> a\n array([[0., 1.],\n [2., 3.]])\n >>> np.min(a) # Minimum of the flattened array\n array(0.)\n >>> np.min(a, axis=0) # Minima along the first axis\n array([0., 1.])\n >>> np.min(a, axis=1) # Minima along the second axis\n array([0., 2.])\n >>> b = np.arange(5, dtype=np.float32)\n >>> b[2] = np.nan\n >>> np.min(b)\n array(0.) # nan will be ignored\n \"\"\"\n return _mx_nd_np.amin(a, axis=axis, out=out, keepdims=keepdims)\n\n\n@set_module('mxnet.numpy')\ndef average(a, axis=None, weights=None, returned=False, out=None):\n \"\"\"\n Compute the weighted average along the specified axis.\n\n Parameters\n --------\n a : ndarray\n Array containing data to be averaged.\n axis : None or int or tuple of ints, optional\n Axis or axes along which to average a.\n The default, axis=None, will average over\n all of the elements of the input array.\n If axis is negative it counts from the last to the first axis.\n New in version 1.7.0.\n If axis is a tuple of ints, averaging is\n performed on all of the axes specified in the tuple\n instead of a single axis or all the axes as before.\n weights : ndarray, optional\n An array of weights associated with the values in a, must be the same dtype with a.\n Each value in a contributes to the average according to its associated weight.\n The weights array can either be 1-D (in which case its length must be\n the size of a along the given axis) or of the same shape as a.\n If weights=None, then all data in a are assumed to have a weight equal to one.\n The 1-D calculation is: avg = sum(a * weights) / sum(weights)\n The only constraint on weights is that sum(weights) must not be 0.\n returned : bool, optional\n Default is False.\n If True, the tuple (average, sum_of_weights) is returned,\n otherwise only the average is returned.\n If weights=None, sum_of_weights is equivalent to\n the number of elements over which the average is taken.\n out : ndarray, optional\n If provided, the calculation is done into this array.\n\n Returns\n --------\n retval, [sum_of_weights] : ndarray\n Return the average along the specified axis.\n When returned is True, return a tuple with the average as the first element\n and the sum of the weights as the second element. sum_of_weights is of the same type as retval.\n If a is integral, the result dtype will be current default dtype,\n When npx.is_np_default_dtype() returns False, default dtype is float32,\n When npx.is_np_default_dtype() returns True, default dtype is float64;\n otherwise it will be the same as dtype of a.\n\n Raises\n --------\n MXNetError\n * When all weights along axis sum to zero.\n * When the length of 1D weights is not the same as the shape of a along axis.\n * When given 1D weights, the axis is not specified or is not int.\n * When the shape of weights and a differ, but weights are not 1D.\n\n See also\n --------\n mean\n\n .. 
note::\n This function differs from the original `numpy.average`\n <https://numpy.org/devdocs/reference/generated/numpy.average.html>`_ in\n the following way(s):\n\n * Does not guarantee the same behavior with numpy when given float16 dtype and overflow happens\n * Does not support complex dtype\n * The dtypes of a and weights must be the same\n * Integral a results in float32 or float64 returned dtype:\n\n * When npx.is_np_default_dtype() returns False, default dtype is float32,\n * When npx.is_np_default_dtype() returns True, default dtype is float64;\n\n Examples\n --------\n >>> data = np.arange(1, 5)\n >>> data\n array([1., 2., 3., 4.])\n >>> np.average(data)\n array(2.5)\n >>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1))\n array(4.)\n >>> data = np.arange(6).reshape((3,2))\n >>> data\n array([[0., 1.],\n [2., 3.],\n [4., 5.]])\n >>> weights = np.array([0.25, 0.75])\n array([0.25, 0.75])\n >>> np.average(data, axis=1, weights=weights)\n array([0.75, 2.75, 4.75])\n \"\"\"\n return _mx_nd_np.average(a, axis=axis, weights=weights, returned=returned, out=out)\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.numpy')\ndef mean(a, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ\n \"\"\"\n Compute the arithmetic mean along the specified axis.\n Returns the average of the array elements.\n The average is taken over the flattened array by default, otherwise over the specified axis.\n\n Parameters\n ----------\n a : ndarray\n ndarray containing numbers whose mean is desired.\n axis : None or int or tuple of ints, optional\n Axis or axes along which the means are computed. The default is to compute the mean of the flattened array.\n If this is a tuple of ints, a mean is performed over multiple axes,\n instead of a single axis or all the axes as before.\n dtype : data-type, optional\n Type to use in computing the mean.\n For integer inputs, the default is of your current default dtype,\n When npx.is_np_default_dtype() returns False, default dtype is float32,\n When npx.is_np_default_dtype() returns True, default dtype is float64;\n For floating point inputs, it is the same as the input dtype.\n out : ndarray, optional\n Alternate output array in which to place the result. The default is None; if provided,\n it must have the same shape and type as the expected output.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left in the result\n as dimensions with size one. With this option, the result will broadcast correctly\n against the input array.\n If the default value is passed, then keepdims will not be passed through to the mean\n method of sub-classes of ndarray, however any non-default value will be. If the sub-class\n method does not implement keepdims any exceptions will be raised.\n\n Returns\n -------\n m : ndarray, see dtype parameter above\n If out=None, returns a new array containing the mean values,\n otherwise a reference to the output array is returned.\n\n .. 
note::\n\n This function differs from the original `numpy.mean\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html>`_ in\n the following way(s):\n\n * only ndarray is accepted as valid input, python iterables or scalar is not supported\n * default data type for integer input is float32 or float64, which depends on your current default dtype\n\n Examples\n --------\n >>> a = np.array([[1, 2], [3, 4]])\n >>> np.mean(a)\n array(2.5)\n >>> a = np.zeros((2, 512*512), dtype=np.float32)\n >>> a[0,:] = 1.0\n >>> a[1,:] = 0.1\n >>> np.mean(a)\n array(0.55)\n >>> np.mean(a, dtype=np.float64)\n array(0.55, dtype=float64)\n \"\"\"\n return _mx_nd_np.mean(a, axis=axis, dtype=dtype, keepdims=keepdims, out=out)\n# pylint: enable=redefined-outer-name\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.numpy')\n@wrap_data_api_statical_func\ndef std(a, axis=None, dtype=None, out=None, correction=0, keepdims=False): # pylint: disable=too-many-arguments\n \"\"\"\n Compute the standard deviation along the specified axis.\n Returns the standard deviation, a measure of the spread of a distribution,\n of the array elements. The standard deviation is computed for the\n flattened array by default, otherwise over the specified axis.\n\n Parameters\n ----------\n a : array_like\n Calculate the standard deviation of these values.\n axis : None or int or tuple of ints, optional\n Axis or axes along which the standard deviation is computed. The\n default is to compute the standard deviation of the flattened array.\n .. versionadded:: 1.7.0\n If this is a tuple of ints, a standard deviation is performed over\n multiple axes, instead of a single axis or all the axes as before.\n dtype : dtype, optional\n Type to use in computing the standard deviation. For arrays of\n integer type the default is float64, for arrays of float types it is\n the same as the array type.\n out : ndarray, optional\n Alternative output array in which to place the result. It must have\n the same shape as the expected output but the type (of the calculated\n values) will be cast if necessary.\n correction : int, optional\n Means Delta Degrees of Freedom. The divisor used in calculations\n is ``N - correction``, where ``N`` represents the number of elements.\n By default `correction` is zero.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the input array.\n If the default value is passed, then `keepdims` will not be\n passed through to the `std` method of sub-classes of\n `ndarray`, however any non-default value will be. 
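# A minimal sketch of the ``keepdims`` use case described above: keeping the
# reduced axis lets the per-row mean broadcast back against the input, e.g. for
# centering. Assumes ``from mxnet import np``; `_sketch_mean_keepdims` is a
# hypothetical helper, not part of this module's API.
def _sketch_mean_keepdims():
    from mxnet import np
    x = np.array([[1., 2., 3.], [4., 5., 6.]])
    row_mean = np.mean(x, axis=1, keepdims=True)  # shape (2, 1)
    return x - row_mean                           # broadcasts back to shape (2, 3)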
If the\n sub-class' method does not implement `keepdims` any\n exceptions will be raised.\n\n Returns\n -------\n standard_deviation : ndarray, see dtype parameter above.\n If `out` is None, return a new array containing the standard deviation,\n otherwise return a reference to the output array.\n\n Examples\n --------\n >>> a = np.array([[1, 2], [3, 4]])\n >>> np.std(a)\n 1.1180339887498949 # may vary\n >>> np.std(a, axis=0)\n array([1., 1.])\n >>> np.std(a, axis=1)\n array([0.5, 0.5])\n In single precision, std() can be inaccurate:\n >>> a = np.zeros((2, 512*512), dtype=np.float32)\n >>> a[0, :] = 1.0\n >>> a[1, :] = 0.1\n >>> np.std(a)\n array(0.45)\n >>> np.std(a, dtype=np.float64)\n array(0.45, dtype=float64)\n \"\"\"\n return _mx_nd_np.std(a, axis=axis, dtype=dtype, ddof=correction, keepdims=keepdims, out=out)\n# pylint: enable=redefined-outer-name\n\n\n@set_module('mxnet.numpy')\ndef delete(arr, obj, axis=None):\n \"\"\"\n Return a new array with sub-arrays along an axis deleted. For a one\n dimensional array, this returns those entries not returned by\n `arr[obj]`.\n\n Parameters\n ----------\n arr : ndarray\n Input array.\n obj : slice, int or ndarray of ints\n Indicate indices of sub-arrays to remove along the specified axis.\n axis : int, optional\n The axis along which to delete the subarray defined by `obj`.\n If `axis` is None, `obj` is applied to the flattened array.\n\n Returns\n -------\n out : ndarray\n A copy of `arr` with the elements specified by `obj` removed. Note\n that `delete` does not occur in-place. If `axis` is None, `out` is\n a flattened array.\n\n Examples\n --------\n >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])\n >>> arr\n array([[ 1., 2., 3., 4.],\n [ 5., 6., 7., 8.],\n [ 9., 10., 11., 12.]])\n\n >>> np.delete(arr, 1, 0)\n array([[ 1., 2., 3., 4.],\n [ 9., 10., 11., 12.]])\n\n >>> np.delete(arr, slice(None, None, 2), 1)\n array([[ 2., 4.],\n [ 6., 8.],\n [10., 12.]])\n\n >>> np.delete(arr, np.array([1,3,5]), None)\n array([ 1., 3., 5., 7., 8., 9., 10., 11., 12.])\n >>> np.delete(arr, np.array([1,1,5]), None)\n array([ 1., 3., 4., 5., 7., 8., 9., 10., 11., 12.])\n \"\"\"\n return _mx_nd_np.delete(arr, obj, axis=axis)\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.numpy')\n@wrap_data_api_statical_func\ndef var(a, axis=None, dtype=None, out=None, correction=0, keepdims=False): # pylint: disable=too-many-arguments\n \"\"\"\n Compute the variance along the specified axis.\n Returns the variance of the array elements, a measure of the spread of a\n distribution. The variance is computed for the flattened array by\n default, otherwise over the specified axis.\n\n Parameters\n ----------\n a : array_like\n Array containing numbers whose variance is desired. If `a` is not an\n array, a conversion is attempted.\n axis : None or int or tuple of ints, optional\n Axis or axes along which the variance is computed. The default is to\n compute the variance of the flattened array.\n .. 
versionadded:: 1.7.0\n If this is a tuple of ints, a variance is performed over multiple axes,\n instead of a single axis or all the axes as before.\n dtype : data-type, optional\n Type to use in computing the variance.\n For arrays of integer type, the default is of your current default dtype,\n When npx.is_np_default_dtype() returns False, default dtype is float32,\n When npx.is_np_default_dtype() returns True, default dtype is float64.\n For arrays of float types it is the same as the array type.\n out : ndarray, optional\n Alternate output array in which to place the result. It must have\n the same shape as the expected output, but the type is cast if\n necessary.\n correction : int, optional\n \"Delta Degrees of Freedom\": the divisor used in the calculation is\n ``N - correction``, where ``N`` represents the number of elements. By\n default `correction` is zero.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the input array.\n If the default value is passed, then `keepdims` will not be\n passed through to the `var` method of sub-classes of\n `ndarray`, however any non-default value will be. If the\n sub-class' method does not implement `keepdims` any\n exceptions will be raised.\n\n Returns\n -------\n variance : ndarray, see dtype parameter above\n If ``out=None``, returns a new array containing the variance;\n otherwise, a reference to the output array is returned.\n\n Examples\n --------\n >>> a = np.array([[1, 2], [3, 4]])\n >>> np.var(a)\n array(1.25)\n >>> np.var(a, axis=0)\n array([1., 1.])\n >>> np.var(a, axis=1)\n array([0.25, 0.25])\n\n >>> a = np.zeros((2, 512*512), dtype=np.float32)\n >>> a[0, :] = 1.0\n >>> a[1, :] = 0.1\n >>> np.var(a)\n array(0.2025)\n >>> np.var(a, dtype=np.float64)\n array(0.2025, dtype=float64)\n >>> ((1-0.55)**2 + (0.1-0.55)**2)/2\n 0.2025\n \"\"\"\n return _mx_nd_np.var(a, axis=axis, dtype=dtype, ddof=correction, keepdims=keepdims, out=out)\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.numpy')\n@wrap_ctx_to_device_func\ndef indices(dimensions, dtype=None, device=None):\n \"\"\"Return an array representing the indices of a grid.\n\n Compute an array where the subarrays contain index values 0,1,...\n varying only along the corresponding axis.\n\n Parameters\n ----------\n dimensions : sequence of ints\n The shape of the grid.\n dtype : data-type, optional\n The desired data-type for the array. Default is `int64`.\n device : Device, optional\n Device context on which the memory is allocated. Default is\n `mxnet.device.current_device()`.\n\n Returns\n -------\n grid : ndarray\n The array of grid indices,\n ``grid.shape = (len(dimensions),) + tuple(dimensions)``.\n\n Notes\n -----\n The output shape is obtained by prepending the number of dimensions\n in front of the tuple of dimensions, i.e. if `dimensions` is a tuple\n ``(r0, ..., rN-1)`` of length ``N``, the output shape is\n ``(N,r0,...,rN-1)``.\n\n The subarrays ``grid[k]`` contains the N-D array of indices along the\n ``k-th`` axis. 
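# A minimal sketch of the ``correction`` argument documented for var/std above:
# correction=0 divides by N (population), correction=1 divides by N - 1 (sample).
# Assumes ``from mxnet import np``; `_sketch_var_correction` is a hypothetical
# helper, not part of this module's API.
def _sketch_var_correction():
    from mxnet import np
    a = np.array([1., 2., 3., 4.])
    population = np.var(a)             # divisor N (correction=0) -> 1.25
    sample = np.var(a, correction=1)   # divisor N - 1 -> ~1.6667
    return population, sample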
Explicitly::\n\n grid[k,i0,i1,...,iN-1] = ik\n\n Examples\n --------\n >>> grid = np.indices((2, 3))\n >>> grid.shape\n (2, 2, 3)\n >>> grid[0] # row indices\n array([[0, 0, 0],\n [1, 1, 1]], dtype=int64)\n >>> grid[1] # column indices\n array([[0, 1, 2],\n [0, 1, 2]], dtype=int64)\n\n The indices can be used as an index into an array.\n\n >>> x = np.arange(20).reshape(5, 4)\n >>> row, col = np.indices((2, 3))\n >>> x[row, col]\n array([[0., 1., 2.],\n [4., 5., 6.]])\n\n Note that it would be more straightforward in the above example to\n extract the required elements directly with ``x[:2, :3]``.\n \"\"\"\n return _mx_nd_np.indices(dimensions=dimensions, dtype=dtype, device=device)\n# pylint: enable=redefined-outer-name\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef copysign(x1, x2, out=None, **kwargs):\n r\"\"\"\n Change the sign of x1 to that of x2, element-wise.\n\n If `x2` is a scalar, its sign will be copied to all elements of `x1`.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Values to change the sign of.\n x2 : ndarray or scalar\n The sign of `x2` is copied to `x1`.\n out : ndarray or None, optional\n A location into which the result is stored. It must be of the\n right shape and right type to hold the output. If not provided\n or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray or scalar\n The values of `x1` with the sign of `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n\n .. note::\n This function differs from the original `numpy.copysign\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.copysign.html>`_ in\n the following aspects:\n\n * ``where`` param is not supported.\n\n Examples\n --------\n >>> np.copysign(1.3, -1)\n -1.3\n >>> 1/np.copysign(0, 1)\n inf\n >>> 1/np.copysign(0, -1)\n -inf\n\n >>> a = np.array([-1, 0, 1])\n >>> np.copysign(a, -1.1)\n array([-1., -0., -1.])\n >>> np.copysign(a, np.arange(3)-1)\n array([-1., 0., 1.])\n \"\"\"\n return _mx_nd_np.copysign(x1, x2, out=out)\n\n\n@set_module('mxnet.numpy')\ndef ravel(x, order='C'):\n r\"\"\"\n ravel(x)\n\n Return a contiguous flattened array.\n A 1-D array, containing the elements of the input, is returned. A copy is\n made only if needed.\n\n Parameters\n ----------\n x : ndarray\n Input array. The elements in `x` are read in row-major, C-style order and\n packed as a 1-D array.\n order : `C`, optional\n Only support row-major, C-style order.\n\n Returns\n -------\n y : ndarray\n y is an array of the same subtype as `x`, with shape ``(x.size,)``.\n Note that matrices are special cased for backward compatibility, if `x`\n is a matrix, then y is a 1-D ndarray.\n\n .. note::\n This function differs from the original numpy.ravel in the following aspects:\n\n * Only support row-major, C-style order.\n\n Examples\n --------\n It is equivalent to ``reshape(x, -1)``.\n\n >>> x = np.array([[1, 2, 3], [4, 5, 6]])\n >>> print(np.ravel(x))\n [1. 2. 3. 4. 5. 6.]\n\n >>> print(x.reshape(-1))\n [1. 2. 3. 4. 5. 6.]\n\n >>> print(np.ravel(x.T))\n [1. 4. 2. 5. 3. 
6.]\n \"\"\"\n return _mx_nd_np.ravel(x, order)\n\n\n@set_module('mxnet.numpy')\ndef unravel_index(indices, shape, order='C'): # pylint: disable=redefined-outer-name\n \"\"\"\n Converts a flat index or array of flat indices into a tuple of coordinate arrays.\n\n Parameters\n ----------\n indices : array_like\n An integer array whose elements are indices into the flattened version of an array of dimensions shape.\n Before version 1.6.0, this function accepted just one index value.\n shape : tuple of ints\n The shape of the array to use for unraveling indices.\n order : Only row-major is supported currently.\n\n Returns\n -------\n unraveled_coords : ndarray\n Each row in the ndarray has the same shape as the indices array.\n Each column in the ndarray represents the unravelled index\n\n Examples:\n -------------\n >>> np.unravel_index([22, 41, 37], (7,6))\n [[3. 6. 6.]\n [4. 5. 1.]]\n >>> np.unravel_index(1621, (6,7,8,9))\n [3, 1, 4, 1]\n \"\"\"\n return _mx_nd_np.unravel_index(indices, shape, order=order)\n\n\n@set_module('mxnet.numpy')\ndef flatnonzero(a):\n r\"\"\"\n Return indices that are non-zero in the flattened version of a.\n\n This is equivalent to np.nonzero(np.ravel(a))[0].\n\n Parameters\n ----------\n a : array_like\n Input data.\n\n Returns\n -------\n res : ndarray\n Output array, containing the indices of the elements of `a.ravel()`\n that are non-zero.\n\n See Also\n --------\n nonzero : Return the indices of the non-zero elements of the input array.\n ravel : Return a 1-D array containing the elements of the input array.\n\n Examples\n --------\n >>> x = np.arange(-2, 3)\n >>> x\n array([-2, -1, 0, 1, 2])\n >>> np.flatnonzero(x)\n array([0, 1, 3, 4])\n\n Use the indices of the non-zero elements as an index array to extract\n these elements:\n\n >>> x.ravel()[np.flatnonzero(x)]\n array([-2, -1, 1, 2])\n \"\"\"\n return _mx_nd_np.flatnonzero(a)\n\n\n@set_module('mxnet.numpy')\ndef diag_indices_from(arr):\n \"\"\"\n This returns a tuple of indices that can be used to access the main diagonal of an array\n a with a.ndim >= 2 dimensions and shape (n, n, ..., n). For a.ndim = 2 this is\n the usual diagonal, for a.ndim > 2 this is the set of indices to access\n a[i, i, ..., i] for i = [0..n-1].\n\n Parameters\n ----------\n arr : ndarray\n Input array for acessing the main diagonal. All dimensions\n should have equal length.\n\n Return:\n -------------\n diag: tuple of ndarray\n indices of the main diagonal.\n\n Examples:\n -------------\n >>> a = np.arange(16).reshape(4, 4)\n >>> a\n array([[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11],\n [12, 13, 14, 15]])\n >>> idx = np.diag_indices_from(a)\n >>> idx\n (array([0, 1, 2, 3]), array([0, 1, 2, 3]))\n >>> a[idx] = 100\n >>> a\n array([[100, 1, 2, 3],\n [ 4, 100, 6, 7],\n [ 8, 9, 100, 11],\n [ 12, 13, 14, 100]])\n \"\"\"\n return _mx_nd_np.diag_indices_from(arr)\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.numpy')\n@wrap_ctx_to_device_func\ndef hanning(M, dtype=None, device=None):\n r\"\"\"Return the Hanning window.\n\n The Hanning window is a taper formed by using a weighted cosine.\n\n Parameters\n ----------\n M : int\n Number of points in the output window. If zero or less, an\n empty array is returned.\n device : Device, optional\n Device context on which the memory is allocated. 
Default is\n `mxnet.device.current_device()`.\n\n Returns\n -------\n out : ndarray, shape(M,)\n The window, with the maximum value normalized to one (the value\n one appears only if `M` is odd).\n When npx.is_np_default_dtype() returns False, default dtype is float32;\n When npx.is_np_default_dtype() returns True, default dtype is float64.\n Note that you need select numpy.float32 or float64 in this operator.\n\n See Also\n --------\n blackman, hamming\n\n Notes\n -----\n The Hanning window is defined as\n\n .. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)\n \\qquad 0 \\leq n \\leq M-1\n\n The Hanning was named for Julius von Hann, an Austrian meteorologist.\n It is also known as the Cosine Bell. Some authors prefer that it be\n called a Hann window, to help avoid confusion with the very similar\n Hamming window.\n\n Most references to the Hanning window come from the signal processing\n literature, where it is used as one of many windowing functions for\n smoothing values. It is also known as an apodization (which means\n \"removing the foot\", i.e. smoothing discontinuities at the beginning\n and end of the sampled signal) or tapering function.\n\n References\n ----------\n .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power\n spectra, Dover Publications, New York.\n .. [2] E.R. Kanasewich, \"Time Sequence Analysis in Geophysics\",\n The University of Alberta Press, 1975, pp. 106-108.\n .. [3] Wikipedia, \"Window function\",\n http://en.wikipedia.org/wiki/Window_function\n .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,\n \"Numerical Recipes\", Cambridge University Press, 1986, page 425.\n\n Examples\n --------\n >>> np.hanning(12)\n array([0. , 0.07937324, 0.29229254, 0.5711574 , 0.8274304 ,\n 0.9797465 , 0.97974646, 0.82743025, 0.5711573 , 0.29229245,\n 0.07937312, 0. ])\n\n Plot the window and its frequency response:\n\n >>> import matplotlib.pyplot as plt\n >>> window = np.hanning(51)\n >>> plt.plot(window.asnumpy())\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.title(\"Hann window\")\n Text(0.5, 1.0, 'Hann window')\n >>> plt.ylabel(\"Amplitude\")\n Text(0, 0.5, 'Amplitude')\n >>> plt.xlabel(\"Sample\")\n Text(0.5, 0, 'Sample')\n >>> plt.show()\n \"\"\"\n return _mx_nd_np.hanning(M, dtype=dtype, device=device)\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.numpy')\n@wrap_ctx_to_device_func\ndef hamming(M, dtype=None, device=None):\n r\"\"\"Return the hamming window.\n\n The hamming window is a taper formed by using a weighted cosine.\n\n Parameters\n ----------\n M : int\n Number of points in the output window. If zero or less, an\n empty array is returned.\n device : Device, optional\n Device context on which the memory is allocated. Default is\n `mxnet.device.current_device()`.\n\n Returns\n -------\n out : ndarray, shape(M,)\n The window, with the maximum value normalized to one (the value\n one appears only if `M` is odd).\n When npx.is_np_default_dtype() returns False, default dtype is float32;\n When npx.is_np_default_dtype() returns True, default dtype is float64.\n Note that you need select numpy.float32 or float64 in this operator.\n\n See Also\n --------\n blackman, hanning\n\n Notes\n -----\n The Hamming window is defined as\n\n .. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)\n \\qquad 0 \\leq n \\leq M-1\n\n The Hamming was named for R. W. Hamming, an associate of J. W. Tukey\n and is described in Blackman and Tukey. 
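# A minimal sketch of the typical smoothing use mentioned in the hanning Notes
# above: multiply a signal by the window so its ends taper toward zero.
# Assumes ``from mxnet import np``; `_sketch_hann_taper` is a hypothetical
# helper, not part of this module's API.
def _sketch_hann_taper():
    from mxnet import np
    signal = np.ones(12)
    window = np.hanning(12)
    return signal * window  # end samples pulled toward zero, centre stays near one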
It was recommended for\n smoothing the truncated autocovariance function in the time domain.\n Most references to the Hamming window come from the signal processing\n literature, where it is used as one of many windowing functions for\n smoothing values. It is also known as an apodization (which means\n \"removing the foot\", i.e. smoothing discontinuities at the beginning\n and end of the sampled signal) or tapering function.\n\n References\n ----------\n .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power\n spectra, Dover Publications, New York.\n .. [2] E.R. Kanasewich, \"Time Sequence Analysis in Geophysics\", The\n University of Alberta Press, 1975, pp. 109-110.\n .. [3] Wikipedia, \"Window function\",\n https://en.wikipedia.org/wiki/Window_function\n .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,\n \"Numerical Recipes\", Cambridge University Press, 1986, page 425.\n\n Examples\n --------\n >>> np.hamming(12)\n array([0.08000001, 0.15302339, 0.34890914, 0.6054648 , 0.841236 ,\n 0.9813669 , 0.9813668 , 0.8412359 , 0.6054647 , 0.34890908,\n 0.15302327, 0.08000001])\n\n Plot the window and its frequency response:\n\n >>> import matplotlib.pyplot as plt\n >>> window = np.hamming(51)\n >>> plt.plot(window.asnumpy())\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.title(\"hamming window\")\n Text(0.5, 1.0, 'hamming window')\n >>> plt.ylabel(\"Amplitude\")\n Text(0, 0.5, 'Amplitude')\n >>> plt.xlabel(\"Sample\")\n Text(0.5, 0, 'Sample')\n >>> plt.show()\n \"\"\"\n return _mx_nd_np.hamming(M, dtype=dtype, device=device)\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.numpy')\n@wrap_ctx_to_device_func\ndef blackman(M, dtype=None, device=None):\n r\"\"\"Return the Blackman window.\n\n The Blackman window is a taper formed by using the first three\n terms of a summation of cosines. It was designed to have close to the\n minimal leakage possible. It is close to optimal, only slightly worse\n than a Kaiser window.\n\n Parameters\n ----------\n M : int\n Number of points in the output window. If zero or less, an\n empty array is returned.\n device : Device, optional\n Device context on which the memory is allocated. Default is\n `mxnet.device.current_device()`.\n\n Returns\n -------\n out : ndarray\n The window, with the maximum value normalized to one (the value one\n appears only if the number of samples is odd).\n When npx.is_np_default_dtype() returns False, default dtype is float32;\n When npx.is_np_default_dtype() returns True, default dtype is float64.\n Note that you need select numpy.float32 or float64 in this operator.\n\n See Also\n --------\n hamming, hanning\n\n Notes\n -----\n The Blackman window is defined as\n\n .. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/{M-1}) + 0.08 \\cos(4\\pi n/{M-1})\n\n Most references to the Blackman window come from the signal processing\n literature, where it is used as one of many windowing functions for\n smoothing values. It is also known as an apodization (which means\n \"removing the foot\", i.e. smoothing discontinuities at the beginning\n and end of the sampled signal) or tapering function. It is known as a\n \"near optimal\" tapering function, almost as good (by some measures)\n as the kaiser window.\n\n References\n ----------\n Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,\n Dover Publications, New York.\n\n Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.\n Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 
468-471.\n\n Examples\n --------\n >>> np.blackman(12)\n array([-1.4901161e-08, 3.2606423e-02, 1.5990365e-01, 4.1439798e-01,\n 7.3604530e-01, 9.6704686e-01, 9.6704674e-01, 7.3604506e-01,\n 4.1439781e-01, 1.5990359e-01, 3.2606363e-02, -1.4901161e-08])\n\n Plot the window and its frequency response:\n\n >>> import matplotlib.pyplot as plt\n >>> window = np.blackman(51)\n >>> plt.plot(window.asnumpy())\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.title(\"blackman window\")\n Text(0.5, 1.0, 'blackman window')\n >>> plt.ylabel(\"Amplitude\")\n Text(0, 0.5, 'Amplitude')\n >>> plt.xlabel(\"Sample\")\n Text(0.5, 0, 'Sample')\n >>> plt.show()\n \"\"\"\n return _mx_nd_np.blackman(M, dtype=dtype, device=device)\n\n\n@set_module('mxnet.numpy')\ndef flip(m, axis=None, out=None):\n r\"\"\"\n flip(m, axis=None, out=None)\n\n Reverse the order of elements in an array along the given axis.\n\n The shape of the array is preserved, but the elements are reordered.\n\n Parameters\n ----------\n m : ndarray or scalar\n Input array.\n axis : None or int or tuple of ints, optional\n Axis or axes along which to flip over. The default,\n axis=None, will flip over all of the axes of the input array.\n If axis is negative it counts from the last to the first axis.\n\n If axis is a tuple of ints, flipping is performed on all of the axes\n specified in the tuple.\n out : ndarray or scalar, optional\n Alternative output array in which to place the result. It must have\n the same shape and type as the expected output.\n\n Returns\n -------\n out : ndarray or scalar\n A view of `m` with the entries of axis reversed. Since a view is\n returned, this operation is done in constant time.\n\n Examples\n --------\n >>> A = np.arange(8).reshape((2,2,2))\n >>> A\n array([[[0, 1],\n [2, 3]],\n [[4, 5],\n [6, 7]]])\n >>> np.flip(A, 0)\n array([[[4, 5],\n [6, 7]],\n [[0, 1],\n [2, 3]]])\n >>> np.flip(A, 1)\n array([[[2, 3],\n [0, 1]],\n [[6, 7],\n [4, 5]]])\n >>> np.flip(A)\n array([[[7, 6],\n [5, 4]],\n [[3, 2],\n [1, 0]]])\n >>> np.flip(A, (0, 2))\n array([[[5, 4],\n [7, 6]],\n [[1, 0],\n [3, 2]]])\n \"\"\"\n return _mx_nd_np.flip(m, axis, out=out)\n\n\n@set_module('mxnet.numpy')\ndef flipud(m):\n r\"\"\"\n flipud(*args, **kwargs)\n\n Flip array in the up/down direction.\n\n Flip the entries in each column in the up/down direction.\n Rows are preserved, but appear in a different order than before.\n\n Parameters\n ----------\n m : array_like\n Input array.\n\n Returns\n -------\n out : array_like\n A view of `m` with the rows reversed. 
Since a view is\n returned, this operation is :math:`\\mathcal O(1)`.\n\n See Also\n --------\n fliplr : Flip array in the left/right direction.\n rot90 : Rotate array counterclockwise.\n\n Notes\n -----\n Equivalent to ``m[::-1,...]``.\n Does not require the array to be two-dimensional.\n\n Examples\n --------\n >>> A = np.diag(np.array([1.0, 2, 3]))\n >>> A\n array([[1., 0., 0.],\n [0., 2., 0.],\n [0., 0., 3.]])\n >>> np.flipud(A)\n array([[0., 0., 3.],\n [0., 2., 0.],\n [1., 0., 0.]])\n\n >>> A = np.random.randn(2,3,5)\n >>> np.all(np.flipud(A) == A[::-1,...])\n array(True)\n\n >>> np.flipud(np.array([1,2]))\n array([2., 1.])\n \"\"\"\n return flip(m, 0)\n\n\n@set_module('mxnet.numpy')\ndef fliplr(m):\n r\"\"\"\n fliplr(*args, **kwargs)\n\n Flip array in the left/right direction.\n\n Flip the entries in each row in the left/right direction.\n Columns are preserved, but appear in a different order than before.\n\n Parameters\n ----------\n m : array_like\n Input array, must be at least 2-D.\n\n Returns\n -------\n f : ndarray\n A view of `m` with the columns reversed. Since a view\n is returned, this operation is :math:`\\mathcal O(1)`.\n\n See Also\n --------\n flipud : Flip array in the up/down direction.\n rot90 : Rotate array counterclockwise.\n\n Notes\n -----\n Equivalent to m[:,::-1]. Requires the array to be at least 2-D.\n\n Examples\n --------\n >>> A = np.diag([1.,2.,3.])\n >>> A\n array([[1., 0., 0.],\n [0., 2., 0.],\n [0., 0., 3.]])\n >>> np.fliplr(A)\n array([[0., 0., 1.],\n [0., 2., 0.],\n [3., 0., 0.]])\n\n >>> A = np.random.randn(2,3,5)\n >>> np.all(np.fliplr(A) == A[:,::-1,...])\n array(True)\n \"\"\"\n return flip(m, 1)\n\n\n@set_module('mxnet.numpy')\ndef around(x, decimals=0, out=None, **kwargs):\n r\"\"\"\n around(x, decimals=0, out=None)\n\n Evenly round to the given number of decimals.\n\n Parameters\n ----------\n x : ndarray or scalar\n Input data.\n decimals : int, optional\n Number of decimal places to round to (default: 0). If\n decimals is negative, it specifies the number of positions to\n the left of the decimal point.\n out : ndarray, optional\n Alternative output array in which to place the result. It must have\n the same shape and type as the expected output.\n\n Returns\n -------\n rounded_array : ndarray or scalar\n An array of the same type as `x`, containing the rounded values.\n A reference to the result is returned.\n\n .. note::\n For values exactly halfway between rounded decimal values, NumPy\n rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,\n -0.5 and 0.5 round to 0.0, etc.\n\n This function differs from the original numpy.prod in the following aspects:\n\n * Cannot cast type automatically. 
Dtype of `out` must be same as the expected one.\n * Cannot support complex-valued number.\n\n Examples\n --------\n >>> np.around([0.37, 1.64])\n array([ 0., 2.])\n >>> np.around([0.37, 1.64], decimals=1)\n array([ 0.4, 1.6])\n >>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value\n array([ 0., 2., 2., 4., 4.])\n >>> np.around([1, 2, 3, 11], decimals=1) # ndarray of ints is returned\n array([ 1, 2, 3, 11])\n >>> np.around([1, 2, 3, 11], decimals=-1)\n array([ 0, 0, 0, 10])\n \"\"\"\n return _mx_nd_np.around(x, decimals, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\ndef round(x, decimals=0, out=None, **kwargs):\n r\"\"\"\n round(a, decimals=0, out=None)\n Round an array to the given number of decimals.\n\n See Also\n --------\n around : equivalent function; see for details.\n \"\"\"\n return _mx_nd_np.round(x, decimals, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\ndef round_(x, decimals=0, out=None, **kwargs):\n r\"\"\"\n round_(a, decimals=0, out=None)\n Round an array to the given number of decimals.\n\n See Also\n --------\n around : equivalent function; see for details.\n \"\"\"\n return _mx_nd_np.round_(x, decimals, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef arctan2(x1, x2, out=None, **kwargs):\n r\"\"\"\n Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.\n\n The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is\n the signed angle in radians between the ray ending at the origin and\n passing through the point (1,0), and the ray ending at the origin and\n passing through the point (`x2`, `x1`). (Note the role reversal: the\n \"`y`-coordinate\" is the first function parameter, the \"`x`-coordinate\"\n is the second.) By IEEE convention, this function is defined for\n `x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see\n Notes for specific values).\n\n This function is not defined for complex-valued arguments; for the\n so-called argument of complex values, use `angle`.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n `y`-coordinates.\n x2 : ndarray or scalar\n `x`-coordinates. `x2` must be broadcastable to match the shape of\n `x1` or vice versa.\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray or scalar\n Array of angles in radians, in the range ``[-pi, pi]``. This is a scalar if\n `x1` and `x2` are scalars.\n\n .. notes::\n *arctan2* is identical to the ``atan2`` function of the underlying\n C library. The following special values are defined in the C\n standard: [1]_\n\n +========+========+==================+\n | `x1` | `x2` | `arctan2(x1,x2)` |\n +========+========+==================+\n | +/- 0 | +0 | +/- 0 |\n +========+========+==================+\n | +/- 0 | -0 | +/- pi |\n +========+========+==================+\n | > 0 | +/-inf | +0 / +pi |\n +========+========+==================+\n | < 0 | +/-inf | -0 / -pi |\n +========+========+==================+\n | +/-inf | +inf | +/- (pi/4) |\n +========+========+==================+\n | +/-inf | -inf | +/- (3*pi/4) |\n +========+========+==================+\n\n Note that +0 and -0 are distinct floating point numbers, as are +inf\n and -inf.\n\n This function differs from the original numpy.arange in the following aspects:\n\n * Only support float16, float32 and float64.\n\n References\n ----------\n .. 
[1] ISO/IEC standard 9899:1999, \"Programming language C.\"\n\n Examples\n --------\n Consider four points in different quadrants:\n\n >>> x = np.array([-1, +1, +1, -1])\n >>> y = np.array([-1, -1, +1, +1])\n >>> np.arctan2(y, x) * 180 / np.pi\n array([-135., -45., 45., 135.])\n\n Note the order of the parameters. `arctan2` is defined also when `x2` = 0\n and at several other special points, obtaining values in\n the range ``[-pi, pi]``:\n\n >>> x = np.array([1, -1])\n >>> y = np.array([0, 0])\n >>> np.arctan2(x, y)\n array([ 1.5707964, -1.5707964])\n \"\"\"\n return _mx_nd_np.arctan2(x1, x2, out=out)\n\natan2 = arctan2\natan2.__doc__ = \"\"\"\n Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.\n\n The quadrant (i.e., branch) is chosen so that ``atan2(x1, x2)`` is\n the signed angle in radians between the ray ending at the origin and\n passing through the point (1,0), and the ray ending at the origin and\n passing through the point (`x2`, `x1`). (Note the role reversal: the\n \"`y`-coordinate\" is the first function parameter, the \"`x`-coordinate\"\n is the second.) By IEEE convention, this function is defined for\n `x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see\n Notes for specific values).\n\n This function is not defined for complex-valued arguments; for the\n so-called argument of complex values, use `angle`.\n \n >>>np.atan2 is np.arctan2\n True\n\n Parameters\n ----------\n x1 : ndarray or scalar\n `y`-coordinates.\n x2 : ndarray or scalar\n `x`-coordinates. `x2` must be broadcastable to match the shape of\n `x1` or vice versa.\n out : ndarray or None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray or scalar\n Array of angles in radians, in the range ``[-pi, pi]``. This is a scalar if\n `x1` and `x2` are scalars.\n\n .. notes::\n `atan2` is a alias for `arctan2`. It is a standard API in\n https://data-apis.org/array-api/latest/API_specification/elementwise_functions.html#atan2-x1-x2\n instead of an official NumPy operator.\n \n *atan2* is identical to the ``atan2`` function of the underlying\n C library. The following special values are defined in the C\n standard: [1]_\n\n +========+========+==================+\n | `x1` | `x2` | `atan2(x1,x2)` |\n +========+========+==================+\n | +/- 0 | +0 | +/- 0 |\n +========+========+==================+\n | +/- 0 | -0 | +/- pi |\n +========+========+==================+\n | > 0 | +/-inf | +0 / +pi |\n +========+========+==================+\n | < 0 | +/-inf | -0 / -pi |\n +========+========+==================+\n | +/-inf | +inf | +/- (pi/4) |\n +========+========+==================+\n | +/-inf | -inf | +/- (3*pi/4) |\n +========+========+==================+\n\n Note that +0 and -0 are distinct floating point numbers, as are +inf\n and -inf.\n\n This function differs from the original numpy.arange in the following aspects:\n\n * Only support float16, float32 and float64.\n\n References\n ----------\n .. [1] ISO/IEC standard 9899:1999, \"Programming language C.\"\n\n Examples\n --------\n Consider four points in different quadrants:\n\n >>> x = np.array([-1, +1, +1, -1])\n >>> y = np.array([-1, -1, +1, +1])\n >>> np.atan2(y, x) * 180 / np.pi\n array([-135., -45., 45., 135.])\n\n Note the order of the parameters. 
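# A minimal sketch contrasting arctan2 with plain arctan to show the quadrant
# handling described above: arctan2 keeps the full [-pi, pi] range, while
# arctan(y/x) cannot tell opposite quadrants apart. Assumes ``from mxnet import np``;
# `_sketch_arctan2_quadrants` is a hypothetical helper, not part of this module's API.
def _sketch_arctan2_quadrants():
    from mxnet import np
    y = np.array([1., 1., -1., -1.])
    x = np.array([1., -1., 1., -1.])
    full = np.arctan2(y, x) * 180 / np.pi    # [ 45., 135., -45., -135.]
    naive = np.arctan(y / x) * 180 / np.pi   # [ 45., -45., -45., 45.]: quadrant lost
    return full, naive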
`atan2` is defined also when `x2` = 0\n and at several other special points, obtaining values in\n the range ``[-pi, pi]``:\n\n >>> x = np.array([1, -1])\n >>> y = np.array([0, 0])\n >>> np.atan2(x, y)\n array([ 1.5707964, -1.5707964])\n \"\"\"\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef hypot(x1, x2, out=None, **kwargs):\n r\"\"\"\n Given the \"legs\" of a right triangle, return its hypotenuse.\n\n Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or\n `x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),\n it is broadcast for use with each element of the other argument.\n\n Parameters\n ----------\n x1, x2 : array_like\n Leg of the triangle(s).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned. A tuple (possible only as a\n keyword argument) must have length equal to the number of outputs.\n\n Returns\n -------\n z : ndarray\n The hypotenuse of the triangle(s).\n This is a scalar if both `x1` and `x2` are scalars.\n\n .. note::\n This function differs from the original numpy.hypot in the following aspects:\n\n * Only support float16, float32 and float64.\n\n Examples\n --------\n >>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))\n array([[ 5., 5., 5.],\n [ 5., 5., 5.],\n [ 5., 5., 5.]])\n\n Example showing broadcast of scalar_like argument:\n\n >>> np.hypot(3*np.ones((3, 3)), [4])\n array([[ 5., 5., 5.],\n [ 5., 5., 5.],\n [ 5., 5., 5.]])\n \"\"\"\n return _mx_nd_np.hypot(x1, x2, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef bitwise_and(x1, x2, out=None, **kwargs):\n r\"\"\"\n Compute the bit-wise AND of two arrays element-wise.\n\n Parameters\n ----------\n x1, x2 : ndarray or scalar\n Only integer and boolean types are handled. If x1.shape != x2.shape,\n they must be broadcastable to a common shape (which becomes the shape of the output).\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape that the\n inputs broadcast to. If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Result.\n\n Examples\n --------\n >>> np.bitwise_and(13, 17)\n 1\n\n >>> np.bitwise_and(14, 13)\n 12\n >>> np.bitwise_and(np.array([14,3], dtype='int32'), 13)\n array([12, 1], dtype=int32)\n\n >>> np.bitwise_and(np.array([11,7], dtype='int32'), np.array([4,25], dtype='int32'))\n array([0, 1], dtype=int32)\n >>> np.bitwise_and(np.array([2,5,255], dtype='int32'), np.array([3,14,16], dtype='int32'))\n array([ 2, 4, 16], dtype=int32)\n >>> np.bitwise_and(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))\n array([False, True])\n \"\"\"\n return _mx_nd_np.bitwise_and(x1, x2, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef bitwise_xor(x1, x2, out=None, **kwargs):\n r\"\"\"\n Compute the bit-wise XOR of two arrays element-wise.\n\n Parameters\n ----------\n x1, x2 : ndarray or scalar\n Only integer and boolean types are handled. If x1.shape != x2.shape,\n they must be broadcastable to a common shape (which becomes the shape of the output).\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape that the\n inputs broadcast to. 
If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Result.\n\n Examples\n --------\n >>> np.bitwise_xor(13, 17)\n 28\n\n >>> np.bitwise_xor(31, 5)\n 26\n >>> np.bitwise_xor(np.array([31,3], dtype=np.int32), 5)\n array([26, 6], dtype=int32)\n\n >>> np.bitwise_xor(np.array([31,3], dtype='int32'), np.array([5,6], dtype='int32'))\n array([26, 5], dtype=int32)\n >>> np.bitwise_xor(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))\n array([ True, False])\n \"\"\"\n return _mx_nd_np.bitwise_xor(x1, x2, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef bitwise_or(x1, x2, out=None, **kwargs):\n r\"\"\"\n Compute the bit-wise OR of two arrays element-wise.\n\n Parameters\n ----------\n x1, x2 : ndarray or scalar\n Only integer and boolean types are handled. If x1.shape != x2.shape,\n they must be broadcastable to a common shape (which becomes the shape of the output).\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape that the\n inputs broadcast to. If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Result.\n\n Examples\n --------\n >>> np.bitwise_or(13, 17)\n 29\n\n >>> np.bitwise_or(31, 5)\n 31\n >>> np.bitwise_or(np.array([31,3], dtype=np.int32), 5)\n array([31, 7])\n\n >>> np.bitwise_or(np.array([31,3], dtype='int32'), np.array([5,6], dtype='int32'))\n array([31, 7])\n >>> np.bitwise_or(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))\n array([ True, True])\n \"\"\"\n return _mx_nd_np.bitwise_or(x1, x2, out=out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef ldexp(x1, x2, out=None, **kwargs):\n \"\"\"\n Returns x1 * 2**x2, element-wise.\n The mantissas `x1` and twos exponents `x2` are used to construct\n floating point numbers ``x1 * 2**x2``.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Array of multipliers.\n x2 : ndarray or scalar, int\n Array of twos exponents.\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or scalar\n The result of ``x1 * 2**x2``.\n This is a scalar if both `x1` and `x2` are scalars.\n\n Notes\n -----\n Complex dtypes are not supported, they will raise a TypeError.\n Different from numpy, we allow x2 to be float besides int.\n `ldexp` is useful as the inverse of `frexp`, if used by itself it is\n more clear to simply use the expression ``x1 * 2**x2``.\n\n Examples\n --------\n >>> np.ldexp(5, np.arange(4))\n array([ 5., 10., 20., 40.])\n \"\"\"\n return _mx_nd_np.ldexp(x1, x2, out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef logaddexp(x1, x2, out=None, **kwargs):\n \"\"\"\n Logarithm of the sum of exponentiations of the inputs.\n\n Calculates log(exp(x1) + exp(x2)). This function is useful in statistics where\n the calculated probabilities of events may be so small as to exceed the range of\n normal floating point numbers. In such cases the logarithm of the calculate\n probability is stored. This function allows adding probabilities stored\n in such a fashion.\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Array of multipliers.\n x2 : ndarray or scalar, int\n Array of twos exponents.\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. 
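# A minimal sketch of the identity stated in the ldexp Notes above:
# ldexp(x1, x2) is just x1 * 2**x2 evaluated element-wise. Assumes
# ``from mxnet import np``; `_sketch_ldexp_identity` is a hypothetical helper,
# not part of this module's API.
def _sketch_ldexp_identity():
    from mxnet import np
    x1 = np.array([5., 5., 5., 5.])
    exponents = np.arange(4)
    built = np.ldexp(x1, exponents)          # [ 5., 10., 20., 40.]
    direct = x1 * np.power(2.0, exponents)   # the same values, written out explicitly
    return built, direct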
If not, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or scalar\n Logarithm of exp(x1) + exp(x2). This is a scalar if both x1 and x2 are scalars.\n\n Examples\n --------\n >>> prob1 = np.log(1e-50)\n >>> prob2 = np.log(2.5e-50)\n >>> prob12 = np.logaddexp(prob1, prob2)\n >>> prob12\n -113.87649168120691\n >>> np.exp(prob12)\n 3.5000000000000057e-50\n \"\"\"\n return _mx_nd_np.logaddexp(x1, x2, out)\n\n\n@set_module('mxnet.numpy')\ndef vdot(a, b):\n r\"\"\"\n Return the dot product of two vectors.\n Note that `vdot` handles multidimensional arrays differently than `dot`:\n it does *not* perform a matrix product, but flattens input arguments\n to 1-D vectors first. Consequently, it should only be used for vectors.\n\n Parameters\n ----------\n a : ndarray\n First argument to the dot product.\n b : ndarray\n Second argument to the dot product.\n\n Returns\n -------\n output : ndarray\n Dot product of `a` and `b`.\n\n See Also\n --------\n dot : Return the dot product without using the complex conjugate of the\n first argument.\n\n Examples\n --------\n Note that higher-dimensional arrays are flattened!\n\n >>> a = np.array([[1, 4], [5, 6]])\n >>> b = np.array([[4, 1], [2, 2]])\n >>> np.vdot(a, b)\n array(30.)\n >>> np.vdot(b, a)\n array(30.)\n >>> 1*4 + 4*1 + 5*2 + 6*2\n 30\n \"\"\"\n return tensordot(a.flatten(), b.flatten(), 1)\n\n\n@set_module('mxnet.numpy')\ndef inner(a, b):\n r\"\"\"Inner product of two arrays.\n Ordinary inner product of vectors for 1-D arrays (without complex\n conjugation), in higher dimensions a sum product over the last axes.\n\n Parameters\n ----------\n a, b : ndarray\n If `a` and `b` are nonscalar, their last dimensions must match.\n\n Returns\n -------\n out : ndarray\n `out.shape = a.shape[:-1] + b.shape[:-1]`\n\n Raises\n ------\n ValueError\n If the last dimension of `a` and `b` has different size.\n\n See Also\n --------\n tensordot : Sum products over arbitrary axes.\n dot : Generalised matrix product, using second last dimension of `b`.\n einsum : Einstein summation convention.\n\n .. note::\n\n For vectors (1-D arrays) it computes the ordinary inner-product::\n\n np.inner(a, b) = sum(a[:]*b[:])\n\n More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::\n\n np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))\n\n or explicitly::\n\n np.inner(a, b)[i0,...,ir-1,j0,...,js-1]\n = sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])\n\n In addition `a` or `b` may be scalars, in which case::\n\n np.inner(a,b) = a*b\n\n Examples\n --------\n Ordinary inner product for vectors:\n\n >>> a = np.array([1,2,3])\n >>> b = np.array([0,1,0])\n >>> np.inner(a, b)\n array(2.)\n\n A multidimensional example:\n\n >>> a = np.arange(24).reshape((2,3,4))\n >>> b = np.arange(4)\n >>> np.inner(a, b)\n array([[ 14., 38., 62.],\n [ 86., 110., 134.]])\n \"\"\"\n return tensordot(a, b, [-1, -1])\n\n\n@set_module('mxnet.numpy')\ndef outer(a, b):\n r\"\"\"Compute the outer product of two vectors.\n Given two vectors, ``a = [a0, a1, ..., aM]`` and\n ``b = [b0, b1, ..., bN]``,\n the outer product [1]_ is::\n [[a0*b0 a0*b1 ... a0*bN ]\n [a1*b0 .\n [ ... .\n [aM*b0 aM*bN ]]\n\n Parameters\n ----------\n a : (M,) ndarray\n First input vector. Input is flattened if\n not already 1-dimensional.\n b : (N,) ndarray\n Second input vector. 
Input is flattened if\n not already 1-dimensional.\n\n Returns\n -------\n out : (M, N) ndarray\n ``out[i, j] = a[i] * b[j]``\n\n See also\n --------\n inner\n einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent.\n ufunc.outer : A generalization to N dimensions and other operations.\n ``np.multiply.outer(a.ravel(), b.ravel())`` is the equivalent.\n\n References\n ----------\n .. [1] : G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd\n ed., Baltimore, MD, Johns Hopkins University Press, 1996,\n pg. 8.\n\n Examples\n --------\n Make a (*very* coarse) grid for computing a Mandelbrot set:\n\n >>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5))\n >>> rl\n array([[-2., -1., 0., 1., 2.],\n [-2., -1., 0., 1., 2.],\n [-2., -1., 0., 1., 2.],\n [-2., -1., 0., 1., 2.],\n [-2., -1., 0., 1., 2.]])\n \"\"\"\n return tensordot(a.flatten(), b.flatten(), 0)\n\n\n@set_module('mxnet.numpy')\ndef cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): # pylint: disable=too-many-arguments\n \"\"\"\n Return the cross product of two (arrays of) vectors.\n\n The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular\n to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors\n are defined by the last axis of `a` and `b` by default, and these axes\n can have dimensions 2 or 3. Where the dimension of either `a` or `b` is\n 2, the third component of the input vector is assumed to be zero and the\n cross product calculated accordingly. In cases where both input vectors\n have dimension 2, the z-component of the cross product is returned.\n\n Parameters\n ----------\n a : ndarray\n Components of the first vector(s).\n b : ndarray\n Components of the second vector(s).\n axisa : int, optional\n Axis of `a` that defines the vector(s). By default, the last axis.\n axisb : int, optional\n Axis of `b` that defines the vector(s). By default, the last axis.\n axisc : int, optional\n Axis of `c` containing the cross product vector(s). Ignored if\n both input vectors have dimension 2, as the return is scalar.\n By default, the last axis.\n axis : int, optional\n If defined, the axis of `a`, `b` and `c` that defines the vector(s)\n and cross product(s). Overrides `axisa`, `axisb` and `axisc`.\n\n Returns\n -------\n c : ndarray\n Vector cross product(s).\n\n Raises\n ------\n ValueError\n When the dimension of the vector(s) in `a` and/or `b` does not\n equal 2 or 3.\n\n Notes\n -----\n Supports full broadcasting of the inputs.\n\n Examples\n --------\n Vector cross-product.\n\n >>> x = np.array([1., 2., 3.])\n >>> y = np.array([4., 5., 6.])\n >>> np.cross(x, y)\n array([-3., 6., -3.])\n\n One vector with dimension 2.\n\n >>> x = np.array([1., 2.])\n >>> y = np.array([4., 5., 6.])\n >>> np.cross(x, y)\n array([12., -6., -3.])\n\n Equivalently:\n\n >>> x = np.array([1., 2., 0.])\n >>> y = np.array([4., 5., 6.])\n >>> np.cross(x, y)\n array([12., -6., -3.])\n\n Both vectors with dimension 2.\n\n >>> x = np.array([1., 2.])\n >>> y = np.array([4., 5.])\n >>> np.cross(x, y)\n array(-3.)\n\n Multiple vector cross-products. 
Note that the direction of the cross\n product vector is defined by the `right-hand rule`.\n\n >>> x = np.array([[1., 2., 3.], [4., 5., 6.]])\n >>> y = np.array([[4., 5., 6.], [1., 2., 3.]])\n >>> np.cross(x, y)\n array([[-3., 6., -3.],\n [ 3., -6., 3.]])\n\n The orientation of `c` can be changed using the `axisc` keyword.\n\n >>> np.cross(x, y, axisc=0)\n array([[-3., 3.],\n [ 6., -6.],\n [-3., 3.]])\n\n Change the vector definition of `x` and `y` using `axisa` and `axisb`.\n\n >>> x = np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])\n >>> y = np.array([[7., 8., 9.], [4., 5., 6.], [1., 2., 3.]])\n >>> np.cross(x, y)\n array([[ -6., 12., -6.],\n [ 0., 0., 0.],\n [ 6., -12., 6.]])\n >>> np.cross(x, y, axisa=0, axisb=0)\n array([[-24., 48., -24.],\n [-30., 60., -30.],\n [-36., 72., -36.]])\n \"\"\"\n return _mx_nd_np.cross(a, b, axisa=axisa, axisb=axisb, axisc=axisc, axis=axis)\n\n\n@set_module('mxnet.numpy')\ndef kron(a, b):\n r\"\"\"Kronecker product of two arrays.\n\n Computes the Kronecker product, a composite array made of blocks of the\n second array scaled by the first.\n\n Parameters\n ----------\n a, b : ndarray\n\n Returns\n -------\n out : ndarray\n\n See Also\n --------\n outer : The outer product\n\n .. note::\n The function assumes that the number of dimensions of `a` and `b`\n are the same, if necessary prepending the smallest with ones.\n If `a.shape = (r0,r1,..,rN)` and `b.shape = (s0,s1,...,sN)`,\n the Kronecker product has shape `(r0*s0, r1*s1, ..., rN*SN)`.\n The elements are products of elements from `a` and `b`, organized\n explicitly by::\n\n kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]\n\n where::\n\n kt = it * st + jt, t = 0,...,N\n\n In the common 2-D case (N=1), the block structure can be visualized::\n\n [[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ],\n [ ... ... ],\n [ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]]\n\n\n Examples\n --------\n >>> np.kron([1,10,100], [5,6,7])\n array([ 5, 6, 7, 50, 60, 70, 500, 600, 700])\n >>> np.kron([5,6,7], [1,10,100])\n array([ 5, 50, 500, 6, 60, 600, 7, 70, 700])\n \"\"\"\n return _mx_nd_np.kron(a, b)\n\n\n@set_module('mxnet.numpy')\ndef equal(x1, x2, out=None):\n \"\"\"\n Return (x1 == x2) element-wise.\n Parameters\n ----------\n x1, x2 : ndarrays or scalars\n Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to\n a common shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n Returns\n -------\n out : ndarray or scalar\n Output array of type bool, element-wise comparison of `x1` and `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n not_equal, greater_equal, less_equal, greater, less\n Examples\n --------\n >>> np.equal(np.ones((2, 1)), np.zeros((1, 3)))\n array([[False, False, False],\n [False, False, False]])\n >>> np.equal(1, np.ones(1))\n array([ True])\n \"\"\"\n return _mx_nd_np.equal(x1, x2, out)\n\n\n@set_module('mxnet.numpy')\ndef not_equal(x1, x2, out=None):\n \"\"\"\n Return (x1 != x2) element-wise.\n Parameters\n ----------\n x1, x2 : ndarrays or scalars\n Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to\n a common shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. 
If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n Returns\n -------\n out : ndarray or scalar\n Output array of type bool, element-wise comparison of `x1` and `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n equal, greater, greater_equal, less, less_equal\n Examples\n --------\n >>> np.not_equal(np.ones((2, 1)), np.zeros((1, 3)))\n array([[ True, True, True],\n [ True, True, True]])\n >>> np.not_equal(1, np.ones(1))\n array([False])\n \"\"\"\n return _mx_nd_np.not_equal(x1, x2, out)\n\n\n@set_module('mxnet.numpy')\ndef greater(x1, x2, out=None):\n \"\"\"\n Return the truth value of (x1 > x2) element-wise.\n Parameters\n ----------\n x1, x2 : ndarrays or scalars\n Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to\n a common shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n Returns\n -------\n out : ndarray or scalar\n Output array of type bool, element-wise comparison of `x1` and `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n equal, greater, greater_equal, less, less_equal\n Examples\n --------\n >>> np.greater(np.ones((2, 1)), np.zeros((1, 3)))\n array([[ True, True, True],\n [ True, True, True]])\n >>> np.greater(1, np.ones(1))\n array([False])\n \"\"\"\n return _mx_nd_np.greater(x1, x2, out)\n\n\n@set_module('mxnet.numpy')\ndef less(x1, x2, out=None):\n \"\"\"\n Return the truth value of (x1 < x2) element-wise.\n Parameters\n ----------\n x1, x2 : ndarrays or scalars\n Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to\n a common shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n Returns\n -------\n out : ndarray or scalar\n Output array of type bool, element-wise comparison of `x1` and `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n equal, greater, greater_equal, less, less_equal\n Examples\n --------\n >>> np.less(np.ones((2, 1)), np.zeros((1, 3)))\n array([[False, False, False],\n [False, False, False]])\n >>> np.less(1, np.ones(1))\n array([False])\n \"\"\"\n return _mx_nd_np.less(x1, x2, out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef logical_and(x1, x2, out=None):\n r\"\"\"\n Compute the truth value of x1 AND x2 element-wise.\n Parameters\n ----------\n x1, x2 : array_like\n Logical AND is applied to the elements of `x1` and `x2`.\n If ``x1.shape != x2.shape``, they must be broadcastable to a common\n shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned. 
A tuple (possible only as a\n keyword argument) must have length equal to the number of outputs.\n Returns\n -------\n y : ndarray or bool\n Boolean result of the logical AND operation applied to the elements\n of `x1` and `x2`; the shape is determined by broadcasting.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n logical_or, logical_not, logical_xor, bitwise_or\n Examples\n --------\n >>> np.logical_and(True, False)\n False\n >>> np.logical_and(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))\n array([False, True])\n \"\"\"\n return _mx_nd_np.logical_and(x1, x2, out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef logical_or(x1, x2, out=None):\n r\"\"\"\n Compute the truth value of x1 OR x2 element-wise.\n Parameters\n ----------\n x1, x2 : array_like\n Logical OR is applied to the elements of `x1` and `x2`.\n If ``x1.shape != x2.shape``, they must be broadcastable to a common\n shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned. A tuple (possible only as a\n keyword argument) must have length equal to the number of outputs.\n Returns\n -------\n y : ndarray or bool\n Boolean result of the logical OR operation applied to the elements\n of `x1` and `x2`; the shape is determined by broadcasting.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n logical_and, logical_not, logical_xor, bitwise_or\n Examples\n --------\n >>> np.logical_or(True, False)\n True\n >>> np.logical_or(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))\n array([True, True])\n \"\"\"\n return _mx_nd_np.logical_or(x1, x2, out)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_binary_func\ndef logical_xor(x1, x2, out=None):\n r\"\"\"\n Compute the truth value of x1 XOR x2 element-wise.\n Parameters\n ----------\n x1, x2 : array_like\n Logical XOR is applied to the elements of `x1` and `x2`.\n If ``x1.shape != x2.shape``, they must be broadcastable to a common\n shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned. A tuple (possible only as a\n keyword argument) must have length equal to the number of outputs.\n Returns\n -------\n y : ndarray or bool\n Boolean result of the logical XOR operation applied to the elements\n of `x1` and `x2`; the shape is determined by broadcasting.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n logical_and, logical_not, logical_or, bitwise_or\n Examples\n --------\n >>> np.logical_xor(True, False)\n True\n >>> np.logical_xor(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))\n array([ True, False])\n \"\"\"\n return _mx_nd_np.logical_xor(x1, x2, out)\n\n\n@set_module('mxnet.numpy')\ndef greater_equal(x1, x2, out=None):\n \"\"\"\n Return the truth value of (x1 >= x2) element-wise.\n Parameters\n ----------\n x1, x2 : ndarrays or scalars\n Input arrays. 
If ``x1.shape != x2.shape``, they must be broadcastable to\n a common shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n Returns\n -------\n out : ndarray or scalar\n Output array of type bool, element-wise comparison of `x1` and `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n equal, greater, greater_equal, less, less_equal\n Examples\n --------\n >>> np.greater_equal(np.ones((2, 1)), np.zeros((1, 3)))\n array([[ True, True, True],\n [ True, True, True]])\n >>> np.greater_equal(1, np.ones(1))\n array([True])\n \"\"\"\n return _mx_nd_np.greater_equal(x1, x2, out)\n\n\n@set_module('mxnet.numpy')\ndef less_equal(x1, x2, out=None):\n \"\"\"\n Return the truth value of (x1 <= x2) element-wise.\n Parameters\n ----------\n x1, x2 : ndarrays or scalars\n Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to\n a common shape (which becomes the shape of the output).\n out : ndarray, None, or tuple of ndarray and None, optional\n A location into which the result is stored. If provided, it must have\n a shape that the inputs broadcast to. If not provided or `None`,\n a freshly-allocated array is returned.\n Returns\n -------\n out : ndarray or scalar\n Output array of type bool, element-wise comparison of `x1` and `x2`.\n This is a scalar if both `x1` and `x2` are scalars.\n See Also\n --------\n equal, greater, greater_equal, less, less_equal\n Examples\n --------\n >>> np.less_equal(np.ones((2, 1)), np.zeros((1, 3)))\n array([[False, False, False],\n [False, False, False]])\n >>> np.less_equal(1, np.ones(1))\n array([True])\n \"\"\"\n return _mx_nd_np.less_equal(x1, x2, out)\n\n\n@set_module('mxnet.numpy')\ndef roll(a, shift, axis=None):\n \"\"\"\n Roll array elements along a given axis.\n\n Elements that roll beyond the last position are re-introduced at\n the first.\n\n Parameters\n ----------\n a : ndarray\n Input array.\n shift : int or tuple of ints\n The number of places by which elements are shifted. If a tuple,\n then `axis` must be a tuple of the same size, and each of the\n given axes is shifted by the corresponding number. If an int\n while `axis` is a tuple of ints, then the same value is used for\n all given axes.\n axis : int or tuple of ints, optional\n Axis or axes along which elements are shifted. 
By default, the\n array is flattened before shifting, after which the original\n shape is restored.\n\n Returns\n -------\n res : ndarray\n Output array, with the same shape as `a`.\n\n Notes\n -----\n Supports rolling over multiple dimensions simultaneously.\n\n Examples\n --------\n >>> x = np.arange(10)\n >>> np.roll(x, 2)\n array([8., 9., 0., 1., 2., 3., 4., 5., 6., 7.])\n >>> np.roll(x, -2)\n array([2., 3., 4., 5., 6., 7., 8., 9., 0., 1.])\n\n >>> x2 = np.reshape(x, (2,5))\n >>> x2\n array([[0., 1., 2., 3., 4.],\n [5., 6., 7., 8., 9.]])\n >>> np.roll(x2, 1)\n array([[9., 0., 1., 2., 3.],\n [4., 5., 6., 7., 8.]])\n >>> np.roll(x2, -1)\n array([[1., 2., 3., 4., 5.],\n [6., 7., 8., 9., 0.]])\n >>> np.roll(x2, 1, axis=0)\n array([[5., 6., 7., 8., 9.],\n [0., 1., 2., 3., 4.]])\n >>> np.roll(x2, -1, axis=0)\n array([[5., 6., 7., 8., 9.],\n [0., 1., 2., 3., 4.]])\n >>> np.roll(x2, 1, axis=1)\n array([[4., 0., 1., 2., 3.],\n [9., 5., 6., 7., 8.]])\n >>> np.roll(x2, -1, axis=1)\n array([[1., 2., 3., 4., 0.],\n [6., 7., 8., 9., 5.]])\n \"\"\"\n return _mx_nd_np.roll(a, shift, axis=axis)\n\n\n@set_module('mxnet.numpy')\ndef rot90(m, k=1, axes=(0, 1)):\n \"\"\"\n Rotate an array by 90 degrees in the plane specified by axes.\n Rotation direction is from the first towards the second axis.\n\n Parameters\n ----------\n m : ndarray\n Array of two or more dimensions.\n k : integer\n Number of times the array is rotated by 90 degrees.\n axes: (2,) array_like\n The array is rotated in the plane defined by the axes.\n Axes must be different.\n\n Returns\n -------\n y : ndarray\n A rotated view of `m`.\n\n Notes\n -----\n rot90(m, k=1, axes=(1,0)) is the reverse of rot90(m, k=1, axes=(0,1))\n rot90(m, k=1, axes=(1,0)) is equivalent to rot90(m, k=-1, axes=(0,1))\n\n Examples\n --------\n >>> m = np.array([[1,2],[3,4]], 'int')\n >>> m\n array([[1, 2],\n [3, 4]], dtype=int64)\n >>> np.rot90(m)\n array([[2, 4],\n [1, 3]], dtype=int64)\n >>> np.rot90(m, 2)\n array([[4, 3],\n [2, 1]], dtype=int64)\n >>> m = np.arange(8).reshape((2,2,2))\n >>> np.rot90(m, 1, (1,2))\n array([[[1., 3.],\n [0., 2.]],\n\n [[5., 7.],\n [4., 6.]]])\n \"\"\"\n return _mx_nd_np.rot90(m, k=k, axes=axes)\n\n\n@set_module('mxnet.numpy')\ndef hsplit(ary, indices_or_sections):\n \"\"\"Split an array into multiple sub-arrays horizontally (column-wise).\n This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one\n dimension, and with ``axis=1`` otherwise.\n\n Parameters\n ----------\n ary : ndarray\n Array to be divided into sub-arrays.\n indices_or_sections : int, list of ints or tuple of ints.\n If `indices_or_sections` is an integer, N, the array will be divided\n into N equal arrays along `axis`. If such a split is not possible,\n an error is raised.\n If `indices_or_sections` is a list of sorted integers, the entries\n indicate where along `axis` the array is split.\n If an index exceeds the dimension of the array along `axis`,\n an error is raised, so each index must be less than or equal to\n the dimension of the array along `axis`.\n\n Returns\n -------\n sub-arrays : list of ndarrays\n A list of sub-arrays.\n\n .. note::\n * If `indices_or_sections` is given as an integer, but a split\n does not result in equal division, a ValueError is raised.\n * If `indices_or_sections` is an integer and the number is 1, it will\n raise an error. 
This is because a single output from ``split`` is not supported yet.\n\n See Also\n --------\n split : Split an array into multiple sub-arrays of equal size.\n\n Examples\n --------\n >>> x = np.arange(16.0).reshape(4, 4)\n >>> x\n array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.],\n [12., 13., 14., 15.]])\n >>> np.hsplit(x, 2)\n [array([[ 0., 1.],\n [ 4., 5.],\n [ 8., 9.],\n [12., 13.]]),\n array([[ 2., 3.],\n [ 6., 7.],\n [10., 11.],\n [14., 15.]])]\n >>> np.hsplit(x, [3, 6])\n [array([[ 0., 1., 2.],\n [ 4., 5., 6.],\n [ 8., 9., 10.],\n [12., 13., 14.]]),\n array([[ 3.],\n [ 7.],\n [11.],\n [15.]]),\n array([], shape=(4, 0), dtype=float32)]\n With a higher dimensional array the split is still along the second axis.\n >>> x = np.arange(8.0).reshape(2, 2, 2)\n >>> x\n array([[[ 0., 1.],\n [ 2., 3.]],\n [[ 4., 5.],\n [ 6., 7.]]])\n >>> np.hsplit(x, 2)\n [array([[[ 0., 1.]],\n [[ 4., 5.]]]),\n array([[[ 2., 3.]],\n [[ 6., 7.]]])]\n If ``ary`` has one dimension, the split is along ``axis=0``.\n >>> x = np.arange(4)\n >>> x\n array([0., 1., 2., 3.])\n >>> np.hsplit(x, 2)\n [array([0., 1.]), array([2., 3.])]\n To produce an empty sub-array, pass the same index twice, as in the following example.\n >>> np.hsplit(x, [2, 2])\n [array([0., 1.]), array([], dtype=float32), array([2., 3.])]\n \"\"\"\n return _mx_nd_np.hsplit(ary, indices_or_sections)\n\n\n@set_module('mxnet.numpy')\ndef einsum(*operands, **kwargs):\n r\"\"\"\n einsum(subscripts, *operands, out=None, optimize=False)\n\n Evaluates the Einstein summation convention on the operands.\n\n Using the Einstein summation convention, many common multi-dimensional,\n linear algebraic array operations can be represented in a simple fashion.\n In *implicit* mode `einsum` computes these values.\n\n In *explicit* mode, `einsum` provides further flexibility to compute\n other array operations that might not be considered classical Einstein\n summation operations, by disabling, or forcing summation over specified\n subscript labels.\n\n See the notes and examples for clarification.\n\n Parameters\n ----------\n subscripts : str\n Specifies the subscripts for summation as comma separated list of\n subscript labels. An implicit (classical Einstein summation)\n calculation is performed unless the explicit indicator '->' is\n included as well as subscript labels of the precise output form.\n operands : list of ndarray\n These are the arrays for the operation.\n out : ndarray, optional\n If provided, the calculation is done into this array.\n optimize : {False, True}, optional\n Controls if intermediate optimization should occur. No optimization\n will occur if False. Defaults to False.\n\n Returns\n -------\n output : ndarray\n The calculation based on the Einstein summation convention.\n\n Notes\n -----\n The Einstein summation convention can be used to compute\n many multi-dimensional, linear algebraic array operations. 
`einsum`\n provides a succinct way of representing these.\n\n A non-exhaustive list of these operations,\n which can be computed by `einsum`, is shown below along with examples:\n\n * Trace of an array, :py:func:`np.trace`.\n * Return a diagonal, :py:func:`np.diag`.\n * Array axis summations, :py:func:`np.sum`.\n * Transpositions and permutations, :py:func:`np.transpose`.\n * Matrix multiplication and dot product, :py:func:`np.matmul` :py:func:`np.dot`.\n * Vector inner and outer products, :py:func:`np.inner` :py:func:`np.outer`.\n * Broadcasting, element-wise and scalar multiplication, :py:func:`np.multiply`.\n * Tensor contractions, :py:func:`np.tensordot`.\n\n The subscripts string is a comma-separated list of subscript labels,\n where each label refers to a dimension of the corresponding operand.\n Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``\n is equivalent to :py:func:`np.inner(a,b) <np.inner>`. If a label\n appears only once, it is not summed, so ``np.einsum('i', a)`` produces a\n view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``\n describes traditional matrix multiplication and is equivalent to\n :py:func:`np.matmul(a,b) <np.matmul>`. Repeated subscript labels in one\n operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent\n to :py:func:`np.trace(a) <np.trace>`.\n\n In *implicit mode*, the chosen subscripts are important\n since the axes of the output are reordered alphabetically. This\n means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while\n ``np.einsum('ji', a)`` takes its transpose. Additionally,\n ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,\n ``np.einsum('ij,jh', a, b)`` returns the transpose of the\n multiplication since subscript 'h' precedes subscript 'i'.\n\n In *explicit mode* the output can be directly controlled by\n specifying output subscript labels. This requires the\n identifier '->' as well as the list of output subscript labels.\n This feature increases the flexibility of the function since\n summing can be disabled or forced when required. The call\n ``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <np.sum>`,\n and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <np.diag>`.\n The difference is that `einsum` does not allow broadcasting by default.\n Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the\n order of the output subscript labels and therefore returns matrix\n multiplication, unlike the example above in implicit mode.\n\n To enable and control broadcasting, use an ellipsis. Default\n NumPy-style broadcasting is done by adding an ellipsis\n to the left of each term, like ``np.einsum('...ii->...i', a)``.\n To take the trace along the first and last axes,\n you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix\n product with the left-most indices instead of rightmost, one can do\n ``np.einsum('ij...,jk...->ik...', a, b)``.\n\n When there is only one operand, no axes are summed, and no output\n parameter is provided, a view into the operand is returned instead\n of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``\n produces a view.\n\n The ``optimize`` argument which will optimize the contraction order\n of an einsum expression. 
For a contraction with three or more operands this\n can greatly increase the computational efficiency at the cost of a larger\n memory footprint during computation.\n\n Typically a 'greedy' algorithm is applied which empirical tests have shown\n returns the optimal path in the majority of cases. 'optimal' is not supported\n for now.\n\n .. note::\n This function differs from the original `numpy.einsum\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html>`_ in\n the following way(s):\n\n * Does not support 'optimal' strategy\n * Does not support the alternative subscript like\n `einsum(op0, sublist0, op1, sublist1, ..., [sublistout])`\n * Does not produce view in any cases\n\n Examples\n --------\n >>> a = np.arange(25).reshape(5,5)\n >>> b = np.arange(5)\n >>> c = np.arange(6).reshape(2,3)\n\n Trace of a matrix:\n\n >>> np.einsum('ii', a)\n array(60.)\n\n Extract the diagonal (requires explicit form):\n\n >>> np.einsum('ii->i', a)\n array([ 0., 6., 12., 18., 24.])\n\n Sum over an axis (requires explicit form):\n\n >>> np.einsum('ij->i', a)\n array([ 10., 35., 60., 85., 110.])\n >>> np.sum(a, axis=1)\n array([ 10., 35., 60., 85., 110.])\n\n For higher dimensional arrays summing a single axis can be done with ellipsis:\n\n >>> np.einsum('...j->...', a)\n array([ 10., 35., 60., 85., 110.])\n\n Compute a matrix transpose, or reorder any number of axes:\n\n >>> np.einsum('ji', c)\n array([[0., 3.],\n [1., 4.],\n [2., 5.]])\n >>> np.einsum('ij->ji', c)\n array([[0., 3.],\n [1., 4.],\n [2., 5.]])\n >>> np.transpose(c)\n array([[0., 3.],\n [1., 4.],\n [2., 5.]])\n\n Vector inner products:\n\n >>> np.einsum('i,i', b, b)\n array(30.)\n\n Matrix vector multiplication:\n\n >>> np.einsum('ij,j', a, b)\n array([ 30., 80., 130., 180., 230.])\n >>> np.dot(a, b)\n array([ 30., 80., 130., 180., 230.])\n >>> np.einsum('...j,j', a, b)\n array([ 30., 80., 130., 180., 230.])\n\n Broadcasting and scalar multiplication:\n\n >>> np.einsum('..., ...', np.array(3), c)\n array([[ 0., 3., 6.],\n [ 9., 12., 15.]])\n >>> np.einsum(',ij', np.array(3), c)\n array([[ 0., 3., 6.],\n [ 9., 12., 15.]])\n >>> np.multiply(3, c)\n array([[ 0., 3., 6.],\n [ 9., 12., 15.]])\n\n Vector outer product:\n\n >>> np.einsum('i,j', np.arange(2)+1, b)\n array([[0., 1., 2., 3., 4.],\n [0., 2., 4., 6., 8.]])\n\n Tensor contraction:\n\n >>> a = np.arange(60.).reshape(3,4,5)\n >>> b = np.arange(24.).reshape(4,3,2)\n >>> np.einsum('ijk,jil->kl', a, b)\n array([[4400., 4730.],\n [4532., 4874.],\n [4664., 5018.],\n [4796., 5162.],\n [4928., 5306.]])\n\n Example of ellipsis use:\n\n >>> a = np.arange(6).reshape((3,2))\n >>> b = np.arange(12).reshape((4,3))\n >>> np.einsum('ki,jk->ij', a, b)\n array([[10., 28., 46., 64.],\n [13., 40., 67., 94.]])\n >>> np.einsum('ki,...k->i...', a, b)\n array([[10., 28., 46., 64.],\n [13., 40., 67., 94.]])\n >>> np.einsum('k...,jk', a, b)\n array([[10., 28., 46., 64.],\n [13., 40., 67., 94.]])\n\n Chained array operations. For more complicated contractions, speed ups\n might be achieved by repeatedly computing a 'greedy' path. Performance\n improvements can be particularly significant with larger arrays:\n\n >>> a = np.ones(64).reshape(2,4,8)\n # Basic `einsum`: ~42.22ms (benchmarked on 3.4GHz Intel Xeon.)\n >>> for iteration in range(500):\n ... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a)\n # Greedy `einsum` (faster optimal path approximation): ~0.117ms\n >>> for iteration in range(500):\n ... 
np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=True)\n \"\"\"\n return _mx_nd_np.einsum(*operands, **kwargs)\n\n\n@set_module('mxnet.numpy')\ndef insert(arr, obj, values, axis=None):\n r\"\"\"Insert values along the given axis before the given indices.\n\n Parameters\n ----------\n arr : ndarray\n Input array.\n obj : int, slice or ndarray of int64\n Object that defines the index or indices before which `values` is\n inserted.\n Support for multiple insertions when `obj` is a single scalar or a\n sequence with one element (only support int32 and int64 element).\n values : ndarray\n Values to insert into `arr`.\n If the type of values is different from that of arr, values is converted\n to the type of arr.\n axis : int, optional\n Axis along which to insert `values`. If `axis` is None then `arr`\n is flattened first.\n\n Returns\n -------\n out : ndarray\n A copy of `arr` with `values` inserted. Note that `insert`\n does not occur in-place: a new array is returned. If\n `axis` is None, `out` is a flattened array.\n\n .. note::\n * Note that for higher dimensional inserts `obj=0` behaves very different\n from `obj=[0]` just like `arr[:,0,:] = values` is different from\n `arr[:,[0],:] = values`.\n * If obj is a ndarray, it's dtype only supports int64\n\n Examples\n --------\n >>> a = np.array([[1, 1], [2, 2], [3, 3]])\n >>> a\n array([[1., 1.],\n [2., 2.],\n [3., 3.]])\n >>> np.insert(a, 1, np.array(5))\n array([1., 5., 1., 2., 2., 3., 3.])\n >>> np.insert(a, 1, np.array(5), axis=1)\n array([[1., 5., 1.],\n [2., 5., 2.],\n [3., 5., 3.]])\n\n Difference between sequence and scalars:\n\n >>> np.insert(a, np.array([1], dtype=np.int64), np.array([[1],[2],[3]]), axis=1)\n array([[1., 1., 1.],\n [2., 2., 2.],\n [3., 3., 3.]])\n >>> np.insert(a, 1, np.array([1, 2, 3]), axis=1)\n array([[1., 1., 1.],\n [2., 2., 2.],\n [3., 3., 3.]])\n\n >>> b = a.flatten()\n >>> b\n array([1., 1., 2., 2., 3., 3.])\n >>> np.insert(b, np.array([2, 2], dtype=np.int64), np.array([5, 6]))\n array([1., 1., 5., 6., 2., 2., 3., 3.])\n\n >>> np.insert(b, slice(2, 4), np.array([5, 6]))\n array([1., 1., 5., 2., 6., 2., 3., 3.])\n\n # type casting\n >>> np.insert(b.astype(np.int32), np.array([2, 2],dtype='int64'), np.array([7.13, False]))\n array([1, 1, 7, 0, 2, 2, 3, 3], dtype=int32)\n\n >>> x = np.arange(8).reshape(2, 4)\n >>> idx = np.array([1, 3], dtype=np.int64)\n >>> np.insert(x, idx, np.array([999]), axis=1)\n array([[ 0., 999., 1., 2., 999., 3.],\n [ 4., 999., 5., 6., 999., 7.]])\n \"\"\"\n return _mx_nd_np.insert(arr, obj, values, axis=axis)\n\n\n@set_module('mxnet.numpy')\ndef nonzero(a):\n \"\"\"\n Return the indices of the elements that are non-zero.\n\n Returns a tuple of arrays, one for each dimension of `a`,\n containing the indices of the non-zero elements in that\n dimension. 
The values in `a` are always returned in\n row-major, C-style order.\n\n To group the indices by element, rather than dimension, use `argwhere`,\n which returns a row for each non-zero element.\n\n Parameters\n ----------\n a : ndarray\n Input array.\n\n Returns\n -------\n tuple_of_arrays : tuple\n Indices of elements that are non-zero.\n\n See Also\n --------\n ndarray.nonzero :\n Equivalent ndarray method.\n\n Notes\n -----\n While the nonzero values can be obtained with ``a[nonzero(a)]``, it is\n recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead, which\n will correctly handle 0-d arrays.\n\n Examples\n --------\n >>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]])\n >>> x\n array([[3, 0, 0],\n [0, 4, 0],\n [5, 6, 0]], dtype=int32)\n >>> np.nonzero(x)\n (array([0, 1, 2, 2], dtype=int64), array([0, 1, 0, 1], dtype=int64))\n\n >>> x[np.nonzero(x)]\n array([3, 4, 5, 6])\n >>> np.transpose(np.stack(np.nonzero(x)))\n array([[0, 0],\n [1, 1],\n [2, 0],\n [2, 1]], dtype=int64)\n\n A common use for ``nonzero`` is to find the indices of an array, where\n a condition is True. Given an array `a`, the condition `a` > 3 is a\n boolean array and since False is interpreted as 0, np.nonzero(a > 3)\n yields the indices of the `a` where the condition is true.\n\n >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)\n >>> a > 3\n array([[False, False, False],\n [ True, True, True],\n [ True, True, True]])\n >>> np.nonzero(a > 3)\n (array([1, 1, 1, 2, 2, 2], dtype=int64), array([0, 1, 2, 0, 1, 2], dtype=int64))\n\n Using this result to index `a` is equivalent to using the mask directly:\n\n >>> a[np.nonzero(a > 3)]\n array([4, 5, 6, 7, 8, 9], dtype=int32)\n >>> a[a > 3]\n array([4, 5, 6, 7, 8, 9], dtype=int32)\n\n ``nonzero`` can also be called as a method of the array.\n\n >>> (a > 3).nonzero()\n (array([1, 1, 1, 2, 2, 2], dtype=int64), array([0, 1, 2, 0, 1, 2], dtype=int64))\n \"\"\"\n return _mx_nd_np.nonzero(a)\n\n\n@set_module('mxnet.numpy')\ndef percentile(a, q, axis=None, out=None, overwrite_input=None, interpolation='linear', keepdims=False): # pylint: disable=too-many-arguments\n \"\"\"\n Compute the q-th percentile of the data along the specified axis.\n Returns the q-th percentile(s) of the array elements.\n\n Parameters\n ----------\n a : array_like\n Input array\n q : array_like\n Percentile or sequence of percentiles to compute.\n axis : {int, tuple of int, None}, optional\n Axis or axes along which the percentiles are computed. The default is to\n compute the percentile(s) along a flattened version of the array.\n out : ndarray, optional\n Alternative output array in which to place the result. It must have the same\n shape and buffer length as the expected output, but the type (of the output)\n will be cast if necessary.\n overwrite_input : bool, optional (Not supported yet)\n If True, then allow the input array a to be modified by intermediate calculations,\n to save memory. 
In this case, the contents of the input a after this function\n completes is undefined.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n This optional parameter specifies the interpolation method to use when the\n desired percentile lies between two data points i < j:\n 'linear': i + (j - i) * fraction, where fraction is the fractional part of the\n index surrounded by i and j.\n 'lower': i.\n 'higher': j.\n 'nearest': i or j, whichever is nearest.\n 'midpoint': (i + j) / 2.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left in the result as\n dimensions with size one. With this option, the result will broadcast\n correctly against the original array a.\n\n Returns\n -------\n percentile : scalar or ndarray\n Output array.\n\n Examples\n --------\n >>> a = np.array([[10, 7, 4], [3, 2, 1]])\n >>> a\n array([[10, 7, 4],\n [ 3, 2, 1]])\n >>> np.percentile(a, np.array(50))\n array(3.5)\n >>> np.percentile(a, np.array(50), axis=0)\n array([6.5, 4.5, 2.5])\n >>> np.percentile(a, np.array(50), axis=1)\n array([7., 2.])\n >>> np.percentile(a, np.array(50), axis=1, keepdims=True)\n array([[7.],\n [2.]])\n\n >>> m = np.percentile(a, np.array(50), axis=0)\n >>> out = np.zeros_like(m)\n >>> np.percentile(a, np.array(50), axis=0, out=out)\n array([6.5, 4.5, 2.5])\n >>> m\n array([6.5, 4.5, 2.5])\n \"\"\"\n return _mx_nd_np.percentile(a, q, axis=axis, out=out, overwrite_input=overwrite_input,\n interpolation=interpolation, keepdims=keepdims)\n\n\n@set_module('mxnet.numpy')\ndef median(a, axis=None, out=None, overwrite_input=None, keepdims=False):\n r\"\"\"Compute the median along the specified axis.\n Returns the median of the array elements.\n\n Parameters\n ----------\n a : array_like\n Input array or object that can be converted to an array.\n axis : {int, sequence of int, None}, optional\n Axis or axes along which the medians are computed. The default\n is to compute the median along a flattened version of the array.\n A sequence of axes is supported since version 1.9.0.\n out : ndarray, optional\n Alternative output array in which to place the result. It must\n have the same shape and buffer length as the expected output,\n but the type (of the output) will be cast if necessary.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the original `arr`.\n\n Returns\n -------\n median : ndarray\n A new array holding the result. If the input contains integers\n or floats smaller than ``float32``, then the output data-type is\n ``np.float32``. Otherwise, the data-type of the output is the\n same as that of the input. 
If `out` is specified, that array is\n returned instead.\n\n See Also\n --------\n mean, percentile\n\n Examples\n --------\n >>> a = np.array([[10, 7, 4], [3, 2, 1]])\n >>> a\n array([[10, 7, 4],\n [ 3, 2, 1]])\n >>> np.median(a)\n 3.5\n >>> np.median(a, axis=0)\n array([6.5, 4.5, 2.5])\n >>> np.median(a, axis=1)\n array([7., 2.])\n \"\"\"\n return _mx_nd_np.median(a, axis=axis, overwrite_input=overwrite_input,\n keepdims=keepdims, out=out)\n\n\n@set_module('mxnet.numpy')\ndef quantile(a, q, axis=None, out=None, overwrite_input=None, interpolation='linear', keepdims=False): # pylint: disable=too-many-arguments\n \"\"\"Compute the q-th quantile of the data along the specified axis.\n New in version 1.15.0.\n\n Parameters\n ----------\n a : ndarray\n Input array or object that can be converted to an array.\n q : ndarray\n Quantile or sequence of quantiles to compute, which must be between 0 and 1 inclusive.\n axis : {int, tuple of int, None}, optional\n Axis or axes along which the quantiles are computed.\n The default is to compute the quantile(s) along a flattened version of the array.\n out : ndarray, optional\n Alternative output array in which to place the result.\n It must have the same shape and buffer length as the expected output,\n but the type (of the output) will be cast if necessary.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n This optional parameter specifies the interpolation method to use\n when the desired quantile lies between two data points i < j:\n\n * linear: i + (j - i) * fraction, where fraction is the fractional part of the index surrounded by i and j.\n * lower: i.\n * higher: j.\n * nearest: i or j, whichever is nearest.\n * midpoint: (i + j) / 2.\n\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left in the result as dimensions with size one.\n With this option, the result will broadcast correctly against the original array a.\n\n Returns\n -------\n quantile : ndarray\n If q is a single quantile and axis=None, then the result is a scalar.\n If multiple quantiles are given, first axis of the result corresponds to the quantiles.\n The other axes are the axes that remain after the reduction of a.\n If out is specified, that array is returned instead.\n\n See also\n --------\n mean\n\n .. note::\n Given a vector V of length N, the q-th quantile of V is the value q of the way from the minimum\n to the maximum in a sorted copy of V. The values and distances of the two nearest neighbors\n as well as the interpolation parameter will determine the quantile if the normalized ranking\n does not match the location of q exactly. 
This function is the same as the median if q=0.5,\n the same as the minimum if q=0.0 and the same as the maximum if q=1.0.\n This function differs from the original `numpy.quantile\n <https://numpy.org/devdocs/reference/generated/numpy.quantile.html>`_ in\n the following aspects:\n\n * q must be ndarray type even if it is a scalar\n * do not support overwrite_input\n\n Examples\n --------\n >>> a = np.array([[10, 7, 4], [3, 2, 1]])\n >>> a\n array([[10., 7., 4.],\n [3., 2., 1.]])\n >>> q = np.array(0.5)\n >>> q\n array(0.5)\n >>> np.quantile(a, q)\n array(3.5)\n >>> np.quantile(a, q, axis=0)\n array([6.5, 4.5, 2.5])\n >>> np.quantile(a, q, axis=1)\n array([7., 2.])\n >>> np.quantile(a, q, axis=1, keepdims=True)\n array([[7.],\n [2.]])\n >>> m = np.quantile(a, q, axis=0)\n >>> out = np.zeros_like(m)\n >>> np.quantile(a, q, axis=0, out=out)\n array([6.5, 4.5, 2.5])\n >>> out\n array([6.5, 4.5, 2.5])\n \"\"\"\n return _mx_nd_np.quantile(a, q, axis=axis, out=out, overwrite_input=overwrite_input,\n interpolation=interpolation, keepdims=keepdims)\n\n\n@set_module('mxnet.numpy')\ndef shares_memory(a, b, max_work=None):\n \"\"\"\n Determine if two arrays share memory\n\n Parameters\n ----------\n a, b : ndarray\n Input arrays\n\n Returns\n -------\n out : bool\n\n See Also\n --------\n may_share_memory\n\n Examples\n --------\n >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))\n False\n\n .. note::\n This function differs from the original `numpy.shares_memory\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.shares_memory.html>`_ in\n the following way(s):\n\n * Does not support `max_work`, it is a dummy argument\n * Actually it is same as `may_share_memory` in MXNet np\n \"\"\"\n return _mx_nd_np.shares_memory(a, b, max_work)\n\n\n@set_module('mxnet.numpy')\ndef may_share_memory(a, b, max_work=None):\n \"\"\"\n Determine if two arrays might share memory\n\n A return of True does not necessarily mean that the two arrays\n share any element. It just means that they *might*.\n\n Only the memory bounds of a and b are checked by default.\n\n Parameters\n ----------\n a, b : ndarray\n Input arrays\n\n Returns\n -------\n out : bool\n\n See Also\n --------\n shares_memory\n\n Examples\n --------\n >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))\n False\n >>> x = np.zeros([3, 4])\n >>> np.may_share_memory(x[:,0], x[:,1])\n True\n\n .. note::\n This function differs from the original `numpy.may_share_memory\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.may_share_memory.html>`_ in\n the following way(s):\n\n * Does not support `max_work`, it is a dummy argument\n * Actually it is same as `shares_memory` in MXNet np\n \"\"\"\n return _mx_nd_np.may_share_memory(a, b, max_work)\n\n\n@set_module('mxnet.numpy')\ndef diff(a, n=1, axis=-1, prepend=None, append=None): # pylint: disable=redefined-outer-name\n r\"\"\"\n Calculate the n-th discrete difference along the given axis.\n\n Parameters\n ----------\n a : ndarray\n Input array\n n : int, optional\n The number of times values are differenced. 
If zero, the input is returned as-is.\n axis : int, optional\n The axis along which the difference is taken, default is the last axis.\n prepend, append : ndarray, optional\n Not supported yet\n\n Returns\n -------\n diff : ndarray\n The n-th differences.\n The shape of the output is the same as a except along axis where the dimension is smaller by n.\n The type of the output is the same as the type of the difference between any two elements of a.\n This is the same as the type of a in most cases.\n\n Examples\n --------\n >>> x = np.array([1, 2, 4, 7, 0])\n >>> np.diff(x)\n array([ 1, 2, 3, -7])\n >>> np.diff(x, n=2)\n array([ 1, 1, -10])\n\n >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])\n >>> np.diff(x)\n array([[2, 3, 4],\n [5, 1, 2]])\n >>> np.diff(x, axis=0)\n array([[-1, 2, 0, -2]])\n\n Notes\n -----\n Optional inputs `prepend` and `append` are not supported yet\n \"\"\"\n if (prepend or append):\n raise NotImplementedError('prepend and append options are not supported yet')\n return _mx_nd_np.diff(a, n=n, axis=axis)\n\n\n@set_module('mxnet.numpy')\ndef ediff1d(ary, to_end=None, to_begin=None):\n \"\"\"\n The differences between consecutive elements of an array.\n\n Parameters\n ----------\n ary : ndarray\n If necessary, will be flattened before the differences are taken.\n to_end : ndarray or scalar, optional\n Number(s) to append at the end of the returned differences.\n to_begin : ndarray or scalar, optional\n Number(s) to prepend at the beginning of the returned differences.\n\n Returns\n -------\n ediff1d : ndarray\n The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``.\n\n Examples\n --------\n >>> x = np.array([1, 2, 4, 7, 0])\n >>> np.ediff1d(x)\n array([ 1., 2., 3., -7.])\n\n >>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))\n array([-99., 1., 2., 3., -7., 88., 99.])\n\n The returned array is always 1D.\n\n >>> y = np.array([[1, 2, 4], [1, 6, 24]])\n >>> np.ediff1d(y)\n array([ 1., 2., -3., 5., 18.])\n\n >>> np.ediff1d(x, to_begin=y)\n array([ 1., 2., 4., 1., 6., 24., 1., 2., 3., -7.])\n \"\"\"\n return _mx_nd_np.ediff1d(ary, to_end=to_end, to_begin=to_begin)\n\n\n@set_module('mxnet.numpy')\ndef resize(a, new_shape):\n \"\"\"\n Return a new array with the specified shape.\n If the new array is larger than the original array, then the new\n array is filled with repeated copies of `a`. Note that this behavior\n is different from a.resize(new_shape) which fills with zeros instead\n of repeated copies of `a`.\n\n Parameters\n ----------\n a : ndarray\n Array to be resized.\n new_shape : int or tuple of int\n Shape of resized array.\n\n Returns\n -------\n reshaped_array : ndarray\n The new array is formed from the data in the old array, repeated\n if necessary to fill out the required number of elements. The\n data are repeated in the order that they are stored in memory.\n\n See Also\n --------\n ndarray.resize : resize an array in-place.\n\n Notes\n -----\n Warning: This functionality does **not** consider axes separately,\n i.e. it does not apply interpolation/extrapolation.\n It fills the return array with the required number of elements, taken\n from `a` as they are laid out in memory, disregarding strides and axes.\n (This is in case the new shape is smaller. 
For larger, see above.)\n This functionality is therefore not suitable to resize images,\n or data where each axis represents a separate and distinct entity.\n\n Examples\n --------\n >>> a = np.array([[0, 1], [2, 3]])\n >>> np.resize(a, (2, 3))\n array([[0., 1., 2.],\n [3., 0., 1.]])\n >>> np.resize(a, (1, 4))\n array([[0., 1., 2., 3.]])\n >>> np.resize(a,(2, 4))\n array([[0., 1., 2., 3.],\n [0., 1., 2., 3.]])\n \"\"\"\n return _mx_nd_np.resize(a, new_shape)\n\n\n@set_module('mxnet.numpy')\ndef interp(x, xp, fp, left=None, right=None, period=None): # pylint: disable=too-many-arguments\n r\"\"\"One-dimensional linear interpolation.\n\n Returns the one-dimensional piecewise linear interpolant to a function\n with given values at discrete data-points.\n\n Parameters\n ----------\n x : ndarray\n The x-coordinates of the interpolated values.\n xp : 1-D array of floats\n The x-coordinates of the data points, must be increasing if argument\n `period` is not specified. Otherwise, `xp` is internally sorted after\n normalizing the periodic boundaries with ``xp = xp % period``.\n fp : 1-D array of floats\n The y-coordinates of the data points, same length as `xp`.\n left : optional float corresponding to fp\n Value to return for `x < xp[0]`, default is `fp[0]`.\n right : optional float corresponding to fp\n Value to return for `x > xp[-1]`, default is `fp[-1]`.\n period : None or float, optional\n A period for the x-coordinates. This parameter allows the proper\n interpolation of angular x-coordinates. Parameters `left` and `right`\n are ignored if `period` is specified.\n\n Returns\n -------\n y : float (corresponding to fp) or ndarray\n The interpolated values, same shape as `x`.\n\n Raises\n ------\n ValueError\n If `xp` and `fp` have different length\n If `xp` or `fp` are not 1-D sequences\n If `period == 0`\n\n .. note::\n Does not check that the x-coordinate sequence `xp` is increasing.\n If `xp` is not increasing, the results are nonsense.\n A simple check for increasing is::\n\n np.all(np.diff(xp) > 0)\n\n\n Examples\n --------\n >>> xp = [1, 2, 3]\n >>> fp = [3, 2, 0]\n >>> np.interp(2.5, xp, fp)\n 1.0\n >>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)\n array([ 3. , 3. , 2.5 , 0.56, 0. 
])\n >>> UNDEF = -99.0\n >>> np.interp(3.14, xp, fp, right=UNDEF)\n -99.0\n Plot an interpolant to the sine function:\n >>> x = np.linspace(0, 2*np.pi, 10)\n >>> y = np.sin(x)\n >>> xvals = np.linspace(0, 2*np.pi, 50)\n >>> yinterp = np.interp(xvals, x, y)\n >>> import matplotlib.pyplot as plt\n >>> plt.plot(x, y, 'o')\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.plot(xvals, yinterp, '-x')\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.show()\n Interpolation with periodic x-coordinates:\n >>> x = [-180, -170, -185, 185, -10, -5, 0, 365]\n >>> xp = [190, -190, 350, -350]\n >>> fp = [5, 10, 3, 4]\n >>> np.interp(x, xp, fp, period=360)\n array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75])\n \"\"\"\n return _mx_nd_np.interp(x, xp, fp, left=left, right=right, period=period)\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.numpy')\n@wrap_ctx_to_device_func\ndef full_like(a, fill_value, dtype=None, order='C', device=None, out=None): # pylint: disable=too-many-arguments\n \"\"\"\n Return a full array with the same shape and type as a given array.\n\n Parameters\n ----------\n a : ndarray\n The shape and data-type of `a` define these same attributes of\n the returned array.\n fill_value : scalar\n Fill value.\n dtype : data-type, optional\n Overrides the data type of the result.\n Temporarily do not support boolean type.\n order : {'C'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory. Currently only supports C order.\n device : Device, optional\n Device context on which the memory is allocated. Default is\n `mxnet.device.current_device()`.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Array of `fill_value` with the same shape and type as `a`.\n\n See Also\n --------\n empty_like : Return an empty array with shape and type of input.\n ones_like : Return an array of ones with shape and type of input.\n zeros_like : Return an array of zeros with shape and type of input.\n full : Return a new array of given shape filled with value.\n\n Examples\n --------\n >>> x = np.arange(6, dtype=int)\n >>> np.full_like(x, 1)\n array([1, 1, 1, 1, 1, 1], dtype=int64)\n >>> np.full_like(x, 0.1)\n array([0, 0, 0, 0, 0, 0], dtype=int64)\n >>> np.full_like(x, 0.1, dtype=np.float64)\n array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1], dtype=float64)\n >>> np.full_like(x, np.nan, dtype=np.float64)\n array([nan, nan, nan, nan, nan, nan], dtype=float64)\n >>> y = np.arange(6, dtype=np.float32)\n >>> np.full_like(y, 0.1)\n array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])\n \"\"\"\n return _mx_nd_np.full_like(a, fill_value=fill_value, dtype=dtype, order=order, device=device, out=out)\n# pylint: enable=redefined-outer-name\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.numpy')\n@wrap_ctx_to_device_func\ndef zeros_like(a, dtype=None, order='C', device=None, out=None):\n \"\"\"\n Return an array of zeros with the same shape and type as a given array.\n\n Parameters\n ----------\n a : ndarray\n The shape and data-type of `a` define these same attributes of\n the returned array.\n dtype : data-type, optional\n Overrides the data type of the result.\n Temporarily do not support boolean type.\n order : {'C'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory. 
Currently only supports C order.\n device : Device, optional\n Device context on which the memory is allocated. Default is\n `mxnet.device.current_device()`.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Array of zeros with the same shape and type as a.\n\n See Also\n --------\n empty_like : Return an empty array with shape and type of input.\n ones_like : Return an array of ones with shape and type of input.\n zeros_like : Return an array of zeros with shape and type of input.\n full : Return a new array of given shape filled with value.\n\n Examples\n --------\n >>> x = np.arange(6)\n >>> x = x.reshape((2, 3))\n >>> x\n array([[0., 1., 2.],\n [3., 4., 5.]])\n >>> np.zeros_like(x)\n array([[0., 0., 0.],\n [0., 0., 0.]])\n >>> np.zeros_like(x, int)\n array([[0, 0, 0],\n [0, 0, 0]], dtype=int64)\n >>> y = np.arange(3, dtype=float)\n >>> y\n array([0., 1., 2.], dtype=float64)\n >>> np.zeros_like(y)\n array([0., 0., 0.], dtype=float64)\n \"\"\"\n return _mx_nd_np.full_like(a, fill_value=0, dtype=dtype, order=order, device=device, out=out)\n# pylint: enable=redefined-outer-name\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.numpy')\n@wrap_ctx_to_device_func\ndef ones_like(a, dtype=None, order='C', device=None, out=None):\n \"\"\"\n Return an array of ones with the same shape and type as a given array.\n\n Parameters\n ----------\n a : ndarray\n The shape and data-type of `a` define these same attributes of\n the returned array.\n dtype : data-type, optional\n Overrides the data type of the result.\n Temporarily do not support boolean type.\n order : {'C'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory. Currently only supports C order.\n device : Device, optional\n Device context on which the memory is allocated. Default is\n `mxnet.device.current_device()`.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Array of ones with the same shape and type as a.\n\n See Also\n --------\n empty_like : Return an empty array with shape and type of input.\n zeros_like : Return an array of zeros with shape and type of input.\n full_like : Return a new array with shape of input filled with value.\n ones : Return a new array setting values to one.\n\n Examples\n --------\n >>> x = np.arange(6)\n >>> x = x.reshape((2, 3))\n >>> x\n array([[0., 1., 2.],\n [3., 4., 5.]])\n >>> np.ones_like(x)\n array([[1., 1., 1.],\n [1., 1., 1.]])\n >>> np.ones_like(x, int)\n array([[1, 1, 1],\n [1, 1, 1]], dtype=int64)\n >>> y = np.arange(3, dtype=float)\n >>> y\n array([0., 1., 2.], dtype=float64)\n >>> np.ones_like(y)\n array([1., 1., 1.], dtype=float64)\n \"\"\"\n return _mx_nd_np.full_like(a, fill_value=1, dtype=dtype, order=order, device=device, out=out)\n# pylint: enable=redefined-outer-name\n\n\n@set_module('mxnet.numpy')\ndef fill_diagonal(a, val, wrap=False):\n \"\"\"\n Fill the main diagonal of the given array of any dimensionality.\n For an array `a` with ``a.ndim >= 2``, the diagonal is the list of\n locations with indices ``a[i, ..., i]`` all identical. 
This function\n modifies the input array in-place, it does not return a value.\n Parameters\n ----------\n a : array, at least 2-D.\n Array whose diagonal is to be filled, it gets modified in-place.\n val : scalar\n Value to be written on the diagonal, its type must be compatible with\n that of the array a.\n wrap : bool\n For tall matrices in NumPy version up to 1.6.2, the\n diagonal \"wrapped\" after N columns. You can have this behavior\n with this option. This affects only tall matrices.\n\n Examples\n --------\n >>> a = np.zeros((3, 3), int)\n >>> np.fill_diagonal(a, 5)\n >>> a\n array([[5, 0, 0],\n [0, 5, 0],\n [0, 0, 5]])\n The same function can operate on a 4-D array:\n >>> a = np.zeros((3, 3, 3, 3), int)\n >>> np.fill_diagonal(a, 4)\n We only show a few blocks for clarity:\n >>> a[0, 0]\n array([[4, 0, 0],\n [0, 0, 0],\n [0, 0, 0]])\n >>> a[1, 1]\n array([[0, 0, 0],\n [0, 4, 0],\n [0, 0, 0]])\n >>> a[2, 2]\n array([[0, 0, 0],\n [0, 0, 0],\n [0, 0, 4]])\n The wrap option affects only tall matrices:\n >>> # tall matrices no wrap\n >>> a = np.zeros((5, 3), int)\n >>> np.fill_diagonal(a, 4)\n >>> a\n array([[4, 0, 0],\n [0, 4, 0],\n [0, 0, 4],\n [0, 0, 0],\n [0, 0, 0]])\n >>> # tall matrices wrap\n >>> a = np.zeros((5, 3), int)\n >>> np.fill_diagonal(a, 4, wrap=True)\n >>> a\n array([[4, 0, 0],\n [0, 4, 0],\n [0, 0, 4],\n [0, 0, 0],\n [4, 0, 0]])\n >>> # wide matrices\n >>> a = np.zeros((3, 5), int)\n >>> np.fill_diagonal(a, 4, wrap=True)\n >>> a\n array([[4, 0, 0, 0, 0],\n [0, 4, 0, 0, 0],\n [0, 0, 4, 0, 0]])\n The anti-diagonal can be filled by reversing the order of elements\n using either `numpy.flipud` or `numpy.fliplr`.\n >>> a = np.zeros((3, 3), int);\n >>> np.fill_diagonal(np.fliplr(a), [1,2,3]) # Horizontal flip\n >>> a\n array([[0, 0, 1],\n [0, 2, 0],\n [3, 0, 0]])\n >>> np.fill_diagonal(np.flipud(a), [1,2,3]) # Vertical flip\n >>> a\n array([[0, 0, 3],\n [0, 2, 0],\n [1, 0, 0]])\n Note that the order in which the diagonal is filled varies depending\n on the flip function.\n \"\"\"\n _mx_nd_np.fill_diagonal(a, val=val, wrap=wrap)\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.numpy')\ndef nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None, **kwargs):\n \"\"\"\n Replace NaN with zero and infinity with large finite numbers (default\n behaviour) or with the numbers defined by the user using the `nan`,\n `posinf` and/or `neginf` keywords.\n\n If `x` is inexact, NaN is replaced by zero or by the user defined value in\n `nan` keyword, infinity is replaced by the largest finite floating point\n values representable by ``x.dtype`` or by the user defined value in\n `posinf` keyword and -infinity is replaced by the most negative finite\n floating point values representable by ``x.dtype`` or by the user defined\n value in `neginf` keyword.\n\n For complex dtypes, the above is applied to each of the real and\n imaginary components of `x` separately.\n\n If `x` is not inexact, then no replacements are made.\n\n Parameters\n ----------\n x : scalar\n ndarray\n Input data.\n copy : bool, optional\n Whether to create a copy of `x` (True) or to replace values\n in-place (False). The in-place operation only occurs if\n casting to an array does not require a copy.\n Default is True.\n Gluon does not support copy = False.\n nan : int, float, optional\n Value to be used to fill NaN values. If no value is passed\n then NaN values will be replaced with 0.0.\n posinf : int, float, optional\n Value to be used to fill positive infinity values. 
If no value is\n passed then positive infinity values will be replaced with a very\n large number.\n neginf : int, float, optional\n Value to be used to fill negative infinity values. If no value is\n passed then negative infinity values will be replaced with a very\n small (or negative) number.\n\n .. versionadded:: 1.13\n\n Returns\n -------\n out : ndarray\n `x`, with the non-finite values replaced. If `copy` is False, this may\n be `x` itself.\n\n Notes\n -----\n NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic\n (IEEE 754). This means that Not a Number is not equivalent to infinity.\n\n Examples\n --------\n >>> np.nan_to_num(np.inf)\n 1.7976931348623157e+308\n >>> np.nan_to_num(-np.inf)\n -1.7976931348623157e+308\n >>> np.nan_to_num(np.nan)\n 0.0\n >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])\n >>> np.nan_to_num(x)\n array([ 3.4028235e+38, -3.4028235e+38, 0.0000000e+00, -1.2800000e+02,\n 1.2800000e+02])\n >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333)\n array([ 3.3333332e+07, 3.3333332e+07, -9.9990000e+03, -1.2800000e+02,\n 1.2800000e+02])\n >>> y = np.array([[-1, 0, 1],[9999,234,-14222]],dtype=\"float64\")/0\n array([[-inf, nan, inf],\n [ inf, inf, -inf]], dtype=float64)\n >>> np.nan_to_num(y)\n array([[-1.79769313e+308, 0.00000000e+000, 1.79769313e+308],\n [ 1.79769313e+308, 1.79769313e+308, -1.79769313e+308]], dtype=float64)\n >>> np.nan_to_num(y, nan=111111, posinf=222222)\n array([[-1.79769313e+308, 1.11111000e+005, 2.22222000e+005],\n [ 2.22222000e+005, 2.22222000e+005, -1.79769313e+308]], dtype=float64)\n >>> y\n array([[-inf, nan, inf],\n [ inf, inf, -inf]], dtype=float64)\n >>> np.nan_to_num(y, copy=False, nan=111111, posinf=222222)\n array([[-1.79769313e+308, 1.11111000e+005, 2.22222000e+005],\n [ 2.22222000e+005, 2.22222000e+005, -1.79769313e+308]], dtype=float64)\n >>> y\n array([[-1.79769313e+308, 1.11111000e+005, 2.22222000e+005],\n [ 2.22222000e+005, 2.22222000e+005, -1.79769313e+308]], dtype=float64)\n \"\"\"\n return _mx_nd_np.nan_to_num(x, copy=copy, nan=nan, posinf=posinf, neginf=neginf)\n\n\n@set_module('mxnet.numpy')\ndef squeeze(x, axis=None):\n r\"\"\"Remove single-dimensional entries from the shape of an array.\n\n Parameters\n ----------\n a : array_like\n Input data.\n axis : None or int or tuple of ints, optional\n Selects a subset of the single-dimensional entries in the\n shape. If an axis is selected with shape entry greater than\n one, an error is raised.\n\n Returns\n -------\n squeezed : ndarray\n The input array, but with all or a subset of the\n dimensions of length 1 removed. 
This is always `a` itself\n or a view into `a`.\n\n Raises\n ------\n ValueError\n If `axis` is not `None`, and an axis being squeezed is not of length 1\n\n See Also\n --------\n expand_dims : The inverse operation, adding singleton dimensions\n reshape : Insert, remove, and combine dimensions, and resize existing ones\n\n Examples\n --------\n >>> x = np.array([[[0], [1], [2]]])\n >>> x.shape\n (1, 3, 1)\n >>> np.squeeze(x).shape\n (3,)\n >>> np.squeeze(x, axis=0).shape\n (3, 1)\n >>> np.squeeze(x, axis=1).shape\n Traceback (most recent call last):\n ...\n ValueError: cannot select an axis to squeeze out which has size not equal to one\n >>> np.squeeze(x, axis=2).shape\n (1, 3)\n \"\"\"\n return _mx_nd_np.squeeze(x, axis=axis)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef isnan(x, out=None, **kwargs):\n \"\"\"\n Test element-wise for NaN and return result as a boolean array.\n\n Parameters\n ----------\n x : ndarray\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or bool\n True where x is NaN, false otherwise.\n This is a scalar if x is a scalar.\n\n Notes\n -----\n NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).\n\n .. note::\n\n This function differs from the original `numpy.isinf\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.isnan.html>`_ in\n the following aspects:\n\n * Does not support complex number for now\n * Input type does not support Python native iterables(list, tuple, ...).\n * ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be\n the same as the expected output.\n * ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the\n same as the expected output.\n * ``out`` param does not support scalar input case.\n\n Examples\n --------\n >>> np.isnan(np.nan)\n True\n >>> np.isnan(np.inf)\n False\n >>> np.isnan(np.array([np.log(-1.),1.,np.log(0)]))\n array([ True, False, False])\n \"\"\"\n return _mx_nd_np.isnan(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef isinf(x, out=None, **kwargs):\n \"\"\"\n Test element-wise for positive or negative infinity.\n\n Parameters\n ----------\n x : ndarray\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or bool\n True where x is positive or negative infinity, false otherwise.\n This is a scalar if x is a scalar.\n\n Notes\n -----\n NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).\n This means that Not a Number is not equivalent to infinity.\n\n .. note::\n\n This function differs from the original `numpy.isnan\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.isnan.html>`_ in\n the following aspects:\n\n * Does not support complex number for now\n * Input type does not support Python native iterables(list, tuple, ...).\n * ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be\n the same as the expected output.\n * ``out`` param: cannot perform auto type cast. 
``out`` ndarray's dtype must be the\n same as the expected output.\n * ``out`` param does not support scalar input case.\n\n Examples\n --------\n >>> np.isinf(np.inf)\n True\n >>> np.isinf(np.nan)\n False\n >>> np.isinf(np.array([np.inf, -np.inf, 1.0, np.nan]))\n array([ True, True, False, False])\n >>> x = np.array([-np.inf, 0., np.inf])\n >>> y = np.array([True, True, True], dtype=np.bool_)\n >>> np.isinf(x, y)\n array([ True, False, True])\n >>> y\n array([ True, False, True])\n \"\"\"\n return _mx_nd_np.isinf(x, out=out, **kwargs)\n\n\n@set_module('mxnet.ndarray.numpy')\n@wrap_np_unary_func\ndef isposinf(x, out=None, **kwargs):\n \"\"\"\n Test element-wise for positive infinity, return result as bool array.\n\n Parameters\n ----------\n x : ndarray\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or bool\n True where x is positive infinity, false otherwise.\n This is a scalar if x is a scalar.\n\n Notes\n -----\n NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).\n This means that Not a Number is not equivalent to infinity.\n\n Examples\n --------\n >>> np.isposinf(np.inf)\n True\n >>> np.isposinf(-np.inf)\n False\n >>> np.isposinf(np.nan)\n False\n >>> np.isposinf(np.array([-np.inf, 0., np.inf]))\n array([False, False, True])\n >>> x = np.array([-np.inf, 0., np.inf])\n >>> y = np.array([True, True, True], dtype=np.bool)\n >>> np.isposinf(x, y)\n array([False, False, True])\n >>> y\n array([False, False, True])\n \"\"\"\n return _mx_nd_np.isposinf(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef isneginf(x, out=None, **kwargs):\n \"\"\"\n Test element-wise for negative infinity, return result as bool array.\n\n Parameters\n ----------\n x : ndarray\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or bool\n True where x is negative infinity, false otherwise.\n This is a scalar if x is a scalar.\n\n Notes\n -----\n NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).\n This means that Not a Number is not equivalent to infinity.\n\n Examples\n --------\n >>> np.isneginf(-np.inf)\n True\n >>> np.isneginf(np.inf)\n False\n >>> np.isneginf(float('-inf'))\n True\n >>> np.isneginf(np.array([-np.inf, 0., np.inf]))\n array([ True, False, False])\n >>> x = np.array([-np.inf, 0., np.inf])\n >>> y = np.array([True, True, True], dtype=np.bool)\n >>> np.isneginf(x, y)\n array([ True, False, False])\n >>> y\n array([ True, False, False])\n \"\"\"\n return _mx_nd_np.isneginf(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\n@wrap_np_unary_func\ndef isfinite(x, out=None, **kwargs):\n \"\"\"\n Test element-wise for finiteness (not infinity or not Not a Number).\n\n Parameters\n ----------\n x : ndarray\n Input array.\n out : ndarray or None, optional\n A location into which the result is stored.\n If provided, it must have the same shape and dtype as input ndarray.\n If not provided or `None`, a freshly-allocated array is returned.\n\n Returns\n -------\n y : ndarray or bool\n True where x is negative infinity, false otherwise.\n This is a scalar if x is a scalar.\n\n Notes\n 
-----\n Not a Number, positive infinity and negative infinity are considered to be non-finite.\n\n NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).\n This means that Not a Number is not equivalent to infinity.\n Also that positive infinity is not equivalent to negative infinity.\n But infinity is equivalent to positive infinity. Errors result if the second argument\n is also supplied when x is a scalar input, or if first and second arguments have different shapes.\n\n Examples\n --------\n >>> np.isfinite(1)\n True\n >>> np.isfinite(0)\n True\n >>> np.isfinite(np.nan)\n False\n >>> np.isfinite(np.inf)\n False\n >>> np.isfinite(-np.inf)\n False\n >>> np.isfinite(np.array([np.log(-1.),1.,np.log(0)]))\n array([False, True, False])\n >>> x = np.array([-np.inf, 0., np.inf])\n >>> y = np.array([True, True, True], dtype=np.bool)\n >>> np.isfinite(x, y)\n array([False, True, False])\n >>> y\n array([False, True, False])\n \"\"\"\n return _mx_nd_np.isfinite(x, out=out, **kwargs)\n\n\n@set_module('mxnet.numpy')\ndef where(condition, x=None, y=None):\n \"\"\"where(condition, [x, y])\n Return elements chosen from `x` or `y` depending on `condition`.\n\n .. note::\n When only `condition` is provided, this function is a shorthand for\n ``np.asarray(condition).nonzero()``. The rest of this documentation\n covers only the case where all three arguments are provided.\n\n Parameters\n ----------\n condition : ndarray\n Where True, yield `x`, otherwise yield `y`.\n x, y : ndarray\n Values from which to choose. `x`, `y` and `condition` need to be\n broadcastable to some shape. `x` and `y` must have the same dtype.\n\n Returns\n -------\n out : ndarray\n An array with elements from `x` where `condition` is True, and elements\n from `y` elsewhere.\n\n Notes\n -----\n If all the arrays are 1-D, `where` is equivalent to::\n\n [xv if c else yv\n for c, xv, yv in zip(condition, x, y)]\n\n Examples\n --------\n >>> a = np.arange(10)\n >>> a\n array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])\n >>> np.where(a < 5, a, 10*a)\n array([ 0., 1., 2., 3., 4., 50., 60., 70., 80., 90.])\n\n This can be used on multidimensional arrays too:\n\n >>> cond = np.array([[True, False], [True, True]])\n >>> x = np.array([[1, 2], [3, 4]])\n >>> y = np.array([[9, 8], [7, 6]])\n >>> np.where(cond, x, y)\n array([[1., 8.],\n [3., 4.]])\n\n The shapes of x, y, and the condition are broadcast together:\n\n >>> x, y = onp.ogrid[:3, :4]\n >>> x = np.array(x)\n >>> y = np.array(y)\n >>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast\n array([[10, 0, 0, 0],\n [10, 11, 1, 1],\n [10, 11, 12, 2]], dtype=int64)\n\n >>> a = np.array([[0, 1, 2],\n ... [0, 2, 4],\n ... [0, 3, 6]])\n >>> np.where(a < 4, a, -1) # -1 is broadcast\n array([[ 0., 1., 2.],\n [ 0., 2., -1.],\n [ 0., 3., -1.]])\n \"\"\"\n return _mx_nd_np.where(condition, x, y)\n\n\n@set_module('mxnet.numpy')\ndef polyval(p, x):\n \"\"\"\n Evaluate a polynomial at specific values.\n If p is of length N, this function returns the value:\n p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]\n If x is a sequence, then p(x) is returned for each element of x.\n If x is another polynomial then the composite polynomial p(x(t)) is returned.\n\n Parameters\n ----------\n p : ndarray\n 1D array of polynomial coefficients (including coefficients equal to zero)\n from highest degree to the constant term.\n x : ndarray\n An array of numbers, at which to evaluate p.\n\n Returns\n -------\n values : ndarray\n Result array of polynomials\n\n .. 
note::\n This function differs from the original `numpy.polyval\n <https://numpy.org/devdocs/reference/generated/numpy.polyval.html>`_ in\n the following way(s):\n\n * Does not support poly1d.\n * X should be ndarray type even if it contains only one element.\n\n Examples\n --------\n >>> p = np.array([3, 0, 1])\n array([3., 0., 1.])\n >>> x = np.array([5])\n array([5.])\n >>> np.polyval(p, x) # 3 * 5**2 + 0 * 5**1 + 1\n array([76.])\n >>> x = np.array([5, 4])\n array([5., 4.])\n >>> np.polyval(p, x)\n array([76., 49.])\n \"\"\"\n return _mx_nd_np.polyval(p, x)\n\n\n@set_module('mxnet.numpy')\ndef bincount(x, weights=None, minlength=0):\n \"\"\"\n Count number of occurrences of each value in array of non-negative ints.\n\n Parameters\n ----------\n x : ndarray\n input array, 1 dimension, nonnegative ints.\n weights: ndarray\n input weigths same shape as x. (Optional)\n minlength: int\n A minimum number of bins for the output. (Optional)\n\n Returns\n --------\n out : ndarray\n the result of binning the input array. The length of out is equal to amax(x)+1.\n\n Raises\n --------\n Value Error\n If the input is not 1-dimensional, or contains elements with negative values,\n or if minlength is negative\n TypeError\n If the type of the input is float or complex.\n\n Examples\n --------\n >>> np.bincount(np.arange(5))\n array([1, 1, 1, 1, 1])\n >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))\n array([1, 3, 1, 1, 0, 0, 0, 1])\n\n >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])\n >>> np.bincount(x).size == np.amax(x)+1\n True\n\n >>> np.bincount(np.arange(5, dtype=float))\n Traceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n TypeError: array cannot be safely cast to required type\n\n >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights\n >>> x = np.array([0, 1, 1, 2, 2, 2])\n >>> np.bincount(x, weights=w)\n array([ 0.3, 0.7, 1.1])\n \"\"\"\n return _mx_nd_np.bincount(x, weights=weights, minlength=minlength)\n\n\n@set_module('mxnet.numpy')\ndef atleast_1d(*arys):\n \"\"\"\n Convert inputs to arrays with at least one dimension.\n\n Scalar inputs are converted to 1-dimensional arrays, whilst higher-dimensional inputs are preserved.\n\n Parameters\n ----------\n arys1, arys2, ... : ndarray\n One or more input arrays.\n\n Returns\n -------\n ret : ndarray\n An array, or list of arrays, each with a.ndim >= 1. Copies are made only if necessary.\n\n See also\n --------\n atleast_2d, atleast_3d\n\n Examples\n --------\n >>> np.atleast_1d(1.0)\n array([1.])\n >>> x = np.arange(9.0).reshape(3,3)\n >>> np.atleast_1d(x)\n array([[0., 1., 2.],\n [3., 4., 5.],\n [6., 7., 8.]])\n >>> np.atleast_1d(np.array(1), np.array([3, 4]))\n [array([1.]), array([3., 4.])]\n \"\"\"\n res = []\n for ary in arys:\n if not isinstance(ary, NDArray):\n ary = array(ary)\n res.append(ary)\n return _mx_nd_np.atleast_1d(*res)\n\n\n@set_module('mxnet.numpy')\ndef atleast_2d(*arys):\n \"\"\"\n Convert inputs to arrays with at least two dimensions.\n\n Parameters\n ----------\n arys1, arys2, ... : ndarray\n One or more input arrays.\n\n Returns\n -------\n ret : ndarray\n An array, or list of arrays, each with a.ndim >= 2. 
Copies are made only if necessary.\n\n See also\n --------\n atleast_1d, atleast_3d\n\n Examples\n --------\n >>> np.atleast_2d(3.0)\n array([[3.]])\n >>> x = np.arange(3.0)\n >>> np.atleast_2d(x)\n array([[0., 1., 2.]])\n >>> np.atleast_2d(np.array(1), np.array([1, 2]), np.array([[1, 2]]))\n [array([[1.]]), array([[1., 2.]]), array([[1., 2.]])]\n \"\"\"\n res = []\n for ary in arys:\n if not isinstance(ary, NDArray):\n ary = array(ary)\n res.append(ary)\n return _mx_nd_np.atleast_2d(*res)\n\n\n@set_module('mxnet.numpy')\ndef atleast_3d(*arys):\n \"\"\"\n Convert inputs to arrays with at least three dimension.\n\n Parameters\n ----------\n arys1, arys2, ... : ndarray\n One or more input arrays.\n\n Returns\n -------\n ret : ndarray\n An array, or list of arrays, each with a.ndim >= 3.\n For example, a 1-D array of shape (N,) becomes a view of shape (1, N, 1),\n and a 2-D array of shape (M, N) becomes a view of shape (M, N, 1).\n\n See also\n --------\n atleast_1d, atleast_2d\n\n Examples\n --------\n >>> np.atleast_3d(3.0)\n array([[[3.]]])\n >>> x = np.arange(3.0)\n >>> np.atleast_3d(x).shape\n (1, 3, 1)\n >>> x = np.arange(12.0).reshape(4,3)\n >>> np.atleast_3d(x).shape\n (4, 3, 1)\n >>> for arr in np.atleast_3d(np.array([1, 2]), np.array([[1, 2]]), np.array([[[1, 2]]])):\n ... print(arr, arr.shape)\n ...\n [[[1.]\n [2.]]] (1, 2, 1)\n [[[1.]\n [2.]]] (1, 2, 1)\n [[[1. 2.]]] (1, 1, 2)\n \"\"\"\n res = []\n for ary in arys:\n if not isinstance(ary, NDArray):\n ary = array(ary)\n res.append(ary)\n return _mx_nd_np.atleast_3d(*res)\n\n\n@set_module('mxnet.numpy')\ndef pad(x, pad_width=None, mode=\"constant\", **kwargs): # pylint: disable=too-many-arguments\n # pylint: disable=too-many-return-statements\n \"\"\"\n Pad an array.\n\n Parameters\n ----------\n array : array_like of rank N\n The array to pad.\n pad_width : {sequence, array_like, int}\n Number of values padded to the edges of each axis.\n ((before_1, after_1), ... (before_N, after_N)) unique pad widths\n for each axis.\n ((before, after),) yields same before and after pad for each axis.\n (pad,) or int is a shortcut for before = after = pad width for all\n axes.\n mode : str or function, optional\n One of the following string values or a user supplied function.\n 'constant' (default)\n Pads with a constant value.\n 'edge'\n Pads with the edge values of array.\n 'linear_ramp'\n not supported yet\n 'maximum'\n Pads with the maximum value of all of the\n vector along each axis.\n 'mean'\n not supported yet\n 'median'\n not supported yet\n 'minimum'\n Pads with the minimum value of all of the\n vector along each axis.\n 'reflect'\n Pads with the reflection of the vector mirrored on\n the first and last values of the vector along each\n axis.\n 'symmetric'\n Pads with the reflection of the vector mirrored\n along the edge of the array.\n 'wrap'\n not supported yet.\n 'empty'\n not supported yet.\n <function>\n not supported yet.\n stat_length : not supported yet\n constant_values : scalar, optional\n Used in 'constant'. 
The values to set the padded values for each\n axis.\n Default is 0.\n\n end_values : not supported yet\n reflect_type : {'even', 'odd'}, optional\n only support even now\n\n Returns\n -------\n pad : ndarray\n Padded array of rank equal to `array` with shape increased\n according to `pad_width`.\n\n Examples\n --------\n >>> a = [1, 2, 3, 4, 5]\n >>> np.pad(a, (2, 3), 'edge')\n array([1, 1, 1, ..., 5, 5, 5])\n >>> np.pad(a, (2, 2), 'maximum')\n array([5, 5, 1, 2, 3, 4, 5, 5, 5])\n >>> np.pad(a, (2, 2), 'mean')\n array([3, 3, 1, 2, 3, 4, 5, 3, 3])\n >>> a = [[1, 2], [3, 4]]\n >>> np.pad(a, ((3, 2), (2, 3)), 'minimum')\n array([[1, 1, 1, 2, 1, 1, 1],\n [1, 1, 1, 2, 1, 1, 1],\n [1, 1, 1, 2, 1, 1, 1],\n [1, 1, 1, 2, 1, 1, 1],\n [3, 3, 3, 4, 3, 3, 3],\n [1, 1, 1, 2, 1, 1, 1],\n [1, 1, 1, 2, 1, 1, 1]])\n >>> a = [1, 2, 3, 4, 5]\n >>> np.pad(a, (2, 3), 'reflect')\n array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2])\n >>> np.pad(a, (2, 3), 'symmetric')\n array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3])\n >>> a = np.arange(6)\n >>> a = a.reshape((2, 3))\n >>> np.pad(a, ((2, 2), (2, 2)), pad_with)\n array([[10, 10, 10, 10, 10, 10, 10],\n [10, 10, 10, 10, 10, 10, 10],\n [10, 10, 0, 1, 2, 10, 10],\n [10, 10, 3, 4, 5, 10, 10],\n [10, 10, 10, 10, 10, 10, 10],\n [10, 10, 10, 10, 10, 10, 10]])\n \"\"\"\n return _mx_nd_np.pad(x, pad_width=pad_width, mode=mode, **kwargs)\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.numpy')\ndef prod(a, axis=None, dtype=None, out=None, keepdims=False, initial=None): # pylint: disable=too-many-arguments\n \"\"\"\n Return the product of array elements over a given axis.\n\n Parameters\n ----------\n a : array_like\n Input data.\n axis : None or int or tuple of ints, optional\n Axis or axes along which a product is performed. The default,\n axis=None, will calculate the product of all the elements in the\n input array. If axis is negative it counts from the last to the\n first axis.\n .. versionadded:: 1.7.0\n If axis is a tuple of ints, a product is performed on all of the\n axes specified in the tuple instead of a single axis or all the\n axes as before.\n dtype : dtype, optional\n The type of the returned array, as well as of the accumulator in\n which the elements are multiplied. The dtype of `a` is used by\n default unless `a` has an integer dtype of less precision than the\n default platform integer. In that case, if `a` is signed then the\n platform integer is used while if `a` is unsigned then an unsigned\n integer of the same precision as the platform integer is used.\n out : ndarray, optional\n Alternative output array in which to place the result. It must have\n the same shape as the expected output, but the type of the output\n values will be cast if necessary.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left in the\n result as dimensions with size one. With this option, the result\n will broadcast correctly against the input array.\n If the default value is passed, then `keepdims` will not be\n passed through to the `prod` method of sub-classes of\n `ndarray`, however any non-default value will be. If the\n sub-class' method does not implement `keepdims` any\n exceptions will be raised.\n initial : scalar, optional\n The starting value for this product. 
See `~numpy.ufunc.reduce` for details.\n where : not supported\n\n Returns\n -------\n product_along_axis : ndarray, see `dtype` parameter above.\n An array shaped as `a` but with the specified axis removed.\n Returns a reference to `out` if specified.\n\n Examples\n --------\n By default, calculate the product of all elements:\n >>> np.prod([1.,2.])\n 2.0\n Even when the input array is two-dimensional:\n >>> np.prod([[1.,2.],[3.,4.]])\n 24.0\n But we can also specify the axis over which to multiply:\n >>> np.prod([[1.,2.],[3.,4.]], axis=1)\n array([ 2., 12.])\n Or select specific elements to include:\n >>> np.prod([1., np.nan, 3.], where=[True, False, True])\n 3.0\n If the type of `x` is unsigned, then the output type is\n the unsigned platform integer:\n >>> x = np.array([1, 2, 3], dtype=np.uint8)\n >>> np.prod(x).dtype == np.uint\n True\n If `x` is of a signed integer type, then the output type\n is the default platform integer:\n >>> x = np.array([1, 2, 3], dtype=np.int8)\n >>> np.prod(x).dtype == int\n True\n You can also start the product with a value other than one:\n >>> np.prod([1, 2], initial=5)\n 10\n \"\"\"\n return _mx_nd_np.prod(a, axis=axis, dtype=dtype, keepdims=keepdims, initial=initial, out=out)\n\n@set_module('mxnet.numpy')\ndef dot(a, b, out=None):\n \"\"\"\n Dot product of two arrays. Specifically,\n\n * If both `a` and `b` are 1-D arrays, it is inner product of vectors\n\n * If both `a` and `b` are 2-D arrays, it is matrix multiplication,\n\n * If either `a` or `b` is 0-D (scalar), it is equivalent to :func:`multiply`\n and using ``np.multiply(a, b)`` or ``a * b`` is preferred.\n\n * If `a` is an N-D array and `b` is a 1-D array, it is a sum product over\n the last axis of `a` and `b`.\n\n * If `a` is an N-D array and `b` is a 2-D array, it is a\n sum product over the last axis of `a` and the second-to-last axis of `b`::\n\n dot(a, b)[i,j,k] = sum(a[i,j,:] * b[:,k])\n\n Parameters\n ----------\n a : ndarray\n First argument.\n b : ndarray\n Second argument.\n\n out : ndarray, optional\n Output argument. It must have the same shape and type as the expected output.\n\n Returns\n -------\n output : ndarray\n Returns the dot product of `a` and `b`. If `a` and `b` are both\n scalars or both 1-D arrays then a scalar is returned; otherwise\n an array is returned.\n If `out` is given, then it is returned\n\n Examples\n --------\n >>> a = np.array(3)\n >>> b = np.array(4)\n >>> np.dot(a, b)\n array(12.)\n\n For 2-D arrays it is the matrix product:\n\n >>> a = np.array([[1, 0], [0, 1]])\n >>> b = np.array([[4, 1], [2, 2]])\n >>> np.dot(a, b)\n array([[4., 1.],\n [2., 2.]])\n\n >>> a = np.arange(3*4*5*6).reshape((3,4,5,6))\n >>> b = np.arange(5*6)[::-1].reshape((6,5))\n >>> np.dot(a, b)[2,3,2,2]\n array(29884.)\n >>> np.sum(a[2,3,2,:] * b[:,2])\n array(29884.)\n \"\"\"\n return _mx_nd_np.dot(a, b, out=out)\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.numpy')\ndef cumsum(a, axis=None, dtype=None, out=None):\n \"\"\"\n Return the cumulative sum of the elements along a given axis.\n\n Parameters\n ----------\n a : array_like\n Input array.\n axis : int, optional\n Axis along which the cumulative sum is computed. The default\n (None) is to compute the cumsum over the flattened array.\n dtype : dtype, optional\n Type of the returned array and of the accumulator in which the\n elements are summed. If `dtype` is not specified, it defaults\n to the dtype of `a`, unless `a` has an integer dtype with a\n precision less than that of the default platform integer. 
In\n that case, the default platform integer is used.\n out : ndarray, optional\n Alternative output array in which to place the result. It must\n have the same shape and buffer length as the expected output\n but the type will be cast if necessary. See `doc.ufuncs`\n (Section \"Output arguments\") for more details.\n\n Returns\n -------\n cumsum_along_axis : ndarray.\n A new array holding the result is returned unless `out` is\n specified, in which case a reference to `out` is returned. The\n result has the same size as `a`, and the same shape as `a` if\n `axis` is not None or `a` is a 1-d array.\n\n Examples\n --------\n >>> a = np.array([[1,2,3], [4,5,6]])\n >>> a\n array([[1, 2, 3],\n [4, 5, 6]])\n >>> np.cumsum(a)\n array([ 1, 3, 6, 10, 15, 21])\n >>> np.cumsum(a, dtype=float) # specifies type of output value(s)\n array([ 1., 3., 6., 10., 15., 21.])\n >>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns\n array([[1, 2, 3],\n [5, 7, 9]])\n >>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows\n array([[ 1, 3, 6],\n [ 4, 9, 15]])\n \"\"\"\n return _mx_nd_np.cumsum(a, axis=axis, dtype=dtype, out=out)\n\n@set_module('mxnet.numpy')\ndef reshape(a, newshape, order='C'):\n \"\"\"\n Gives a new shape to an array without changing its data.\n This function always returns a copy of the input array if\n ``out`` is not provided.\n\n Parameters\n ----------\n a : ndarray\n Array to be reshaped.\n\n newshape : int or tuple of ints\n The new shape should be compatible with the original shape. If\n an integer, then the result will be a 1-D array of that length.\n One shape dimension can be -1. In this case, the value is\n inferred from the length of the array and remaining dimensions.\n\n order : {'C'}, optional\n Read the elements of `a` using this index order, and place the\n elements into the reshaped array using this index order. 'C'\n means to read / write the elements using C-like index order,\n with the last axis index changing fastest, back to the first\n axis index changing slowest. Other order types such as 'F'/'A'\n may be added in the future.\n\n Returns\n -------\n reshaped_array : ndarray\n It will be always a copy of the original array. This behavior is different\n from the official NumPy ``reshape`` operator where views of the original array may be\n generated.\n\n See Also\n --------\n ndarray.reshape : Equivalent method.\n\n Examples\n --------\n >>> a = np.arange(6).reshape((3, 2))\n >>> a\n array([[0., 1.],\n [2., 3.],\n [4., 5.]])\n\n >>> np.reshape(a, (2, 3)) # C-like index ordering\n array([[0., 1., 2.],\n [3., 4., 5.]])\n\n >>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape\n array([[0., 1., 2.],\n [3., 4., 5.]])\n\n >>> a = np.array([[1,2,3], [4,5,6]])\n >>> np.reshape(a, 6)\n array([1., 2., 3., 4., 5., 6.])\n\n >>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2\n array([[1., 2.],\n [3., 4.],\n [5., 6.]])\n \"\"\"\n return _mx_nd_np.reshape(a, newshape, order)\n\n@set_module('mxnet.numpy')\ndef moveaxis(a, source, destination):\n \"\"\"Move axes of an array to new positions.\n Other axes remain in their original order.\n\n Parameters\n ----------\n a : ndarray\n The array whose axes should be reordered.\n source : int or sequence of int\n Original positions of the axes to move. These must be unique.\n destination : int or sequence of int\n Destination positions for each of the original axes. These must also be\n unique.\n\n Returns\n -------\n result : ndarray\n Array with moved axes. 
This array is a view of the input array.\n\n See Also\n --------\n transpose: Permute the dimensions of an array.\n swapaxes: Interchange two axes of an array.\n\n Examples\n --------\n >>> x = np.zeros((3, 4, 5))\n >>> np.moveaxis(x, 0, -1).shape\n (4, 5, 3)\n >>> np.moveaxis(x, -1, 0).shape\n (5, 3, 4)\n These all achieve the same result:\n >>> np.transpose(x).shape\n (5, 4, 3)\n >>> np.swapaxes(x, 0, -1).shape\n (5, 4, 3)\n >>> np.moveaxis(x, [0, 1], [-1, -2]).shape\n (5, 4, 3)\n >>> np.moveaxis(x, [0, 1, 2], [-1, -2, -3]).shape\n (5, 4, 3)\n \"\"\"\n return _mx_nd_np.moveaxis(a, source, destination)\n\n@set_module('mxnet.numpy')\ndef copy(a): # pylint: disable=redefined-outer-name\n \"\"\"\n Return an array copy of the given object.\n\n Parameters\n ----------\n a : _Symbol\n Input array.\n\n Returns\n -------\n arr : _Symbol\n Array interpretation of a.\n\n -----\n Examples\n --------\n >>> x = np.array([1, 2, 3])\n >>> y = x\n >>> z = np.copy(x)\n >>> x[0] = 10\n >>> x[0] == y[0]\n True\n >>> x[0] == z[0]\n False\n \"\"\"\n return _mx_nd_np.copy(a)\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.numpy')\ndef rollaxis(a, axis, start=0):\n \"\"\"\n Roll the specified axis backwards, until it lies in a given position.\n\n Parameters\n ----------\n a : ndarray\n Input array.\n axis : integer\n The axis to roll backwards. The positions of the other axes do not\n change relative to one another.\n start: int, optional\n The axis is rolled until it lies before this position.\n The default, 0, results in a “complete” roll.\n\n Returns\n -------\n res : ndarray\n A view after applying rollaxis to `a` is returned.\n\n -----\n Examples\n --------\n >>> a = np.ones((3,4,5,6))\n >>> np.rollaxis(a, 3, 1).shape\n (3, 6, 4, 5)\n >>> np.rollaxis(a, 2).shape\n (5, 3, 4, 6)\n >>> np.rollaxis(a, 1, 4).shape\n (3, 5, 6, 4)\n \"\"\"\n return _mx_nd_np.rollaxis(a, axis, start)\n\n\n@set_module('mxnet.numpy')\ndef diag(v, k=0):\n \"\"\"\n Extracts a diagonal or constructs a diagonal array.\n * 1-D arrays: constructs a 2-D array with the input as its diagonal, all other elements are zero.\n * 2-D arrays: extracts the k-th Diagonal\n\n Parameters\n ----------\n array : ndarray\n The array to apply diag method.\n k : offset\n extracts or constructs kth diagonal given input array\n\n Returns\n ----------\n out : ndarray\n The extracted diagonal or constructed diagonal array.\n\n Examples\n --------\n >>> x = np.arange(9).reshape((3,3))\n >>> x\n array([[0, 1, 2],\n [3, 4, 5],\n [6, 7, 8]])\n >>> np.diag(x)\n array([0, 4, 8])\n >>> np.diag(x, k=1)\n array([1, 5])\n >>> np.diag(x, k=-1)\n array([3, 7])\n\n >>> np.diag(np.diag(x))\n array([[0, 0, 0],\n [0, 4, 0],\n [0, 0, 8]])\n \"\"\"\n return _mx_nd_np.diag(v, k=k)\n\n\n@set_module('mxnet.numpy')\ndef diagflat(v, k=0):\n \"\"\"\n Create a two-dimensional array with the flattened input as a diagonal.\n\n Parameters\n ----------\n v : array_like\n Input data, which is flattened and set as the `k`-th\n diagonal of the output.\n k : int, optional\n Diagonal to set; 0, the default, corresponds to the \"main\" diagonal,\n a positive (negative) `k` giving the number of the diagonal above\n (below) the main.\n\n Returns\n -------\n out : ndarray\n The 2-D output array.\n\n See Also\n --------\n diag : MATLAB work-alike for 1-D and 2-D arrays.\n diagonal : Return specified diagonals.\n trace : Sum along diagonals.\n\n Examples\n --------\n >>> np.diagflat([[1,2], [3,4]])\n array([[1, 0, 0, 0],\n [0, 2, 0, 0],\n [0, 0, 3, 0],\n [0, 0, 0, 4]])\n >>> 
np.diagflat([1,2], 1)\n array([[0, 1, 0],\n [0, 0, 2],\n [0, 0, 0]])\n \"\"\"\n return _mx_nd_np.diagflat(v, k=k)\n\n\n@set_module('mxnet.numpy')\ndef diagonal(a, offset=0, axis1=0, axis2=1):\n \"\"\"\n If a is 2-D, returns the diagonal of a with the given offset, i.e., the collection of elements of\n the form a[i, i+offset]. If a has more than two dimensions, then the axes specified by axis1 and\n axis2 are used to determine the 2-D sub-array whose diagonal is returned. The shape of the\n resulting array can be determined by removing axis1 and axis2 and appending an index to the\n right equal to the size of the resulting diagonals.\n\n Parameters\n ----------\n a : ndarray\n Input data from which diagonal are taken.\n offset: int, Optional\n Offset of the diagonal from the main diagonal\n axis1: int, Optional\n Axis to be used as the first axis of the 2-D sub-arrays\n axis2: int, Optional\n Axis to be used as the second axis of the 2-D sub-arrays\n\n Returns\n -------\n out : ndarray\n Output result\n\n Raises\n -------\n ValueError: If the dimension of a is less than 2.\n\n Examples\n --------\n >>> a = np.arange(4).reshape(2,2)\n >>> a\n array([[0, 1],\n [2, 3]])\n >>> np.diagonal(a)\n array([0, 3])\n >>> np.diagonal(a, 1)\n array([1])\n\n >>> a = np.arange(8).reshape(2,2,2)\n >>>a\n array([[[0, 1],\n [2, 3]],\n [[4, 5],\n [6, 7]]])\n >>> np.diagonal(a, 0, 0, 1)\n array([[0, 6],\n [1, 7]])\n \"\"\"\n return _mx_nd_np.diagonal(a, offset=offset, axis1=axis1, axis2=axis2)\n\n\n# pylint: disable=redefined-outer-name, too-many-arguments\n@set_module('mxnet.numpy')\ndef sum(a, axis=None, dtype=None, out=None, keepdims=None, initial=None, where=None):\n r\"\"\"\n Sum of array elements over a given axis.\n\n Parameters\n ----------\n a : ndarray\n Input data.\n axis : None or int, optional\n Axis or axes along which a sum is performed. The default,\n axis=None, will sum all of the elements of the input array. If\n axis is negative it counts from the last to the first axis.\n dtype : dtype, optional\n The type of the returned array and of the accumulator in which the\n elements are summed. The default type is float32.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the input array.\n\n If the default value is passed, then `keepdims` will not be\n passed through to the `sum` method of sub-classes of\n `ndarray`, however any non-default value will be. If the\n sub-classes `sum` method does not implement `keepdims` any\n exceptions will be raised.\n initial: Currently only supports None as input, optional\n Starting value for the sum.\n Currently not implemented. Please use ``None`` as input or skip this argument.\n out : ndarray or None, optional\n Alternative output array in which to place the result. It must have\n the same shape and dtype as the expected output.\n\n Returns\n -------\n sum_along_axis : ndarray\n An ndarray with the same shape as `a`, with the specified\n axis removed. If an output array is specified, a reference to\n `out` is returned.\n\n Notes\n -----\n * Input type does not support Python native iterables.\n * \"out\" param: cannot perform auto type change. out ndarray's dtype must be the same as the expected output.\n * \"initial\" param is not supported yet. 
Please use None as input.\n * Arithmetic is modular when using integer types, and no error is raised on overflow.\n * The sum of an empty array is the neutral element 0:\n\n >>> a = np.empty(1)\n >>> np.sum(a)\n array(0.)\n\n This function differs from the original `numpy.sum\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy.sum.html>`_ in\n the following aspects:\n\n * Input type does not support Python native iterables(list, tuple, ...).\n * \"out\" param: cannot perform auto type cast. out ndarray's dtype must be the same as the expected output.\n * \"initial\" param is not supported yet. Please use ``None`` as input or skip it.\n * The default type is float32.\n\n Examples\n --------\n >>> a = np.array([0.5, 1.5])\n >>> np.sum(a)\n array(2.)\n >>> a = np.array([0.5, 0.7, 0.2, 1.5])\n >>> np.sum(a, dtype=np.int32)\n array(2, dtype=int32)\n >>> a = np.array([[0, 1], [0, 5]])\n >>> np.sum(a)\n array(6.)\n >>> np.sum(a, axis=0)\n array([0., 6.])\n >>> np.sum(a, axis=1)\n array([1., 5.])\n\n With output ndarray:\n\n >>> a = np.array([[0, 1], [0, 5]])\n >>> b = np.ones((2,), dtype=np.float32)\n >>> np.sum(a, axis = 0, out=b)\n array([0., 6.])\n >>> b\n array([0., 6.])\n\n If the accumulator is too small, overflow occurs:\n\n >>> np.ones(128, dtype=np.int8).sum(dtype=np.int8)\n array(-128, dtype=int8)\n \"\"\"\n return _mx_nd_np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims, initial=initial, where=where)\n\n\n@set_module('mxnet.numpy')\ndef bitwise_left_shift(x1, x2, out=None):\n r\"\"\"\n Shift the bits of and integer to the left. Bits are shifted to the left by\n appending x2 0s at the right of x1. Since the internal representation of numbers\n is in binary format, this operation is equivalent to ``x1 * 2**x2``\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Input values.\n x2 : ndarray or scalar\n Number of zeros to append to x1. Has to be non-negative. If x1.shape != x2.shape,\n they must be broadcastable to a common shape (which becomes the shape of the output).\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape that the\n inputs broadcast to. If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Result.\n\n Examples\n --------\n >>> np.binary_repr(5)\n '101'\n >>> np.left_shift(5, 2)\n 20\n >>> np.binary_repr(20)\n '10100'\n \"\"\"\n return _mx_nd_np.bitwise_left_shift(x1, x2, out)\n\n\n@set_module('mxnet.numpy')\ndef bitwise_right_shift(x1, x2, out=None):\n r\"\"\"\n Shift the bits of and integer to the right. Bits are shifted to the right by\n x2. Because the internal representation of numbers is in binary format,\n this operation is equivalent to ``x1 / 2**x2``\n\n Parameters\n ----------\n x1 : ndarray or scalar\n Input values.\n x1 : ndarray or scalar\n Number of bits to remove at the right of x1. If x1.shape != x2.shape,\n they must be broadcastable to a common shape (which becomes the shape of the output).\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have a shape that the\n inputs broadcast to. 
If not provided or None, a freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray\n Result.\n\n Examples\n --------\n >>> np.binary_repr(10)\n '1010'\n >>> np.right_shift(10, 1)\n 5\n >>> np.binary_repr(5)\n '101'\n >>> np.right_shift(10, np.array([1,2,3]))\n array([5, 2, 1])\n \"\"\"\n return _mx_nd_np.bitwise_right_shift(x1, x2, out)\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.numpy')\n@wrap_ctx_to_device_func\ndef asarray(obj, dtype=None, device=None, copy=None):\n \"\"\"\n Convert the input to an array.\n\n Parameters\n ----------\n obj : <array>, bool, int, float, NestedSequence[ bool | int | float ]\n Object to be converted to an array. Can be a Python scalar,\n a (possibly nested) sequence of Python scalars,\n or an object supporting DLPack or the Python buffer protocol.\n dtype : dtype, Optional\n output array data type. Default: None .\n device : Device, optional\n Device context on which the memory is allocated. Default is\n `mxnet.device.current_device()`.\n copy : bool, Optional\n Whether or not to make a copy of the input.\n If True, always copies.\n If False, never copies for input which supports DLPack or the buffer protocol,\n and raises ValueError in case that would be necessary.\n If None, reuses existing memory buffer if possible, copies otherwise. Default: None .\n\n An array containing the data from obj.\n\n Examples\n --------\n >>> np.asarray([1, 2, 3])\n array([1., 2., 3.])\n\n >>> np.asarray([[1, 2], [3, 4]], dtype=np.int32)\n array([[1, 2],\n [3, 4]], dtype=int32)\n\n >>> np.asarray([1.2], device=mx.gpu())\n array([1.2], device=gpu(0))\n \"\"\"\n if isinstance(obj, numeric_types):\n dtype = dtype_from_number(obj) if dtype is None else dtype\n obj = _np.asarray(obj, dtype=dtype)\n elif isinstance(obj, _np.ndarray):\n if is_np_default_dtype():\n dtype = obj.dtype if dtype is None else dtype\n else:\n dtype = _np.float32 if dtype is None or obj.dtype is _np.float64 else dtype\n elif isinstance(obj, ndarray):\n if dtype is not None:\n obj = obj.astype(dtype, copy=copy)\n if device is not None:\n obj = obj.to_device(device)\n return obj\n elif hasattr(obj, '__dlpack__'):\n return from_dlpack(obj)\n else:\n if dtype is None:\n default_dtype = _np.float64 if is_np_default_dtype() else _np.float32\n dtype = obj.dtype if hasattr(obj, \"dtype\") else default_dtype\n try:\n obj = _np.array(obj, dtype=dtype)\n except Exception as e:\n # printing out the error raised by official NumPy's array function\n # for transparency on users' side\n raise TypeError('{}'.format(str(e)))\n if device is None:\n device = current_device()\n ret = empty(obj.shape, dtype=dtype, device=device)\n if len(obj.shape) == 0:\n ret[()] = obj\n else:\n ret[:] = obj\n return ret\n\n\n# pylint: disable=redefined-outer-name\n@set_module('mxnet.numpy')\ndef from_dlpack(x):\n \"\"\"\n Returns a np.ndarray backed by a dlpack tensor.\n\n Parameters\n ----------\n dlpack : an object with __dlpack__ method or PyCapsule (the pointer of DLManagedTensor)\n input data\n\n Returns\n -------\n out : np.ndarray\n an ndarray backed by a dlpack tensor\n\n Examples\n --------\n >>> x = mx.np.ones((2,3))\n >>> y = mx.np.from_dlpack(x)\n >>> y\n array([[1., 1., 1.],\n [1., 1., 1.]])\n >>> y += 1\n >>> x\n array([[2., 2., 2.],\n [2., 2., 2.]])\n \"\"\"\n from_dlpack = ndarray_from_dlpack(ndarray)\n return from_dlpack(x)\n" ]
[ [ "numpy.array", "numpy.asarray", "numpy.clip", "numpy.issubdtype", "numpy.dtype" ] ]
NickYi1990/torch_buddy
[ "b888f60b25e4f70b89960d158aaf893ab6183481" ]
[ "torch_buddy/utils/plot.py" ]
[ "import numpy as np\nimport matplotlib\n\nmatplotlib.use(\"TkAgg\")\n\nimport matplotlib.pyplot as plt\n\n\ndef subplots(data_for_plots, figsize=[12, 4]):\n \"\"\"\n data_for_plots = [[1,2,3], [4,5,6]]\n \"\"\"\n f, axes = plt.subplots(np.int(np.ceil(len(data_for_plots) / 2)), 2, figsize=figsize)\n for ax, data_for_plot in zip(axes.flat, data_for_plots):\n ax.plot(data_for_plot)\n" ]
[ [ "matplotlib.use" ] ]
rodonguyen/vres_code_2021
[ "cb49d941db4dfc5137e887b195f403fb4262cfd8" ]
[ "linear_regression_pandas/bin/lr_ds12.py" ]
[ "import pandas as pd\r\nfrom sklearn import linear_model\r\n\r\ndf = pd.read_csv('./dataset/dataset_12_pandas.csv')\r\nx = df.values\r\ny = df['y'].values\r\n\r\n# Create linear regression object\r\nregression = linear_model.LinearRegression()\r\n\r\n# Train the model using the training sets\r\nregression.fit(x, y)\r\n\r\nprint(regression.predict([[10000,10000]]))\r\nprint(\"Coefficients: \\n\", regression.coef_)\r\n" ]
[ [ "pandas.read_csv", "sklearn.linear_model.LinearRegression" ] ]
theorist17/adapter-transformers
[ "17a1e3f24aca59e3b131a47685dcefdfc69fa090" ]
[ "src/transformers/trainer.py" ]
[ "import json\nimport logging\nimport math\nimport os\nimport random\nimport re\nimport shutil\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom typing import Callable, Dict, List, Optional, Tuple\n\nimport numpy as np\nimport torch\nfrom packaging import version\nfrom torch import nn\nfrom torch.utils.data.dataloader import DataLoader\nfrom torch.utils.data.dataset import Dataset\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.utils.data.sampler import RandomSampler, Sampler, SequentialSampler\nfrom tqdm.auto import tqdm, trange\n\nfrom .adapter_bert import get_fusion_regularization_loss\nfrom .data.data_collator import DataCollator, DefaultDataCollator\nfrom .modeling_utils import PreTrainedModel\nfrom .optimization import AdamW, get_linear_schedule_with_warmup\nfrom .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput, TrainOutput\nfrom .training_args import TrainingArguments, is_tpu_available\n\n\ntry:\n from apex import amp\n\n _has_apex = True\nexcept ImportError:\n _has_apex = False\n\n\ndef is_apex_available():\n return _has_apex\n\n\nif is_tpu_available():\n import torch_xla.core.xla_model as xm\n import torch_xla.debug.metrics as met\n import torch_xla.distributed.parallel_loader as pl\n\ntry:\n from torch.utils.tensorboard import SummaryWriter\n\n _has_tensorboard = True\nexcept ImportError:\n try:\n from tensorboardX import SummaryWriter\n\n _has_tensorboard = True\n except ImportError:\n _has_tensorboard = False\n\n\ndef is_tensorboard_available():\n return _has_tensorboard\n\n\ntry:\n import wandb\n\n wandb.ensure_configured()\n if wandb.api.api_key is None:\n _has_wandb = False\n wandb.termwarn(\"W&B installed but not logged in. Run `wandb login` or set the WANDB_API_KEY env variable.\")\n else:\n _has_wandb = False if os.getenv(\"WANDB_DISABLED\") else True\nexcept ImportError:\n _has_wandb = False\n\n\ndef is_wandb_available():\n return _has_wandb\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef set_seed(seed: int):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n # ^^ safe to call this function even if cuda is not available\n\n\n@contextmanager\ndef torch_distributed_zero_first(local_rank: int):\n \"\"\"\n Decorator to make all processes in distributed training wait for each local_master to do something.\n \"\"\"\n if local_rank not in [-1, 0]:\n torch.distributed.barrier()\n yield\n if local_rank == 0:\n torch.distributed.barrier()\n\n\nclass SequentialDistributedSampler(Sampler):\n \"\"\"\n Distributed Sampler that subsamples indicies sequentially,\n making it easier to collate all results at the end.\n\n Even though we only use this sampler for eval and predict (no training),\n which means that the model params won't have to be synced (i.e. 
will not hang\n for synchronization even if varied number of forward passes), we still add extra\n samples to the sampler to make it evenly divisible (like in `DistributedSampler`)\n to make it easy to `gather` or `reduce` resulting tensors at the end of the loop.\n \"\"\"\n\n def __init__(self, dataset, num_replicas=None, rank=None):\n if num_replicas is None:\n if not torch.distributed.is_available():\n raise RuntimeError(\"Requires distributed package to be available\")\n num_replicas = torch.distributed.get_world_size()\n if rank is None:\n if not torch.distributed.is_available():\n raise RuntimeError(\"Requires distributed package to be available\")\n rank = torch.distributed.get_rank()\n self.dataset = dataset\n self.num_replicas = num_replicas\n self.rank = rank\n self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))\n self.total_size = self.num_samples * self.num_replicas\n\n def __iter__(self):\n indices = list(range(len(self.dataset)))\n\n # add extra samples to make it evenly divisible\n indices += indices[: (self.total_size - len(indices))]\n assert len(indices) == self.total_size\n\n # subsample\n indices = indices[self.rank * self.num_samples : (self.rank + 1) * self.num_samples]\n assert len(indices) == self.num_samples\n\n return iter(indices)\n\n def __len__(self):\n return self.num_samples\n\n\ndef get_tpu_sampler(dataset: Dataset):\n if xm.xrt_world_size() <= 1:\n return RandomSampler(dataset)\n return DistributedSampler(dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())\n\n\nclass Trainer:\n \"\"\"\n Trainer is a simple but feature-complete training and eval loop for PyTorch,\n optimized for Transformers.\n \"\"\"\n\n model: PreTrainedModel\n args: TrainingArguments\n data_collator: DataCollator\n train_dataset: Optional[Dataset]\n eval_dataset: Optional[Dataset]\n compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None\n prediction_loss_only: bool\n tb_writer: Optional[\"SummaryWriter\"] = None\n optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = None\n global_step: Optional[int] = None\n epoch: Optional[float] = None\n\n def __init__(\n self,\n model: PreTrainedModel,\n args: TrainingArguments,\n data_collator: Optional[DataCollator] = None,\n train_dataset: Optional[Dataset] = None,\n eval_dataset: Optional[Dataset] = None,\n compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,\n prediction_loss_only=False,\n do_save_full_model: bool = True,\n do_save_adapters: bool = False,\n do_save_adapter_fusion: bool = False,\n adapter_names: Optional[List[List[str]]] = None,\n tb_writer: Optional[\"SummaryWriter\"] = None,\n optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = None,\n ):\n \"\"\"\n Trainer is a simple but feature-complete training and eval loop for PyTorch,\n optimized for Transformers.\n\n Args:\n prediction_loss_only:\n (Optional) in evaluation and prediction, only return the loss\n \"\"\"\n self.model = model.to(args.device)\n self.args = args\n if data_collator is not None:\n self.data_collator = data_collator\n else:\n self.data_collator = DefaultDataCollator()\n self.train_dataset = train_dataset\n self.eval_dataset = eval_dataset\n self.compute_metrics = compute_metrics\n self.prediction_loss_only = prediction_loss_only\n self.optimizers = optimizers\n # if tb_writer is not None:\n # self.tb_writer = tb_writer\n # elif is_tensorboard_available() and self.is_world_master():\n # self.tb_writer = 
SummaryWriter(log_dir=self.args.logging_dir)\n if not is_tensorboard_available():\n logger.warning(\n \"You are instantiating a Trainer but Tensorboard is not installed. You should consider installing it.\"\n )\n if is_wandb_available():\n self._setup_wandb()\n else:\n logger.info(\n \"You are instantiating a Trainer but W&B is not installed. To use wandb logging, \"\n \"run `pip install wandb; wandb login` see https://docs.wandb.com/huggingface.\"\n )\n set_seed(self.args.seed)\n # Create output directory if needed\n if self.is_world_master():\n os.makedirs(self.args.output_dir, exist_ok=True)\n # adapters used\n self.do_save_full_model = do_save_full_model\n self.do_save_adapters = do_save_adapters\n self.do_save_adapter_fusion = do_save_adapter_fusion\n self.adapter_names = adapter_names\n if is_tpu_available():\n # Set an xla_device flag on the model's config.\n # We'll find a more elegant and not need to do this in the future.\n self.model.config.xla_device = True\n\n def get_train_dataloader(self) -> DataLoader:\n if self.train_dataset is None:\n raise ValueError(\"Trainer: training requires a train_dataset.\")\n if is_tpu_available():\n train_sampler = get_tpu_sampler(self.train_dataset)\n else:\n train_sampler = (\n RandomSampler(self.train_dataset)\n if self.args.local_rank == -1\n else DistributedSampler(self.train_dataset)\n )\n\n data_loader = DataLoader(\n self.train_dataset,\n batch_size=self.args.train_batch_size,\n sampler=train_sampler,\n collate_fn=self.data_collator.collate_batch,\n )\n\n return data_loader\n\n def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:\n if eval_dataset is None and self.eval_dataset is None:\n raise ValueError(\"Trainer: evaluation requires an eval_dataset.\")\n\n eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset\n\n if is_tpu_available():\n sampler = SequentialDistributedSampler(\n eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal()\n )\n elif self.args.local_rank != -1:\n sampler = SequentialDistributedSampler(eval_dataset)\n else:\n sampler = SequentialSampler(eval_dataset)\n\n data_loader = DataLoader(\n eval_dataset,\n sampler=sampler,\n batch_size=self.args.eval_batch_size,\n collate_fn=self.data_collator.collate_batch,\n )\n\n return data_loader\n\n def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:\n # We use the same batch_size as for eval.\n if is_tpu_available():\n sampler = SequentialDistributedSampler(\n test_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal()\n )\n elif self.args.local_rank != -1:\n sampler = SequentialDistributedSampler(test_dataset)\n else:\n sampler = SequentialSampler(test_dataset)\n\n data_loader = DataLoader(\n test_dataset,\n sampler=sampler,\n batch_size=self.args.eval_batch_size,\n collate_fn=self.data_collator.collate_batch,\n )\n\n return data_loader\n\n def get_optimizers(\n self, num_training_steps: int\n ) -> Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]:\n \"\"\"\n Setup the optimizer and the learning rate scheduler.\n\n We provide a reasonable default that works well.\n If you want to use something else, you can pass a tuple in the Trainer's init,\n or override this method in a subclass.\n \"\"\"\n if self.optimizers is not None:\n return self.optimizers\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n if hasattr(self.model.config, \"adapter_fusion_models\"):\n no_decay += [f\"adapter_fusion_layer.{n}.value\" for 
n in self.model.config.adapter_fusion_models]\n\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": self.args.weight_decay,\n },\n {\n \"params\": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=self.args.learning_rate, eps=self.args.adam_epsilon)\n scheduler = get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps\n )\n return optimizer, scheduler\n\n def _setup_wandb(self):\n \"\"\"\n Setup the optional Weights & Biases (`wandb`) integration.\n\n One can override this method to customize the setup if needed. Find more information at https://docs.wandb.com/huggingface\n You can also override the following environment variables:\n\n Environment:\n WANDB_WATCH:\n (Optional, [\"gradients\", \"all\", \"false\"]) \"gradients\" by default, set to \"false\" to disable gradient logging\n or \"all\" to log gradients and parameters\n WANDB_PROJECT:\n (Optional): str - \"huggingface\" by default, set this to a custom string to store results in a different project\n WANDB_DISABLED:\n (Optional): boolean - defaults to false, set to \"true\" to disable wandb entirely\n \"\"\"\n logger.info('Automatic Weights & Biases logging enabled, to disable set os.environ[\"WANDB_DISABLED\"] = \"true\"')\n wandb.init(project=os.getenv(\"WANDB_PROJECT\", \"huggingface\"), config=vars(self.args))\n # keep track of model topology and gradients\n if os.getenv(\"WANDB_WATCH\") != \"false\":\n wandb.watch(\n self.model, log=os.getenv(\"WANDB_WATCH\", \"gradients\"), log_freq=max(100, self.args.logging_steps)\n )\n\n def num_examples(self, dataloader: DataLoader) -> int:\n \"\"\"\n Helper to get num of examples from a DataLoader, by accessing its Dataset.\n \"\"\"\n return len(dataloader.dataset)\n\n def train(self, model_path: Optional[str] = None):\n \"\"\"\n Main training entry point.\n\n Args:\n model_path:\n (Optional) Local path to model if model to train has been instantiated from a local path\n If present, we will try reloading the optimizer/scheduler states from there.\n \"\"\"\n train_dataloader = self.get_train_dataloader()\n if self.args.max_steps > 0:\n t_total = self.args.max_steps\n num_train_epochs = (\n self.args.max_steps // (len(train_dataloader) // self.args.gradient_accumulation_steps) + 1\n )\n else:\n t_total = int(len(train_dataloader) // self.args.gradient_accumulation_steps * self.args.num_train_epochs)\n num_train_epochs = self.args.num_train_epochs\n\n optimizer, scheduler = self.get_optimizers(num_training_steps=t_total)\n\n # Check if saved optimizer or scheduler states exist\n if (\n model_path is not None\n and os.path.isfile(os.path.join(model_path, \"optimizer.pt\"))\n and os.path.isfile(os.path.join(model_path, \"scheduler.pt\"))\n ):\n # Load in optimizer and scheduler states\n optimizer.load_state_dict(\n torch.load(os.path.join(model_path, \"optimizer.pt\"), map_location=self.args.device)\n )\n scheduler.load_state_dict(torch.load(os.path.join(model_path, \"scheduler.pt\")))\n\n model = self.model\n if self.args.fp16:\n if not is_apex_available():\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n model, optimizer = amp.initialize(model, optimizer, opt_level=self.args.fp16_opt_level)\n\n # multi-gpu training (should be after apex fp16 
initialization)\n if self.args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Distributed training (should be after apex fp16 initialization)\n if self.args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[self.args.local_rank],\n output_device=self.args.local_rank,\n find_unused_parameters=True,\n )\n\n if self.tb_writer is not None:\n self.tb_writer.add_text(\"args\", self.args.to_json_string())\n self.tb_writer.add_hparams(self.args.to_sanitized_dict(), metric_dict={})\n\n # Train!\n if is_tpu_available():\n total_train_batch_size = self.args.train_batch_size * xm.xrt_world_size()\n else:\n total_train_batch_size = (\n self.args.train_batch_size\n * self.args.gradient_accumulation_steps\n * (torch.distributed.get_world_size() if self.args.local_rank != -1 else 1)\n )\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", self.num_examples(train_dataloader))\n logger.info(\" Num Epochs = %d\", num_train_epochs)\n logger.info(\" Instantaneous batch size per device = %d\", self.args.per_device_train_batch_size)\n logger.info(\" Total train batch size (w. parallel, distributed & accumulation) = %d\", total_train_batch_size)\n logger.info(\" Gradient Accumulation steps = %d\", self.args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n self.global_step = 0\n self.epoch = 0\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n # Check if continuing training from a checkpoint\n if model_path is not None:\n # set global_step to global_step of last saved checkpoint from model path\n try:\n self.global_step = int(model_path.split(\"-\")[-1].split(\"/\")[0])\n epochs_trained = self.global_step // (len(train_dataloader) // self.args.gradient_accumulation_steps)\n steps_trained_in_current_epoch = self.global_step % (\n len(train_dataloader) // self.args.gradient_accumulation_steps\n )\n\n logger.info(\" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(\" Continuing training from epoch %d\", epochs_trained)\n logger.info(\" Continuing training from global step %d\", self.global_step)\n logger.info(\" Will skip the first %d steps in the first epoch\", steps_trained_in_current_epoch)\n except ValueError:\n self.global_step = 0\n logger.info(\" Starting fine-tuning.\")\n\n tr_loss = 0.0\n logging_loss = 0.0\n model.zero_grad()\n train_iterator = trange(\n epochs_trained, int(num_train_epochs), desc=\"Epoch\", disable=not self.is_local_master()\n )\n for epoch in train_iterator:\n if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):\n train_dataloader.sampler.set_epoch(epoch)\n\n if is_tpu_available():\n parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.device]).per_device_loader(\n self.args.device\n )\n epoch_iterator = tqdm(parallel_loader, desc=\"Iteration\", disable=not self.is_local_master())\n else:\n epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", disable=not self.is_local_master())\n\n for step, inputs in enumerate(epoch_iterator):\n # Skip past any already trained steps if resuming training\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n\n tr_loss += self._training_step(model, inputs, optimizer)\n\n if (step + 1) % self.args.gradient_accumulation_steps == 0 or (\n # last step in epoch but step is always smaller than gradient_accumulation_steps\n len(epoch_iterator) <= self.args.gradient_accumulation_steps\n 
and (step + 1) == len(epoch_iterator)\n ):\n # apply adapter fusion weight regularization on the value matrix\n if hasattr(self.model.config, \"adapter_fusion\") and self.model.config.adapter_fusion[\"regularization\"]:\n fusion_reg_loss = get_fusion_regularization_loss(self.model)\n fusion_reg_loss.backward()\n \n if self.args.fp16:\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), self.args.max_grad_norm)\n else:\n torch.nn.utils.clip_grad_norm_(model.parameters(), self.args.max_grad_norm)\n\n if is_tpu_available():\n xm.optimizer_step(optimizer)\n else:\n optimizer.step()\n\n scheduler.step()\n model.zero_grad()\n self.global_step += 1\n self.epoch = epoch + (step + 1) / len(epoch_iterator)\n\n if (self.args.logging_steps > 0 and self.global_step % self.args.logging_steps == 0) or (\n self.global_step == 1 and self.args.logging_first_step\n ):\n logs: Dict[str, float] = {}\n logs[\"loss\"] = (tr_loss - logging_loss) / self.args.logging_steps\n # backward compatibility for pytorch schedulers\n logs[\"learning_rate\"] = (\n scheduler.get_last_lr()[0]\n if version.parse(torch.__version__) >= version.parse(\"1.4\")\n else scheduler.get_lr()[0]\n )\n logging_loss = tr_loss\n\n self._log(logs)\n\n if self.args.evaluate_during_training:\n self.evaluate()\n\n if self.args.save_steps > 0 and self.global_step % self.args.save_steps == 0:\n # In all cases (even distributed/parallel), self.model is always a reference\n # to the model we want to save.\n if hasattr(model, \"module\"):\n assert model.module is self.model\n else:\n assert model is self.model\n # Save model checkpoint\n output_dir = os.path.join(self.args.output_dir, f\"{PREFIX_CHECKPOINT_DIR}-{self.global_step}\")\n\n self.save_model(output_dir)\n\n if self.is_world_master():\n self._rotate_checkpoints()\n\n if is_tpu_available():\n xm.rendezvous(\"saving_optimizer_states\")\n xm.save(optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n xm.save(scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n elif self.is_world_master():\n torch.save(optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n torch.save(scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n\n if self.args.max_steps > 0 and self.global_step > self.args.max_steps:\n epoch_iterator.close()\n break\n if self.args.max_steps > 0 and self.global_step > self.args.max_steps:\n train_iterator.close()\n break\n if self.args.tpu_metrics_debug:\n # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)\n xm.master_print(met.metrics_report())\n\n if self.tb_writer:\n self.tb_writer.close()\n\n if self.do_save_adapters:\n logger.info(\"\\n\\nTraining completed. Do not forget to share your adapters on https://adapterhub.ml =)\\n\\n\")\n else:\n logger.info(\"\\n\\nTraining completed. 
Do not forget to share your model on huggingface.co/models =)\\n\\n\")\n return TrainOutput(self.global_step, tr_loss / self.global_step)\n\n def _log(self, logs: Dict[str, float], iterator: Optional[tqdm] = None) -> None:\n if self.epoch is not None:\n logs[\"epoch\"] = self.epoch\n if self.tb_writer:\n for k, v in logs.items():\n self.tb_writer.add_scalar(k, v, self.global_step)\n if is_wandb_available():\n wandb.log(logs, step=self.global_step)\n output = json.dumps({**logs, **{\"step\": self.global_step}})\n if iterator is not None:\n iterator.write(output)\n else:\n print(output)\n\n def _training_step(\n self, model: nn.Module, inputs: Dict[str, torch.Tensor], optimizer: torch.optim.Optimizer\n ) -> float:\n model.train()\n for k, v in inputs.items():\n inputs[k] = v.to(self.args.device)\n if self.adapter_names:\n inputs[\"adapter_names\"] = self.adapter_names\n\n outputs = model(**inputs)\n loss = outputs[0] # model outputs are always tuple in transformers (see doc)\n\n if self.args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n if self.args.gradient_accumulation_steps > 1:\n loss = loss / self.args.gradient_accumulation_steps\n\n if self.args.fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n return loss.item()\n\n def is_local_master(self) -> bool:\n if is_tpu_available():\n return xm.is_master_ordinal(local=True)\n else:\n return self.args.local_rank in [-1, 0]\n\n def is_world_master(self) -> bool:\n \"\"\"\n This will be True only in one process, even in distributed mode,\n even when training on multiple machines.\n \"\"\"\n if is_tpu_available():\n return xm.is_master_ordinal(local=False)\n else:\n return self.args.local_rank == -1 or torch.distributed.get_rank() == 0\n\n def save_model(self, output_dir: Optional[str] = None):\n \"\"\"\n Saving best-practices: if you use default names for the model,\n you can reload it using from_pretrained().\n\n Will only save from the world_master process (unless in TPUs).\n \"\"\"\n\n if is_tpu_available():\n self._save_tpu(output_dir)\n elif self.is_world_master():\n self._save(output_dir)\n\n def _save_tpu(self, output_dir: Optional[str] = None):\n output_dir = output_dir if output_dir is not None else self.args.output_dir\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n if xm.is_master_ordinal():\n os.makedirs(output_dir, exist_ok=True)\n torch.save(self.args, os.path.join(output_dir, \"training_args.bin\"))\n\n # Save a trained model and configuration using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n if not isinstance(self.model, PreTrainedModel):\n raise ValueError(\"Trainer.model appears to not be a PreTrainedModel\")\n\n xm.rendezvous(\"saving_checkpoint\")\n if self.do_save_adapters:\n self.model.save_all_adapters(output_dir)\n if self.do_save_adapter_fusion:\n self.model.save_all_adapter_fusions(output_dir)\n if self.do_save_full_model:\n self.model.save_pretrained(output_dir)\n\n def _save(self, output_dir: Optional[str] = None):\n output_dir = output_dir if output_dir is not None else self.args.output_dir\n os.makedirs(output_dir, exist_ok=True)\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n # Save a trained model and configuration using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n if not isinstance(self.model, PreTrainedModel):\n raise ValueError(\"Trainer.model appears to not be a PreTrainedModel\")\n if 
self.do_save_adapters:\n self.model.save_all_adapters(output_dir)\n if self.do_save_adapter_fusion:\n self.model.save_all_adapter_fusions(output_dir)\n if self.do_save_full_model:\n self.model.save_pretrained(output_dir)\n\n # Good practice: save your training arguments together with the trained model\n torch.save(self.args, os.path.join(output_dir, \"training_args.bin\"))\n\n def _sorted_checkpoints(self, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False) -> List[str]:\n ordering_and_checkpoint_path = []\n\n glob_checkpoints = [str(x) for x in Path(self.args.output_dir).glob(f\"{checkpoint_prefix}-*\")]\n\n for path in glob_checkpoints:\n if use_mtime:\n ordering_and_checkpoint_path.append((os.path.getmtime(path), path))\n else:\n regex_match = re.match(f\".*{checkpoint_prefix}-([0-9]+)\", path)\n if regex_match and regex_match.groups():\n ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))\n\n checkpoints_sorted = sorted(ordering_and_checkpoint_path)\n checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]\n return checkpoints_sorted\n\n def _rotate_checkpoints(self, use_mtime=False) -> None:\n if self.args.save_total_limit is None or self.args.save_total_limit <= 0:\n return\n\n # Check if we should delete older checkpoint(s)\n checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime)\n if len(checkpoints_sorted) <= self.args.save_total_limit:\n return\n\n number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - self.args.save_total_limit)\n checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]\n for checkpoint in checkpoints_to_be_deleted:\n logger.info(\"Deleting older checkpoint [{}] due to args.save_total_limit\".format(checkpoint))\n shutil.rmtree(checkpoint)\n\n def evaluate(\n self, eval_dataset: Optional[Dataset] = None, prediction_loss_only: Optional[bool] = None,\n ) -> Dict[str, float]:\n \"\"\"\n Run evaluation and return metrics.\n\n The calling script will be responsible for providing a method to compute metrics, as they are\n task-dependent.\n\n Args:\n eval_dataset: (Optional) Pass a dataset if you wish to override\n the one on the instance.\n Returns:\n A dict containing:\n - the eval loss\n - the potential metrics computed from the predictions\n \"\"\"\n eval_dataloader = self.get_eval_dataloader(eval_dataset)\n\n output = self._prediction_loop(eval_dataloader, description=\"Evaluation\")\n\n self._log(output.metrics)\n\n if self.args.tpu_metrics_debug:\n # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)\n xm.master_print(met.metrics_report())\n\n return output.metrics\n\n def predict(self, test_dataset: Dataset) -> PredictionOutput:\n \"\"\"\n Run prediction and return predictions and potential metrics.\n\n Depending on the dataset and your use case, your test dataset may contain labels.\n In that case, this method will also return metrics, like in evaluate().\n \"\"\"\n test_dataloader = self.get_test_dataloader(test_dataset)\n\n return self._prediction_loop(test_dataloader, description=\"Prediction\")\n\n def _prediction_loop(\n self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool] = None\n ) -> PredictionOutput:\n \"\"\"\n Prediction/evaluation loop, shared by `evaluate()` and `predict()`.\n\n Works both with or without labels.\n \"\"\"\n\n prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else self.prediction_loss_only\n\n model = self.model\n # multi-gpu eval\n if 
self.args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n else:\n model = self.model\n # Note: in torch.distributed mode, there's no point in wrapping the model\n # inside a DistributedDataParallel as we'll be under `no_grad` anyways.\n\n batch_size = dataloader.batch_size\n logger.info(\"***** Running %s *****\", description)\n logger.info(\" Num examples = %d\", self.num_examples(dataloader))\n logger.info(\" Batch size = %d\", batch_size)\n eval_losses: List[float] = []\n preds: torch.Tensor = None\n label_ids: torch.Tensor = None\n model.eval()\n\n if is_tpu_available():\n dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)\n\n for inputs in tqdm(dataloader, desc=description):\n has_labels = any(inputs.get(k) is not None for k in [\"labels\", \"lm_labels\", \"masked_lm_labels\"])\n\n for k, v in inputs.items():\n inputs[k] = v.to(self.args.device)\n if self.adapter_names:\n inputs[\"adapter_names\"] = self.adapter_names\n\n with torch.no_grad():\n outputs = model(**inputs)\n if has_labels:\n step_eval_loss, logits = outputs[:2]\n eval_losses += [step_eval_loss.mean().item()]\n else:\n logits = outputs[0]\n\n if not prediction_loss_only:\n if preds is None:\n preds = logits.detach()\n else:\n preds = torch.cat((preds, logits.detach()), dim=0)\n if inputs.get(\"labels\") is not None:\n if label_ids is None:\n label_ids = inputs[\"labels\"].detach()\n else:\n label_ids = torch.cat((label_ids, inputs[\"labels\"].detach()), dim=0)\n\n if self.args.local_rank != -1:\n # In distributed mode, concatenate all results from all nodes:\n if preds is not None:\n preds = self.distributed_concat(preds, num_total_examples=self.num_examples(dataloader))\n if label_ids is not None:\n label_ids = self.distributed_concat(label_ids, num_total_examples=self.num_examples(dataloader))\n elif is_tpu_available():\n # tpu-comment: Get all predictions and labels from all worker shards of eval dataset\n if preds is not None:\n preds = xm.mesh_reduce(\"eval_preds\", preds, torch.cat)\n if label_ids is not None:\n label_ids = xm.mesh_reduce(\"eval_label_ids\", label_ids, torch.cat)\n\n # Finally, turn the aggregated tensors into numpy arrays.\n if preds is not None:\n preds = preds.cpu().numpy()\n if label_ids is not None:\n label_ids = label_ids.cpu().numpy()\n\n if self.compute_metrics is not None and preds is not None and label_ids is not None:\n metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))\n else:\n metrics = {}\n if len(eval_losses) > 0:\n metrics[\"eval_loss\"] = np.mean(eval_losses)\n\n # Prefix all keys with eval_\n for key in list(metrics.keys()):\n if not key.startswith(\"eval_\"):\n metrics[f\"eval_{key}\"] = metrics.pop(key)\n\n return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)\n\n def distributed_concat(self, tensor: torch.Tensor, num_total_examples: int) -> torch.Tensor:\n assert self.args.local_rank != -1\n\n output_tensors = [tensor.clone() for _ in range(torch.distributed.get_world_size())]\n torch.distributed.all_gather(output_tensors, tensor)\n\n concat = torch.cat(output_tensors, dim=0)\n\n # truncate the dummy elements added by SequentialDistributedSampler\n output = concat[:num_total_examples]\n return output\n" ]
[ [ "torch.distributed.get_world_size", "torch.cat", "torch.distributed.is_available", "torch.cuda.manual_seed_all", "torch.utils.data.sampler.RandomSampler", "torch.utils.data.dataloader.DataLoader", "numpy.random.seed", "torch.no_grad", "torch.distributed.all_gather", "torch.nn.parallel.DistributedDataParallel", "numpy.mean", "torch.manual_seed", "torch.utils.data.sampler.SequentialSampler", "torch.utils.data.distributed.DistributedSampler", "torch.distributed.get_rank", "torch.distributed.barrier", "torch.nn.DataParallel" ] ]
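Illustrative sketch for the trainer record above, not part of the dataset entry: it isolates the gradient-accumulation ordering used in that loop (scale the loss by the accumulation factor, call backward on every micro-batch, then clip, step the optimizer and scheduler, and zero the gradients once per window). The model, data, and hyperparameter values below are invented.

import torch
from torch import nn

# Toy stand-ins for self.model, optimizer, and scheduler in the loop above.
model = nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
accum_steps, max_grad_norm = 4, 1.0

model.zero_grad()
for step in range(16):
    x, y = torch.randn(8, 4), torch.randn(8, 1)
    loss = nn.functional.mse_loss(model(x), y)
    # Same scaling as _training_step: average the loss over the accumulation window.
    (loss / accum_steps).backward()
    if (step + 1) % accum_steps == 0:
        # Clip the accumulated gradients, then advance the optimizer and LR schedule.
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
        optimizer.step()
        scheduler.step()
        model.zero_grad()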
TheCodeWanderer/Blog-Scripts
[ "caeb5fe1118351f7889574d8649c580cfaebdaf8" ]
[ "Visualize HRTF.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nLoads a SOFA file and visualizes the HRTF amplitude and phase\r\n\r\nCreated on Mon Feb 24 23:08:19 2020\r\n@author: Ivan\r\n\"\"\"\r\n#%% Load SOFA file\r\n\r\nfrom SOFASonix import SOFAFile\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport scipy.fft\r\n\r\nfilename='hrtf_M_hrtf B.sofa'\r\nsofa = SOFAFile.load(filename)\r\n\r\n#Get params/data\r\nSR = sofa.Data_SamplingRate\r\ndelay = sofa.Data_Delay\r\npos = sofa.SourcePosition\r\nIR = sofa.Data_IR\r\nN = sofa._N\r\n\r\n#%% FFT along equator\r\nind = pos[:,1]==0 #select where the elevation is zero\r\npos_pol = pos[ind,0] #only the polar plane (at constant radius and elevation)\r\nIR_pl = IR[ind,:,:] #Filter IR based on the above criteria\r\nind2 = np.argsort(pos_pol) #sort values to prevent artifcats during plotting\r\npos_pol = pos_pol[ind2]\r\nIR_pl = IR_pl[ind2,:,:]\r\nxf = scipy.fft.rfftfreq(N,1/SR)\r\nyf = scipy.fft.rfft(IR_pl)\r\n\r\n#%% amplitude\r\nplt.pcolormesh(xf,pos_pol,np.abs(yf[:,0,:]),shading='gouraud',antialiased=True)\r\nplt.colorbar()\r\nplt.title('Left Ear')\r\nplt.xlabel('Frequency (Hz)')\r\nplt.ylabel('Azimuthal angle (deg.)')\r\nplt.xlim([0, 18000])\r\n\r\nplt.figure()\r\nplt.pcolormesh(xf,pos_pol,np.abs(yf[:,1,:]),shading='gouraud',antialiased=True)\r\nplt.colorbar()\r\nplt.title('Right Ear')\r\nplt.xlabel('Frequency (Hz)')\r\nplt.ylabel('Azimuthal angle (deg.)')\r\nplt.xlim([0, 18000])\r\n\r\n#%% phase\r\nplt.figure()\r\nplt.pcolormesh(xf,pos_pol,np.arctan2(np.imag(yf[:,0,:]),np.real(yf[:,0,:])),shading='gouraud',antialiased=True)\r\nplt.colorbar()\r\nplt.title('Left Ear')\r\nplt.xlabel('Frequency (Hz)')\r\nplt.ylabel('Azimuthal angle (deg.)')\r\nplt.xlim([0, 18000])\r\n\r\nplt.figure()\r\nplt.pcolormesh(xf,pos_pol,np.arctan2(np.imag(yf[:,1,:]),np.real(yf[:,1,:])),shading='gouraud',antialiased=True)\r\nplt.colorbar()\r\nplt.title('Right Ear')\r\nplt.xlabel('Frequency (Hz)')\r\nplt.ylabel('Azimuthal angle (deg.)')\r\nplt.xlim([0, 18000])\r\n\r\n#%% FFT along polar (xz-plane)\r\nind = pos[:,0]==0 #select where the azimuth is zero\r\npos_pol = pos[ind,1] #only the polar plane (at constant radius and elevation)\r\n#IR_pl = IR[ind,:,:] #Filter IR based on the above criteria\r\nIR_pl = IR[ind,:,:] #Filter IR based on the above criteria\r\nind2 = np.argsort(pos_pol) #sort values to prevent artifcats during plotting\r\npos_pol = pos_pol[ind2]\r\nIR_pl = IR_pl[ind2,:,:]\r\nxf = scipy.fft.rfftfreq(N,1/SR)\r\nyf = scipy.fft.rfft(IR_pl)\r\n\r\n#%% amplitude\r\nplt.figure()\r\nplt.pcolormesh(xf,pos_pol,np.abs(yf[:,0,:]),shading='gouraud',antialiased=True)\r\nplt.colorbar()\r\nplt.title('Left Ear')\r\nplt.xlabel('Frequency (Hz)')\r\nplt.ylabel('Polar angle (deg.)')\r\nplt.xlim([0, 18000])\r\n\r\nplt.figure()\r\nplt.pcolormesh(xf,pos_pol,np.abs(yf[:,1,:]),shading='gouraud',antialiased=True)\r\nplt.colorbar()\r\nplt.title('Right Ear')\r\nplt.xlabel('Frequency (Hz)')\r\nplt.ylabel('Polar angle (deg.)')\r\nplt.xlim([0, 18000])\r\n\r\n#%% phase\r\nplt.figure()\r\nplt.pcolormesh(xf,pos_pol,np.arctan2(np.imag(yf[:,0,:]),np.real(yf[:,0,:])),shading='gouraud',antialiased=True)\r\nplt.colorbar()\r\nplt.title('Left Ear')\r\nplt.xlabel('Frequency (Hz)')\r\nplt.ylabel('Polar angle (deg.)')\r\nplt.xlim([0, 18000])\r\n\r\nplt.figure()\r\nplt.pcolormesh(xf,pos_pol,np.arctan2(np.imag(yf[:,1,:]),np.real(yf[:,1,:])),shading='gouraud',antialiased=True)\r\nplt.colorbar()\r\nplt.title('Right Ear')\r\nplt.xlabel('Frequency (Hz)')\r\nplt.ylabel('Polar angle (deg.)')\r\nplt.xlim([0, 18000])\r\n" ]
[ [ "matplotlib.pyplot.colorbar", "matplotlib.pyplot.xlim", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "numpy.real", "matplotlib.pyplot.figure", "matplotlib.pyplot.ylabel", "numpy.argsort", "numpy.abs", "numpy.imag" ] ]
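A shape-level sketch of the FFT step in the HRTF script above, using a synthetic impulse-response array instead of a SOFA file; the sample rate, IR length, and direction count are invented. It shows how scipy.fft.rfftfreq and rfft produce the frequency axis and spectra that the pcolormesh calls plot, and that np.angle gives the same phase as the arctan2(imag, real) expression used there.

import numpy as np
import scipy.fft

SR, N, n_dirs = 48000, 256, 72            # assumed sample rate, IR length, and number of directions
IR = np.random.randn(n_dirs, 2, N)        # (measurement, ear, sample), shaped like Data_IR
xf = scipy.fft.rfftfreq(N, 1 / SR)        # frequency bin centres, shape (N//2 + 1,)
yf = scipy.fft.rfft(IR)                   # FFT along the last axis, shape (n_dirs, 2, N//2 + 1)

amplitude = np.abs(yf[:, 0, :])           # left-ear magnitude, as in the first amplitude plot
phase = np.angle(yf[:, 0, :])             # same as arctan2(imag(yf), real(yf)) in the script
assert xf.shape[0] == yf.shape[-1] == N // 2 + 1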
white-hat-vaibhs/malaria
[ "4110fa3bd4dabf573aa50451c776e807d18025b8" ]
[ "src/visualization.py" ]
[ "#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Dec 1 18:16:05 2017\r\n\r\n@author: Carlos Atico Ariza, PhD\r\n\"\"\"\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport itertools\r\n\r\ndef plot_confusion_matrix(cm1, classes, normalize=False, \r\n title='Confusion matrix', cmap=plt.cm.Blues, \r\n gradientbar=False, font={'size':12}):\r\n \"\"\"\r\n This function prints and plots the confusion matrix.\r\n Normalization can be applied by setting `normalize=True`.\r\n \"\"\"\r\n if normalize:\r\n cm1 = cm1.astype('float') / cm1.sum(axis=1)[:, np.newaxis]\r\n print(\"Normalized confusion matrix\")\r\n# else:\r\n# pass\r\n# print('Confusion matrix, without normalization')\r\n\r\n# print(cm1)\r\n plt.imshow(cm1, interpolation='nearest', cmap=cmap)\r\n plt.title(title, )\r\n if gradientbar:\r\n plt.colorbar()\r\n tick_marks = np.arange(len(classes))\r\n plt.xticks(tick_marks, classes) #rotation=45\r\n plt.yticks(tick_marks, classes)\r\n\r\n fmt = '.2f' if normalize else 'd'\r\n thresh = cm1.max() / 2.\r\n for i, j in itertools.product(range(cm1.shape[0]), range(cm1.shape[1])):\r\n plt.text(j, i, format(cm1[i, j], fmt),\r\n horizontalalignment=\"center\",\r\n color=\"white\" if cm1[i, j] > thresh else \"black\", fontdict = font)\r\n\r\n# plt.tight_layout()\r\n plt.ylabel('True label')\r\n plt.xlabel('Predicted label')" ]
[ [ "matplotlib.pyplot.colorbar", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "matplotlib.pyplot.yticks", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xticks", "matplotlib.pyplot.imshow" ] ]
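A minimal call of the plot_confusion_matrix helper above might look as follows; the counts and class names are made up, and the import assumes the script is importable as src.visualization from the repository root listed in the record.

import numpy as np
import matplotlib.pyplot as plt
from src.visualization import plot_confusion_matrix  # module path taken from the record above

cm = np.array([[50, 3],
               [7, 40]])                  # rows are true labels, columns are predictions
plt.figure(figsize=(4, 4))
plot_confusion_matrix(cm, classes=['uninfected', 'parasitized'],
                      normalize=True, title='Normalized confusion matrix')
plt.show()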
ananiask8/FFWM
[ "117f593783da67da9dc910a751910760497ef37f" ]
[ "ffwm/data/face_dataset.py" ]
[ "import os, cv2, torch\nimport numpy as np\nfrom os.path import basename, join\nfrom ffwm.data.base_dataset import BaseDataset\n\n\"\"\"\nFace Dataset\n\"\"\"\n\ndef s2f(file):\n \"\"\"\n get corresponding frontal image name: only for multipie\n \"\"\"\n path, name = os.path.split(file)\n ss = name.split('_')\n name = '{}_{}_{}_{}_{}'.format(ss[0], ss[1], ss[2], '051', ss[4])\n return name\n\n\nclass FaceDataset(BaseDataset):\n def __init__(self, opt, isval=False):\n BaseDataset.__init__(self, opt)\n self.preload = opt.preload\n self.load_size = opt.load_size\n self.opt = opt\n self.isval = isval # train or test dataset\n self.image_dict = {} # for preload\n self.mask_dict = {} # for preload\n self.pairs = self.get_pairs()\n\n def __getitem__(self, index):\n if self.isval:\n return self.get_test_item(index)\n else:\n return self.get_train_item(index)\n\n def get_test_item(self, index):\n path_S, path_F = self.pairs[index]\n img_S = self.image_transform(path_S, preload=self.preload)\n img_F = self.image_transform(path_F, preload=self.preload)\n img_S = torch.from_numpy(img_S.transpose((2, 0, 1)).astype('float32')).div(255)\n img_F = torch.from_numpy(img_F.transpose((2, 0, 1)).astype('float32')).div(255)\n return {'img_S': img_S, 'img_F': img_F, 'input_path': path_S}\n\n def get_train_item(self, index):\n # Flip Augment\n if index >= len(self.pairs):\n _index = index % len(self.pairs)\n else:\n _index = index\n\n path_S, path_F = self.pairs[_index]\n key_S, key_F = path_S[:-7], path_F[:-7]\n\n lm_S = self.lm_dicts['lm_S'][key_S].copy()\n lm_F = self.lm_dicts['lm_F'][key_F].copy()\n gate = self.lm_dicts['gate'][key_S].copy()\n\n img_S = self.image_transform(path_S, preload=self.preload)\n img_F = self.image_transform(path_F, preload=self.preload)\n mask_S = self.mask_transform(path_S, preload=self.preload)\n mask_F = self.mask_transform(path_F, preload=self.preload)\n\n # Flip image, mask, and landmark\n if index >= len(self.pairs):\n lm_S = np.hstack((127 - lm_S[:, 0:1], lm_S[:, 1:2]))\n lm_F = np.hstack((127 - lm_F[:, 0:1], lm_F[:, 1:2]))\n img_S = img_S[:, ::-1, :]\n img_F = img_F[:, ::-1, :]\n mask_S = mask_S[:, ::-1, :]\n mask_F = mask_F[:, ::-1, :]\n\n # random rotation\n if self.opt.aug:\n img_S, mask_S, lm_S = self.aug_transform(img_S, mask_S, lm_S)\n\n img_S = torch.from_numpy(img_S.transpose((2, 0, 1)).astype('float32')).div(255)\n img_F = torch.from_numpy(img_F.transpose((2, 0, 1)).astype('float32')).div(255)\n mask_S = torch.from_numpy(mask_S.transpose((2, 0, 1)).astype('float32')).div(255)\n mask_F = torch.from_numpy(mask_F.transpose((2, 0, 1)).astype('float32')).div(255)\n\n lm_S = torch.from_numpy(lm_S).long()\n lm_S = torch.clamp(lm_S, 0, self.load_size - 1)\n lm_F = torch.from_numpy(lm_F).long()\n lm_F = torch.clamp(lm_F, 0, self.load_size - 1)\n gate = torch.from_numpy(gate.astype('float32')).unsqueeze(1)\n\n return {'img_S': img_S, 'img_F': img_F, 'input_path': path_S,\n 'lm_S': lm_S, 'lm_F': lm_F, 'gate': gate,\n 'mask_S': mask_S, 'mask_F': mask_F}\n\n def image_transform(self, file, preload=False):\n if preload:\n return self.image_dict[file].copy().astype('float32')\n else:\n image_path = join(self.base_path, 'images', file)\n img = cv2.imread(image_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return img.astype('uint8')\n\n def mask_transform(self, file, preload=False):\n if preload:\n return self.mask_dict[file].copy().astype('float32')\n else:\n mask_path = join(self.base_path, 'masks', file)\n mask = cv2.imread(mask_path, 0)\n mask = mask[:, :, np.newaxis]\n 
return mask.astype('uint8')\n\n def aug_transform(self, img, mask, lm):\n ang = np.random.randint(-5, 5)\n ### rotation\n h, w = img.shape[:2]\n center = (w // 2, h // 2)\n mat = cv2.getRotationMatrix2D(center, int(ang), 1)\n img_aug = cv2.warpAffine(img, mat, (w, h))\n mask_aug = cv2.warpAffine(mask, mat, (w, h))\n mask_aug[mask_aug > 0] = 255\n mask_aug = mask_aug[:, :, np.newaxis]\n\n ### landmark\n lm_aug = lm.astype('float32')\n x0 = lm_aug[:, 0] - (self.load_size // 2)\n y0 = lm_aug[:, 1] - (self.load_size // 2)\n # Note: the angle shoud be -ang to ensure it is consistent with opencv\n ang_arc = -ang * np.pi / 180.0\n lm_aug[:, 0] = x0 * np.cos(ang_arc) - y0 * np.sin(ang_arc) + (self.load_size // 2)\n lm_aug[:, 1] = x0 * np.sin(ang_arc) + y0 * np.cos(ang_arc) + (self.load_size // 2)\n lm_aug = np.clip(lm_aug, 0, self.load_size)\n return img_aug, mask_aug, lm_aug\n\n def get_pairs(self):\n dataroot = join(self.opt.dataroot, self.opt.datamode)\n if self.opt.datamode == 'multipie':\n if self.isval:\n self.base_path = join(dataroot, 'test')\n self.files = os.listdir(join(self.base_path, 'images'))\n self.gallery_dict = self.get_gallery()\n else:\n self.base_path = join(dataroot, 'train')\n self.lm_dicts = np.load(join(self.base_path, 'landmarks.npy'), allow_pickle=True).item()\n self.files = os.listdir(join(self.base_path, 'images'))\n pairs = [(file, s2f(file)) for file in self.files]\n else: # LFW or others\n self.base_path = dataroot\n self.files = os.listdir(join(self.base_path, 'images'))\n pairs = [(file, file) for file in self.files] # no frontal file\n\n if self.preload: # preload images and masks to memory\n read_images(self)\n return pairs\n\n def get_gallery(self):\n if os.path.exists(join(self.base_path, 'gallery_list.npy')):\n gallery_list = np.load(join(self.base_path, 'gallery_list.npy'))\n else:\n _dict = {}\n np.random.shuffle(self.files)\n for k in self.files:\n if k[:3] not in _dict and k.strip().endswith('051_06.png'):\n _dict[k[:3]] = k\n gallery_list = _dict.values()\n gallery_dict = {}\n for g in gallery_list:\n gallery = self.image_transform(g)\n gallery = torch.from_numpy(gallery.transpose((2, 0, 1)).astype('float32')).div(255)\n gallery_dict[g[:3]] = torch.mean(gallery, (0, ), keepdim=True)\n return gallery_dict\n\n def __len__(self):\n if self.isval:\n return len(self.pairs)\n else:\n return len(self.pairs) * 2\n\n\n#### multiprocessing\n\ndef iter_obj(num, objs):\n for i in range(num):\n yield (i, objs)\n\ndef imreader(arg):\n i, obj = arg\n for _ in range(3):\n try:\n obj.image_dict[obj.files[i]] = obj.image_transform(obj.files[i])\n if not obj.isval:\n obj.mask_dict[obj.files[i]] = obj.mask_transform(obj.files[i])\n failed = False\n break\n except Exception as e:\n print(e)\n failed = True\n if failed: print('%s fails!' % obj.files[i])\n\ndef read_images(obj):\n # can change to `from multiprocessing import Pool`, but less efficient and\n # NOTE: `multiprocessing.Pool` will duplicate given object for each process\n # therefore using `multiprocessing.dummy.Pool` is more convenient/efficient\n from multiprocessing.dummy import Pool\n from tqdm import tqdm\n print('Starting to load images via multiple imreaders')\n pool = Pool() # use all threads by default\n for _ in tqdm(pool.imap(imreader, iter_obj(len(obj.files), obj)), total=len(obj.files)):\n pass\n pool.close()\n pool.join()" ]
[ [ "numpy.sin", "numpy.random.shuffle", "torch.clamp", "torch.from_numpy", "numpy.random.randint", "numpy.cos", "numpy.clip", "numpy.hstack", "torch.mean" ] ]
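The landmark rotation in aug_transform above negates the angle before applying the textbook rotation formulas, so that the landmarks follow the same transform cv2.getRotationMatrix2D builds for the image (OpenCV measures positive angles counter-clockwise with the y axis pointing down). A small numeric check, with an invented landmark and angle:

import numpy as np
import cv2

size, ang = 128, 5                               # image size and angle chosen for illustration
center = (size // 2, size // 2)
mat = cv2.getRotationMatrix2D(center, ang, 1)    # same call as in aug_transform

lm = np.array([40.0, 70.0])                      # a made-up landmark (x, y)
via_matrix = mat @ np.array([lm[0], lm[1], 1.0]) # apply OpenCV's 2x3 affine matrix directly

# Manual formula from aug_transform, with the sign flipped on the angle.
ang_arc = -ang * np.pi / 180.0
x0, y0 = lm[0] - size // 2, lm[1] - size // 2
manual = np.array([x0 * np.cos(ang_arc) - y0 * np.sin(ang_arc) + size // 2,
                   x0 * np.sin(ang_arc) + y0 * np.cos(ang_arc) + size // 2])

assert np.allclose(via_matrix, manual)           # both yield the same rotated landmark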
Surya2709/Steganography-With-QR-Codes
[ "e363d596c85af81646f72ce7b96f49aef51241cf" ]
[ "decoder.py" ]
[ "from pathlib import Path\nimport qrtools\nimport os\nimport numpy as np\nfrom PIL import Image\nfrom pyzbar.pyzbar import decode\n\nclass decoder:\n    def __init__(self,shapekey,Folderpath):\n        self.shapekey=shapekey\n        self.Folderpath=Folderpath\n        self.decode()\n\n    def decode(self):\n\n        one_d_array=[]\n        # QR tile images are read in creation order so the pixel stream is reassembled in sequence\n        paths = sorted(Path(self.Folderpath).iterdir(), key=os.path.getmtime)\n        for i in range(len(paths)):\n            filename=str(paths[i])\n            if \"code\" in filename:\n                file=filename\n                result=decode(Image.open(file))\n\n                for i in result:\n                    c=i.data.decode(\"utf-8\")\n                    d=len(c)\n                    cleaned_c=c[1:d-1]\n                    list=cleaned_c.split(\",\")\n                    for i in list:\n                        one_d_array.append(int(i))\n\n        def extract_shapekey(c):\n            # parse a \"(rows,cols)\" string into an integer tuple\n            d=c.split(\",\")\n            e=d[0]\n            f=d[1]\n            g=e.split(\"(\")\n            h=f.split(\")\")\n            part_1=g[1]\n            part_2=h[0]\n            shapekey=(int(part_1),int(part_2))\n            return shapekey\n\n        shapekey=extract_shapekey(self.shapekey)\n        cleaned_one_d_array=np.array(one_d_array)\n        #reforming the array back to the original shape\n        extracted_array=np.asarray(cleaned_one_d_array).reshape(shapekey)\n        #convert the array back to an image and show it\n        #extracted_array.astype(\"float64\")\n        extracted_image=Image.fromarray(np.uint8(extracted_array)).convert('RGB')\n        extracted_image.show()\n        # Image.save requires an output path; this filename is a placeholder\n        extracted_image.save(\"extracted_image.png\")\n" ]
[ [ "numpy.uint8", "numpy.array", "numpy.asarray" ] ]
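The reassembly step of the decoder above, in isolation: parse the "(rows,cols)" shape key, reshape the decoded integer stream, and write the recovered image. The shape, pixel values, and output filename below are synthetic placeholders rather than data decoded from QR tiles.

import numpy as np
from PIL import Image

shapekey_str = "(32,48)"                               # same "(rows,cols)" text format the decoder expects
rows, cols = (int(v) for v in shapekey_str.strip("()").split(","))

one_d_array = np.random.randint(0, 256, rows * cols)   # stand-in for the integers decoded from the QR tiles
extracted_array = np.asarray(one_d_array).reshape((rows, cols))
Image.fromarray(np.uint8(extracted_array)).convert("RGB").save("reassembled.png")  # placeholder output name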
MSadeghzadehG/pythonic_lucene
[ "3cb50a46b127d189d1e161bad0d73b280fb8810d" ]
[ "search_engine/search/similarity.py" ]
[ "import numpy as np\n\n\nclass Similarity:\n \"\"\"\n Similarity defines the components of Lucene scoring.\n\n Doc: https://lucene.apache.org/core/8_11_0/core/org/apache/lucene/search/similarities/Similarity.html\n Code: https://github.com/apache/lucene/blob/main/lucene/core/src/java/org/apache/lucene/search/similarities/Similarity.java\n \"\"\"\n\n pass\n\n\nclass TFIDFSimilarity(Similarity):\n \"\"\"\n Doc: https://lucene.apache.org/core/8_11_0/core/org/apache/lucene/search/similarities/TFIDFSimilarity.html\n Code: https://github.com/apache/lucene/blob/main/lucene/core/src/java/org/apache/lucene/search/similarities/TFIDFSimilarity.java\n \"\"\"\n\n def tf(self, freq, norm):\n return np.log(1 + (freq / norm))\n\n def idf(self, doc_freq, all_doc_count):\n return np.log(all_doc_count / (doc_freq + 1))\n\n def score(self, boost, idf_val, freq, norm):\n query_weight = boost * idf_val\n return self.tf(freq, norm) * query_weight\n" ]
[ [ "numpy.log" ] ]
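A worked call of the TFIDFSimilarity scorer above, with invented counts; the import path assumes the package layout given by the record's file_path.

import numpy as np
from search_engine.search.similarity import TFIDFSimilarity  # layout per the record above

sim = TFIDFSimilarity()
freq, norm = 3, 100           # the term occurs 3 times in a 100-token field (made-up numbers)
doc_freq, n_docs = 10, 1000   # the term appears in 10 of 1000 indexed documents

tf = sim.tf(freq, norm)                   # log(1 + 3/100)  ~ 0.0296
idf = sim.idf(doc_freq, n_docs)           # log(1000 / 11)  ~ 4.51
score = sim.score(boost=1.0, idf_val=idf, freq=freq, norm=norm)
assert np.isclose(score, tf * idf)        # score = tf * (boost * idf)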
joneswong/AutoGraph
[ "62ea5dd250424ace5c6d6aa72cb196ed335dc2e3" ]
[ "code_submission/schedulers/genetic_optimization.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom scipy.stats import truncnorm\nimport numpy as np\nimport random\nimport copy\n\nfrom schedulers import Scheduler\nfrom spaces import Categoric, Numeric\n\n\nclass GeneticOptimizer(Scheduler):\n \"\"\"Apply Bayesian optimization for HPO\"\"\"\n\n def __init__(self,\n hyperparam_space,\n early_stopper,\n ensembler,\n working_folder,\n max_population=3):\n\n super(GeneticOptimizer, self).__init__(\n hyperparam_space, early_stopper, ensembler, working_folder)\n\n for key, value in self._hyperparam_space.items():\n if isinstance(value, Categoric):\n sorted_categories = sorted(value.categories)\n self._hyperparam_space[key].categories = sorted_categories\n elif isinstance(value, Numeric):\n assert value.low <= value.high\n else:\n raise NotImplementedError\n\n self._population = []\n self._max_population = max_population\n self._cur_config = self.get_default()\n\n def get_next_config(self):\n self._early_stopper.reset()\n\n if len(self._results) != 0:\n self.gen_next_population()\n crossover_config = self.crossover()\n self._cur_config = self.mutation(crossover_config)\n\n return self._cur_config\n\n def crossover(self):\n s1 = random.randint(0, len(self._population)-1)\n s2 = random.randint(0, len(self._population)-1)\n if s1 == s2:\n h_son = self._population[s1][0]\n else:\n h1 = self._population[s1][0]\n h2 = self._population[s2][0]\n h_son = {}\n for key in h1.keys():\n h_son[key] = h1[key] if random.random() <= 0.5 else h2[key]\n return h_son\n\n def mutation(self, h):\n for key, value in h.items():\n space = self._hyperparam_space[key]\n if isinstance(space, Categoric):\n if value not in list(space.categories):\n new_index = random.randint(0, len(space.categories)-1)\n else:\n cur_index = list(space.categories).index(value)\n if type(value) is int or type(value) is float:\n clip_a, clip_b, mean, std = 0, len(space.categories)-1e-10, cur_index+0.5, len(space.categories)/6\n a, b = (clip_a - mean) / std, (clip_b - mean) / std\n new_index = int(truncnorm.rvs(a, b, mean, std, random_state=random.randint(0, 1e5)))\n else:\n if random.random() < 0.5:\n new_index = random.randint(0, len(space.categories)-1)\n else:\n new_index = cur_index\n h[key] = space.categories[new_index]\n elif isinstance(space, Numeric):\n if space.high-space.low != 0:\n clip_a, clip_b, mean, std = space.low, space.high, value, (space.high-space.low)/6\n a, b = (clip_a - mean) / std, (clip_b - mean) / std\n new_value = truncnorm.rvs(a, b, mean, std, random_state=random.randint(0, 1e5))\n h[key] = new_value\n else:\n raise NotImplementedError\n return h\n\n def gen_next_population(self):\n performance = self._results[-1][2]['accuracy']\n if len(self._population) < self._max_population:\n self._population.append((copy.deepcopy(self._cur_config), performance))\n else:\n replaced_index = int(np.argmin([item[1] for item in self._population]))\n self._population[replaced_index] = (copy.deepcopy(self._cur_config), performance)\n\n def aug_hyperparam_space(self, hyperparam_name, hyperparam_desc, hyperparam_values=None):\n super(GeneticOptimizer, self).aug_hyperparam_space(hyperparam_name, hyperparam_desc, hyperparam_values)\n if hyperparam_name not in self._cur_config:\n self._cur_config[hyperparam_name] = hyperparam_desc.default_value\n" ]
[ [ "numpy.argmin" ] ]
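The numeric mutation in GeneticOptimizer.mutation above draws a new value from a normal centred on the current one with standard deviation (high - low)/6, truncated to the search interval by converting the bounds into standard-normal units. A standalone draw with invented bounds and a fixed seed:

from scipy.stats import truncnorm

low, high, value = 1e-4, 1e-1, 1e-2              # invented Numeric bounds and current setting
mean, std = value, (high - low) / 6.0
a, b = (low - mean) / std, (high - mean) / std   # truncation points in standard-normal units

new_value = truncnorm.rvs(a, b, mean, std, random_state=0)
assert low <= new_value <= high                  # the mutated value always stays inside the space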